-rw-r--r--Documentation/00-INDEX2
-rw-r--r--Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_24x76
-rw-r--r--Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_gpci12
-rw-r--r--Documentation/ABI/testing/sysfs-class-cxl129
-rw-r--r--Documentation/DocBook/kernel-locking.tmpl2
-rw-r--r--Documentation/DocBook/media/v4l/compat.xml6
-rw-r--r--Documentation/DocBook/media/v4l/controls.xml55
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-packed-rgb.xml52
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-dqevent.xml7
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-edid.xml14
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-subscribe-event.xml2
-rw-r--r--Documentation/DocBook/writing-an-alsa-driver.tmpl28
-rw-r--r--Documentation/RCU/stallwarn.txt33
-rw-r--r--Documentation/devicetree/bindings/hwmon/ntc_thermistor.txt3
-rw-r--r--Documentation/devicetree/bindings/media/hix5hd2-ir.txt25
-rw-r--r--Documentation/devicetree/bindings/mmc/mmc.txt2
-rw-r--r--Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt6
-rw-r--r--Documentation/devicetree/bindings/mmc/tmio_mmc.txt3
-rw-r--r--Documentation/devicetree/bindings/net/apm-xgene-enet.txt4
-rw-r--r--Documentation/devicetree/bindings/net/micrel.txt6
-rw-r--r--Documentation/devicetree/bindings/pci/fsl,pci.txt27
-rw-r--r--Documentation/devicetree/bindings/sound/adi,ssm2602.txt19
-rw-r--r--Documentation/devicetree/bindings/sound/cs35l32.txt62
-rw-r--r--Documentation/devicetree/bindings/sound/es8328.txt38
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,esai.txt3
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,ssi.txt8
-rw-r--r--Documentation/devicetree/bindings/sound/fsl-asoc-card.txt82
-rw-r--r--Documentation/devicetree/bindings/sound/fsl-sai.txt30
-rw-r--r--Documentation/devicetree/bindings/sound/imx-audio-es8328.txt60
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra-audio-max98090.txt1
-rw-r--r--Documentation/devicetree/bindings/sound/rt5677.txt59
-rw-r--r--Documentation/devicetree/bindings/sound/simple-card.txt4
-rw-r--r--Documentation/devicetree/bindings/sound/ssm4567.txt15
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt5
-rw-r--r--Documentation/devicetree/booting-without-of.txt53
-rw-r--r--Documentation/devicetree/dynamic-resolution-notes.txt25
-rw-r--r--Documentation/devicetree/of_selftest.txt30
-rwxr-xr-xDocumentation/dvb/get_dvb_firmware24
-rw-r--r--Documentation/filesystems/Locking11
-rw-r--r--Documentation/filesystems/vfs.txt9
-rw-r--r--Documentation/hwmon/k10temp2
-rw-r--r--Documentation/hwmon/menf21bmc50
-rw-r--r--Documentation/ioctl/ioctl-number.txt1
-rw-r--r--Documentation/kernel-parameters.txt70
-rw-r--r--Documentation/kprobes.txt1
-rw-r--r--Documentation/locking/lockdep-design.txt (renamed from Documentation/lockdep-design.txt)0
-rw-r--r--Documentation/locking/lockstat.txt (renamed from Documentation/lockstat.txt)2
-rw-r--r--Documentation/locking/locktorture.txt147
-rw-r--r--Documentation/locking/mutex-design.txt (renamed from Documentation/mutex-design.txt)6
-rw-r--r--Documentation/locking/rt-mutex-design.txt (renamed from Documentation/rt-mutex-design.txt)0
-rw-r--r--Documentation/locking/rt-mutex.txt (renamed from Documentation/rt-mutex.txt)0
-rw-r--r--Documentation/locking/spinlocks.txt (renamed from Documentation/spinlocks.txt)14
-rw-r--r--Documentation/locking/ww-mutex-design.txt (renamed from Documentation/ww-mutex-design.txt)0
-rw-r--r--Documentation/memory-barriers.txt128
-rw-r--r--Documentation/networking/filter.txt4
-rw-r--r--Documentation/powerpc/00-INDEX2
-rw-r--r--Documentation/powerpc/cxl.txt379
-rw-r--r--Documentation/scheduler/sched-deadline.txt340
-rw-r--r--Documentation/security/keys.txt65
-rw-r--r--Documentation/video4linux/vivid.txt1111
-rw-r--r--Documentation/x86/x86_64/mm.txt2
-rw-r--r--MAINTAINERS68
-rw-r--r--arch/Kconfig11
-rw-r--r--arch/alpha/include/asm/atomic.h217
-rw-r--r--arch/alpha/kernel/osf_sys.c23
-rw-r--r--arch/arc/include/asm/atomic.h184
-rw-r--r--arch/arm/Kconfig2
-rw-r--r--arch/arm/boot/dts/exynos5420-arndale-octa.dts3
-rw-r--r--arch/arm/include/asm/atomic.h307
-rw-r--r--arch/arm/include/asm/dma-mapping.h16
-rw-r--r--arch/arm/kernel/process.c12
-rw-r--r--arch/arm/kernel/ptrace.c7
-rw-r--r--arch/arm/kernel/topology.c4
-rw-r--r--arch/arm/mach-shmobile/board-koelsch.c3
-rw-r--r--arch/arm/mach-shmobile/board-lager.c2
-rw-r--r--arch/arm64/Kconfig2
-rw-r--r--arch/arm64/boot/dts/apm-mustang.dts4
-rw-r--r--arch/arm64/boot/dts/apm-storm.dtsi29
-rw-r--r--arch/arm64/include/asm/atomic.h201
-rw-r--r--arch/arm64/kernel/process.c3
-rw-r--r--arch/avr32/include/asm/atomic.h125
-rw-r--r--arch/cris/arch-v10/drivers/sync_serial.c1
-rw-r--r--arch/cris/arch-v32/drivers/sync_serial.c1
-rw-r--r--arch/cris/include/asm/atomic.h59
-rw-r--r--arch/frv/include/asm/atomic.h2
-rw-r--r--arch/hexagon/include/asm/atomic.h68
-rw-r--r--arch/ia64/include/asm/atomic.h192
-rw-r--r--arch/ia64/include/asm/processor.h1
-rw-r--r--arch/m32r/include/asm/atomic.h145
-rw-r--r--arch/m68k/include/asm/atomic.h111
-rw-r--r--arch/metag/include/asm/atomic_lnkget.h121
-rw-r--r--arch/metag/include/asm/atomic_lock1.h76
-rw-r--r--arch/mips/include/asm/atomic.h561
-rw-r--r--arch/mips/include/asm/processor.h6
-rw-r--r--arch/mips/kernel/ptrace.c2
-rw-r--r--arch/mn10300/include/asm/atomic.h125
-rw-r--r--arch/parisc/include/asm/atomic.h117
-rw-r--r--arch/parisc/include/uapi/asm/signal.h16
-rw-r--r--arch/powerpc/Kconfig11
-rw-r--r--arch/powerpc/Makefile1
-rw-r--r--arch/powerpc/boot/Makefile5
-rw-r--r--arch/powerpc/boot/dts/fsl/t2081si-post.dtsi4
-rw-r--r--arch/powerpc/boot/dts/fsl/t4240si-post.dtsi4
-rw-r--r--arch/powerpc/boot/dts/t1040rdb.dts48
-rw-r--r--arch/powerpc/boot/dts/t1042rdb.dts48
-rw-r--r--arch/powerpc/boot/dts/t1042rdb_pi.dts57
-rw-r--r--arch/powerpc/boot/dts/t104xrdb.dtsi156
-rw-r--r--arch/powerpc/configs/cell_defconfig1
-rw-r--r--arch/powerpc/configs/celleb_defconfig1
-rw-r--r--arch/powerpc/configs/corenet32_smp_defconfig2
-rw-r--r--arch/powerpc/configs/corenet64_smp_defconfig46
-rw-r--r--arch/powerpc/configs/g5_defconfig1
-rw-r--r--arch/powerpc/configs/maple_defconfig1
-rw-r--r--arch/powerpc/configs/mpc85xx_defconfig4
-rw-r--r--arch/powerpc/configs/mpc85xx_smp_defconfig4
-rw-r--r--arch/powerpc/configs/mpc86xx_defconfig3
-rw-r--r--arch/powerpc/configs/pasemi_defconfig1
-rw-r--r--arch/powerpc/configs/ppc64_defconfig1
-rw-r--r--arch/powerpc/include/asm/atomic.h198
-rw-r--r--arch/powerpc/include/asm/bug.h1
-rw-r--r--arch/powerpc/include/asm/copro.h29
-rw-r--r--arch/powerpc/include/asm/cputime.h2
-rw-r--r--arch/powerpc/include/asm/dma-mapping.h1
-rw-r--r--arch/powerpc/include/asm/eeh.h39
-rw-r--r--arch/powerpc/include/asm/hydra.h1
-rw-r--r--arch/powerpc/include/asm/irq.h5
-rw-r--r--arch/powerpc/include/asm/kexec.h1
-rw-r--r--arch/powerpc/include/asm/machdep.h2
-rw-r--r--arch/powerpc/include/asm/mmu-hash64.h10
-rw-r--r--arch/powerpc/include/asm/opal.h45
-rw-r--r--arch/powerpc/include/asm/page_64.h43
-rw-r--r--arch/powerpc/include/asm/pgtable-ppc32.h6
-rw-r--r--arch/powerpc/include/asm/pgtable-ppc64-4k.h2
-rw-r--r--arch/powerpc/include/asm/pgtable-ppc64.h6
-rw-r--r--arch/powerpc/include/asm/pgtable.h3
-rw-r--r--arch/powerpc/include/asm/plpar_wrappers.h12
-rw-r--r--arch/powerpc/include/asm/pnv-pci.h31
-rw-r--r--arch/powerpc/include/asm/prom.h2
-rw-r--r--arch/powerpc/include/asm/reg.h3
-rw-r--r--arch/powerpc/include/asm/rio.h1
-rw-r--r--arch/powerpc/include/asm/spu.h5
-rw-r--r--arch/powerpc/include/asm/sstep.h62
-rw-r--r--arch/powerpc/include/asm/tsi108.h4
-rw-r--r--arch/powerpc/include/asm/udbg.h1
-rw-r--r--arch/powerpc/include/asm/word-at-a-time.h112
-rw-r--r--arch/powerpc/include/asm/xics.h1
-rw-r--r--arch/powerpc/kernel/Makefile3
-rw-r--r--arch/powerpc/kernel/crash_dump.c1
-rw-r--r--arch/powerpc/kernel/dma-swiotlb.c8
-rw-r--r--arch/powerpc/kernel/dma.c47
-rw-r--r--arch/powerpc/kernel/eeh.c269
-rw-r--r--arch/powerpc/kernel/eeh_driver.c106
-rw-r--r--arch/powerpc/kernel/eeh_pe.c23
-rw-r--r--arch/powerpc/kernel/eeh_sysfs.c41
-rw-r--r--arch/powerpc/kernel/head_8xx.S150
-rw-r--r--arch/powerpc/kernel/hw_breakpoint.c2
-rw-r--r--arch/powerpc/kernel/ibmebus.c2
-rw-r--r--arch/powerpc/kernel/idle_power7.S2
-rw-r--r--arch/powerpc/kernel/irq.c6
-rw-r--r--arch/powerpc/kernel/legacy_serial.c2
-rw-r--r--arch/powerpc/kernel/module_32.c31
-rw-r--r--arch/powerpc/kernel/module_64.c36
-rw-r--r--arch/powerpc/kernel/nvram_64.c2
-rw-r--r--arch/powerpc/kernel/of_platform.c2
-rw-r--r--arch/powerpc/kernel/pci-common.c3
-rw-r--r--arch/powerpc/kernel/pci_of_scan.c2
-rw-r--r--arch/powerpc/kernel/ppc_ksyms.c192
-rw-r--r--arch/powerpc/kernel/ppc_ksyms_32.c61
-rw-r--r--arch/powerpc/kernel/process.c2
-rw-r--r--arch/powerpc/kernel/prom.c13
-rw-r--r--arch/powerpc/kernel/prom_init_check.sh22
-rw-r--r--arch/powerpc/kernel/ptrace.c2
-rw-r--r--arch/powerpc/kernel/rtasd.c2
-rw-r--r--arch/powerpc/kernel/setup-common.c7
-rw-r--r--arch/powerpc/kernel/setup_32.c2
-rw-r--r--arch/powerpc/kernel/setup_64.c32
-rw-r--r--arch/powerpc/kernel/smp.c11
-rw-r--r--arch/powerpc/kernel/time.c5
-rw-r--r--arch/powerpc/lib/Makefile2
-rw-r--r--arch/powerpc/lib/feature-fixups.c2
-rw-r--r--arch/powerpc/lib/ppc_ksyms.c39
-rw-r--r--arch/powerpc/lib/sstep.c996
-rw-r--r--arch/powerpc/mm/Makefile1
-rw-r--r--arch/powerpc/mm/copro_fault.c (renamed from arch/powerpc/platforms/cell/spu_fault.c)69
-rw-r--r--arch/powerpc/mm/fault.c48
-rw-r--r--arch/powerpc/mm/hash_native_64.c6
-rw-r--r--arch/powerpc/mm/hash_utils_64.c160
-rw-r--r--arch/powerpc/mm/init_32.c4
-rw-r--r--arch/powerpc/mm/init_64.c3
-rw-r--r--arch/powerpc/mm/mem.c68
-rw-r--r--arch/powerpc/mm/numa.c27
-rw-r--r--arch/powerpc/mm/pgtable.c2
-rw-r--r--arch/powerpc/mm/slb.c3
-rw-r--r--arch/powerpc/mm/slice.c12
-rw-r--r--arch/powerpc/oprofile/backtrace.c1
-rw-r--r--arch/powerpc/perf/core-book3s.c18
-rw-r--r--arch/powerpc/perf/hv-24x7.c156
-rw-r--r--arch/powerpc/platforms/40x/ep405.c2
-rw-r--r--arch/powerpc/platforms/40x/ppc40x_simple.c2
-rw-r--r--arch/powerpc/platforms/40x/virtex.c2
-rw-r--r--arch/powerpc/platforms/40x/walnut.c2
-rw-r--r--arch/powerpc/platforms/44x/Kconfig6
-rw-r--r--arch/powerpc/platforms/44x/canyonlands.c2
-rw-r--r--arch/powerpc/platforms/44x/ebony.c2
-rw-r--r--arch/powerpc/platforms/44x/iss4xx.c2
-rw-r--r--arch/powerpc/platforms/44x/ppc44x_simple.c2
-rw-r--r--arch/powerpc/platforms/44x/ppc476.c2
-rw-r--r--arch/powerpc/platforms/44x/sam440ep.c2
-rw-r--r--arch/powerpc/platforms/44x/virtex.c2
-rw-r--r--arch/powerpc/platforms/44x/warp.c2
-rw-r--r--arch/powerpc/platforms/512x/mpc512x_shared.c2
-rw-r--r--arch/powerpc/platforms/52xx/lite5200.c4
-rw-r--r--arch/powerpc/platforms/52xx/media5200.c2
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_common.c12
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c2
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_pic.c4
-rw-r--r--arch/powerpc/platforms/82xx/ep8248e.c2
-rw-r--r--arch/powerpc/platforms/82xx/km82xx.c2
-rw-r--r--arch/powerpc/platforms/82xx/mpc8272_ads.c2
-rw-r--r--arch/powerpc/platforms/82xx/pq2fads.c2
-rw-r--r--arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c2
-rw-r--r--arch/powerpc/platforms/83xx/misc.c2
-rw-r--r--arch/powerpc/platforms/83xx/mpc834x_itx.c2
-rw-r--r--arch/powerpc/platforms/83xx/suspend.c4
-rw-r--r--arch/powerpc/platforms/85xx/Kconfig2
-rw-r--r--arch/powerpc/platforms/85xx/common.c2
-rw-r--r--arch/powerpc/platforms/85xx/corenet_generic.c14
-rw-r--r--arch/powerpc/platforms/85xx/ppa8548.c2
-rw-r--r--arch/powerpc/platforms/85xx/qemu_e500.c10
-rw-r--r--arch/powerpc/platforms/85xx/sgy_cts1000.c4
-rw-r--r--arch/powerpc/platforms/86xx/gef_ppc9a.c2
-rw-r--r--arch/powerpc/platforms/86xx/gef_sbc310.c2
-rw-r--r--arch/powerpc/platforms/86xx/gef_sbc610.c2
-rw-r--r--arch/powerpc/platforms/86xx/mpc8610_hpcd.c2
-rw-r--r--arch/powerpc/platforms/86xx/mpc86xx_hpcn.c2
-rw-r--r--arch/powerpc/platforms/86xx/sbc8641d.c2
-rw-r--r--arch/powerpc/platforms/8xx/adder875.c2
-rw-r--r--arch/powerpc/platforms/8xx/ep88xc.c2
-rw-r--r--arch/powerpc/platforms/8xx/mpc86xads_setup.c2
-rw-r--r--arch/powerpc/platforms/8xx/mpc885ads_setup.c2
-rw-r--r--arch/powerpc/platforms/8xx/tqm8xx_setup.c2
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype6
-rw-r--r--arch/powerpc/platforms/cell/Kconfig1
-rw-r--r--arch/powerpc/platforms/cell/Makefile2
-rw-r--r--arch/powerpc/platforms/cell/celleb_pci.c2
-rw-r--r--arch/powerpc/platforms/cell/celleb_setup.c2
-rw-r--r--arch/powerpc/platforms/cell/spu_base.c55
-rw-r--r--arch/powerpc/platforms/cell/spufs/fault.c4
-rw-r--r--arch/powerpc/platforms/chrp/setup.c2
-rw-r--r--arch/powerpc/platforms/embedded6xx/gamecube.c2
-rw-r--r--arch/powerpc/platforms/embedded6xx/linkstation.c2
-rw-r--r--arch/powerpc/platforms/embedded6xx/mvme5100.c2
-rw-r--r--arch/powerpc/platforms/embedded6xx/storcenter.c2
-rw-r--r--arch/powerpc/platforms/embedded6xx/wii.c2
-rw-r--r--arch/powerpc/platforms/pasemi/gpio_mdio.c2
-rw-r--r--arch/powerpc/platforms/pasemi/setup.c2
-rw-r--r--arch/powerpc/platforms/powermac/setup.c8
-rw-r--r--arch/powerpc/platforms/powernv/eeh-ioda.c226
-rw-r--r--arch/powerpc/platforms/powernv/eeh-powernv.c26
-rw-r--r--arch/powerpc/platforms/powernv/opal-dump.c18
-rw-r--r--arch/powerpc/platforms/powernv/opal-elog.c4
-rw-r--r--arch/powerpc/platforms/powernv/opal-lpc.c4
-rw-r--r--arch/powerpc/platforms/powernv/opal-nvram.c2
-rw-r--r--arch/powerpc/platforms/powernv/opal-rtc.c15
-rw-r--r--arch/powerpc/platforms/powernv/opal-tracepoints.c2
-rw-r--r--arch/powerpc/platforms/powernv/opal-wrappers.S3
-rw-r--r--arch/powerpc/platforms/powernv/opal.c6
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c290
-rw-r--r--arch/powerpc/platforms/powernv/pci.c11
-rw-r--r--arch/powerpc/platforms/powernv/pci.h4
-rw-r--r--arch/powerpc/platforms/powernv/powernv.h6
-rw-r--r--arch/powerpc/platforms/powernv/setup.c11
-rw-r--r--arch/powerpc/platforms/powernv/smp.c8
-rw-r--r--arch/powerpc/platforms/powernv/subcore.c1
-rw-r--r--arch/powerpc/platforms/pseries/cmm.c1
-rw-r--r--arch/powerpc/platforms/pseries/dlpar.c29
-rw-r--r--arch/powerpc/platforms/pseries/eeh_pseries.c40
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-cpu.c10
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-memory.c1
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c51
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c4
-rw-r--r--arch/powerpc/platforms/pseries/nvram.c12
-rw-r--r--arch/powerpc/platforms/pseries/pci.c1
-rw-r--r--arch/powerpc/platforms/pseries/ras.c2
-rw-r--r--arch/powerpc/platforms/pseries/setup.c2
-rw-r--r--arch/powerpc/sysdev/axonram.c2
-rw-r--r--arch/powerpc/sysdev/dcr.c1
-rw-r--r--arch/powerpc/sysdev/fsl_85xx_l2ctlr.c2
-rw-r--r--arch/powerpc/sysdev/fsl_msi.c95
-rw-r--r--arch/powerpc/sysdev/fsl_msi.h4
-rw-r--r--arch/powerpc/sysdev/fsl_pci.c3
-rw-r--r--arch/powerpc/sysdev/mpic.c2
-rw-r--r--arch/powerpc/sysdev/msi_bitmap.c42
-rw-r--r--arch/powerpc/sysdev/mv64x60_dev.c2
-rw-r--r--arch/powerpc/sysdev/pmi.c2
-rw-r--r--arch/powerpc/sysdev/xics/icp-native.c25
-rw-r--r--arch/powerpc/sysdev/xilinx_intc.c2
-rw-r--r--arch/powerpc/sysdev/xilinx_pci.c2
-rw-r--r--arch/s390/Kconfig16
-rw-r--r--arch/s390/Makefile17
-rw-r--r--arch/s390/include/asm/barrier.h6
-rw-r--r--arch/s390/include/asm/cputime.h28
-rw-r--r--arch/s390/include/asm/dis.h13
-rw-r--r--arch/s390/include/asm/dma-mapping.h31
-rw-r--r--arch/s390/include/asm/elf.h3
-rw-r--r--arch/s390/include/asm/ftrace.h9
-rw-r--r--arch/s390/include/asm/idle.h26
-rw-r--r--arch/s390/include/asm/ipl.h4
-rw-r--r--arch/s390/include/asm/irq.h1
-rw-r--r--arch/s390/include/asm/kprobes.h4
-rw-r--r--arch/s390/include/asm/lowcore.h21
-rw-r--r--arch/s390/include/asm/nmi.h2
-rw-r--r--arch/s390/include/asm/pgtable.h25
-rw-r--r--arch/s390/include/asm/processor.h12
-rw-r--r--arch/s390/include/asm/ptrace.h6
-rw-r--r--arch/s390/include/asm/setup.h6
-rw-r--r--arch/s390/include/asm/sigp.h6
-rw-r--r--arch/s390/include/asm/smp.h2
-rw-r--r--arch/s390/include/asm/spinlock.h135
-rw-r--r--arch/s390/include/asm/spinlock_types.h1
-rw-r--r--arch/s390/include/asm/switch_to.h61
-rw-r--r--arch/s390/include/asm/thread_info.h3
-rw-r--r--arch/s390/include/asm/uprobes.h42
-rw-r--r--arch/s390/include/asm/vdso.h18
-rw-r--r--arch/s390/include/asm/vtimer.h2
-rw-r--r--arch/s390/include/uapi/asm/sigcontext.h20
-rw-r--r--arch/s390/include/uapi/asm/types.h4
-rw-r--r--arch/s390/include/uapi/asm/ucontext.h15
-rw-r--r--arch/s390/kernel/Makefile8
-rw-r--r--arch/s390/kernel/asm-offsets.c9
-rw-r--r--arch/s390/kernel/compat_linux.h9
-rw-r--r--arch/s390/kernel/compat_signal.c212
-rw-r--r--arch/s390/kernel/crash_dump.c58
-rw-r--r--arch/s390/kernel/dis.c245
-rw-r--r--arch/s390/kernel/early.c4
-rw-r--r--arch/s390/kernel/entry.h6
-rw-r--r--arch/s390/kernel/entry64.S17
-rw-r--r--arch/s390/kernel/ftrace.c139
-rw-r--r--arch/s390/kernel/head.S2
-rw-r--r--arch/s390/kernel/idle.c124
-rw-r--r--arch/s390/kernel/irq.c3
-rw-r--r--arch/s390/kernel/kprobes.c159
-rw-r--r--arch/s390/kernel/machine_kexec.c8
-rw-r--r--arch/s390/kernel/mcount.S86
-rw-r--r--arch/s390/kernel/mcount64.S62
-rw-r--r--arch/s390/kernel/nmi.c16
-rw-r--r--arch/s390/kernel/pgm_check.S2
-rw-r--r--arch/s390/kernel/process.c24
-rw-r--r--arch/s390/kernel/processor.c4
-rw-r--r--arch/s390/kernel/ptrace.c256
-rw-r--r--arch/s390/kernel/setup.c13
-rw-r--r--arch/s390/kernel/signal.c296
-rw-r--r--arch/s390/kernel/smp.c80
-rw-r--r--arch/s390/kernel/time.c13
-rw-r--r--arch/s390/kernel/topology.c18
-rw-r--r--arch/s390/kernel/traps.c115
-rw-r--r--arch/s390/kernel/uprobes.c332
-rw-r--r--arch/s390/kernel/vdso32/clock_getres.S11
-rw-r--r--arch/s390/kernel/vdso32/clock_gettime.S32
-rw-r--r--arch/s390/kernel/vdso32/gettimeofday.S4
-rw-r--r--arch/s390/kernel/vdso64/clock_getres.S8
-rw-r--r--arch/s390/kernel/vdso64/clock_gettime.S32
-rw-r--r--arch/s390/kernel/vdso64/gettimeofday.S4
-rw-r--r--arch/s390/kernel/vtime.c77
-rw-r--r--arch/s390/lib/Makefile2
-rw-r--r--arch/s390/lib/delay.c4
-rw-r--r--arch/s390/lib/probes.c159
-rw-r--r--arch/s390/lib/spinlock.c105
-rw-r--r--arch/s390/mm/dump_pagetables.c5
-rw-r--r--arch/s390/mm/hugetlbpage.c2
-rw-r--r--arch/s390/mm/pageattr.c38
-rw-r--r--arch/s390/mm/vmem.c8
-rw-r--r--arch/sh/include/asm/atomic-grb.h119
-rw-r--r--arch/sh/include/asm/atomic-irq.h62
-rw-r--r--arch/sh/include/asm/atomic-llsc.h101
-rw-r--r--arch/sh/include/asm/atomic.h2
-rw-r--r--arch/sparc/Kconfig1
-rw-r--r--arch/sparc/include/asm/atomic_32.h19
-rw-r--r--arch/sparc/include/asm/atomic_64.h49
-rw-r--r--arch/sparc/include/asm/dma-mapping.h6
-rw-r--r--arch/sparc/include/asm/hypervisor.h11
-rw-r--r--arch/sparc/include/asm/irq_64.h7
-rw-r--r--arch/sparc/include/asm/ldc.h5
-rw-r--r--arch/sparc/include/asm/page_64.h33
-rw-r--r--arch/sparc/include/asm/pgalloc_64.h28
-rw-r--r--arch/sparc/include/asm/pgtable_64.h100
-rw-r--r--arch/sparc/include/asm/spitfire.h2
-rw-r--r--arch/sparc/include/asm/thread_info_64.h1
-rw-r--r--arch/sparc/include/asm/tsb.h87
-rw-r--r--arch/sparc/include/asm/vio.h16
-rw-r--r--arch/sparc/kernel/cpu.c12
-rw-r--r--arch/sparc/kernel/cpumap.c2
-rw-r--r--arch/sparc/kernel/ds.c4
-rw-r--r--arch/sparc/kernel/head_64.S12
-rw-r--r--arch/sparc/kernel/hvapi.c1
-rw-r--r--arch/sparc/kernel/hvcalls.S16
-rw-r--r--arch/sparc/kernel/ioport.c5
-rw-r--r--arch/sparc/kernel/irq_64.c507
-rw-r--r--arch/sparc/kernel/ktlb.S125
-rw-r--r--arch/sparc/kernel/ldc.c41
-rw-r--r--arch/sparc/kernel/leon_kernel.c31
-rw-r--r--arch/sparc/kernel/pcr.c47
-rw-r--r--arch/sparc/kernel/perf_event.c3
-rw-r--r--arch/sparc/kernel/setup_64.c26
-rw-r--r--arch/sparc/kernel/smp_64.c9
-rw-r--r--arch/sparc/kernel/sun4v_tlb_miss.S35
-rw-r--r--arch/sparc/kernel/traps_64.c15
-rw-r--r--arch/sparc/kernel/vio.c13
-rw-r--r--arch/sparc/kernel/viohs.c4
-rw-r--r--arch/sparc/kernel/vmlinux.lds.S10
-rw-r--r--arch/sparc/lib/atomic32.c29
-rw-r--r--arch/sparc/lib/atomic_64.S163
-rw-r--r--arch/sparc/lib/ksyms.c25
-rw-r--r--arch/sparc/lib/memset.S18
-rw-r--r--arch/sparc/mm/fault_64.c3
-rw-r--r--arch/sparc/mm/init_64.c603
-rw-r--r--arch/sparc/mm/init_64.h18
-rw-r--r--arch/sparc/power/hibernate_asm.S4
-rw-r--r--arch/sparc/prom/bootstr_64.c5
-rw-r--r--arch/sparc/prom/p1275.c7
-rw-r--r--arch/um/drivers/random.c1
-rw-r--r--arch/x86/.gitignore2
-rw-r--r--arch/x86/Kconfig35
-rw-r--r--arch/x86/Makefile8
-rw-r--r--arch/x86/boot/compressed/aslr.c2
-rw-r--r--arch/x86/boot/mkcpustr.c1
-rw-r--r--arch/x86/ia32/ia32_aout.c21
-rw-r--r--arch/x86/ia32/ia32entry.S18
-rw-r--r--arch/x86/include/asm/atomic.h17
-rw-r--r--arch/x86/include/asm/atomic64_64.h2
-rw-r--r--arch/x86/include/asm/calling.h6
-rw-r--r--arch/x86/include/asm/cpufeature.h52
-rw-r--r--arch/x86/include/asm/disabled-features.h39
-rw-r--r--arch/x86/include/asm/elf.h5
-rw-r--r--arch/x86/include/asm/fpu-internal.h2
-rw-r--r--arch/x86/include/asm/microcode_intel.h2
-rw-r--r--arch/x86/include/asm/numa.h1
-rw-r--r--arch/x86/include/asm/perf_event.h8
-rw-r--r--arch/x86/include/asm/pgtable_32.h3
-rw-r--r--arch/x86/include/asm/pgtable_64.h3
-rw-r--r--arch/x86/include/asm/pgtable_types.h11
-rw-r--r--arch/x86/include/asm/ptrace.h5
-rw-r--r--arch/x86/include/asm/rwlock.h49
-rw-r--r--arch/x86/include/asm/serial.h24
-rw-r--r--arch/x86/include/asm/spinlock.h81
-rw-r--r--arch/x86/include/asm/spinlock_types.h4
-rw-r--r--arch/x86/include/uapi/asm/e820.h5
-rw-r--r--arch/x86/kernel/Makefile2
-rw-r--r--arch/x86/kernel/apic/apic_numachip.c2
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c2
-rw-r--r--arch/x86/kernel/cpu/Makefile4
-rw-r--r--arch/x86/kernel/cpu/common.c33
-rw-r--r--arch/x86/kernel/cpu/intel.c32
-rw-r--r--arch/x86/kernel/cpu/mcheck/therm_throt.c4
-rw-r--r--arch/x86/kernel/cpu/microcode/amd_early.c2
-rw-r--r--arch/x86/kernel/cpu/microcode/intel.c4
-rw-r--r--arch/x86/kernel/cpu/microcode/intel_early.c10
-rw-r--r--arch/x86/kernel/cpu/mtrr/main.c6
-rw-r--r--arch/x86/kernel/cpu/perf_event.c14
-rw-r--r--arch/x86/kernel/cpu/perf_event.h49
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c229
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_ds.c185
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_lbr.c8
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.c3175
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.h439
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore_nhmex.c1221
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c636
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c2258
-rw-r--r--arch/x86/kernel/e820.c7
-rw-r--r--arch/x86/kernel/entry_64.S51
-rw-r--r--arch/x86/kernel/iosf_mbi.c91
-rw-r--r--arch/x86/kernel/machine_kexec_32.c3
-rw-r--r--arch/x86/kernel/pmc_atom.c11
-rw-r--r--arch/x86/kernel/preempt.S25
-rw-r--r--arch/x86/kernel/process.c16
-rw-r--r--arch/x86/kernel/process_32.c6
-rw-r--r--arch/x86/kernel/process_64.c3
-rw-r--r--arch/x86/kernel/ptrace.c165
-rw-r--r--arch/x86/kernel/quirks.c18
-rw-r--r--arch/x86/kernel/setup.c9
-rw-r--r--arch/x86/kernel/signal.c5
-rw-r--r--arch/x86/kernel/smpboot.c176
-rw-r--r--arch/x86/kernel/vsyscall_64.c2
-rw-r--r--arch/x86/kernel/xsave.c7
-rw-r--r--arch/x86/lib/Makefile3
-rw-r--r--arch/x86/lib/cmpxchg16b_emu.S32
-rw-r--r--arch/x86/lib/cmpxchg8b_emu.S20
-rw-r--r--arch/x86/lib/rwlock.S44
-rw-r--r--arch/x86/lib/thunk_32.S41
-rw-r--r--arch/x86/lib/thunk_64.S7
-rw-r--r--arch/x86/mm/fault.c29
-rw-r--r--arch/x86/mm/init_32.c2
-rw-r--r--arch/x86/mm/init_64.c38
-rw-r--r--arch/x86/mm/numa.c34
-rw-r--r--arch/x86/mm/pgtable_32.c35
-rw-r--r--arch/x86/pci/i386.c2
-rw-r--r--arch/x86/tools/relocs.c2
-rw-r--r--arch/x86/vdso/vdso2c.h12
-rw-r--r--arch/x86/xen/efi.c2
-rw-r--r--arch/x86/xen/enlighten.c19
-rw-r--r--arch/x86/xen/mmu.c48
-rw-r--r--arch/x86/xen/p2m.c23
-rw-r--r--arch/x86/xen/p2m.h15
-rw-r--r--arch/x86/xen/setup.c370
-rw-r--r--arch/x86/xen/smp.c31
-rw-r--r--arch/x86/xen/smp.h8
-rw-r--r--arch/x86/xen/xen-head.S36
-rw-r--r--arch/xtensa/include/asm/atomic.h235
-rw-r--r--crypto/asymmetric_keys/asymmetric_keys.h5
-rw-r--r--crypto/asymmetric_keys/asymmetric_type.c265
-rw-r--r--crypto/asymmetric_keys/pkcs7_key_type.c2
-rw-r--r--crypto/asymmetric_keys/pkcs7_parser.c99
-rw-r--r--crypto/asymmetric_keys/pkcs7_parser.h6
-rw-r--r--crypto/asymmetric_keys/pkcs7_trust.c90
-rw-r--r--crypto/asymmetric_keys/pkcs7_verify.c102
-rw-r--r--crypto/asymmetric_keys/signature.c1
-rw-r--r--crypto/asymmetric_keys/x509_cert_parser.c57
-rw-r--r--crypto/asymmetric_keys/x509_parser.h8
-rw-r--r--crypto/asymmetric_keys/x509_public_key.c115
-rw-r--r--drivers/block/rsxx/core.c81
-rw-r--r--drivers/block/sunvdc.c167
-rw-r--r--drivers/block/xen-blkback/xenbus.c11
-rw-r--r--drivers/block/xen-blkfront.c5
-rw-r--r--drivers/bluetooth/hci_vhci.c22
-rw-r--r--drivers/char/mem.c56
-rw-r--r--drivers/char/tpm/xen-tpmfront.c13
-rw-r--r--drivers/cpufreq/pmac32-cpufreq.c2
-rw-r--r--drivers/cpuidle/cpuidle.c15
-rw-r--r--drivers/dma-buf/dma-buf.c2
-rw-r--r--drivers/edac/sb_edac.c40
-rw-r--r--drivers/gpu/drm/drm_modeset_lock.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_nvif.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvif/driver.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvif/object.h6
-rw-r--r--drivers/gpu/vga/vgaarb.c1
-rw-r--r--drivers/hwmon/Kconfig15
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/ab8500.c5
-rw-r--r--drivers/hwmon/ads1015.c21
-rw-r--r--drivers/hwmon/da9052-hwmon.c54
-rw-r--r--drivers/hwmon/da9055-hwmon.c52
-rw-r--r--drivers/hwmon/k10temp.c157
-rw-r--r--drivers/hwmon/menf21bmc_hwmon.c230
-rw-r--r--drivers/hwmon/ntc_thermistor.c25
-rw-r--r--drivers/hwmon/smsc47b397.c51
-rw-r--r--drivers/infiniband/hw/ipath/ipath_fs.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_fs.c1
-rw-r--r--drivers/input/misc/xen-kbdfront.c5
-rw-r--r--drivers/leds/Kconfig9
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/leds-menf21bmc.c131
-rw-r--r--drivers/macintosh/adb.c5
-rw-r--r--drivers/macintosh/via-cuda.c2
-rw-r--r--drivers/md/dm-bufio.c1
-rw-r--r--drivers/media/common/b2c2/flexcop.h2
-rw-r--r--drivers/media/common/saa7146/saa7146_fops.c3
-rw-r--r--drivers/media/common/siano/sms-cards.c6
-rw-r--r--drivers/media/common/siano/sms-cards.h1
-rw-r--r--drivers/media/common/siano/smscoreapi.c4
-rw-r--r--drivers/media/dvb-core/dmxdev.c7
-rw-r--r--drivers/media/dvb-core/dvb-usb-ids.h2
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c45
-rw-r--r--drivers/media/dvb-core/dvb_frontend.h2
-rw-r--r--drivers/media/dvb-core/dvb_ringbuffer.c26
-rw-r--r--drivers/media/dvb-core/dvb_ringbuffer.h2
-rw-r--r--drivers/media/dvb-frontends/Kconfig20
-rw-r--r--drivers/media/dvb-frontends/Makefile4
-rw-r--r--drivers/media/dvb-frontends/af9013.c24
-rw-r--r--drivers/media/dvb-frontends/af9033.c757
-rw-r--r--drivers/media/dvb-frontends/af9033.h58
-rw-r--r--drivers/media/dvb-frontends/af9033_priv.h1
-rw-r--r--drivers/media/dvb-frontends/as102_fe.c480
-rw-r--r--drivers/media/dvb-frontends/as102_fe.h29
-rw-r--r--drivers/media/dvb-frontends/as102_fe_types.h (renamed from drivers/staging/media/as102/as10x_types.h)6
-rw-r--r--drivers/media/dvb-frontends/bcm3510.c6
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_c.c4
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_core.c4
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_t.c4
-rw-r--r--drivers/media/dvb-frontends/dib7000p.c2
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drxj.c38
-rw-r--r--drivers/media/dvb-frontends/drxd_hard.c9
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.c37
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.c101
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.h35
-rw-r--r--drivers/media/dvb-frontends/mb86a16.c6
-rw-r--r--drivers/media/dvb-frontends/mb86a20s.c14
-rw-r--r--drivers/media/dvb-frontends/mt312.c2
-rw-r--r--drivers/media/dvb-frontends/or51211.c2
-rw-r--r--drivers/media/dvb-frontends/rtl2832.c2
-rw-r--r--drivers/media/dvb-frontends/rtl2832_sdr.c118
-rw-r--r--drivers/media/dvb-frontends/si2165.c63
-rw-r--r--drivers/media/dvb-frontends/si2165_priv.h2
-rw-r--r--drivers/media/dvb-frontends/si2168.c129
-rw-r--r--drivers/media/dvb-frontends/si2168.h6
-rw-r--r--drivers/media/dvb-frontends/si2168_priv.h2
-rw-r--r--drivers/media/dvb-frontends/si21xx.c3
-rw-r--r--drivers/media/dvb-frontends/sp2.c441
-rw-r--r--drivers/media/dvb-frontends/sp2.h53
-rw-r--r--drivers/media/dvb-frontends/sp2_priv.h50
-rw-r--r--drivers/media/dvb-frontends/sp8870.c3
-rw-r--r--drivers/media/dvb-frontends/stv0367.c12
-rw-r--r--drivers/media/dvb-frontends/stv0900_core.c7
-rw-r--r--drivers/media/dvb-frontends/stv0900_sw.c3
-rw-r--r--drivers/media/dvb-frontends/tc90522.c840
-rw-r--r--drivers/media/dvb-frontends/tc90522.h42
-rw-r--r--drivers/media/dvb-frontends/tda10071.c2
-rw-r--r--drivers/media/dvb-frontends/zl10039.c2
-rw-r--r--drivers/media/firewire/firedtv-avc.c10
-rw-r--r--drivers/media/i2c/adv7343_regs.h2
-rw-r--r--drivers/media/i2c/adv7604.c2
-rw-r--r--drivers/media/i2c/adv7842.c4
-rw-r--r--drivers/media/i2c/lm3560.c4
-rw-r--r--drivers/media/i2c/ov7670.c14
-rw-r--r--drivers/media/i2c/s5k5baf.c2
-rw-r--r--drivers/media/i2c/saa6752hs.c6
-rw-r--r--drivers/media/i2c/smiapp/smiapp-core.c143
-rw-r--r--drivers/media/i2c/smiapp/smiapp.h4
-rw-r--r--drivers/media/i2c/soc_camera/mt9t112.c4
-rw-r--r--drivers/media/i2c/soc_camera/ov772x.c5
-rw-r--r--drivers/media/i2c/soc_camera/ov9740.c4
-rw-r--r--drivers/media/i2c/tda7432.c2
-rw-r--r--drivers/media/i2c/tvp7002.c21
-rw-r--r--drivers/media/i2c/vs6624.c14
-rw-r--r--drivers/media/media-device.c6
-rw-r--r--drivers/media/media-devnode.c3
-rw-r--r--drivers/media/parport/pms.c7
-rw-r--r--drivers/media/pci/Kconfig2
-rw-r--r--drivers/media/pci/Makefile3
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c5
-rw-r--r--drivers/media/pci/bt8xx/dst_ca.c4
-rw-r--r--drivers/media/pci/cx18/cx18-alsa-pcm.c2
-rw-r--r--drivers/media/pci/cx18/cx18-firmware.c6
-rw-r--r--drivers/media/pci/cx18/cx18-queue.c2
-rw-r--r--drivers/media/pci/cx23885/Kconfig9
-rw-r--r--drivers/media/pci/cx23885/Makefile1
-rw-r--r--drivers/media/pci/cx23885/altera-ci.c8
-rw-r--r--drivers/media/pci/cx23885/altera-ci.h4
-rw-r--r--drivers/media/pci/cx23885/cimax2.c4
-rw-r--r--drivers/media/pci/cx23885/cimax2.h4
-rw-r--r--drivers/media/pci/cx23885/cx23885-417.c503
-rw-r--r--drivers/media/pci/cx23885/cx23885-alsa.c109
-rw-r--r--drivers/media/pci/cx23885/cx23885-av.c5
-rw-r--r--drivers/media/pci/cx23885/cx23885-av.h5
-rw-r--r--drivers/media/pci/cx23885/cx23885-cards.c32
-rw-r--r--drivers/media/pci/cx23885/cx23885-core.c362
-rw-r--r--drivers/media/pci/cx23885/cx23885-dvb.c323
-rw-r--r--drivers/media/pci/cx23885/cx23885-f300.c4
-rw-r--r--drivers/media/pci/cx23885/cx23885-i2c.c12
-rw-r--r--drivers/media/pci/cx23885/cx23885-input.c5
-rw-r--r--drivers/media/pci/cx23885/cx23885-input.h5
-rw-r--r--drivers/media/pci/cx23885/cx23885-ioctl.c10
-rw-r--r--drivers/media/pci/cx23885/cx23885-ioctl.h4
-rw-r--r--drivers/media/pci/cx23885/cx23885-ir.c5
-rw-r--r--drivers/media/pci/cx23885/cx23885-ir.h5
-rw-r--r--drivers/media/pci/cx23885/cx23885-reg.h4
-rw-r--r--drivers/media/pci/cx23885/cx23885-vbi.c284
-rw-r--r--drivers/media/pci/cx23885/cx23885-video.c1294
-rw-r--r--drivers/media/pci/cx23885/cx23885-video.h5
-rw-r--r--drivers/media/pci/cx23885/cx23885.h136
-rw-r--r--drivers/media/pci/cx23885/cx23888-ir.c5
-rw-r--r--drivers/media/pci/cx23885/cx23888-ir.h5
-rw-r--r--drivers/media/pci/cx23885/netup-eeprom.c4
-rw-r--r--drivers/media/pci/cx23885/netup-eeprom.h4
-rw-r--r--drivers/media/pci/cx23885/netup-init.c4
-rw-r--r--drivers/media/pci/cx23885/netup-init.h4
-rw-r--r--drivers/media/pci/cx25821/cx25821-video-upstream.c5
-rw-r--r--drivers/media/pci/cx88/cx88-cards.c632
-rw-r--r--drivers/media/pci/cx88/cx88-video.c3
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-core.c30
-rw-r--r--drivers/media/pci/ddbridge/ddbridge.h12
-rw-r--r--drivers/media/pci/dm1105/dm1105.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-pcm.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-firmware.c4
-rw-r--r--drivers/media/pci/ivtv/ivtv-irq.c12
-rw-r--r--drivers/media/pci/mantis/hopper_vp3028.c2
-rw-r--r--drivers/media/pci/mantis/mantis_common.h2
-rw-r--r--drivers/media/pci/mantis/mantis_vp1033.c4
-rw-r--r--drivers/media/pci/mantis/mantis_vp1034.c2
-rw-r--r--drivers/media/pci/mantis/mantis_vp1041.c4
-rw-r--r--drivers/media/pci/mantis/mantis_vp2033.c4
-rw-r--r--drivers/media/pci/mantis/mantis_vp2040.c4
-rw-r--r--drivers/media/pci/mantis/mantis_vp3030.c4
-rw-r--r--drivers/media/pci/ngene/ngene-cards.c2
-rw-r--r--drivers/media/pci/ngene/ngene-core.c14
-rw-r--r--drivers/media/pci/ngene/ngene-dvb.c7
-rw-r--r--drivers/media/pci/ngene/ngene.h2
-rw-r--r--drivers/media/pci/pt3/Kconfig10
-rw-r--r--drivers/media/pci/pt3/Makefile8
-rw-r--r--drivers/media/pci/pt3/pt3.c876
-rw-r--r--drivers/media/pci/pt3/pt3.h186
-rw-r--r--drivers/media/pci/pt3/pt3_dma.c225
-rw-r--r--drivers/media/pci/pt3/pt3_i2c.c240
-rw-r--r--drivers/media/pci/saa7134/Kconfig8
-rw-r--r--drivers/media/pci/saa7134/Makefile2
-rw-r--r--drivers/media/pci/saa7134/saa7134-cards.c29
-rw-r--r--drivers/media/pci/saa7134/saa7134-core.c10
-rw-r--r--drivers/media/pci/saa7134/saa7134-go7007.c531
-rw-r--r--drivers/media/pci/saa7134/saa7134-vbi.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134.h5
-rw-r--r--drivers/media/pci/saa7164/saa7164-api.c3
-rw-r--r--drivers/media/pci/saa7164/saa7164-core.c6
-rw-r--r--drivers/media/pci/solo6x10/Kconfig1
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-disp.c4
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-eeprom.c8
-rw-r--r--drivers/media/pci/solo6x10/solo6x10.h4
-rw-r--r--drivers/media/pci/sta2x11/Kconfig1
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.c2
-rw-r--r--drivers/media/pci/ttpci/Kconfig4
-rw-r--r--drivers/media/pci/ttpci/Makefile2
-rw-r--r--drivers/media/pci/ttpci/av7110.c8
-rw-r--r--drivers/media/pci/tw68/Kconfig10
-rw-r--r--drivers/media/pci/tw68/Makefile3
-rw-r--r--drivers/media/pci/tw68/tw68-core.c434
-rw-r--r--drivers/media/pci/tw68/tw68-reg.h195
-rw-r--r--drivers/media/pci/tw68/tw68-risc.c230
-rw-r--r--drivers/media/pci/tw68/tw68-video.c1051
-rw-r--r--drivers/media/pci/tw68/tw68.h231
-rw-r--r--drivers/media/pci/zoran/zoran_device.c2
-rw-r--r--drivers/media/platform/Kconfig54
-rw-r--r--drivers/media/platform/Makefile8
-rw-r--r--drivers/media/platform/blackfin/Kconfig1
-rw-r--r--drivers/media/platform/coda.c3933
-rw-r--r--drivers/media/platform/coda/Makefile3
-rw-r--r--drivers/media/platform/coda/coda-bit.c1861
-rw-r--r--drivers/media/platform/coda/coda-common.c2052
-rw-r--r--drivers/media/platform/coda/coda-h264.c37
-rw-r--r--drivers/media/platform/coda/coda.h287
-rw-r--r--drivers/media/platform/coda/coda_regs.h (renamed from drivers/media/platform/coda.h)0
-rw-r--r--drivers/media/platform/davinci/Kconfig18
-rw-r--r--drivers/media/platform/davinci/dm355_ccdc.c2
-rw-r--r--drivers/media/platform/davinci/dm644x_ccdc.c14
-rw-r--r--drivers/media/platform/davinci/vpfe_capture.c16
-rw-r--r--drivers/media/platform/davinci/vpif.c1
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c13
-rw-r--r--drivers/media/platform/davinci/vpif_display.c22
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.c6
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-m2m.c3
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-regs.c8
-rw-r--r--drivers/media/platform/exynos4-is/Kconfig5
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-errno.c4
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-errno.h4
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-param.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is.c10
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp-video.c9
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c4
-rw-r--r--drivers/media/platform/exynos4-is/mipi-csis.c3
-rw-r--r--drivers/media/platform/marvell-ccic/Kconfig2
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.c2
-rw-r--r--drivers/media/platform/mx2_emmaprp.c2
-rw-r--r--drivers/media/platform/omap/Kconfig2
-rw-r--r--drivers/media/platform/omap/omap_vout.c16
-rw-r--r--drivers/media/platform/omap/omap_vout_vrfb.c10
-rw-r--r--drivers/media/platform/omap/omap_vout_vrfb.h18
-rw-r--r--drivers/media/platform/omap3isp/cfa_coef_table.h10
-rw-r--r--drivers/media/platform/omap3isp/gamma_table.h10
-rw-r--r--drivers/media/platform/omap3isp/isp.c20
-rw-r--r--drivers/media/platform/omap3isp/isp.h10
-rw-r--r--drivers/media/platform/omap3isp/ispccdc.c424
-rw-r--r--drivers/media/platform/omap3isp/ispccdc.h21
-rw-r--r--drivers/media/platform/omap3isp/ispccp2.c10
-rw-r--r--drivers/media/platform/omap3isp/ispccp2.h10
-rw-r--r--drivers/media/platform/omap3isp/ispcsi2.c10
-rw-r--r--drivers/media/platform/omap3isp/ispcsi2.h10
-rw-r--r--drivers/media/platform/omap3isp/ispcsiphy.c10
-rw-r--r--drivers/media/platform/omap3isp/ispcsiphy.h10
-rw-r--r--drivers/media/platform/omap3isp/isph3a.h10
-rw-r--r--drivers/media/platform/omap3isp/isph3a_aewb.c10
-rw-r--r--drivers/media/platform/omap3isp/isph3a_af.c10
-rw-r--r--drivers/media/platform/omap3isp/isphist.c10
-rw-r--r--drivers/media/platform/omap3isp/isphist.h10
-rw-r--r--drivers/media/platform/omap3isp/isppreview.c10
-rw-r--r--drivers/media/platform/omap3isp/isppreview.h10
-rw-r--r--drivers/media/platform/omap3isp/ispreg.h20
-rw-r--r--drivers/media/platform/omap3isp/ispresizer.c80
-rw-r--r--drivers/media/platform/omap3isp/ispresizer.h13
-rw-r--r--drivers/media/platform/omap3isp/ispstat.c10
-rw-r--r--drivers/media/platform/omap3isp/ispstat.h10
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c59
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.h12
-rw-r--r--drivers/media/platform/omap3isp/luma_enhance_table.h10
-rw-r--r--drivers/media/platform/omap3isp/noise_filter_table.h10
-rw-r--r--drivers/media/platform/s3c-camif/camif-capture.c4
-rw-r--r--drivers/media/platform/s3c-camif/camif-regs.c4
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.c7
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c2
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.c2
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c11
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c6
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c83
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c1
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c1
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_common.h6
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c27
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_debug.h6
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_dec.c54
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.c67
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr.c4
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr.h488
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c31
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c491
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_pm.c2
-rw-r--r--drivers/media/platform/s5p-tv/Kconfig4
-rw-r--r--drivers/media/platform/s5p-tv/hdmi_drv.c2
-rw-r--r--drivers/media/platform/s5p-tv/sdo_drv.c2
-rw-r--r--drivers/media/platform/s5p-tv/sii9234_drv.c2
-rw-r--r--drivers/media/platform/sh_veu.c4
-rw-r--r--drivers/media/platform/soc_camera/Kconfig16
-rw-r--r--drivers/media/platform/soc_camera/atmel-isi.c13
-rw-r--r--drivers/media/platform/soc_camera/mx2_camera.c5
-rw-r--r--drivers/media/platform/soc_camera/pxa_camera.c2
-rw-r--r--drivers/media/platform/soc_camera/rcar_vin.c4
-rw-r--r--drivers/media/platform/soc_camera/soc_camera.c21
-rw-r--r--drivers/media/platform/ti-vpe/vpdma.c4
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c20
-rw-r--r--drivers/media/platform/via-camera.c13
-rw-r--r--drivers/media/platform/vivi.c1542
-rw-r--r--drivers/media/platform/vivid/Kconfig19
-rw-r--r--drivers/media/platform/vivid/Makefile6
-rw-r--r--drivers/media/platform/vivid/vivid-core.c1390
-rw-r--r--drivers/media/platform/vivid/vivid-core.h520
-rw-r--r--drivers/media/platform/vivid/vivid-ctrls.c1502
-rw-r--r--drivers/media/platform/vivid/vivid-ctrls.h34
-rw-r--r--drivers/media/platform/vivid/vivid-kthread-cap.c886
-rw-r--r--drivers/media/platform/vivid/vivid-kthread-cap.h26
-rw-r--r--drivers/media/platform/vivid/vivid-kthread-out.c305
-rw-r--r--drivers/media/platform/vivid/vivid-kthread-out.h26
-rw-r--r--drivers/media/platform/vivid/vivid-osd.c400
-rw-r--r--drivers/media/platform/vivid/vivid-osd.h27
-rw-r--r--drivers/media/platform/vivid/vivid-radio-common.c189
-rw-r--r--drivers/media/platform/vivid/vivid-radio-common.h40
-rw-r--r--drivers/media/platform/vivid/vivid-radio-rx.c287
-rw-r--r--drivers/media/platform/vivid/vivid-radio-rx.h31
-rw-r--r--drivers/media/platform/vivid/vivid-radio-tx.c141
-rw-r--r--drivers/media/platform/vivid/vivid-radio-tx.h29
-rw-r--r--drivers/media/platform/vivid/vivid-rds-gen.c166
-rw-r--r--drivers/media/platform/vivid/vivid-rds-gen.h53
-rw-r--r--drivers/media/platform/vivid/vivid-sdr-cap.c499
-rw-r--r--drivers/media/platform/vivid/vivid-sdr-cap.h34
-rw-r--r--drivers/media/platform/vivid/vivid-tpg-colors.c310
-rw-r--r--drivers/media/platform/vivid/vivid-tpg-colors.h64
-rw-r--r--drivers/media/platform/vivid/vivid-tpg.c1439
-rw-r--r--drivers/media/platform/vivid/vivid-tpg.h439
-rw-r--r--drivers/media/platform/vivid/vivid-vbi-cap.c371
-rw-r--r--drivers/media/platform/vivid/vivid-vbi-cap.h40
-rw-r--r--drivers/media/platform/vivid/vivid-vbi-gen.c323
-rw-r--r--drivers/media/platform/vivid/vivid-vbi-gen.h33
-rw-r--r--drivers/media/platform/vivid/vivid-vbi-out.c248
-rw-r--r--drivers/media/platform/vivid/vivid-vbi-out.h34
-rw-r--r--drivers/media/platform/vivid/vivid-vid-cap.c1730
-rw-r--r--drivers/media/platform/vivid/vivid-vid-cap.h71
-rw-r--r--drivers/media/platform/vivid/vivid-vid-common.c571
-rw-r--r--drivers/media/platform/vivid/vivid-vid-common.h61
-rw-r--r--drivers/media/platform/vivid/vivid-vid-out.c1146
-rw-r--r--drivers/media/platform/vivid/vivid-vid-out.h56
-rw-r--r--drivers/media/radio/radio-gemtek.c2
-rw-r--r--drivers/media/radio/radio-sf16fmi.c6
-rw-r--r--drivers/media/radio/radio-sf16fmr2.c4
-rw-r--r--drivers/media/radio/radio-tea5764.c12
-rw-r--r--drivers/media/radio/si470x/radio-si470x-common.c4
-rw-r--r--drivers/media/radio/si470x/radio-si470x-usb.c4
-rw-r--r--drivers/media/radio/wl128x/fmdrv_common.c11
-rw-r--r--drivers/media/radio/wl128x/fmdrv_rx.c10
-rw-r--r--drivers/media/radio/wl128x/fmdrv_tx.c2
-rw-r--r--drivers/media/rc/Kconfig15
-rw-r--r--drivers/media/rc/Makefile1
-rw-r--r--drivers/media/rc/ene_ir.c2
-rw-r--r--drivers/media/rc/fintek-cir.c6
-rw-r--r--drivers/media/rc/img-ir/img-ir-hw.c6
-rw-r--r--drivers/media/rc/img-ir/img-ir-hw.h6
-rw-r--r--drivers/media/rc/imon.c304
-rw-r--r--drivers/media/rc/ir-hix5hd2.c351
-rw-r--r--drivers/media/rc/ite-cir.c3
-rw-r--r--drivers/media/rc/keymaps/Makefile1
-rw-r--r--drivers/media/rc/keymaps/rc-dvbsky.c78
-rw-r--r--drivers/media/rc/lirc_dev.c14
-rw-r--r--drivers/media/rc/mceusb.c15
-rw-r--r--drivers/media/rc/nuvoton-cir.c6
-rw-r--r--drivers/media/rc/st_rc.c16
-rw-r--r--drivers/media/rc/streamzap.c6
-rw-r--r--drivers/media/tuners/Kconfig17
-rw-r--r--drivers/media/tuners/Makefile4
-rw-r--r--drivers/media/tuners/e4000.c75
-rw-r--r--drivers/media/tuners/it913x.c478
-rw-r--r--drivers/media/tuners/it913x.h (renamed from drivers/media/tuners/tuner_it913x.h)41
-rw-r--r--drivers/media/tuners/m88ts2022.c355
-rw-r--r--drivers/media/tuners/m88ts2022_priv.h5
-rw-r--r--drivers/media/tuners/msi001.c56
-rw-r--r--drivers/media/tuners/mt2060.c3
-rw-r--r--drivers/media/tuners/mt2063.c26
-rw-r--r--drivers/media/tuners/mxl301rf.c349
-rw-r--r--drivers/media/tuners/mxl301rf.h26
-rw-r--r--drivers/media/tuners/mxl5005s.c3
-rw-r--r--drivers/media/tuners/qm1d1c0042.c448
-rw-r--r--drivers/media/tuners/qm1d1c0042.h37
-rw-r--r--drivers/media/tuners/si2157.c86
-rw-r--r--drivers/media/tuners/si2157.h2
-rw-r--r--drivers/media/tuners/si2157_priv.h3
-rw-r--r--drivers/media/tuners/tda18212.c272
-rw-r--r--drivers/media/tuners/tda18212.h19
-rw-r--r--drivers/media/tuners/tda18271-common.c19
-rw-r--r--drivers/media/tuners/tda18271-priv.h4
-rw-r--r--drivers/media/tuners/tuner-xc2028.c62
-rw-r--r--drivers/media/tuners/tuner_it913x.c453
-rw-r--r--drivers/media/tuners/tuner_it913x_priv.h78
-rw-r--r--drivers/media/tuners/xc4000.c62
-rw-r--r--drivers/media/tuners/xc5000.c242
-rw-r--r--drivers/media/usb/Kconfig4
-rw-r--r--drivers/media/usb/Makefile4
-rw-r--r--drivers/media/usb/airspy/airspy.c222
-rw-r--r--drivers/media/usb/as102/Kconfig (renamed from drivers/staging/media/as102/Kconfig)0
-rw-r--r--drivers/media/usb/as102/Makefile (renamed from drivers/staging/media/as102/Makefile)3
-rw-r--r--drivers/media/usb/as102/as102_drv.c (renamed from drivers/staging/media/as102/as102_drv.c)152
-rw-r--r--drivers/media/usb/as102/as102_drv.h (renamed from drivers/staging/media/as102/as102_drv.h)26
-rw-r--r--drivers/media/usb/as102/as102_fw.c (renamed from drivers/staging/media/as102/as102_fw.c)4
-rw-r--r--drivers/media/usb/as102/as102_fw.h (renamed from drivers/staging/media/as102/as102_fw.h)4
-rw-r--r--drivers/media/usb/as102/as102_usb_drv.c (renamed from drivers/staging/media/as102/as102_usb_drv.c)53
-rw-r--r--drivers/media/usb/as102/as102_usb_drv.h (renamed from drivers/staging/media/as102/as102_usb_drv.h)4
-rw-r--r--drivers/media/usb/as102/as10x_cmd.c (renamed from drivers/staging/media/as102/as10x_cmd.c)23
-rw-r--r--drivers/media/usb/as102/as10x_cmd.h (renamed from drivers/staging/media/as102/as10x_cmd.h)108
-rw-r--r--drivers/media/usb/as102/as10x_cmd_cfg.c (renamed from drivers/staging/media/as102/as10x_cmd_cfg.c)9
-rw-r--r--drivers/media/usb/as102/as10x_cmd_stream.c (renamed from drivers/staging/media/as102/as10x_cmd_stream.c)4
-rw-r--r--drivers/media/usb/as102/as10x_handle.h (renamed from drivers/staging/media/as102/as10x_handle.h)7
-rw-r--r--drivers/media/usb/au0828/au0828-cards.c36
-rw-r--r--drivers/media/usb/au0828/au0828-core.c84
-rw-r--r--drivers/media/usb/au0828/au0828-dvb.c110
-rw-r--r--drivers/media/usb/au0828/au0828-i2c.c15
-rw-r--r--drivers/media/usb/au0828/au0828-input.c36
-rw-r--r--drivers/media/usb/au0828/au0828-vbi.c4
-rw-r--r--drivers/media/usb/au0828/au0828-video.c90
-rw-r--r--drivers/media/usb/au0828/au0828.h34
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-avcore.c14
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-cards.c10
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-core.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-dvb.c8
-rw-r--r--drivers/media/usb/dvb-usb-v2/Kconfig7
-rw-r--r--drivers/media/usb/dvb-usb-v2/Makefile3
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9015.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c644
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.h12
-rw-r--r--drivers/media/usb/dvb-usb-v2/anysee.c185
-rw-r--r--drivers/media/usb/dvb-usb-v2/anysee.h3
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb.h3
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb_core.c28
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb_urb.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvbsky.c460
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf.c8
-rw-r--r--drivers/media/usb/dvb-usb/Kconfig2
-rw-r--r--drivers/media/usb/dvb-usb/af9005.c2
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c130
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.h4
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_devices.c383
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-common.c12
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c14
-rw-r--r--drivers/media/usb/dvb-usb/opera1.c4
-rw-r--r--drivers/media/usb/dvb-usb/pctv452e.c8
-rw-r--r--drivers/media/usb/em28xx/em28xx-audio.c10
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c43
-rw-r--r--drivers/media/usb/em28xx/em28xx-core.c47
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c37
-rw-r--r--drivers/media/usb/em28xx/em28xx-input.c29
-rw-r--r--drivers/media/usb/em28xx/em28xx-vbi.c11
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c29
-rw-r--r--drivers/media/usb/em28xx/em28xx.h19
-rw-r--r--drivers/media/usb/go7007/go7007-usb.c4
-rw-r--r--drivers/media/usb/gspca/gspca.c5
-rw-r--r--drivers/media/usb/gspca/gspca.h2
-rw-r--r--drivers/media/usb/gspca/kinect.c12
-rw-r--r--drivers/media/usb/gspca/sn9c20x.c12
-rw-r--r--drivers/media/usb/hackrf/Kconfig10
-rw-r--r--drivers/media/usb/hackrf/Makefile1
-rw-r--r--drivers/media/usb/hackrf/hackrf.c1142
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-control.c21
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-core.c27
-rw-r--r--drivers/media/usb/msi2500/msi2500.c174
-rw-r--r--drivers/media/usb/pwc/pwc-v4l.c2
-rw-r--r--drivers/media/usb/s2255/s2255drv.c2
-rw-r--r--drivers/media/usb/siano/smsusb.c6
-rw-r--r--drivers/media/usb/ttusb-dec/ttusbdecfe.c3
-rw-r--r--drivers/media/usb/usbtv/Kconfig3
-rw-r--r--drivers/media/usb/usbtv/Makefile3
-rw-r--r--drivers/media/usb/usbtv/usbtv-audio.c385
-rw-r--r--drivers/media/usb/usbtv/usbtv-core.c17
-rw-r--r--drivers/media/usb/usbtv/usbtv-video.c18
-rw-r--r--drivers/media/usb/usbtv/usbtv.h21
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c60
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c20
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c1
-rw-r--r--drivers/media/usb/uvc/uvc_video.c10
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h5
-rw-r--r--drivers/media/v4l2-core/tuner-core.c10
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c9
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c30
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c6
-rw-r--r--drivers/media/v4l2-core/v4l2-dv-timings.c3
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c6
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c9
-rw-r--r--drivers/media/v4l2-core/videobuf-core.c11
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-sg.c6
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c66
-rw-r--r--drivers/mfd/Kconfig15
-rw-r--r--drivers/mfd/Makefile1
-rw-r--r--drivers/mfd/menf21bmc.c132
-rw-r--r--drivers/misc/Kconfig1
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/carma/carma-fpga-program.c30
-rw-r--r--drivers/misc/cxl/Kconfig25
-rw-r--r--drivers/misc/cxl/Makefile3
-rw-r--r--drivers/misc/cxl/base.c86
-rw-r--r--drivers/misc/cxl/context.c193
-rw-r--r--drivers/misc/cxl/cxl.h629
-rw-r--r--drivers/misc/cxl/debugfs.c132
-rw-r--r--drivers/misc/cxl/fault.c291
-rw-r--r--drivers/misc/cxl/file.c518
-rw-r--r--drivers/misc/cxl/irq.c402
-rw-r--r--drivers/misc/cxl/main.c230
-rw-r--r--drivers/misc/cxl/native.c683
-rw-r--r--drivers/misc/cxl/pci.c1000
-rw-r--r--drivers/misc/cxl/sysfs.c385
-rw-r--r--drivers/mmc/card/block.c21
-rw-r--r--drivers/mmc/card/queue.c6
-rw-r--r--drivers/mmc/card/sdio_uart.c9
-rw-r--r--drivers/mmc/core/core.c48
-rw-r--r--drivers/mmc/core/host.c96
-rw-r--r--drivers/mmc/core/mmc.c263
-rw-r--r--drivers/mmc/core/mmc_ops.c24
-rw-r--r--drivers/mmc/core/mmc_ops.h1
-rw-r--r--drivers/mmc/core/sd.c41
-rw-r--r--drivers/mmc/core/sdio.c19
-rw-r--r--drivers/mmc/core/sdio_bus.c4
-rw-r--r--drivers/mmc/core/sdio_irq.c11
-rw-r--r--drivers/mmc/core/slot-gpio.c70
-rw-r--r--drivers/mmc/host/Kconfig23
-rw-r--r--drivers/mmc/host/Makefile2
-rw-r--r--drivers/mmc/host/atmel-mci.c55
-rw-r--r--drivers/mmc/host/au1xmmc.c5
-rw-r--r--drivers/mmc/host/dw_mmc-pci.c3
-rw-r--r--drivers/mmc/host/dw_mmc-pltfm.c10
-rw-r--r--drivers/mmc/host/dw_mmc-rockchip.c136
-rw-r--r--drivers/mmc/host/dw_mmc.c297
-rw-r--r--drivers/mmc/host/dw_mmc.h5
-rw-r--r--drivers/mmc/host/jz4740_mmc.c269
-rw-r--r--drivers/mmc/host/mmc_spi.c1
-rw-r--r--drivers/mmc/host/mmci.c115
-rw-r--r--drivers/mmc/host/mmci_qcom_dml.c177
-rw-r--r--drivers/mmc/host/mmci_qcom_dml.h31
-rw-r--r--drivers/mmc/host/moxart-mmc.c1
-rw-r--r--drivers/mmc/host/mxcmmc.c1
-rw-r--r--drivers/mmc/host/mxs-mmc.c1
-rw-r--r--drivers/mmc/host/omap.c1
-rw-r--r--drivers/mmc/host/omap_hsmmc.c15
-rw-r--r--drivers/mmc/host/pxamci.c5
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c9
-rw-r--r--drivers/mmc/host/rtsx_usb_sdmmc.c9
-rw-r--r--drivers/mmc/host/s3cmci.c4
-rw-r--r--drivers/mmc/host/sdhci-acpi.c111
-rw-r--r--drivers/mmc/host/sdhci-bcm-kona.c3
-rw-r--r--drivers/mmc/host/sdhci-bcm2835.c1
-rw-r--r--drivers/mmc/host/sdhci-cns3xxx.c1
-rw-r--r--drivers/mmc/host/sdhci-dove.c1
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c21
-rw-r--r--drivers/mmc/host/sdhci-msm.c27
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c1
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c1
-rw-r--r--drivers/mmc/host/sdhci-of-hlwd.c1
-rw-r--r--drivers/mmc/host/sdhci-pci.c70
-rw-r--r--drivers/mmc/host/sdhci-pci.h7
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c10
-rw-r--r--drivers/mmc/host/sdhci-pxav2.c1
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c8
-rw-r--r--drivers/mmc/host/sdhci-s3c.c5
-rw-r--r--drivers/mmc/host/sdhci-sirf.c26
-rw-r--r--drivers/mmc/host/sdhci-spear.c1
-rw-r--r--drivers/mmc/host/sdhci-tegra.c1
-rw-r--r--drivers/mmc/host/sdhci.c155
-rw-r--r--drivers/mmc/host/sdhci.h4
-rw-r--r--drivers/mmc/host/sh_mmcif.c1
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c53
-rw-r--r--drivers/mmc/host/sunxi-mmc.c4
-rw-r--r--drivers/mmc/host/tifm_sd.c4
-rw-r--r--drivers/mmc/host/tmio_mmc.c7
-rw-r--r--drivers/mmc/host/tmio_mmc.h31
-rw-r--r--drivers/mmc/host/tmio_mmc_dma.c8
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c294
-rw-r--r--drivers/mmc/host/wbsd.c21
-rw-r--r--drivers/net/ethernet/apm/xgene/Makefile3
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c28
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c44
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.h30
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c86
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.h24
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c331
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h57
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c3
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c9
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c17
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c10
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fcc.c2
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-scc.c2
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c56
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c68
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h31
-rw-r--r--drivers/net/ethernet/intel/Kconfig1
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c7
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c7
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c8
-rw-r--r--drivers/net/ethernet/marvell/Kconfig3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c4
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c4
-rw-r--r--drivers/net/macvlan.c21
-rw-r--r--drivers/net/phy/micrel.c31
-rw-r--r--drivers/net/ppp/ppp_generic.c2
-rw-r--r--drivers/net/tun.c4
-rw-r--r--drivers/net/usb/r8152.c98
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/tx99.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c34
-rw-r--r--drivers/net/wireless/ath/main.c8
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c36
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h2
-rw-r--r--drivers/net/xen-netback/xenbus.c10
-rw-r--r--drivers/net/xen-netfront.c16
-rw-r--r--drivers/of/Kconfig4
-rw-r--r--drivers/of/Makefile1
-rw-r--r--drivers/of/base.c3
-rw-r--r--drivers/of/resolver.c336
-rw-r--r--drivers/of/selftest.c120
-rw-r--r--drivers/of/testcase-data/testcases.dts35
-rw-r--r--drivers/parisc/power.c1
-rw-r--r--drivers/pci/msi.c2
-rw-r--r--drivers/pci/xen-pcifront.c6
-rw-r--r--drivers/platform/x86/dell-wmi.c12
-rw-r--r--drivers/platform/x86/eeepc-laptop.c206
-rw-r--r--drivers/platform/x86/intel-rst.c23
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c44
-rw-r--r--drivers/platform/x86/toshiba_acpi.c606
-rw-r--r--drivers/power/reset/restart-poweroff.c3
-rw-r--r--drivers/s390/block/dasd.c33
-rw-r--r--drivers/s390/block/dasd_devmap.c24
-rw-r--r--drivers/s390/block/dasd_eckd.c372
-rw-r--r--drivers/s390/block/dasd_eckd.h63
-rw-r--r--drivers/s390/block/dasd_int.h10
-rw-r--r--drivers/s390/char/Kconfig13
-rw-r--r--drivers/s390/char/Makefile3
-rw-r--r--drivers/s390/char/diag_ftp.c237
-rw-r--r--drivers/s390/char/diag_ftp.h21
-rw-r--r--drivers/s390/char/hmcdrv_cache.c252
-rw-r--r--drivers/s390/char/hmcdrv_cache.h24
-rw-r--r--drivers/s390/char/hmcdrv_dev.c370
-rw-r--r--drivers/s390/char/hmcdrv_dev.h14
-rw-r--r--drivers/s390/char/hmcdrv_ftp.c343
-rw-r--r--drivers/s390/char/hmcdrv_ftp.h63
-rw-r--r--drivers/s390/char/hmcdrv_mod.c64
-rw-r--r--drivers/s390/char/sclp.h2
-rw-r--r--drivers/s390/char/sclp_diag.h89
-rw-r--r--drivers/s390/char/sclp_early.c2
-rw-r--r--drivers/s390/char/sclp_ftp.c275
-rw-r--r--drivers/s390/char/sclp_ftp.h21
-rw-r--r--drivers/s390/char/sclp_rw.c13
-rw-r--r--drivers/s390/char/sclp_vt220.c4
-rw-r--r--drivers/s390/char/tape_char.c4
-rw-r--r--drivers/s390/char/vmlogrdr.c1
-rw-r--r--drivers/s390/char/zcore.c18
-rw-r--r--drivers/s390/cio/airq.c2
-rw-r--r--drivers/s390/cio/cio.c2
-rw-r--r--drivers/s390/crypto/ap_bus.c30
-rw-r--r--drivers/s390/crypto/ap_bus.h7
-rw-r--r--drivers/s390/crypto/zcrypt_api.c7
-rw-r--r--drivers/s390/net/claw.c2
-rw-r--r--drivers/scsi/Kconfig10
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/fcoe/fcoe.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c1
-rw-r--r--drivers/scsi/xen-scsifront.c1026
-rw-r--r--drivers/staging/android/ashmem.c2
-rw-r--r--drivers/staging/android/ion/compat_ion.c2
-rw-r--r--drivers/staging/android/logger.c103
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c3
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c1
-rw-r--r--drivers/staging/lustre/lustre/libcfs/fail.c1
-rw-r--r--drivers/staging/media/Kconfig2
-rw-r--r--drivers/staging/media/Makefile1
-rw-r--r--drivers/staging/media/as102/as102_fe.c571
-rw-r--r--drivers/staging/media/davinci_vpfe/Kconfig1
-rw-r--r--drivers/staging/media/dt3155v4l/Kconfig1
-rw-r--r--drivers/staging/media/lirc/lirc_imon.c1
-rw-r--r--drivers/staging/media/lirc/lirc_sasem.c1
-rw-r--r--drivers/staging/media/omap4iss/Kconfig1
-rw-r--r--drivers/staging/vme/devices/vme_user.c28
-rw-r--r--drivers/tty/bfin_jtag_comm.c1
-rw-r--r--drivers/tty/hvc/hvc_vio.c2
-rw-r--r--drivers/tty/hvc/hvc_xen.c9
-rw-r--r--drivers/tty/tty_io.c3
-rw-r--r--drivers/usb/gadget/function/f_fs.c24
-rw-r--r--drivers/usb/gadget/legacy/inode.c26
-rw-r--r--drivers/vfio/pci/vfio_pci.c136
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c15
-rw-r--r--drivers/vfio/vfio_iommu_type1.c30
-rw-r--r--drivers/vfio/vfio_spapr_eeh.c2
-rw-r--r--drivers/video/backlight/88pm860x_bl.c1
-rw-r--r--drivers/video/backlight/aat2870_bl.c1
-rw-r--r--drivers/video/backlight/adp5520_bl.c2
-rw-r--r--drivers/video/backlight/adp8860_bl.c3
-rw-r--r--drivers/video/backlight/adp8870_bl.c4
-rw-r--r--drivers/video/backlight/ams369fg06.c6
-rw-r--r--drivers/video/backlight/as3711_bl.c1
-rw-r--r--drivers/video/backlight/corgi_lcd.c1
-rw-r--r--drivers/video/backlight/cr_bllcd.c1
-rw-r--r--drivers/video/backlight/da903x_bl.c1
-rw-r--r--drivers/video/backlight/da9052_bl.c1
-rw-r--r--drivers/video/backlight/ep93xx_bl.c1
-rw-r--r--drivers/video/backlight/generic_bl.c18
-rw-r--r--drivers/video/backlight/gpio_backlight.c1
-rw-r--r--drivers/video/backlight/ili922x.c11
-rw-r--r--drivers/video/backlight/jornada720_bl.c6
-rw-r--r--drivers/video/backlight/jornada720_lcd.c6
-rw-r--r--drivers/video/backlight/ld9040.c6
-rw-r--r--drivers/video/backlight/lm3533_bl.c1
-rw-r--r--drivers/video/backlight/lm3639_bl.c2
-rw-r--r--drivers/video/backlight/lms501kf03.c12
-rw-r--r--drivers/video/backlight/lp855x_bl.c2
-rw-r--r--drivers/video/backlight/lp8788_bl.c1
-rw-r--r--drivers/video/backlight/max8925_bl.c1
-rw-r--r--drivers/video/backlight/omap1_bl.c1
-rw-r--r--drivers/video/backlight/ot200_bl.c1
-rw-r--r--drivers/video/backlight/pandora_bl.c1
-rw-r--r--drivers/video/backlight/pcf50633-backlight.c1
-rw-r--r--drivers/video/backlight/platform_lcd.c1
-rw-r--r--drivers/video/backlight/pwm_bl.c1
-rw-r--r--drivers/video/backlight/s6e63m0.c12
-rw-r--r--drivers/video/backlight/tdo24m.c2
-rw-r--r--drivers/video/backlight/tps65217_bl.c1
-rw-r--r--drivers/video/backlight/wm831x_bl.c2
-rw-r--r--drivers/video/fbdev/xen-fbfront.c5
-rw-r--r--drivers/watchdog/Kconfig10
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/alim7101_wdt.c42
-rw-r--r--drivers/watchdog/menf21bmc_wdt.c203
-rw-r--r--drivers/watchdog/moxart_wdt.c32
-rw-r--r--drivers/watchdog/sunxi_wdt.c31
-rw-r--r--drivers/xen/Kconfig9
-rw-r--r--drivers/xen/Makefile1
-rw-r--r--drivers/xen/efi.c2
-rw-r--r--drivers/xen/events/events_base.c5
-rw-r--r--drivers/xen/grant-table.c2
-rw-r--r--drivers/xen/xen-pciback/xenbus.c6
-rw-r--r--drivers/xen/xen-scsiback.c2126
-rw-r--r--drivers/xen/xenbus/xenbus_client.c9
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c6
-rw-r--r--drivers/xen/xenbus/xenbus_probe.h4
-rw-r--r--drivers/xen/xenbus/xenbus_probe_backend.c8
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c8
-rw-r--r--fs/9p/fid.c4
-rw-r--r--fs/9p/vfs_addr.c4
-rw-r--r--fs/9p/vfs_dentry.c8
-rw-r--r--fs/9p/vfs_dir.c4
-rw-r--r--fs/9p/vfs_file.c8
-rw-r--r--fs/9p/vfs_inode.c32
-rw-r--r--fs/9p/vfs_inode_dotl.c8
-rw-r--r--fs/afs/dir.c5
-rw-r--r--fs/afs/vlocation.c1
-rw-r--r--fs/autofs4/expire.c6
-rw-r--r--fs/binfmt_aout.c25
-rw-r--r--fs/binfmt_elf.c25
-rw-r--r--fs/binfmt_elf_fdpic.c24
-rw-r--r--fs/btrfs/async-thread.c11
-rw-r--r--fs/btrfs/async-thread.h1
-rw-r--r--fs/btrfs/backref.c123
-rw-r--r--fs/btrfs/backref.h3
-rw-r--r--fs/btrfs/btrfs_inode.h27
-rw-r--r--fs/btrfs/check-integrity.c18
-rw-r--r--fs/btrfs/compression.c21
-rw-r--r--fs/btrfs/ctree.c106
-rw-r--r--fs/btrfs/ctree.h93
-rw-r--r--fs/btrfs/delayed-inode.c8
-rw-r--r--fs/btrfs/dev-replace.c82
-rw-r--r--fs/btrfs/dir-item.c12
-rw-r--r--fs/btrfs/disk-io.c284
-rw-r--r--fs/btrfs/disk-io.h16
-rw-r--r--fs/btrfs/export.c4
-rw-r--r--fs/btrfs/extent-tree.c265
-rw-r--r--fs/btrfs/extent_io.c483
-rw-r--r--fs/btrfs/extent_io.h60
-rw-r--r--fs/btrfs/file-item.c30
-rw-r--r--fs/btrfs/file.c151
-rw-r--r--fs/btrfs/free-space-cache.c157
-rw-r--r--fs/btrfs/hash.c4
-rw-r--r--fs/btrfs/inode-item.c12
-rw-r--r--fs/btrfs/inode-map.c68
-rw-r--r--fs/btrfs/inode.c648
-rw-r--r--fs/btrfs/ioctl.c65
-rw-r--r--fs/btrfs/lzo.c3
-rw-r--r--fs/btrfs/orphan.c4
-rw-r--r--fs/btrfs/print-tree.c3
-rw-r--r--fs/btrfs/qgroup.c30
-rw-r--r--fs/btrfs/raid56.c8
-rw-r--r--fs/btrfs/reada.c2
-rw-r--r--fs/btrfs/relocation.c142
-rw-r--r--fs/btrfs/scrub.c67
-rw-r--r--fs/btrfs/send.c47
-rw-r--r--fs/btrfs/super.c137
-rw-r--r--fs/btrfs/sysfs.c41
-rw-r--r--fs/btrfs/sysfs.h16
-rw-r--r--fs/btrfs/tests/free-space-tests.c516
-rw-r--r--fs/btrfs/transaction.c52
-rw-r--r--fs/btrfs/transaction.h2
-rw-r--r--fs/btrfs/tree-log.c259
-rw-r--r--fs/btrfs/tree-log.h2
-rw-r--r--fs/btrfs/uuid-tree.c1
-rw-r--r--fs/btrfs/volumes.c676
-rw-r--r--fs/btrfs/volumes.h166
-rw-r--r--fs/btrfs/xattr.c4
-rw-r--r--fs/btrfs/zlib.c141
-rw-r--r--fs/buffer.c5
-rw-r--r--fs/cachefiles/rdwr.c48
-rw-r--r--fs/ceph/dir.c1
-rw-r--r--fs/cifs/cifs_spnego.c1
-rw-r--r--fs/cifs/cifsacl.c1
-rw-r--r--fs/cifs/cifsfs.c7
-rw-r--r--fs/cifs/dir.c14
-rw-r--r--fs/cifs/file.c12
-rw-r--r--fs/cifs/inode.c12
-rw-r--r--fs/cifs/readdir.c6
-rw-r--r--fs/compat.c28
-rw-r--r--fs/dcache.c259
-rw-r--r--fs/dlm/plock.c8
-rw-r--r--fs/ecryptfs/file.c6
-rw-r--r--fs/ecryptfs/inode.c25
-rw-r--r--fs/ecryptfs/keystore.c2
-rw-r--r--fs/ecryptfs/messaging.c3
-rw-r--r--fs/exec.c17
-rw-r--r--fs/ext3/ext3.h12
-rw-r--r--fs/ext3/super.c14
-rw-r--r--fs/fcntl.c21
-rw-r--r--fs/file.c3
-rw-r--r--fs/file_table.c12
-rw-r--r--fs/fuse/dir.c7
-rw-r--r--fs/gfs2/dentry.c3
-rw-r--r--fs/gfs2/dir.c9
-rw-r--r--fs/gfs2/dir.h1
-rw-r--r--fs/gfs2/file.c22
-rw-r--r--fs/gfs2/glock.c4
-rw-r--r--fs/gfs2/glops.c2
-rw-r--r--fs/gfs2/inode.c12
-rw-r--r--fs/gfs2/rgrp.c30
-rw-r--r--fs/gfs2/rgrp.h1
-rw-r--r--fs/gfs2/trans.c2
-rw-r--r--fs/internal.h2
-rw-r--r--fs/jffs2/jffs2_fs_sb.h2
-rw-r--r--fs/jffs2/wbuf.c17
-rw-r--r--fs/jfs/jfs_logmgr.c2
-rw-r--r--fs/jfs/jfs_txnmgr.c3
-rw-r--r--fs/jfs/super.c2
-rw-r--r--fs/kernfs/dir.c11
-rw-r--r--fs/libfs.c18
-rw-r--r--fs/lockd/svclock.c68
-rw-r--r--fs/locks.c444
-rw-r--r--fs/mount.h25
-rw-r--r--fs/namei.c29
-rw-r--r--fs/namespace.c203
-rw-r--r--fs/ncpfs/dir.c6
-rw-r--r--fs/ncpfs/ncplib_kernel.h14
-rw-r--r--fs/nfs/blocklayout/rpc_pipefs.c1
-rw-r--r--fs/nfs/dir.c7
-rw-r--r--fs/nfs/file.c13
-rw-r--r--fs/nfs/idmap.c2
-rw-r--r--fs/nfs/internal.h1
-rw-r--r--fs/nfs/nfs4file.c2
-rw-r--r--fs/nfsd/nfs4recover.c1
-rw-r--r--fs/nfsd/nfs4state.c104
-rw-r--r--fs/nfsd/state.h1
-rw-r--r--fs/nfsd/vfs.c2
-rw-r--r--fs/notify/dnotify/dnotify.c8
-rw-r--r--fs/ocfs2/file.c2
-rw-r--r--fs/ocfs2/quota.h5
-rw-r--r--fs/ocfs2/quota_global.c4
-rw-r--r--fs/ocfs2/quota_local.c33
-rw-r--r--fs/ocfs2/super.c30
-rw-r--r--fs/proc/base.c10
-rw-r--r--fs/proc/fd.c2
-rw-r--r--fs/read_write.c2
-rw-r--r--fs/reiserfs/reiserfs.h5
-rw-r--r--fs/reiserfs/super.c16
-rw-r--r--fs/reiserfs/xattr.h1
-rw-r--r--fs/super.c2
-rw-r--r--fs/udf/file.c9
-rw-r--r--fs/udf/inode.c14
-rw-r--r--fs/udf/super.c10
-rw-r--r--fs/udf/udfdecl.h13
-rw-r--r--fs/udf/udftime.c2
-rw-r--r--fs/xattr.c116
-rw-r--r--fs/xfs/kmem.c1
-rw-r--r--fs/xfs/libxfs/xfs_alloc.c4
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c365
-rw-r--r--fs/xfs/libxfs/xfs_bmap.h7
-rw-r--r--fs/xfs/libxfs/xfs_da_btree.c3
-rw-r--r--fs/xfs/libxfs/xfs_da_format.c1
-rw-r--r--fs/xfs/libxfs/xfs_dir2.c67
-rw-r--r--fs/xfs/libxfs/xfs_dir2.h2
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.c7
-rw-r--r--fs/xfs/libxfs/xfs_rtbitmap.c49
-rw-r--r--fs/xfs/libxfs/xfs_sb.c7
-rw-r--r--fs/xfs/time.h36
-rw-r--r--fs/xfs/xfs_aops.c23
-rw-r--r--fs/xfs/xfs_bmap_util.c126
-rw-r--r--fs/xfs/xfs_buf.c355
-rw-r--r--fs/xfs/xfs_buf.h15
-rw-r--r--fs/xfs/xfs_buf_item.c10
-rw-r--r--fs/xfs/xfs_file.c178
-rw-r--r--fs/xfs/xfs_fsops.c11
-rw-r--r--fs/xfs/xfs_globals.c4
-rw-r--r--fs/xfs/xfs_icache.c1
-rw-r--r--fs/xfs/xfs_inode.c34
-rw-r--r--fs/xfs/xfs_inode.h2
-rw-r--r--fs/xfs/xfs_inode_item.c2
-rw-r--r--fs/xfs/xfs_ioctl.c28
-rw-r--r--fs/xfs/xfs_ioctl32.c2
-rw-r--r--fs/xfs/xfs_ioctl32.h3
-rw-r--r--fs/xfs/xfs_iomap.c4
-rw-r--r--fs/xfs/xfs_iops.c30
-rw-r--r--fs/xfs/xfs_itable.c3
-rw-r--r--fs/xfs/xfs_linux.h6
-rw-r--r--fs/xfs/xfs_log.c59
-rw-r--r--fs/xfs/xfs_log_cil.c47
-rw-r--r--fs/xfs/xfs_log_recover.c689
-rw-r--r--fs/xfs/xfs_mount.c58
-rw-r--r--fs/xfs/xfs_mru_cache.c3
-rw-r--r--fs/xfs/xfs_qm.c1
-rw-r--r--fs/xfs/xfs_rtalloc.c85
-rw-r--r--fs/xfs/xfs_rtalloc.h4
-rw-r--r--fs/xfs/xfs_super.c39
-rw-r--r--fs/xfs/xfs_symlink.c8
-rw-r--r--fs/xfs/xfs_sysctl.h5
-rw-r--r--fs/xfs/xfs_sysfs.c74
-rw-r--r--fs/xfs/xfs_sysfs.h1
-rw-r--r--fs/xfs/xfs_trace.h3
-rw-r--r--fs/xfs/xfs_trans_buf.c16
-rw-r--r--fs/xfs/xfs_trans_inode.c2
-rw-r--r--include/asm-generic/atomic.h194
-rw-r--r--include/asm-generic/atomic64.h20
-rw-r--r--include/asm-generic/cputime_jiffies.h2
-rw-r--r--include/asm-generic/cputime_nsecs.h2
-rw-r--r--include/asm-generic/dma-mapping-common.h8
-rw-r--r--include/crypto/public_key.h6
-rw-r--r--include/dt-bindings/sound/cs35l32.h26
-rw-r--r--include/keys/asymmetric-type.h41
-rw-r--r--include/keys/user-type.h1
-rw-r--r--include/linux/atmel-mci.h2
-rw-r--r--include/linux/atomic.h36
-rw-r--r--include/linux/bitops.h20
-rw-r--r--include/linux/cpu.h2
-rw-r--r--include/linux/dcache.h9
-rw-r--r--include/linux/dma-mapping.h26
-rw-r--r--include/linux/fs.h64
-rw-r--r--include/linux/fsl_ifc.h6
-rw-r--r--include/linux/ftrace.h10
-rw-r--r--include/linux/ima.h4
-rw-r--r--include/linux/init_task.h12
-rw-r--r--include/linux/iommu.h1
-rw-r--r--include/linux/jump_label.h17
-rw-r--r--include/linux/kernel.h1
-rw-r--r--include/linux/key-type.h34
-rw-r--r--include/linux/lockd/lockd.h1
-rw-r--r--include/linux/lockdep.h3
-rw-r--r--include/linux/mfd/tmio.h25
-rw-r--r--include/linux/micrel_phy.h1
-rw-r--r--include/linux/mmc/card.h10
-rw-r--r--include/linux/mmc/dw_mmc.h4
-rw-r--r--include/linux/mmc/host.h12
-rw-r--r--include/linux/mmc/mmc.h7
-rw-r--r--include/linux/mmc/sdhci.h3
-rw-r--r--include/linux/mmc/slot-gpio.h5
-rw-r--r--include/linux/mutex.h4
-rw-r--r--include/linux/of.h3
-rw-r--r--include/linux/pci_ids.h17
-rw-r--r--include/linux/perf_event.h14
-rw-r--r--include/linux/rcupdate.h106
-rw-r--r--include/linux/rcutiny.h2
-rw-r--r--include/linux/reboot.h3
-rw-r--r--include/linux/rwsem.h2
-rw-r--r--include/linux/sched.h45
-rw-r--r--include/linux/seccomp.h25
-rw-r--r--include/linux/security.h10
-rw-r--r--include/linux/seqlock.h19
-rw-r--r--include/linux/smp.h2
-rw-r--r--include/linux/spinlock.h8
-rw-r--r--include/linux/tick.h2
-rw-r--r--include/linux/torture.h5
-rw-r--r--include/linux/tracepoint.h11
-rw-r--r--include/linux/uio.h3
-rw-r--r--include/linux/wait.h16
-rw-r--r--include/media/davinci/dm644x_ccdc.h2
-rw-r--r--include/media/omap3isp.h3
-rw-r--r--include/media/rc-map.h1
-rw-r--r--include/media/videobuf2-core.h15
-rw-r--r--include/misc/cxl.h48
-rw-r--r--include/net/netfilter/ipv6/nf_reject.h157
-rw-r--r--include/sound/pcm.h52
-rw-r--r--include/sound/rt5645.h3
-rw-r--r--include/sound/rt5677.h13
-rw-r--r--include/sound/soc-dapm.h5
-rw-r--r--include/sound/soc.h101
-rw-r--r--include/sound/vx_core.h7
-rw-r--r--include/trace/events/asoc.h6
-rw-r--r--include/trace/events/btrfs.h85
-rw-r--r--include/trace/events/filelock.h14
-rw-r--r--include/trace/events/rcu.h3
-rw-r--r--include/uapi/Kbuild1
-rw-r--r--include/uapi/linux/Kbuild1
-rw-r--r--include/uapi/linux/elf.h2
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h2
-rw-r--r--include/uapi/linux/smiapp.h29
-rw-r--r--include/uapi/linux/v4l2-controls.h6
-rw-r--r--include/uapi/linux/v4l2-dv-timings.h9
-rw-r--r--include/uapi/linux/vfio.h3
-rw-r--r--include/uapi/linux/videodev2.h13
-rw-r--r--include/uapi/misc/Kbuild2
-rw-r--r--include/uapi/misc/cxl.h88
-rw-r--r--include/uapi/sound/asound.h3
-rw-r--r--include/xen/events.h2
-rw-r--r--include/xen/interface/elfnote.h48
-rw-r--r--include/xen/interface/io/vscsiif.h229
-rw-r--r--include/xen/interface/xen.h272
-rw-r--r--include/xen/xenbus.h21
-rw-r--r--init/Kconfig14
-rw-r--r--init/main.c2
-rw-r--r--kernel/cpu.c16
-rw-r--r--kernel/events/callchain.c2
-rw-r--r--kernel/events/core.c212
-rw-r--r--kernel/exit.c50
-rw-r--r--kernel/fork.c13
-rw-r--r--kernel/locking/locktorture.c529
-rw-r--r--kernel/locking/mcs_spinlock.h3
-rw-r--r--kernel/locking/mutex.c416
-rw-r--r--kernel/locking/mutex.h2
-rw-r--r--kernel/locking/rtmutex.c2
-rw-r--r--kernel/locking/rwsem-xadd.c27
-rw-r--r--kernel/locking/semaphore.c12
-rw-r--r--kernel/printk/printk.c20
-rw-r--r--kernel/rcu/rcutorture.c278
-rw-r--r--kernel/rcu/tiny.c20
-rw-r--r--kernel/rcu/tree.c115
-rw-r--r--kernel/rcu/tree.h18
-rw-r--r--kernel/rcu/tree_plugin.h404
-rw-r--r--kernel/rcu/update.c345
-rw-r--r--kernel/reboot.c81
-rw-r--r--kernel/sched/auto_group.c5
-rw-r--r--kernel/sched/core.c311
-rw-r--r--kernel/sched/cpudeadline.c4
-rw-r--r--kernel/sched/cputime.c64
-rw-r--r--kernel/sched/deadline.c33
-rw-r--r--kernel/sched/debug.c13
-rw-r--r--kernel/sched/fair.c479
-rw-r--r--kernel/sched/idle.c6
-rw-r--r--kernel/sched/rt.c21
-rw-r--r--kernel/sched/sched.h80
-rw-r--r--kernel/sched/stop_task.c2
-rw-r--r--kernel/seccomp.c252
-rw-r--r--kernel/smp.c22
-rw-r--r--kernel/softirq.c2
-rw-r--r--kernel/sys.c2
-rw-r--r--kernel/sysctl.c9
-rw-r--r--kernel/time/hrtimer.c1
-rw-r--r--kernel/time/posix-cpu-timers.c14
-rw-r--r--kernel/time/tick-sched.c2
-rw-r--r--kernel/torture.c32
-rw-r--r--kernel/trace/ftrace.c416
-rw-r--r--kernel/trace/ring_buffer_benchmark.c3
-rw-r--r--kernel/trace/trace_events.c5
-rw-r--r--kernel/trace/trace_selftest.c51
-rw-r--r--kernel/trace/trace_stack.c4
-rw-r--r--kernel/trace/trace_syscalls.c4
-rw-r--r--kernel/watchdog.c10
-rw-r--r--kernel/workqueue.c5
-rw-r--r--lib/Kconfig.debug16
-rw-r--r--lib/atomic64.c83
-rw-r--r--lib/hexdump.c16
-rw-r--r--mm/Makefile3
-rw-r--r--mm/iov_iter.c240
-rw-r--r--mm/mlock.c2
-rw-r--r--mm/page_alloc.c2
-rw-r--r--net/Kconfig1
-rw-r--r--net/ceph/crypto.c1
-rw-r--r--net/core/filter.c9
-rw-r--r--net/core/flow_dissector.c36
-rw-r--r--net/core/skbuff.c35
-rw-r--r--net/dns_resolver/dns_key.c18
-rw-r--r--net/netfilter/nft_reject.c10
-rw-r--r--net/netlabel/netlabel_kapi.c1
-rw-r--r--net/netlink/af_netlink.c2
-rw-r--r--net/rfkill/core.c4
-rw-r--r--net/rxrpc/ar-key.c2
-rw-r--r--net/sched/sch_generic.c20
-rw-r--r--net/socket.c3
-rw-r--r--scripts/recordmcount.c4
-rwxr-xr-xscripts/recordmcount.pl7
-rw-r--r--security/capability.c4
-rw-r--r--security/integrity/Kconfig46
-rw-r--r--security/integrity/Makefile6
-rw-r--r--security/integrity/digsig_asymmetric.c7
-rw-r--r--security/integrity/evm/Kconfig8
-rw-r--r--security/integrity/evm/evm_main.c17
-rw-r--r--security/integrity/ima/Kconfig2
-rw-r--r--security/integrity/ima/ima.h24
-rw-r--r--security/integrity/ima/ima_api.c10
-rw-r--r--security/integrity/ima/ima_appraise.c17
-rw-r--r--security/integrity/ima/ima_crypto.c20
-rw-r--r--security/integrity/ima/ima_init.c25
-rw-r--r--security/integrity/ima/ima_main.c123
-rw-r--r--security/integrity/ima/ima_policy.c23
-rw-r--r--security/integrity/ima/ima_template.c30
-rw-r--r--security/integrity/integrity.h2
-rw-r--r--security/keys/big_key.c2
-rw-r--r--security/keys/encrypted-keys/encrypted.c1
-rw-r--r--security/keys/internal.h21
-rw-r--r--security/keys/key.c2
-rw-r--r--security/keys/keyctl.c2
-rw-r--r--security/keys/keyring.c58
-rw-r--r--security/keys/proc.c8
-rw-r--r--security/keys/process_keys.c13
-rw-r--r--security/keys/request_key.c21
-rw-r--r--security/keys/request_key_auth.c10
-rw-r--r--security/keys/trusted.c1
-rw-r--r--security/keys/user_defined.c14
-rw-r--r--security/security.c4
-rw-r--r--security/selinux/hooks.c139
-rw-r--r--security/selinux/include/netif.h4
-rw-r--r--security/selinux/include/objsec.h2
-rw-r--r--security/selinux/netif.c43
-rw-r--r--security/selinux/ss/services.c14
-rw-r--r--security/smack/Kconfig16
-rw-r--r--security/smack/smack.h39
-rw-r--r--security/smack/smack_access.c118
-rw-r--r--security/smack/smack_lsm.c548
-rw-r--r--security/smack/smackfs.c76
-rw-r--r--sound/core/misc.c2
-rw-r--r--sound/core/pcm.c2
-rw-r--r--sound/core/pcm_lib.c10
-rw-r--r--sound/core/pcm_misc.c4
-rw-r--r--sound/core/pcm_native.c136
-rw-r--r--sound/drivers/vx/vx_core.c49
-rw-r--r--sound/drivers/vx/vx_mixer.c12
-rw-r--r--sound/drivers/vx/vx_pcm.c68
-rw-r--r--sound/drivers/vx/vx_uer.c23
-rw-r--r--sound/pci/au88x0/au88x0.c22
-rw-r--r--sound/pci/au88x0/au88x0_a3d.c10
-rw-r--r--sound/pci/au88x0/au88x0_core.c76
-rw-r--r--sound/pci/au88x0/au88x0_eq.c2
-rw-r--r--sound/pci/au88x0/au88x0_game.c2
-rw-r--r--sound/pci/au88x0/au88x0_mpu401.c2
-rw-r--r--sound/pci/au88x0/au88x0_pcm.c8
-rw-r--r--sound/pci/au88x0/au88x0_synth.c26
-rw-r--r--sound/pci/ctxfi/ctamixer.c12
-rw-r--r--sound/pci/ctxfi/ctamixer.h7
-rw-r--r--sound/pci/ctxfi/ctatc.c52
-rw-r--r--sound/pci/ctxfi/ctatc.h2
-rw-r--r--sound/pci/ctxfi/ctdaio.c42
-rw-r--r--sound/pci/ctxfi/ctdaio.h8
-rw-r--r--sound/pci/ctxfi/cthardware.h4
-rw-r--r--sound/pci/ctxfi/cthw20k1.c22
-rw-r--r--sound/pci/ctxfi/cthw20k2.c39
-rw-r--r--sound/pci/ctxfi/ctmixer.c8
-rw-r--r--sound/pci/ctxfi/ctpcm.c9
-rw-r--r--sound/pci/ctxfi/ctresource.c55
-rw-r--r--sound/pci/ctxfi/ctresource.h9
-rw-r--r--sound/pci/ctxfi/ctsrc.c18
-rw-r--r--sound/pci/ctxfi/ctsrc.h7
-rw-r--r--sound/pci/ctxfi/ctvmem.c14
-rw-r--r--sound/pci/ctxfi/xfi.c17
-rw-r--r--sound/pci/hda/hda_auto_parser.c21
-rw-r--r--sound/pci/hda/hda_auto_parser.h1
-rw-r--r--sound/pci/hda/hda_codec.c135
-rw-r--r--sound/pci/hda/hda_codec.h2
-rw-r--r--sound/pci/hda/hda_generic.c49
-rw-r--r--sound/pci/hda/hda_generic.h19
-rw-r--r--sound/pci/hda/hda_jack.c83
-rw-r--r--sound/pci/hda/hda_jack.h44
-rw-r--r--sound/pci/hda/hda_local.h14
-rw-r--r--sound/pci/hda/hda_sysfs.c2
-rw-r--r--sound/pci/hda/patch_ca0132.c74
-rw-r--r--sound/pci/hda/patch_cirrus.c5
-rw-r--r--sound/pci/hda/patch_conexant.c12
-rw-r--r--sound/pci/hda/patch_hdmi.c18
-rw-r--r--sound/pci/hda/patch_realtek.c1356
-rw-r--r--sound/pci/hda/patch_sigmatel.c91
-rw-r--r--sound/pci/hda/patch_via.c30
-rw-r--r--sound/pci/ice1712/ice1712.c2
-rw-r--r--sound/pci/lx6464es/lx6464es.c43
-rw-r--r--sound/pci/lx6464es/lx6464es.h9
-rw-r--r--sound/pci/lx6464es/lx_core.c252
-rw-r--r--sound/pci/lx6464es/lx_core.h4
-rw-r--r--sound/pci/mixart/mixart.c17
-rw-r--r--sound/pci/mixart/mixart.h10
-rw-r--r--sound/pci/mixart/mixart_core.c79
-rw-r--r--sound/pci/mixart/mixart_core.h2
-rw-r--r--sound/pci/oxygen/oxygen_pcm.c10
-rw-r--r--sound/pci/oxygen/virtuoso.c1
-rw-r--r--sound/pci/oxygen/xonar_pcm179x.c166
-rw-r--r--sound/pci/pcxhr/pcxhr.c43
-rw-r--r--sound/pci/pcxhr/pcxhr.h8
-rw-r--r--sound/pci/pcxhr/pcxhr_core.c102
-rw-r--r--sound/pci/pcxhr/pcxhr_core.h2
-rw-r--r--sound/pci/vx222/vx222.c5
-rw-r--r--sound/pcmcia/pdaudiocf/pdaudiocf.c13
-rw-r--r--sound/pcmcia/pdaudiocf/pdaudiocf.h5
-rw-r--r--sound/pcmcia/pdaudiocf/pdaudiocf_core.c8
-rw-r--r--sound/pcmcia/pdaudiocf/pdaudiocf_irq.c23
-rw-r--r--sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c5
-rw-r--r--sound/pcmcia/vx/vxp_ops.c10
-rw-r--r--sound/pcmcia/vx/vxpocket.c13
-rw-r--r--sound/soc/codecs/88pm860x-codec.c5
-rw-r--r--sound/soc/codecs/Kconfig36
-rw-r--r--sound/soc/codecs/Makefile10
-rw-r--r--sound/soc/codecs/ab8500-codec.c73
-rw-r--r--sound/soc/codecs/ac97.c15
-rw-r--r--sound/soc/codecs/adau1373.c21
-rw-r--r--sound/soc/codecs/adau1761.c2
-rw-r--r--sound/soc/codecs/adau1781.c2
-rw-r--r--sound/soc/codecs/adau17x1.c8
-rw-r--r--sound/soc/codecs/adau17x1.h1
-rw-r--r--sound/soc/codecs/adav80x.c23
-rw-r--r--sound/soc/codecs/cs35l32.c631
-rw-r--r--sound/soc/codecs/cs35l32.h93
-rw-r--r--sound/soc/codecs/cs4265.c1
-rw-r--r--sound/soc/codecs/cs42l52.c24
-rw-r--r--sound/soc/codecs/cs42l56.c27
-rw-r--r--sound/soc/codecs/cs42l73.c25
-rw-r--r--sound/soc/codecs/da732x.c27
-rw-r--r--sound/soc/codecs/es8328-i2c.c60
-rw-r--r--sound/soc/codecs/es8328-spi.c49
-rw-r--r--sound/soc/codecs/es8328.c756
-rw-r--r--sound/soc/codecs/es8328.h314
-rw-r--r--sound/soc/codecs/jz4740.c30
-rw-r--r--sound/soc/codecs/lm49453.c14
-rw-r--r--sound/soc/codecs/max98090.c146
-rw-r--r--sound/soc/codecs/max98090.h13
-rw-r--r--sound/soc/codecs/mc13783.c16
-rw-r--r--sound/soc/codecs/ml26124.c24
-rw-r--r--sound/soc/codecs/rt286.c2
-rw-r--r--sound/soc/codecs/rt5640.c49
-rw-r--r--sound/soc/codecs/rt5640.h3
-rw-r--r--sound/soc/codecs/rt5645.c99
-rw-r--r--sound/soc/codecs/rt5645.h5
-rw-r--r--sound/soc/codecs/rt5677.c327
-rw-r--r--sound/soc/codecs/rt5677.h171
-rw-r--r--sound/soc/codecs/sgtl5000.c42
-rw-r--r--sound/soc/codecs/ssm2518.c13
-rw-r--r--sound/soc/codecs/ssm2602-i2c.c9
-rw-r--r--sound/soc/codecs/ssm2602-spi.c7
-rw-r--r--sound/soc/codecs/ssm2602.c39
-rw-r--r--sound/soc/codecs/ssm4567.c343
-rw-r--r--sound/soc/codecs/tas2552.c70
-rw-r--r--sound/soc/codecs/tlv320aic31xx.c107
-rw-r--r--sound/soc/codecs/tlv320aic31xx.h3
-rw-r--r--sound/soc/codecs/tlv320aic3x.c29
-rw-r--r--sound/soc/codecs/wm8350.c2
-rw-r--r--sound/soc/codecs/wm8741.c1
-rw-r--r--sound/soc/codecs/wm8753.c2
-rw-r--r--sound/soc/codecs/wm8804.c19
-rw-r--r--sound/soc/codecs/wm8971.c2
-rw-r--r--sound/soc/codecs/wm8994.c18
-rw-r--r--sound/soc/codecs/wm8995.c19
-rw-r--r--sound/soc/davinci/Kconfig3
-rw-r--r--sound/soc/davinci/davinci-mcasp.c79
-rw-r--r--sound/soc/davinci/edma-pcm.c2
-rw-r--r--sound/soc/fsl/Kconfig26
-rw-r--r--sound/soc/fsl/Makefile4
-rw-r--r--sound/soc/fsl/fsl-asoc-card.c574
-rw-r--r--sound/soc/fsl/fsl_asrc.c6
-rw-r--r--sound/soc/fsl/fsl_esai.c19
-rw-r--r--sound/soc/fsl/fsl_esai.h8
-rw-r--r--sound/soc/fsl/fsl_sai.c58
-rw-r--r--sound/soc/fsl/fsl_sai.h8
-rw-r--r--sound/soc/fsl/fsl_spdif.c6
-rw-r--r--sound/soc/fsl/fsl_ssi.c89
-rw-r--r--sound/soc/fsl/imx-es8328.c233
-rw-r--r--sound/soc/generic/simple-card.c229
-rw-r--r--sound/soc/intel/Makefile3
-rw-r--r--sound/soc/intel/byt-max98090.c1
-rw-r--r--sound/soc/intel/byt-rt5640.c83
-rw-r--r--sound/soc/intel/sst-atom-controls.c218
-rw-r--r--sound/soc/intel/sst-atom-controls.h416
-rw-r--r--sound/soc/intel/sst-haswell-pcm.c56
-rw-r--r--sound/soc/intel/sst-mfld-platform-compress.c38
-rw-r--r--sound/soc/intel/sst-mfld-platform-pcm.c106
-rw-r--r--sound/soc/intel/sst-mfld-platform.h58
-rw-r--r--sound/soc/omap/rx51.c2
-rw-r--r--sound/soc/rockchip/Kconfig3
-rw-r--r--sound/soc/rockchip/Makefile2
-rw-r--r--sound/soc/rockchip/rockchip_i2s.c32
-rw-r--r--sound/soc/samsung/idma.c4
-rw-r--r--sound/soc/samsung/odroidx2_max98090.c4
-rw-r--r--sound/soc/samsung/speyside.c6
-rw-r--r--sound/soc/sh/fsi.c7
-rw-r--r--sound/soc/sh/rcar/core.c6
-rw-r--r--sound/soc/sh/siu_pcm.c4
-rw-r--r--sound/soc/sirf/sirf-usp.c24
-rw-r--r--sound/soc/soc-core.c673
-rw-r--r--sound/soc/soc-dapm.c51
-rw-r--r--sound/soc/soc-generic-dmaengine-pcm.c4
-rw-r--r--sound/soc/soc-io.c28
-rw-r--r--sound/soc/soc-pcm.c2
-rw-r--r--sound/soc/tegra/tegra_max98090.c40
-rw-r--r--sound/soc/txx9/txx9aclc.c14
-rw-r--r--sound/usb/caiaq/audio.c5
-rw-r--r--sound/usb/midi.c11
-rw-r--r--sound/usb/quirks.c16
-rw-r--r--tools/lib/api/Makefile7
-rw-r--r--tools/lib/api/fd/array.c127
-rw-r--r--tools/lib/api/fd/array.h46
-rw-r--r--tools/perf/.gitignore1
-rw-r--r--tools/perf/Documentation/perf-probe.txt3
-rw-r--r--tools/perf/Documentation/perf-report.txt5
-rw-r--r--tools/perf/Documentation/perf-top.txt9
-rw-r--r--tools/perf/Makefile.perf11
-rw-r--r--tools/perf/arch/arm/tests/dwarf-unwind.c1
-rw-r--r--tools/perf/arch/arm/util/unwind-libunwind.c1
-rw-r--r--tools/perf/arch/arm64/include/perf_regs.h2
-rw-r--r--tools/perf/arch/arm64/util/unwind-libunwind.c1
-rw-r--r--tools/perf/arch/common.c9
-rw-r--r--tools/perf/arch/powerpc/Makefile2
-rw-r--r--tools/perf/arch/powerpc/util/skip-callchain-idx.c1
-rw-r--r--tools/perf/bench/futex-hash.c7
-rw-r--r--tools/perf/bench/futex-requeue.c28
-rw-r--r--tools/perf/bench/futex-wake.c15
-rw-r--r--tools/perf/bench/sched-messaging.c2
-rw-r--r--tools/perf/builtin-annotate.c77
-rw-r--r--tools/perf/builtin-buildid-cache.c44
-rw-r--r--tools/perf/builtin-diff.c6
-rw-r--r--tools/perf/builtin-evlist.c2
-rw-r--r--tools/perf/builtin-help.c20
-rw-r--r--tools/perf/builtin-inject.c33
-rw-r--r--tools/perf/builtin-kmem.c56
-rw-r--r--tools/perf/builtin-kvm.c98
-rw-r--r--tools/perf/builtin-lock.c7
-rw-r--r--tools/perf/builtin-mem.c6
-rw-r--r--tools/perf/builtin-probe.c10
-rw-r--r--tools/perf/builtin-record.c146
-rw-r--r--tools/perf/builtin-report.c23
-rw-r--r--tools/perf/builtin-sched.c9
-rw-r--r--tools/perf/builtin-script.c74
-rw-r--r--tools/perf/builtin-stat.c11
-rw-r--r--tools/perf/builtin-timechart.c8
-rw-r--r--tools/perf/builtin-top.c87
-rw-r--r--tools/perf/builtin-trace.c57
-rw-r--r--tools/perf/config/Makefile52
-rw-r--r--tools/perf/config/feature-checks/Makefile18
-rw-r--r--tools/perf/config/utilities.mak2
-rw-r--r--tools/perf/perf-with-kcore.sh259
-rw-r--r--tools/perf/perf.c10
-rw-r--r--tools/perf/perf.h3
-rw-r--r--tools/perf/tests/builtin-test.c18
-rw-r--r--tools/perf/tests/fdarray.c174
-rw-r--r--tools/perf/tests/mmap-basic.c7
-rw-r--r--tools/perf/tests/open-syscall-all-cpus.c5
-rw-r--r--tools/perf/tests/open-syscall-tp-fields.c9
-rw-r--r--tools/perf/tests/open-syscall.c3
-rw-r--r--tools/perf/tests/perf-record.c15
-rw-r--r--tools/perf/tests/pmu.c2
-rw-r--r--tools/perf/tests/rdpmc.c6
-rw-r--r--tools/perf/tests/sw-clock.c6
-rw-r--r--tools/perf/tests/switch-tracking.c572
-rw-r--r--tools/perf/tests/task-exit.c8
-rw-r--r--tools/perf/tests/tests.h3
-rw-r--r--tools/perf/ui/browsers/hists.c384
-rw-r--r--tools/perf/ui/gtk/hists.c18
-rw-r--r--tools/perf/ui/hist.c284
-rw-r--r--tools/perf/ui/stdio/hist.c4
-rw-r--r--tools/perf/util/annotate.c21
-rw-r--r--tools/perf/util/cache.h1
-rw-r--r--tools/perf/util/callchain.c240
-rw-r--r--tools/perf/util/callchain.h6
-rw-r--r--tools/perf/util/cloexec.c35
-rw-r--r--tools/perf/util/color.c16
-rw-r--r--tools/perf/util/color.h1
-rw-r--r--tools/perf/util/comm.c7
-rw-r--r--tools/perf/util/comm.h6
-rw-r--r--tools/perf/util/config.c40
-rw-r--r--tools/perf/util/data.c8
-rw-r--r--tools/perf/util/debug.c36
-rw-r--r--tools/perf/util/debug.h11
-rw-r--r--tools/perf/util/dso.c121
-rw-r--r--tools/perf/util/dso.h16
-rw-r--r--tools/perf/util/event.c14
-rw-r--r--tools/perf/util/event.h2
-rw-r--r--tools/perf/util/evlist.c236
-rw-r--r--tools/perf/util/evlist.h21
-rw-r--r--tools/perf/util/evsel.c67
-rw-r--r--tools/perf/util/evsel.h2
-rw-r--r--tools/perf/util/header.c32
-rw-r--r--tools/perf/util/hist.c22
-rw-r--r--tools/perf/util/hist.h18
-rw-r--r--tools/perf/util/kvm-stat.h1
-rw-r--r--tools/perf/util/machine.c96
-rw-r--r--tools/perf/util/machine.h26
-rw-r--r--tools/perf/util/map.c1
-rw-r--r--tools/perf/util/ordered-events.c245
-rw-r--r--tools/perf/util/ordered-events.h51
-rw-r--r--tools/perf/util/parse-events.c29
-rw-r--r--tools/perf/util/parse-events.y10
-rw-r--r--tools/perf/util/pmu.c121
-rw-r--r--tools/perf/util/pmu.h25
-rw-r--r--tools/perf/util/probe-event.c181
-rw-r--r--tools/perf/util/probe-event.h3
-rw-r--r--tools/perf/util/probe-finder.c23
-rw-r--r--tools/perf/util/python.c6
-rw-r--r--tools/perf/util/record.c40
-rw-r--r--tools/perf/util/run-command.c9
-rw-r--r--tools/perf/util/scripting-engines/trace-event-perl.c6
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c120
-rw-r--r--tools/perf/util/session.c300
-rw-r--r--tools/perf/util/session.h32
-rw-r--r--tools/perf/util/sort.c119
-rw-r--r--tools/perf/util/sort.h1
-rw-r--r--tools/perf/util/string.c90
-rw-r--r--tools/perf/util/symbol-elf.c31
-rw-r--r--tools/perf/util/symbol.c43
-rw-r--r--tools/perf/util/symbol.h14
-rw-r--r--tools/perf/util/thread.c24
-rw-r--r--tools/perf/util/thread.h10
-rw-r--r--tools/perf/util/tool.h2
-rw-r--r--tools/perf/util/trace-event-scripting.c7
-rw-r--r--tools/perf/util/trace-event.h1
-rw-r--r--tools/perf/util/util.c54
-rw-r--r--tools/perf/util/util.h23
-rwxr-xr-xtools/testing/ktest/ktest.pl61
-rw-r--r--tools/testing/ktest/sample.conf10
-rw-r--r--tools/testing/selftests/Makefile1
-rw-r--r--tools/testing/selftests/ftrace/Makefile7
-rw-r--r--tools/testing/selftests/ftrace/README82
-rwxr-xr-xtools/testing/selftests/ftrace/ftracetest253
-rw-r--r--tools/testing/selftests/ftrace/samples/fail.tc4
-rw-r--r--tools/testing/selftests/ftrace/samples/pass.tc3
-rw-r--r--tools/testing/selftests/ftrace/samples/unresolved.tc4
-rw-r--r--tools/testing/selftests/ftrace/samples/unsupported.tc3
-rw-r--r--tools/testing/selftests/ftrace/samples/untested.tc3
-rw-r--r--tools/testing/selftests/ftrace/samples/xfail.tc3
-rw-r--r--tools/testing/selftests/ftrace/test.d/00basic/basic1.tc3
-rw-r--r--tools/testing/selftests/ftrace/test.d/00basic/basic2.tc7
-rw-r--r--tools/testing/selftests/ftrace/test.d/00basic/basic3.tc8
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/add_and_remove.tc11
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/busy_check.tc13
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args.tc16
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_args.tc15
-rw-r--r--tools/testing/selftests/ftrace/test.d/template9
-rw-r--r--tools/testing/selftests/memfd/Makefile21
-rw-r--r--tools/testing/selftests/memfd/memfd_test.c36
-rw-r--r--tools/testing/selftests/powerpc/Makefile2
-rw-r--r--tools/testing/selftests/powerpc/primitives/Makefile17
l---------tools/testing/selftests/powerpc/primitives/asm/asm-compat.h1
-rw-r--r--tools/testing/selftests/powerpc/primitives/asm/ppc-opcode.h0
-rw-r--r--tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c147
l---------tools/testing/selftests/powerpc/primitives/word-at-a-time.h1
-rwxr-xr-x[-rw-r--r--]tools/testing/selftests/rcutorture/bin/config2frag.sh4
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/configcheck.sh4
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/configinit.sh4
-rw-r--r--tools/testing/selftests/rcutorture/bin/functions.sh20
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-build.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh5
-rwxr-xr-x[-rw-r--r--]tools/testing/selftests/rcutorture/bin/kvm.sh6
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/parse-build.sh5
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/parse-console.sh9
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/parse-torture.sh5
-rw-r--r--tools/testing/selftests/rcutorture/configs/lock/CFLIST3
-rw-r--r--tools/testing/selftests/rcutorture/configs/lock/LOCK026
-rw-r--r--tools/testing/selftests/rcutorture/configs/lock/LOCK02.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/lock/LOCK036
-rw-r--r--tools/testing/selftests/rcutorture/configs/lock/LOCK03.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/lock/LOCK046
-rw-r--r--tools/testing/selftests/rcutorture/configs/lock/LOCK04.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh2
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/CFLIST3
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TASKS019
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TASKS01.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TASKS025
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TASKS02.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TASKS0313
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TASKS03.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE014
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot2
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE073
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE07.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh2
-rw-r--r--tools/testing/selftests/rcutorture/doc/initrd.txt1
2001 files changed, 91928 insertions, 36940 deletions
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index 27e67a98b7be..1750fcef1ab4 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -287,6 +287,8 @@ local_ops.txt
- semantics and behavior of local atomic operations.
lockdep-design.txt
- documentation on the runtime locking correctness validator.
+locking/
+ - directory with info about kernel locking primitives
lockstat.txt
- info on collecting statistics on locks (and contention).
lockup-watchdogs.txt
diff --git a/Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_24x7 b/Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_24x7
index e78ee798d7bd..32f3f5f8bba2 100644
--- a/Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_24x7
+++ b/Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_24x7
@@ -1,6 +1,6 @@
What: /sys/bus/event_source/devices/hv_24x7/interface/catalog
Date: February 2014
-Contact: Cody P Schafer <cody@linux.vnet.ibm.com>
+Contact: Linux on PowerPC Developer List <linuxppc-dev@lists.ozlabs.org>
Description:
Provides access to the binary "24x7 catalog" provided by the
hypervisor on POWER7 and 8 systems. This catalog lists events
@@ -10,14 +10,14 @@ Description:
What: /sys/bus/event_source/devices/hv_24x7/interface/catalog_length
Date: February 2014
-Contact: Cody P Schafer <cody@linux.vnet.ibm.com>
+Contact: Linux on PowerPC Developer List <linuxppc-dev@lists.ozlabs.org>
Description:
A number equal to the length in bytes of the catalog. This is
also extractable from the provided binary "catalog" sysfs entry.
What: /sys/bus/event_source/devices/hv_24x7/interface/catalog_version
Date: February 2014
-Contact: Cody P Schafer <cody@linux.vnet.ibm.com>
+Contact: Linux on PowerPC Developer List <linuxppc-dev@lists.ozlabs.org>
Description:
Exposes the "version" field of the 24x7 catalog. This is also
extractable from the provided binary "catalog" sysfs entry.
diff --git a/Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_gpci b/Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_gpci
index 3fa58c23f13b..3ca4e554d2f9 100644
--- a/Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_gpci
+++ b/Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_gpci
@@ -1,6 +1,6 @@
What: /sys/bus/event_source/devices/hv_gpci/interface/collect_privileged
Date: February 2014
-Contact: Cody P Schafer <cody@linux.vnet.ibm.com>
+Contact: Linux on PowerPC Developer List <linuxppc-dev@lists.ozlabs.org>
Description:
'0' if the hypervisor is configured to forbid access to event
counters being accumulated by other guests and to physical
@@ -9,35 +9,35 @@ Description:
What: /sys/bus/event_source/devices/hv_gpci/interface/ga
Date: February 2014
-Contact: Cody P Schafer <cody@linux.vnet.ibm.com>
+Contact: Linux on PowerPC Developer List <linuxppc-dev@lists.ozlabs.org>
Description:
0 or 1. Indicates whether we have access to "GA" events (listed
in arch/powerpc/perf/hv-gpci.h).
What: /sys/bus/event_source/devices/hv_gpci/interface/expanded
Date: February 2014
-Contact: Cody P Schafer <cody@linux.vnet.ibm.com>
+Contact: Linux on PowerPC Developer List <linuxppc-dev@lists.ozlabs.org>
Description:
0 or 1. Indicates whether we have access to "EXPANDED" events (listed
in arch/powerpc/perf/hv-gpci.h).
What: /sys/bus/event_source/devices/hv_gpci/interface/lab
Date: February 2014
-Contact: Cody P Schafer <cody@linux.vnet.ibm.com>
+Contact: Linux on PowerPC Developer List <linuxppc-dev@lists.ozlabs.org>
Description:
0 or 1. Indicates whether we have access to "LAB" events (listed
in arch/powerpc/perf/hv-gpci.h).
What: /sys/bus/event_source/devices/hv_gpci/interface/version
Date: February 2014
-Contact: Cody P Schafer <cody@linux.vnet.ibm.com>
+Contact: Linux on PowerPC Developer List <linuxppc-dev@lists.ozlabs.org>
Description:
A number indicating the version of the gpci interface that the
hypervisor reports supporting.
What: /sys/bus/event_source/devices/hv_gpci/interface/kernel_version
Date: February 2014
-Contact: Cody P Schafer <cody@linux.vnet.ibm.com>
+Contact: Linux on PowerPC Developer List <linuxppc-dev@lists.ozlabs.org>
Description:
A number indicating the latest version of the gpci interface
that the kernel is aware of.
diff --git a/Documentation/ABI/testing/sysfs-class-cxl b/Documentation/ABI/testing/sysfs-class-cxl
new file mode 100644
index 000000000000..554405ec1955
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-cxl
@@ -0,0 +1,129 @@
+Slave contexts (eg. /sys/class/cxl/afu0.0s):
+
+What: /sys/class/cxl/<afu>/irqs_max
+Date: September 2014
+Contact: linuxppc-dev@lists.ozlabs.org
+Description: read/write
+		Decimal value of the maximum number of interrupts that can be
+		requested by userspace. The default on probe is the maximum
+		that the hardware can support (eg. 2037). Writing a value here
+		limits userspace applications to that many userspace interrupts.
+		Must be >= irqs_min.
+
+What: /sys/class/cxl/<afu>/irqs_min
+Date: September 2014
+Contact: linuxppc-dev@lists.ozlabs.org
+Description: read only
+ Decimal value of the minimum number of interrupts that
+ userspace must request on a CXL_START_WORK ioctl. Userspace may
+ omit the num_interrupts field in the START_WORK IOCTL to get
+ this minimum automatically.
+
+What: /sys/class/cxl/<afu>/mmio_size
+Date: September 2014
+Contact: linuxppc-dev@lists.ozlabs.org
+Description: read only
+ Decimal value of the size of the MMIO space that may be mmaped
+ by userspace.
+
+What: /sys/class/cxl/<afu>/modes_supported
+Date: September 2014
+Contact: linuxppc-dev@lists.ozlabs.org
+Description: read only
+ List of the modes this AFU supports. One per line.
+ Valid entries are: "dedicated_process" and "afu_directed"
+
+What: /sys/class/cxl/<afu>/mode
+Date: September 2014
+Contact: linuxppc-dev@lists.ozlabs.org
+Description: read/write
+ The current mode the AFU is using. Will be one of the modes
+ given in modes_supported. Writing will change the mode
+ provided that no user contexts are attached.
+
+
+What: /sys/class/cxl/<afu>/prefault_mode
+Date: September 2014
+Contact: linuxppc-dev@lists.ozlabs.org
+Description: read/write
+ Set the mode for prefaulting in segments into the segment table
+ when performing the START_WORK ioctl. Possible values:
+ none: No prefaulting (default)
+ work_element_descriptor: Treat the work element
+ descriptor as an effective address and
+ prefault what it points to.
+			all: all segments that the process calling START_WORK maps.
+
+What: /sys/class/cxl/<afu>/reset
+Date: September 2014
+Contact: linuxppc-dev@lists.ozlabs.org
+Description: write only
+		Writing 1 here will reset the AFU provided there are no
+ contexts active on the AFU.
+
+What: /sys/class/cxl/<afu>/api_version
+Date: September 2014
+Contact: linuxppc-dev@lists.ozlabs.org
+Description: read only
+ Decimal value of the current version of the kernel/user API.
+
+What: /sys/class/cxl/<afu>/api_version_com
+Date: September 2014
+Contact: linuxppc-dev@lists.ozlabs.org
+Description: read only
+		Decimal value of the lowest version of the userspace API
+		that this kernel supports.
+
+
+
+Master contexts (eg. /sys/class/cxl/afu0.0m)
+
+What: /sys/class/cxl/<afu>m/mmio_size
+Date: September 2014
+Contact: linuxppc-dev@lists.ozlabs.org
+Description: read only
+ Decimal value of the size of the MMIO space that may be mmaped
+		by userspace. This includes the space of all slave contexts as well.
+
+What: /sys/class/cxl/<afu>m/pp_mmio_len
+Date: September 2014
+Contact: linuxppc-dev@lists.ozlabs.org
+Description: read only
+ Decimal value of the Per Process MMIO space length.
+
+What: /sys/class/cxl/<afu>m/pp_mmio_off
+Date: September 2014
+Contact: linuxppc-dev@lists.ozlabs.org
+Description: read only
+ Decimal value of the Per Process MMIO space offset.
+
+
+Card info (eg. /sys/class/cxl/card0)
+
+What: /sys/class/cxl/<card>/caia_version
+Date: September 2014
+Contact: linuxppc-dev@lists.ozlabs.org
+Description: read only
+ Identifies the CAIA Version the card implements.
+
+What: /sys/class/cxl/<card>/psl_version
+Date: September 2014
+Contact: linuxppc-dev@lists.ozlabs.org
+Description: read only
+ Identifies the revision level of the PSL.
+
+What: /sys/class/cxl/<card>/base_image
+Date: September 2014
+Contact: linuxppc-dev@lists.ozlabs.org
+Description: read only
+ Identifies the revision level of the base image for devices
+ that support loadable PSLs. For FPGAs this field identifies
+ the image contained in the on-adapter flash which is loaded
+ during the initial program load.
+
+What: /sys/class/cxl/<card>/image_loaded
+Date: September 2014
+Contact: linuxppc-dev@lists.ozlabs.org
+Description: read only
+ Will return "user" or "factory" depending on the image loaded
+ onto the card.
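
The attributes documented above are plain sysfs files, so they can be read like any other file. A minimal userspace sketch, assuming a hypothetical afu0.0s slave context (not part of the patch), that prints irqs_max:

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/class/cxl/afu0.0s/irqs_max", "r");
            int irqs_max;

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            if (fscanf(f, "%d", &irqs_max) == 1)
                    printf("irqs_max: %d\n", irqs_max);  /* decimal, >= irqs_min */
            fclose(f);
            return 0;
    }
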
diff --git a/Documentation/DocBook/kernel-locking.tmpl b/Documentation/DocBook/kernel-locking.tmpl
index e584ee12a1e7..7c9cc4846cb6 100644
--- a/Documentation/DocBook/kernel-locking.tmpl
+++ b/Documentation/DocBook/kernel-locking.tmpl
@@ -1972,7 +1972,7 @@ machines due to caching.
<itemizedlist>
<listitem>
<para>
- <filename>Documentation/spinlocks.txt</filename>:
+ <filename>Documentation/locking/spinlocks.txt</filename>:
Linus Torvalds' spinlocking tutorial in the kernel sources.
</para>
</listitem>
diff --git a/Documentation/DocBook/media/v4l/compat.xml b/Documentation/DocBook/media/v4l/compat.xml
index 3a626d1b8f2e..07ffc76553ba 100644
--- a/Documentation/DocBook/media/v4l/compat.xml
+++ b/Documentation/DocBook/media/v4l/compat.xml
@@ -2566,6 +2566,12 @@ fields changed from _s32 to _u32.
<para>Added compound control types and &VIDIOC-QUERY-EXT-CTRL;.
</para>
</listitem>
+ <title>V4L2 in Linux 3.18</title>
+ <orderedlist>
+ <listitem>
+ <para>Added <constant>V4L2_CID_PAN_SPEED</constant> and
+ <constant>V4L2_CID_TILT_SPEED</constant> camera controls.</para>
+ </listitem>
</orderedlist>
</section>
diff --git a/Documentation/DocBook/media/v4l/controls.xml b/Documentation/DocBook/media/v4l/controls.xml
index 9f5ffd85560b..e013e4bf244c 100644
--- a/Documentation/DocBook/media/v4l/controls.xml
+++ b/Documentation/DocBook/media/v4l/controls.xml
@@ -3965,6 +3965,27 @@ by exposure, white balance or focus controls.</entry>
</row>
<row><entry></entry></row>
+ <row>
+ <entry spanname="id"><constant>V4L2_CID_PAN_SPEED</constant>&nbsp;</entry>
+ <entry>integer</entry>
+ </row><row><entry spanname="descr">This control turns the
+camera horizontally at the specified speed. The unit is undefined. A
+positive value moves the camera to the right (clockwise when viewed
+from above), a negative value to the left. A value of zero stops the motion
+if one is in progress and has no effect otherwise.</entry>
+ </row>
+ <row><entry></entry></row>
+
+ <row>
+ <entry spanname="id"><constant>V4L2_CID_TILT_SPEED</constant>&nbsp;</entry>
+ <entry>integer</entry>
+ </row><row><entry spanname="descr">This control turns the
+camera vertically at the specified speed. The unit is undefined. A
+positive value moves the camera up, a negative value down. A value of zero
+stops the motion if one is in progress and has no effect otherwise.</entry>
+ </row>
+ <row><entry></entry></row>
+
</tbody>
</tgroup>
</table>
@@ -4790,6 +4811,40 @@ interface and may change in the future.</para>
conversion.
</entry>
</row>
+ <row>
+ <entry spanname="id"><constant>V4L2_CID_TEST_PATTERN_RED</constant></entry>
+ <entry>integer</entry>
+ </row>
+ <row>
+ <entry spanname="descr">Test pattern red colour component.
+ </entry>
+ </row>
+ <row>
+ <entry spanname="id"><constant>V4L2_CID_TEST_PATTERN_GREENR</constant></entry>
+ <entry>integer</entry>
+ </row>
+ <row>
+ <entry spanname="descr">Test pattern green (next to red)
+ colour component.
+ </entry>
+ </row>
+ <row>
+ <entry spanname="id"><constant>V4L2_CID_TEST_PATTERN_BLUE</constant></entry>
+ <entry>integer</entry>
+ </row>
+ <row>
+ <entry spanname="descr">Test pattern blue colour component.
+ </entry>
+ </row>
+ <row>
+ <entry spanname="id"><constant>V4L2_CID_TEST_PATTERN_GREENB</constant></entry>
+ <entry>integer</entry>
+ </row>
+ <row>
+ <entry spanname="descr">Test pattern green (next to blue)
+ colour component.
+ </entry>
+ </row>
<row><entry></entry></row>
</tbody>
</tgroup>
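
The new pan/tilt speed controls are ordinary V4L2 controls, so they are driven through VIDIOC_S_CTRL. A minimal sketch, assuming a hypothetical /dev/video0 device whose driver implements V4L2_CID_PAN_SPEED:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void)
    {
            int fd = open("/dev/video0", O_RDWR);
            struct v4l2_control ctrl = {
                    .id    = V4L2_CID_PAN_SPEED,
                    .value = 1,     /* positive pans right; 0 stops the motion */
            };

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (ioctl(fd, VIDIOC_S_CTRL, &ctrl) < 0)
                    perror("VIDIOC_S_CTRL");
            return 0;
    }
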
diff --git a/Documentation/DocBook/media/v4l/pixfmt-packed-rgb.xml b/Documentation/DocBook/media/v4l/pixfmt-packed-rgb.xml
index 2aae8e9452a4..6ab4f0f3db64 100644
--- a/Documentation/DocBook/media/v4l/pixfmt-packed-rgb.xml
+++ b/Documentation/DocBook/media/v4l/pixfmt-packed-rgb.xml
@@ -237,9 +237,9 @@ for a pixel lie next to each other in memory.</para>
<entry>g<subscript>4</subscript></entry>
<entry>g<subscript>3</subscript></entry>
</row>
- <row id="V4L2-PIX-FMT-RGB555X">
- <entry><constant>V4L2_PIX_FMT_RGB555X</constant></entry>
- <entry>'RGBQ'</entry>
+ <row id="V4L2-PIX-FMT-ARGB555X">
+ <entry><constant>V4L2_PIX_FMT_ARGB555X</constant></entry>
+ <entry>'AR15' | (1 &lt;&lt; 31)</entry>
<entry></entry>
<entry>a</entry>
<entry>r<subscript>4</subscript></entry>
@@ -259,6 +259,28 @@ for a pixel lie next to each other in memory.</para>
<entry>b<subscript>1</subscript></entry>
<entry>b<subscript>0</subscript></entry>
</row>
+ <row id="V4L2-PIX-FMT-XRGB555X">
+ <entry><constant>V4L2_PIX_FMT_XRGB555X</constant></entry>
+ <entry>'XR15' | (1 &lt;&lt; 31)</entry>
+ <entry></entry>
+ <entry>-</entry>
+ <entry>r<subscript>4</subscript></entry>
+ <entry>r<subscript>3</subscript></entry>
+ <entry>r<subscript>2</subscript></entry>
+ <entry>r<subscript>1</subscript></entry>
+ <entry>r<subscript>0</subscript></entry>
+ <entry>g<subscript>4</subscript></entry>
+ <entry>g<subscript>3</subscript></entry>
+ <entry></entry>
+ <entry>g<subscript>2</subscript></entry>
+ <entry>g<subscript>1</subscript></entry>
+ <entry>g<subscript>0</subscript></entry>
+ <entry>b<subscript>4</subscript></entry>
+ <entry>b<subscript>3</subscript></entry>
+ <entry>b<subscript>2</subscript></entry>
+ <entry>b<subscript>1</subscript></entry>
+ <entry>b<subscript>0</subscript></entry>
+ </row>
<row id="V4L2-PIX-FMT-RGB565X">
<entry><constant>V4L2_PIX_FMT_RGB565X</constant></entry>
<entry>'RGBR'</entry>
@@ -464,7 +486,7 @@ for a pixel lie next to each other in memory.</para>
</row>
<row id="V4L2-PIX-FMT-ARGB32">
<entry><constant>V4L2_PIX_FMT_ARGB32</constant></entry>
- <entry>'AX24'</entry>
+ <entry>'BA24'</entry>
<entry></entry>
<entry>a<subscript>7</subscript></entry>
<entry>a<subscript>6</subscript></entry>
@@ -800,6 +822,28 @@ image</title>
<entry>g<subscript>4</subscript></entry>
<entry>g<subscript>3</subscript></entry>
</row>
+ <row id="V4L2-PIX-FMT-RGB555X">
+ <entry><constant>V4L2_PIX_FMT_RGB555X</constant></entry>
+ <entry>'RGBQ'</entry>
+ <entry></entry>
+ <entry>a</entry>
+ <entry>r<subscript>4</subscript></entry>
+ <entry>r<subscript>3</subscript></entry>
+ <entry>r<subscript>2</subscript></entry>
+ <entry>r<subscript>1</subscript></entry>
+ <entry>r<subscript>0</subscript></entry>
+ <entry>g<subscript>4</subscript></entry>
+ <entry>g<subscript>3</subscript></entry>
+ <entry></entry>
+ <entry>g<subscript>2</subscript></entry>
+ <entry>g<subscript>1</subscript></entry>
+ <entry>g<subscript>0</subscript></entry>
+ <entry>b<subscript>4</subscript></entry>
+ <entry>b<subscript>3</subscript></entry>
+ <entry>b<subscript>2</subscript></entry>
+ <entry>b<subscript>1</subscript></entry>
+ <entry>b<subscript>0</subscript></entry>
+ </row>
<row id="V4L2-PIX-FMT-BGR32">
<entry><constant>V4L2_PIX_FMT_BGR32</constant></entry>
<entry>'BGR4'</entry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-dqevent.xml b/Documentation/DocBook/media/v4l/vidioc-dqevent.xml
index cb7732582f03..b036f8963353 100644
--- a/Documentation/DocBook/media/v4l/vidioc-dqevent.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-dqevent.xml
@@ -76,21 +76,22 @@
<entry></entry>
<entry>&v4l2-event-vsync;</entry>
<entry><structfield>vsync</structfield></entry>
- <entry>Event data for event V4L2_EVENT_VSYNC.
+ <entry>Event data for event <constant>V4L2_EVENT_VSYNC</constant>.
</entry>
</row>
<row>
<entry></entry>
<entry>&v4l2-event-ctrl;</entry>
<entry><structfield>ctrl</structfield></entry>
- <entry>Event data for event V4L2_EVENT_CTRL.
+ <entry>Event data for event <constant>V4L2_EVENT_CTRL</constant>.
</entry>
</row>
<row>
<entry></entry>
<entry>&v4l2-event-frame-sync;</entry>
<entry><structfield>frame_sync</structfield></entry>
- <entry>Event data for event V4L2_EVENT_FRAME_SYNC.</entry>
+ <entry>Event data for event
+ <constant>V4L2_EVENT_FRAME_SYNC</constant>.</entry>
</row>
<row>
<entry></entry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-edid.xml b/Documentation/DocBook/media/v4l/vidioc-g-edid.xml
index ce4563b87131..6df40db4c8ba 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-edid.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-edid.xml
@@ -24,7 +24,7 @@
<funcdef>int <function>ioctl</function></funcdef>
<paramdef>int <parameter>fd</parameter></paramdef>
<paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>const struct v4l2_edid *<parameter>argp</parameter></paramdef>
+ <paramdef>struct v4l2_edid *<parameter>argp</parameter></paramdef>
</funcprototype>
</funcsynopsis>
</refsynopsisdiv>
@@ -125,17 +125,17 @@
<structfield>blocks</structfield> is 0, then the EDID is disabled or erased.</entry>
</row>
<row>
- <entry>__u8&nbsp;*</entry>
- <entry><structfield>edid</structfield></entry>
- <entry>Pointer to memory that contains the EDID. The minimum size is
- <structfield>blocks</structfield>&nbsp;*&nbsp;128.</entry>
- </row>
- <row>
<entry>__u32</entry>
<entry><structfield>reserved</structfield>[5]</entry>
<entry>Reserved for future extensions. Applications and drivers must
set the array to zero.</entry>
</row>
+ <row>
+ <entry>__u8&nbsp;*</entry>
+ <entry><structfield>edid</structfield></entry>
+ <entry>Pointer to memory that contains the EDID. The minimum size is
+ <structfield>blocks</structfield>&nbsp;*&nbsp;128.</entry>
+ </row>
</tbody>
</tgroup>
</table>
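
The v4l2_edid structure shown in this table is filled in by the application before the ioctl; only the position of the edid pointer changed, not its use. A minimal sketch of reading two EDID blocks, assuming a hypothetical receiver at /dev/video0 using pad 0:

    #include <string.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void)
    {
            unsigned char buf[2 * 128];     /* minimum size is blocks * 128 */
            struct v4l2_edid ed;
            int fd = open("/dev/video0", O_RDWR);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            memset(&ed, 0, sizeof(ed));     /* also zeroes reserved[] */
            ed.pad = 0;
            ed.start_block = 0;
            ed.blocks = 2;
            ed.edid = buf;
            if (ioctl(fd, VIDIOC_G_EDID, &ed) < 0)
                    perror("VIDIOC_G_EDID");
            else
                    printf("read %u EDID block(s)\n", ed.blocks);
            return 0;
    }
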
diff --git a/Documentation/DocBook/media/v4l/vidioc-subscribe-event.xml b/Documentation/DocBook/media/v4l/vidioc-subscribe-event.xml
index 9f6095608837..d7c9365ecdbe 100644
--- a/Documentation/DocBook/media/v4l/vidioc-subscribe-event.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-subscribe-event.xml
@@ -176,7 +176,7 @@
</row>
<row>
<entry><constant>V4L2_EVENT_MOTION_DET</constant></entry>
- <entry>5</entry>
+ <entry>6</entry>
<entry>
<para>Triggered whenever the motion detection state for one or more of the regions
changes. This event has a &v4l2-event-motion-det; associated with it.</para>
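
Motion-detection events use the standard event API: subscribe first, then wait for an exceptional condition and dequeue. A minimal sketch, assuming a hypothetical /dev/video0 driver that supports V4L2_EVENT_MOTION_DET:

    #include <string.h>
    #include <fcntl.h>
    #include <poll.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void)
    {
            struct v4l2_event_subscription sub;
            struct v4l2_event ev;
            struct pollfd pfd;
            int fd = open("/dev/video0", O_RDWR);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            memset(&sub, 0, sizeof(sub));
            sub.type = V4L2_EVENT_MOTION_DET;
            if (ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0) {
                    perror("VIDIOC_SUBSCRIBE_EVENT");
                    return 1;
            }
            pfd.fd = fd;
            pfd.events = POLLPRI;           /* events are signalled as POLLPRI */
            if (poll(&pfd, 1, -1) > 0 &&
                ioctl(fd, VIDIOC_DQEVENT, &ev) == 0)
                    printf("event type %u, sequence %u\n", ev.type, ev.sequence);
            return 0;
    }
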
diff --git a/Documentation/DocBook/writing-an-alsa-driver.tmpl b/Documentation/DocBook/writing-an-alsa-driver.tmpl
index 6f639d9530b5..784793df81ed 100644
--- a/Documentation/DocBook/writing-an-alsa-driver.tmpl
+++ b/Documentation/DocBook/writing-an-alsa-driver.tmpl
@@ -2742,7 +2742,9 @@ struct _snd_pcm_runtime {
<para>
Another note is that this callback is non-atomic
- (schedulable). This is important, because the
+          (schedulable) by default, i.e. when no
+          <structfield>nonatomic</structfield> flag is set.
+ This is important, because the
<structfield>trigger</structfield> callback
is atomic (non-schedulable). That is, mutexes or any
schedule-related functions are not available in
@@ -2900,8 +2902,9 @@ struct _snd_pcm_runtime {
</para>
<para>
- As mentioned, this callback is atomic. You cannot call
- functions which may sleep.
+          As mentioned, this callback is atomic by default unless the
+          <structfield>nonatomic</structfield> flag is set, so
+ you cannot call functions which may sleep.
The trigger callback should be as minimal as possible,
just really triggering the DMA. The other stuff should be
initialized hw_params and prepare callbacks properly
@@ -2936,7 +2939,7 @@ struct _snd_pcm_runtime {
</para>
<para>
- This callback is also atomic.
+          This callback is also atomic by default.
</para>
</section>
@@ -2972,7 +2975,7 @@ struct _snd_pcm_runtime {
is useful only for such a purpose.
</para>
<para>
- This callback is atomic.
+          This callback is atomic by default.
</para>
</section>
@@ -3175,6 +3178,21 @@ struct _snd_pcm_runtime {
called with local interrupts disabled.
</para>
+ <para>
+ The recent changes in PCM core code, however, allow all PCM
+        The recent changes in the PCM core code, however, allow all PCM
+        operations to be non-atomic. This assumes that all caller
+        sides are in non-atomic contexts. For example, the function
+        <function>snd_pcm_period_elapsed()</function> is typically
+        called from the interrupt handler. But if you set up the
+        driver to use a threaded interrupt handler, this call can be
+        in a non-atomic context, too. In such a case, you can set the
+        <structfield>nonatomic</structfield> field of the
+        <structname>snd_pcm</structname> object after creating it.
+        When this flag is set, mutexes and rwsems are used internally
+        in the PCM core instead of spinlocks and rwlocks, so that you
+        can call all PCM functions safely in a non-atomic context.
+
</section>
<section id="pcm-interface-constraints">
<title>Constraints</title>
diff --git a/Documentation/RCU/stallwarn.txt b/Documentation/RCU/stallwarn.txt
index 68fe3ad27015..ef5a2fd4ff70 100644
--- a/Documentation/RCU/stallwarn.txt
+++ b/Documentation/RCU/stallwarn.txt
@@ -56,8 +56,20 @@ RCU_STALL_RAT_DELAY
two jiffies. (This is a cpp macro, not a kernel configuration
parameter.)
-When a CPU detects that it is stalling, it will print a message similar
-to the following:
+rcupdate.rcu_task_stall_timeout
+
+ This boot/sysfs parameter controls the RCU-tasks stall warning
+ interval. A value of zero or less suppresses RCU-tasks stall
+ warnings. A positive value sets the stall-warning interval
+	in jiffies. An RCU-tasks stall warning starts with the line:
+
+ INFO: rcu_tasks detected stalls on tasks:
+
+ And continues with the output of sched_show_task() for each
+ task stalling the current RCU-tasks grace period.
+
+For non-RCU-tasks flavors of RCU, when a CPU detects that it is stalling,
+it will print a message similar to the following:
INFO: rcu_sched_state detected stall on CPU 5 (t=2500 jiffies)
@@ -174,8 +186,12 @@ o A CPU looping with preemption disabled. This condition can
o A CPU looping with bottom halves disabled. This condition can
result in RCU-sched and RCU-bh stalls.
-o For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the kernel
- without invoking schedule().
+o For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the
+ kernel without invoking schedule(). Note that cond_resched()
+ does not necessarily prevent RCU CPU stall warnings. Therefore,
+ if the looping in the kernel is really expected and desirable
+ behavior, you might need to replace some of the cond_resched()
+ calls with calls to cond_resched_rcu_qs().
o A CPU-bound real-time task in a CONFIG_PREEMPT kernel, which might
happen to preempt a low-priority task in the middle of an RCU
@@ -208,11 +224,10 @@ o A hardware failure. This is quite unlikely, but has occurred
This resulted in a series of RCU CPU stall warnings, eventually
leading the realization that the CPU had failed.
-The RCU, RCU-sched, and RCU-bh implementations have CPU stall warning.
-SRCU does not have its own CPU stall warnings, but its calls to
-synchronize_sched() will result in RCU-sched detecting RCU-sched-related
-CPU stalls. Please note that RCU only detects CPU stalls when there is
-a grace period in progress. No grace period, no CPU stall warnings.
+The RCU, RCU-sched, RCU-bh, and RCU-tasks implementations have CPU stall
+warnings. Note that SRCU does -not- have CPU stall warnings. Please note
+that RCU only detects CPU stalls when there is a grace period in progress.
+No grace period, no CPU stall warnings.
To diagnose the cause of the stall, inspect the stack traces.
The offending function will usually be near the top of the stack.
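
A minimal sketch of the cond_resched_rcu_qs() advice above, for a hypothetical !CONFIG_PREEMPT kernel thread that loops for a long time on purpose (my_do_scan_pass() is an assumed helper, not part of this patch):

    #include <linux/kthread.h>
    #include <linux/rcupdate.h>

    void my_do_scan_pass(void);     /* assumed per-pass work, defined elsewhere */

    static int my_scan_thread(void *unused)
    {
            while (!kthread_should_stop()) {
                    my_do_scan_pass();
                    /* cond_resched() alone may not avert RCU CPU stall
                     * warnings; this variant also reports a quiescent
                     * state to RCU. */
                    cond_resched_rcu_qs();
            }
            return 0;
    }
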
diff --git a/Documentation/devicetree/bindings/hwmon/ntc_thermistor.txt b/Documentation/devicetree/bindings/hwmon/ntc_thermistor.txt
index 2391e5c41999..fcca8e744f41 100644
--- a/Documentation/devicetree/bindings/hwmon/ntc_thermistor.txt
+++ b/Documentation/devicetree/bindings/hwmon/ntc_thermistor.txt
@@ -25,6 +25,9 @@ Requires node properties:
- "io-channels" Channel node of ADC to be used for
conversion.
+Optional node properties:
+- "#thermal-sensor-cells"	Used to expose the thermistor to the thermal framework.
+
Read more about iio bindings at
Documentation/devicetree/bindings/iio/iio-bindings.txt
diff --git a/Documentation/devicetree/bindings/media/hix5hd2-ir.txt b/Documentation/devicetree/bindings/media/hix5hd2-ir.txt
new file mode 100644
index 000000000000..fb5e7606643a
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/hix5hd2-ir.txt
@@ -0,0 +1,25 @@
+Device-Tree bindings for hix5hd2 ir IP
+
+Required properties:
+ - compatible: Should contain "hisilicon,hix5hd2-ir".
+ - reg: Base physical address of the controller and length of memory
+ mapped region.
+ - interrupts: interrupt-specifier for the sole interrupt generated by
+ the device. The interrupt specifier format depends on the interrupt
+ controller parent.
+ - clocks: clock phandle and specifier pair.
+ - hisilicon,power-syscon: phandle of syscon used to control power.
+
+Optional properties:
+ - linux,rc-map-name : Remote control map name.
+
+Example node:
+
+ ir: ir@f8001000 {
+ compatible = "hisilicon,hix5hd2-ir";
+ reg = <0xf8001000 0x1000>;
+ interrupts = <0 47 4>;
+ clocks = <&clock HIX5HD2_FIXED_24M>;
+ hisilicon,power-syscon = <&sysctrl>;
+ linux,rc-map-name = "rc-tivo";
+ };
diff --git a/Documentation/devicetree/bindings/mmc/mmc.txt b/Documentation/devicetree/bindings/mmc/mmc.txt
index 431716e37a39..b52628b18a53 100644
--- a/Documentation/devicetree/bindings/mmc/mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/mmc.txt
@@ -40,6 +40,8 @@ Optional properties:
- mmc-hs200-1_2v: eMMC HS200 mode(1.2V I/O) is supported
- mmc-hs400-1_8v: eMMC HS400 mode(1.8V I/O) is supported
- mmc-hs400-1_2v: eMMC HS400 mode(1.2V I/O) is supported
+- dsr: Value the card's (optional) Driver Stage Register (DSR) should be
+ programmed with. Valid range: [0 .. 0xffff].
*NOTE* on CD and WP polarity. To use common for all SD/MMC host controllers line
polarity properties, we have to fix the meaning of the "normal" and "inverted"
diff --git a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
index c559f3f36309..c327c2d6f23d 100644
--- a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
@@ -10,12 +10,14 @@ extensions to the Synopsys Designware Mobile Storage Host Controller.
Required Properties:
* compatible: should be
- - "rockchip,rk2928-dw-mshc": for Rockchip RK2928 and following
+ - "rockchip,rk2928-dw-mshc": for Rockchip RK2928 and following,
+ before RK3288
+ - "rockchip,rk3288-dw-mshc": for Rockchip RK3288
Example:
rkdwmmc0@12200000 {
- compatible = "rockchip,rk2928-dw-mshc";
+ compatible = "rockchip,rk3288-dw-mshc";
reg = <0x12200000 0x1000>;
interrupts = <0 75 0>;
#address-cells = <1>;
diff --git a/Documentation/devicetree/bindings/mmc/tmio_mmc.txt b/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
index fa0f327cde01..400b640fabc7 100644
--- a/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
@@ -19,6 +19,9 @@ Required properties:
"renesas,sdhi-r8a7779" - SDHI IP on R8A7779 SoC
"renesas,sdhi-r8a7790" - SDHI IP on R8A7790 SoC
"renesas,sdhi-r8a7791" - SDHI IP on R8A7791 SoC
+ "renesas,sdhi-r8a7792" - SDHI IP on R8A7792 SoC
+ "renesas,sdhi-r8a7793" - SDHI IP on R8A7793 SoC
+ "renesas,sdhi-r8a7794" - SDHI IP on R8A7794 SoC
Optional properties:
- toshiba,mmc-wrprotect-disable: write-protect detection is unavailable
diff --git a/Documentation/devicetree/bindings/net/apm-xgene-enet.txt b/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
index ebcad25efd0a..cfcc52705ed8 100644
--- a/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
+++ b/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
@@ -3,7 +3,7 @@ APM X-Gene SoC Ethernet nodes
Ethernet nodes are defined to describe on-chip ethernet interfaces in
APM X-Gene SoC.
-Required properties:
+Required properties for all the ethernet interfaces:
- compatible: Should be "apm,xgene-enet"
- reg: Address and length of the register set for the device. It contains the
information of registers in the same order as described by reg-names
@@ -15,6 +15,8 @@ Required properties:
- clocks: Reference to the clock entry.
- local-mac-address: MAC address assigned to this device
- phy-connection-type: Interface type between ethernet device and PHY device
+
+Required properties for ethernet interfaces that have an external PHY:
- phy-handle: Reference to a PHY node connected to this device
- mdio: Device tree subnode with the following required properties:
diff --git a/Documentation/devicetree/bindings/net/micrel.txt b/Documentation/devicetree/bindings/net/micrel.txt
index 98a3e61f9ee8..e1d99b95c4ec 100644
--- a/Documentation/devicetree/bindings/net/micrel.txt
+++ b/Documentation/devicetree/bindings/net/micrel.txt
@@ -16,3 +16,9 @@ Optional properties:
KSZ8051: register 0x1f, bits 5..4
See the respective PHY datasheet for the mode values.
+
+ - clocks, clock-names: contains clocks according to the common clock bindings.
+
+ supported clocks:
+ - KSZ8021, KSZ8031: "rmii-ref": The RMII reference input clock. Used
+ to determine the XI input clock.
diff --git a/Documentation/devicetree/bindings/pci/fsl,pci.txt b/Documentation/devicetree/bindings/pci/fsl,pci.txt
new file mode 100644
index 000000000000..d8ac4a768e7e
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/fsl,pci.txt
@@ -0,0 +1,27 @@
+* Bus Enumeration by Freescale PCI-X Agent
+
+Typically any Freescale PCI-X bridge hardware strapped into Agent mode
+is prevented from enumerating the bus. The PrPMC form-factor requires
+all mezzanines to be PCI-X Agents, but one per system may still
+enumerate the bus.
+
+The property defined below will allow a PCI-X bridge to be used for bus
+enumeration despite being strapped into Agent mode.
+
+Required properties:
+- fsl,pci-agent-force-enum : There is no value associated with this
+ property. The property itself is treated as a boolean.
+
+Example:
+
+ /* PCI-X bridge known to be PrPMC Monarch */
+ pci0: pci@ef008000 {
+ fsl,pci-agent-force-enum;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ compatible = "fsl,mpc8540-pcix", "fsl,mpc8540-pci";
+ device_type = "pci";
+ ...
+ ...
+ };
diff --git a/Documentation/devicetree/bindings/sound/adi,ssm2602.txt b/Documentation/devicetree/bindings/sound/adi,ssm2602.txt
new file mode 100644
index 000000000000..3b3302fe399b
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/adi,ssm2602.txt
@@ -0,0 +1,19 @@
+Analog Devices SSM2602, SSM2603 and SSM2604 I2S audio CODEC devices
+
+The SSM2602 supports both I2C and SPI as the configuration interface;
+the selection is made by the MODE strap-in pin.
+The SSM2603 and SSM2604 only support I2C as the configuration interface.
+
+Required properties:
+
+ - compatible : One of "adi,ssm2602", "adi,ssm2603" or "adi,ssm2604"
+
+ - reg : the I2C address of the device for I2C, the chip select
+ number for SPI.
+
+ Example:
+
+ ssm2602: ssm2602@1a {
+ compatible = "adi,ssm2602";
+ reg = <0x1a>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/cs35l32.txt b/Documentation/devicetree/bindings/sound/cs35l32.txt
new file mode 100644
index 000000000000..1417d3f5cc22
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/cs35l32.txt
@@ -0,0 +1,62 @@
+CS35L32 audio CODEC
+
+Required properties:
+
+ - compatible : "cirrus,cs35l32"
+
+ - reg : the I2C address of the device for I2C. Address is determined by the level
+ of the AD0 pin. Level 0 is 0x40 while Level 1 is 0x41.
+
+ - VA-supply, VP-supply : power supplies for the device,
+ as covered in Documentation/devicetree/bindings/regulator/regulator.txt.
+
+Optional properties:
+
+ - reset-gpios : a GPIO spec for the reset pin. If specified, it will be
+ deasserted before communication to the codec starts.
+
+ - cirrus,boost-manager : Boost voltage control.
+ 0 = Automatically managed. Boost-converter output voltage is the higher
+ of the two: Class G or adaptive LED voltage.
+ 1 = Automatically managed irrespective of audio, adapting for low-power
+ dissipation when LEDs are ON, and operating in Fixed-Boost Bypass Mode
+ if LEDs are OFF (VBST = VP).
+ 2 = (Default) Boost voltage fixed in Bypass Mode (VBST = VP).
+ 3 = Boost voltage fixed at 5 V.
+
+ - cirrus,sdout-datacfg : Data configuration for dual CS35L32 applications only.
+ Determines the data packed in a two-CS35L32 configuration.
+ 0 = Left/right channels VMON[11:0], IMON[11:0], VPMON[7:0].
+ 1 = Left/right channels VMON[11:0], IMON[11:0], STATUS.
+ 2 = (Default) left/right channels VMON[15:0], IMON [15:0].
+ 3 = Left/right channels VPMON[7:0], STATUS.
+
+ - cirrus,sdout-share : SDOUT sharing. Determines whether one or two CS35L32
+ devices are on board sharing SDOUT.
+ 0 = (Default) One IC.
+ 1 = Two ICs.
+
+ - cirrus,battery-recovery : Low battery nominal recovery threshold, rising VP.
+ 0 = 3.1V
+ 1 = 3.2V
+ 2 = 3.3V (Default)
+ 3 = 3.4V
+
+ - cirrus,battery-threshold : Low battery nominal threshold, falling VP.
+ 0 = 3.1V
+ 1 = 3.2V
+ 2 = 3.3V
+ 3 = 3.4V (Default)
+ 4 = 3.5V
+ 5 = 3.6V
+
+Example:
+
+codec: codec@40 {
+ compatible = "cirrus,cs35l32";
+ reg = <0x40>;
+ reset-gpios = <&gpio 10 0>;
+ cirrus,boost-manager = <0x03>;
+ cirrus,sdout-datacfg = <0x02>;
+ VA-supply = <&reg_audio>;
+};
diff --git a/Documentation/devicetree/bindings/sound/es8328.txt b/Documentation/devicetree/bindings/sound/es8328.txt
new file mode 100644
index 000000000000..30ea8a318ae9
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/es8328.txt
@@ -0,0 +1,38 @@
+Everest ES8328 audio CODEC
+
+This device supports both I2C and SPI.
+
+Required properties:
+
+ - compatible : "everest,es8328"
+ - DVDD-supply : Regulator providing digital core supply voltage 1.8 - 3.6V
+ - AVDD-supply : Regulator providing analog supply voltage 3.3V
+ - PVDD-supply : Regulator providing digital IO supply voltage 1.8 - 3.6V
+ - HPVDD-supply : Regulator providing analog output voltage 3.3V
+ - clocks : A 22.5792 or 11.2896 MHz clock
+ - reg : the I2C address of the device for I2C, the chip select number for SPI
+
+Pins on the device (for linking into audio routes):
+
+ * LOUT1
+ * LOUT2
+ * ROUT1
+ * ROUT2
+ * LINPUT1
+ * RINPUT1
+ * LINPUT2
+ * RINPUT2
+ * Mic Bias
+
+
+Example:
+
+codec: es8328@11 {
+ compatible = "everest,es8328";
+ DVDD-supply = <&reg_3p3v>;
+ AVDD-supply = <&reg_3p3v>;
+ PVDD-supply = <&reg_3p3v>;
+ HPVDD-supply = <&reg_3p3v>;
+ clocks = <&clks 169>;
+ reg = <0x11>;
+};
diff --git a/Documentation/devicetree/bindings/sound/fsl,esai.txt b/Documentation/devicetree/bindings/sound/fsl,esai.txt
index aeb8c4a0b88d..52f5b6bf3e8e 100644
--- a/Documentation/devicetree/bindings/sound/fsl,esai.txt
+++ b/Documentation/devicetree/bindings/sound/fsl,esai.txt
@@ -7,7 +7,8 @@ other DSPs. It has up to six transmitters and four receivers.
Required properties:
- - compatible : Compatible list, must contain "fsl,imx35-esai".
+ - compatible : Compatible list, must contain "fsl,imx35-esai" or
+ "fsl,vf610-esai"
- reg : Offset and length of the register set for the device.
diff --git a/Documentation/devicetree/bindings/sound/fsl,ssi.txt b/Documentation/devicetree/bindings/sound/fsl,ssi.txt
index 3aa4a8f528f4..5b76be45d18b 100644
--- a/Documentation/devicetree/bindings/sound/fsl,ssi.txt
+++ b/Documentation/devicetree/bindings/sound/fsl,ssi.txt
@@ -58,13 +58,7 @@ Optional properties:
Documentation/devicetree/bindings/dma/dma.txt.
- dma-names: Two dmas have to be defined, "tx" and "rx", if fsl,imx-fiq
is not defined.
-- fsl,mode: The operating mode for the SSI interface.
- "i2s-slave" - I2S mode, SSI is clock slave
- "i2s-master" - I2S mode, SSI is clock master
- "lj-slave" - left-justified mode, SSI is clock slave
- "lj-master" - l.j. mode, SSI is clock master
- "rj-slave" - right-justified mode, SSI is clock slave
- "rj-master" - r.j., SSI is clock master
+- fsl,mode: The operating mode for the AC97 interface only.
"ac97-slave" - AC97 mode, SSI is clock slave
"ac97-master" - AC97 mode, SSI is clock master
diff --git a/Documentation/devicetree/bindings/sound/fsl-asoc-card.txt b/Documentation/devicetree/bindings/sound/fsl-asoc-card.txt
new file mode 100644
index 000000000000..a96774c194c8
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/fsl-asoc-card.txt
@@ -0,0 +1,82 @@
+Freescale Generic ASoC Sound Card with ASRC support
+
+The Freescale Generic ASoC Sound Card can be used, ideally, for all Freescale
+SoCs that connect to external CODECs.
+
+The idea of this generic sound card is a bit like the ASoC Simple Card.
+However, most Freescale SoCs (especially those released in recent years)
+have an ASRC (Documentation/devicetree/bindings/sound/fsl,asrc.txt) inside,
+and this SoC-specific feature would be painful to control from, and to
+merge into, the Simple Card.
+
+So having this generic sound card allows all Freescale SoC users to benefit
+from simplified support for new cards and from the wide range of sample
+rates made possible by the ASRC.
+
+Note: The card is initially designed for sound cards that use I2S and
+ PCM DAI formats. However, it will also be possible to support
+ non-I2S/PCM sound cards, such as S/PDIF audio and HDMI audio, once
+ the driver has been suitably extended.
+
+
+The compatible list for this generic sound card currently includes:
+ "fsl,imx-audio-cs42888"
+
+ "fsl,imx-audio-wm8962"
+ (compatible with Documentation/devicetree/bindings/sound/imx-audio-wm8962.txt)
+
+ "fsl,imx-audio-sgtl5000"
+ (compatible with Documentation/devicetree/bindings/sound/imx-audio-sgtl5000.txt)
+
+Required properties:
+
+ - compatible : Contains one of the entries in the compatible list.
+
+ - model : The user-visible name of this sound complex
+
+ - audio-cpu : The phandle of a CPU DAI controller
+
+ - audio-codec : The phandle of an audio codec
+
+ - audio-routing : A list of the connections between audio components.
+ Each entry is a pair of strings, the first being the
+ connection's sink, the second being the connection's
+ source. There are a few pre-designed board connectors:
+ * Line Out Jack
+ * Line In Jack
+ * Headphone Jack
+ * Mic Jack
+ * Ext Spk
+ * AMIC (stands for Analog Microphone Jack)
+ * DMIC (stands for Digital Microphone Jack)
+
+ Note: The "Mic Jack" and "AMIC" are redundant while
+ coexsiting in order to support the old bindings
+ of wm8962 and sgtl5000.
+
+Optional properties:
+
+ - audio-asrc : The phandle of ASRC. It can be absent if there's no
+ need to add ASRC support via DPCM.
+
+Example:
+sound-cs42888 {
+ compatible = "fsl,imx-audio-cs42888";
+ model = "cs42888-audio";
+ audio-cpu = <&esai>;
+ audio-asrc = <&asrc>;
+ audio-codec = <&cs42888>;
+ audio-routing =
+ "Line Out Jack", "AOUT1L",
+ "Line Out Jack", "AOUT1R",
+ "Line Out Jack", "AOUT2L",
+ "Line Out Jack", "AOUT2R",
+ "Line Out Jack", "AOUT3L",
+ "Line Out Jack", "AOUT3R",
+ "Line Out Jack", "AOUT4L",
+ "Line Out Jack", "AOUT4R",
+ "AIN1L", "Line In Jack",
+ "AIN1R", "Line In Jack",
+ "AIN2L", "Line In Jack",
+ "AIN2R", "Line In Jack";
+};
diff --git a/Documentation/devicetree/bindings/sound/fsl-sai.txt b/Documentation/devicetree/bindings/sound/fsl-sai.txt
index 0f4e23828190..4956b14d4b06 100644
--- a/Documentation/devicetree/bindings/sound/fsl-sai.txt
+++ b/Documentation/devicetree/bindings/sound/fsl-sai.txt
@@ -18,12 +18,26 @@ Required properties:
- pinctrl-names: Must contain a "default" entry.
- pinctrl-NNN: One property must exist for each entry in pinctrl-names.
See ../pinctrl/pinctrl-bindings.txt for details of the property values.
-- big-endian-regs: If this property is absent, the little endian mode will
- be in use as default, or the big endian mode will be in use for all the
- device registers.
-- big-endian-data: If this property is absent, the little endian mode will
- be in use as default, or the big endian mode will be in use for all the
- fifo data.
+- big-endian: Boolean property, required if all the SAI registers are
+ big-endian rather than little-endian.
+- lsb-first: Configures whether the LSB or the MSB is transmitted first for
+ the FIFO data. If this property is absent, the MSB is transmitted first
+ by default; if it is present, the LSB is transmitted first.
+- fsl,sai-synchronous-rx: This is a boolean property. If present, it
+ indicates that the SAI will work in synchronous mode (sync Tx with Rx),
+ which means both the transmitter and receiver will send and receive data
+ following the receiver's bit clocks and frame sync clocks.
+- fsl,sai-asynchronous: This is a boolean property. If present, it indicates
+ that the SAI will work in asynchronous mode, which means the transmitter
+ and receiver will send and receive data following their own bit clocks and
+ frame sync clocks separately.
+
+Note:
+- If both fsl,sai-asynchronous and fsl,sai-synchronous-rx are absent, the
+ default synchronous mode (sync Rx with Tx) will be used, which means both
+ the transmitter and receiver will send and receive data following the
+ transmitter's clocks.
+- fsl,sai-asynchronous and fsl,sai-synchronous-rx are exclusive.
Example:
sai2: sai@40031000 {
@@ -38,6 +52,6 @@ sai2: sai@40031000 {
dma-names = "tx", "rx";
dmas = <&edma0 0 VF610_EDMA_MUXID0_SAI2_TX>,
<&edma0 0 VF610_EDMA_MUXID0_SAI2_RX>;
- big-endian-regs;
- big-endian-data;
+ big-endian;
+ lsb-first;
};
diff --git a/Documentation/devicetree/bindings/sound/imx-audio-es8328.txt b/Documentation/devicetree/bindings/sound/imx-audio-es8328.txt
new file mode 100644
index 000000000000..07b68ab206fb
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/imx-audio-es8328.txt
@@ -0,0 +1,60 @@
+Freescale i.MX audio complex with ES8328 codec
+
+Required properties:
+- compatible : "fsl,imx-audio-es8328"
+- model : The user-visible name of this sound complex
+- ssi-controller : The phandle of the i.MX SSI controller
+- jack-gpio : Optional GPIO for headphone jack
+- audio-amp-supply : Power regulator for speaker amps
+- audio-codec : The phandle of the ES8328 audio codec
+- audio-routing : A list of the connections between audio components.
+ Each entry is a pair of strings, the first being the
+ connection's sink, the second being the connection's
+ source. Valid names could be power supplies, ES8328
+ pins, and the jacks on the board:
+
+ Power supplies:
+ * audio-amp
+
+ ES8328 pins:
+ * LOUT1
+ * LOUT2
+ * ROUT1
+ * ROUT2
+ * LINPUT1
+ * LINPUT2
+ * RINPUT1
+ * RINPUT2
+ * Mic PGA
+
+ Board connectors:
+ * Headphone
+ * Speaker
+ * Mic Jack
+- mux-int-port : The internal port of the i.MX audio muxer (AUDMUX)
+- mux-ext-port : The external port of the i.MX audio muxer (AUDMUX)
+
+Note: The AUDMUX port numbering should start at 1, which is consistent with
+the hardware manual.
+
+Example:
+
+sound {
+ compatible = "fsl,imx-audio-es8328";
+ model = "imx-audio-es8328";
+ ssi-controller = <&ssi1>;
+ audio-codec = <&codec>;
+ jack-gpio = <&gpio5 15 0>;
+ audio-amp-supply = <&reg_audio_amp>;
+ audio-routing =
+ "Speaker", "LOUT2",
+ "Speaker", "ROUT2",
+ "Speaker", "audio-amp",
+ "Headphone", "ROUT1",
+ "Headphone", "LOUT1",
+ "LINPUT1", "Mic Jack",
+ "RINPUT1", "Mic Jack",
+ "Mic Jack", "Mic Bias";
+ mux-int-port = <1>;
+ mux-ext-port = <3>;
+};
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-max98090.txt b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-max98090.txt
index 9c7c55c71370..c949abc2992f 100644
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-max98090.txt
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-max98090.txt
@@ -25,6 +25,7 @@ Required properties:
Optional properties:
- nvidia,hp-det-gpios : The GPIO that detect headphones are plugged in
+- nvidia,mic-det-gpios : The GPIO that detects whether a microphone is plugged in
Example:
diff --git a/Documentation/devicetree/bindings/sound/rt5677.txt b/Documentation/devicetree/bindings/sound/rt5677.txt
new file mode 100644
index 000000000000..0701b834fc73
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/rt5677.txt
@@ -0,0 +1,59 @@
+RT5677 audio CODEC
+
+This device supports I2C only.
+
+Required properties:
+
+- compatible : "realtek,rt5677".
+
+- reg : The I2C address of the device.
+
+- interrupts : The CODEC's interrupt output.
+
+- gpio-controller : Indicates this device is a GPIO controller.
+
+- #gpio-cells : Should be two. The first cell is the pin number and the
+ second cell is used to specify optional parameters (currently unused).
+
+Optional properties:
+
+- realtek,pow-ldo2-gpio : The GPIO that controls the CODEC's POW_LDO2 pin.
+
+- realtek,in1-differential
+- realtek,in2-differential
+- realtek,lout1-differential
+- realtek,lout2-differential
+- realtek,lout3-differential
+ Boolean. Indicates that the MIC1/2 inputs and the LOUT1/2/3 outputs
+ are differential, rather than single-ended.
+
+Pins on the device (for linking into audio routes):
+
+ * IN1P
+ * IN1N
+ * IN2P
+ * IN2N
+ * MICBIAS1
+ * DMIC1
+ * DMIC2
+ * DMIC3
+ * DMIC4
+ * LOUT1
+ * LOUT2
+ * LOUT3
+
+Example:
+
+rt5677 {
+ compatible = "realtek,rt5677";
+ reg = <0x2c>;
+ interrupt-parent = <&gpio>;
+ interrupts = <TEGRA_GPIO(W, 3) GPIO_ACTIVE_HIGH>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ realtek,pow-ldo2-gpio =
+ <&gpio TEGRA_GPIO(V, 3) GPIO_ACTIVE_HIGH>;
+ realtek,in1-differential = "true";
+};
diff --git a/Documentation/devicetree/bindings/sound/simple-card.txt b/Documentation/devicetree/bindings/sound/simple-card.txt
index c2e9841dfce4..c3cba600bf11 100644
--- a/Documentation/devicetree/bindings/sound/simple-card.txt
+++ b/Documentation/devicetree/bindings/sound/simple-card.txt
@@ -17,6 +17,10 @@ Optional properties:
source.
- simple-audio-card,mclk-fs : Multiplication factor between stream rate and codec
mclk.
+- simple-audio-card,hp-det-gpio : Reference to GPIO that signals when
+ headphones are attached.
+- simple-audio-card,mic-det-gpio : Reference to GPIO that signals when
+ a microphone is attached.
Optional subnodes:
diff --git a/Documentation/devicetree/bindings/sound/ssm4567.txt b/Documentation/devicetree/bindings/sound/ssm4567.txt
new file mode 100644
index 000000000000..ec3d9e7004b5
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/ssm4567.txt
@@ -0,0 +1,15 @@
+Analog Devices SSM4567 audio amplifier
+
+This device supports I2C only.
+
+Required properties:
+ - compatible : Must be "adi,ssm4567"
+ - reg : the I2C address of the device. This will either be 0x34 (LR_SEL/ADDR connected to AGND),
+ 0x35 (LR_SEL/ADDR connected to IOVDD) or 0x36 (LR_SEL/ADDR open).
+
+Example:
+
+ ssm4567: ssm4567@34 {
+ compatible = "adi,ssm4567";
+ reg = <0x34>;
+ };
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 653beaa392dc..f67e3f84e8bc 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -46,11 +46,13 @@ dmo Data Modul AG
ebv EBV Elektronik
edt Emerging Display Technologies
emmicro EM Microelectronic
+energymicro Silicon Laboratories (formerly Energy Micro AS)
epcos EPCOS AG
epfl Ecole Polytechnique Fédérale de Lausanne
epson Seiko Epson Corp.
est ESTeem Wireless Modems
eukrea Eukréa Electromatique
+everest Everest Semiconductor Co. Ltd.
excito Excito
fcs Fairchild Semiconductor
fsl Freescale Semiconductor
@@ -61,6 +63,7 @@ globalscale Globalscale Technologies, Inc.
gmt Global Mixed-mode Technology, Inc.
google Google, Inc.
gumstix Gumstix, Inc.
+gw Gateworks Corporation
haoyu Haoyu Microelectronic Co. Ltd.
hisilicon Hisilicon Limited.
honeywell Honeywell
@@ -70,6 +73,7 @@ ibm International Business Machines (IBM)
idt Integrated Device Technologies, Inc.
iom Iomega Corporation
img Imagination Technologies Ltd.
+innolux Innolux Corporation
intel Intel Corporation
intercontrol Inter Control Group
isee ISEE 2007 S.L.
@@ -131,6 +135,7 @@ simtek
sii Seiko Instruments, Inc.
silergy Silergy Corp.
sirf SiRF Technology, Inc.
+sitronix Sitronix Technology Corporation
smsc Standard Microsystems Corporation
snps Synopsys, Inc.
solidrun SolidRun
diff --git a/Documentation/devicetree/booting-without-of.txt b/Documentation/devicetree/booting-without-of.txt
index 1f013bd0d320..77685185cf3b 100644
--- a/Documentation/devicetree/booting-without-of.txt
+++ b/Documentation/devicetree/booting-without-of.txt
@@ -51,6 +51,8 @@ Table of Contents
VIII - Specifying device power management information (sleep property)
+ IX - Specifying dma bus information
+
Appendix A - Sample SOC node for MPC8540
@@ -1332,6 +1334,57 @@ reasonably grouped in this manner, then create a virtual sleep controller
(similar to an interrupt nexus, except that defining a standardized
sleep-map should wait until its necessity is demonstrated).
+IX - Specifying dma bus information
+
+Some devices may have their DMA memory range shifted relative to the
+beginning of RAM, or even placed outside of kernel RAM. For example, the
+Keystone 2 SoC, when working in LPAE mode with 4G of memory, has:
+- RAM range: [0x8 0000 0000, 0x8 FFFF FFFF]
+- DMA range: [ 0x8000 0000, 0xFFFF FFFF]
+and the DMA range is aliased onto the first 2G of RAM in hardware.
+
+In such cases, DMA address translation should be performed between CPU
+physical addresses and DMA addresses. The "dma-ranges" property is intended
+to be used for describing the configuration of such a system in DT.
+
+In addition, each DMA master device on the DMA bus may or may not support
+coherent DMA operations. The "dma-coherent" property is intended to be used
+for identifying devices that support coherent DMA operations in DT.
+
+* DMA Bus master
+Optional property:
+- dma-ranges: <prop-encoded-array> encoded as arbitrary number of triplets of
+ (child-bus-address, parent-bus-address, length). Each triplet specified
+ describes a contiguous DMA address range.
+ The dma-ranges property is used to describe the direct memory access (DMA)
+ structure of a memory-mapped bus whose device tree parent can be accessed
+ from DMA operations originating from the bus. It provides a means of
+ defining a mapping or translation between the physical address space of
+ the bus and the physical address space of the parent of the bus.
+ (for more information see ePAPR specification)
+
+* DMA Bus child
+Optional property:
+- dma-ranges: <empty> value. If present, it means that DMA address
+ translation has to be enabled for this device.
+- dma-coherent: Present if DMA operations are coherent.
+
+Example:
+soc {
+ compatible = "ti,keystone","simple-bus";
+ ranges = <0x0 0x0 0x0 0xc0000000>;
+ dma-ranges = <0x80000000 0x8 0x00000000 0x80000000>;
+
+ [...]
+
+ usb: usb@2680000 {
+ compatible = "ti,keystone-dwc3";
+
+ [...]
+ dma-coherent;
+ };
+};
+
Appendix A - Sample SOC node for MPC8540
========================================
diff --git a/Documentation/devicetree/dynamic-resolution-notes.txt b/Documentation/devicetree/dynamic-resolution-notes.txt
new file mode 100644
index 000000000000..083d23262abe
--- /dev/null
+++ b/Documentation/devicetree/dynamic-resolution-notes.txt
@@ -0,0 +1,25 @@
+Device Tree Dynamic Resolver Notes
+----------------------------------
+
+This document describes the implementation of the in-kernel
+Device Tree resolver, which resides in drivers/of/resolver.c, and is a
+companion document to Documentation/devicetree/dt-object-internal.txt [1].
+
+How the resolver works
+----------------------
+
+The resolver is given, as input, an arbitrary tree compiled with the
+proper dtc option and having a /plugin/ tag. This generates the
+appropriate __fixups__ and __local_fixups__ nodes as described in [1].
+
+The resolver works through the following steps, in sequence:
+
+1. Get the maximum device tree phandle value from the live tree + 1.
+2. Adjust all the local phandles of the tree to resolve by that amount.
+3. Using the __local_fixups__ node information adjust all local references
+ by the same amount.
+4. For each property in the __fixups__ node locate the node it references
+ in the live tree. This is the label used to tag the node.
+5. Retrieve the phandle of the target of the fixup.
+6. For each fixup in the property locate the node:property:offset location
+ and replace it with the phandle value.
diff --git a/Documentation/devicetree/of_selftest.txt b/Documentation/devicetree/of_selftest.txt
index 3a2f54d07fc5..1e3d5c92b5e3 100644
--- a/Documentation/devicetree/of_selftest.txt
+++ b/Documentation/devicetree/of_selftest.txt
@@ -67,14 +67,14 @@ struct device_node {
...
};
-Figure 1, describes a generic structure of machine’s un-flattened device tree
+Figure 1, describes a generic structure of machine's un-flattened device tree
considering only child and sibling pointers. There exists another pointer,
*parent, that is used to traverse the tree in the reverse direction. So, at
a particular level the child node and all the sibling nodes will have a parent
-pointer pointing to a common node (e.g. child1, sibling2, sibling3, sibling4’s
+pointer pointing to a common node (e.g. child1, sibling2, sibling3, sibling4's
parent points to root node)
-root (‘/’)
+root ('/')
|
child1 -> sibling2 -> sibling3 -> sibling4 -> null
| | | |
@@ -113,8 +113,8 @@ via the following kernel symbols:
__dtb_testcases_begin - address marking the start of test data blob
__dtb_testcases_end - address marking the end of test data blob
-Secondly, it calls of_fdt_unflatten_device_tree() to unflatten the flattened
-blob. And finally, if the machine’s device tree (i.e live tree) is present,
+Secondly, it calls of_fdt_unflatten_tree() to unflatten the flattened
+blob. And finally, if the machine's device tree (i.e live tree) is present,
then it attaches the unflattened test data tree to the live tree, else it
attaches itself as a live device tree.
@@ -122,7 +122,7 @@ attach_node_and_children() uses of_attach_node() to attach the nodes into the
live tree as explained below. To explain the same, the test data tree described
in Figure 2 is attached to the live tree described in Figure 1.
-root (‘/’)
+root ('/')
|
testcase-data
|
@@ -138,8 +138,8 @@ root->testcase-data->test-child0->test-child01->test-sibling1->test-sibling2
Figure 2: Example test data tree to be attached to live tree.
-According to the scenario above, the live tree is already present so it isn’t
-required to attach the root(‘/’) node. All other nodes are attached by calling
+According to the scenario above, the live tree is already present so it isn't
+required to attach the root('/') node. All other nodes are attached by calling
of_attach_node() on each node.
In the function of_attach_node(), the new node is attached as the child of the
@@ -148,7 +148,7 @@ replaces the current child and turns it into its sibling. So, when the testcase
data node is attached to the live tree above (Figure 1), the final structure is
as shown in Figure 3.
-root (‘/’)
+root ('/')
|
testcase-data -> child1 -> sibling2 -> sibling3 -> sibling4 -> null
| | | | |
@@ -170,7 +170,7 @@ testcase-data -> child1 -> sibling2 -> sibling3 -> sibling4 -> null
null
-----------------------------------------------------------------------
-root (‘/’)
+root ('/')
|
testcase-data -> child1 -> sibling2 -> sibling3 -> sibling4 -> null
| | | | |
@@ -191,8 +191,8 @@ test-child0 the test-sibling1 is attached that pushes the child node
as mentioned above.
If a duplicate node is found (i.e. if a node with same full_name property is
-already present in the live tree), then the node isn’t attached rather its
-properties are updated to the live tree’s node by calling the function
+already present in the live tree), then the node isn't attached; rather, its
+properties are updated in the live tree's node by calling the function
update_node_properties().
@@ -205,7 +205,7 @@ whole tree). selftest_data_remove() calls detach_node_and_children() that uses
of_detach_node() to detach the nodes from the live device tree.
To detach a node, of_detach_node() first updates all_next linked list, by
-attaching the previous node’s allnext to current node’s allnext pointer. And
-then, it either updates the child pointer of given node’s parent to its
-sibling or attaches the previous sibling to the given node’s sibling, as
+attaching the previous node's allnext to current node's allnext pointer. And
+then, it either updates the child pointer of given node's parent to its
+sibling or attaches the previous sibling to the given node's sibling, as
appropriate. That is it :)
diff --git a/Documentation/dvb/get_dvb_firmware b/Documentation/dvb/get_dvb_firmware
index 26c623dd3aa3..91b43d2738c7 100755
--- a/Documentation/dvb/get_dvb_firmware
+++ b/Documentation/dvb/get_dvb_firmware
@@ -708,23 +708,25 @@ sub drxk_terratec_htc_stick {
}
sub it9135 {
- my $sourcefile = "dvb-usb-it9135.zip";
- my $url = "http://www.ite.com.tw/uploads/firmware/v3.6.0.0/$sourcefile";
- my $hash = "1e55f6c8833f1d0ae067c2bb2953e6a9";
- my $tmpdir = tempdir(DIR => "/tmp", CLEANUP => 0);
- my $outfile = "dvb-usb-it9135.fw";
+ my $url = "http://www.ite.com.tw/uploads/firmware/v3.25.0.0/";
+ my $file1 = "dvb-usb-it9135-01.zip";
my $fwfile1 = "dvb-usb-it9135-01.fw";
+ my $hash1 = "02fcf11174eda84745dae7e61c5ff9ba";
+ my $file2 = "dvb-usb-it9135-02.zip";
my $fwfile2 = "dvb-usb-it9135-02.fw";
+ my $hash2 = "d5e1437dc24358578e07999475d4cac9";
checkstandard();
- wgetfile($sourcefile, $url);
- unzip($sourcefile, $tmpdir);
- verify("$tmpdir/$outfile", $hash);
- extract("$tmpdir/$outfile", 64, 8128, "$fwfile1");
- extract("$tmpdir/$outfile", 12866, 5817, "$fwfile2");
+ wgetfile($file1, $url . $file1);
+ unzip($file1, "");
+ verify("$fwfile1", $hash1);
+
+ wgetfile($file2, $url . $file2);
+ unzip($file2, "");
+ verify("$fwfile2", $hash2);
- "$fwfile1 $fwfile2"
+ "$file1 $file2"
}
sub tda10071 {
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index f1997e9da61f..94d93b1f8b53 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -464,15 +464,12 @@ prototypes:
size_t, unsigned int);
ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *,
size_t, unsigned int);
- int (*setlease)(struct file *, long, struct file_lock **);
+ int (*setlease)(struct file *, long, struct file_lock **, void **);
long (*fallocate)(struct file *, int, loff_t, loff_t);
};
locking rules:
- All may block except for ->setlease.
- No VFS locks held on entry except for ->setlease.
-
-->setlease has the file_list_lock held and must not sleep.
+ All may block.
->llseek() locking has moved from llseek to the individual llseek
implementations. If your fs is not using generic_file_llseek, you
@@ -496,6 +493,10 @@ components. And there are other reasons why the current interface is a mess...
->read on directories probably must go away - we should just enforce -EISDIR
in sys_read() and friends.
+->setlease operations should call generic_setlease() before or after setting
+the lease within the individual filesystem to record the result of the
+operation.
+
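+A minimal sketch of such an implementation (the "myfs" names and the
+policy-check helper are hypothetical; generic_setlease() is assumed to
+take the same arguments as the ->setlease prototype above):
+
+	static int myfs_setlease(struct file *filp, long arg,
+				 struct file_lock **flp, void **priv)
+	{
+		int error;
+
+		/* Filesystem-specific policy check (hypothetical helper). */
+		error = myfs_lease_allowed(filp, arg);
+		if (error)
+			return error;
+
+		/* Record (or remove) the lease via the generic helper. */
+		return generic_setlease(filp, arg, flp, priv);
+	}
+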
--------------------------- dquot_operations -------------------------------
prototypes:
int (*write_dquot) (struct dquot *);
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 61d65cc65c54..fceff7c00a3c 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -237,7 +237,7 @@ noted. This means that most methods can block safely. All methods are
only called from a process context (i.e. not from an interrupt handler
or bottom half).
- alloc_inode: this method is called by inode_alloc() to allocate memory
+ alloc_inode: this method is called by alloc_inode() to allocate memory
for struct inode and initialize it. If this function is not
defined, a simple 'struct inode' is allocated. Normally
alloc_inode will be used to allocate a larger structure which
@@ -826,7 +826,7 @@ struct file_operations {
int (*flock) (struct file *, int, struct file_lock *);
ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, size_t, unsigned int);
ssize_t (*splice_read)(struct file *, struct pipe_inode_info *, size_t, unsigned int);
- int (*setlease)(struct file *, long arg, struct file_lock **);
+ int (*setlease)(struct file *, long arg, struct file_lock **, void **);
long (*fallocate)(struct file *, int mode, loff_t offset, loff_t len);
int (*show_fdinfo)(struct seq_file *m, struct file *f);
};
@@ -895,8 +895,9 @@ otherwise noted.
splice_read: called by the VFS to splice data from file to a pipe. This
method is used by the splice(2) system call
- setlease: called by the VFS to set or release a file lock lease.
- setlease has the file_lock_lock held and must not sleep.
+ setlease: called by the VFS to set or release a file lock lease. setlease
+ implementations should call generic_setlease to record or remove
+ the lease in the inode after setting it.
fallocate: called by the VFS to preallocate blocks or punch a hole.
diff --git a/Documentation/hwmon/k10temp b/Documentation/hwmon/k10temp
index ee6d30ec1522..254d2f55345a 100644
--- a/Documentation/hwmon/k10temp
+++ b/Documentation/hwmon/k10temp
@@ -11,7 +11,7 @@ Supported chips:
Socket S1G2: Athlon (X2), Sempron (X2), Turion X2 (Ultra)
* AMD Family 12h processors: "Llano" (E2/A4/A6/A8-Series)
* AMD Family 14h processors: "Brazos" (C/E/G/Z-Series)
-* AMD Family 15h processors: "Bulldozer" (FX-Series), "Trinity", "Kaveri"
+* AMD Family 15h processors: "Bulldozer" (FX-Series), "Trinity", "Kaveri", "Carrizo"
* AMD Family 16h processors: "Kabini", "Mullins"
Prefix: 'k10temp'
diff --git a/Documentation/hwmon/menf21bmc b/Documentation/hwmon/menf21bmc
new file mode 100644
index 000000000000..2a273a065c5e
--- /dev/null
+++ b/Documentation/hwmon/menf21bmc
@@ -0,0 +1,50 @@
+Kernel driver menf21bmc_hwmon
+=============================
+
+Supported chips:
+ * MEN 14F021P00
+ Prefix: 'menf21bmc_hwmon'
+ Addresses scanned: -
+
+Author: Andreas Werner <andreas.werner@men.de>
+
+Description
+-----------
+
+The menf21bmc is a Board Management Controller (BMC) which provides an I2C
+interface to the host to access the features implemented in the BMC.
+
+This driver gives access to the voltage monitoring feature for the main
+voltages of the board.
+The voltage sensors are connected to the ADC inputs of the BMC, which is
+a PIC16F917 microcontroller.
+
+Usage Notes
+-----------
+
+This driver is part of the MFD driver named "menf21bmc" and does
+not auto-detect devices.
+You will have to instantiate the MFD driver explicitly.
+Please see Documentation/i2c/instantiating-devices for
+details.
+
+Sysfs entries
+-------------
+
+The following attributes are supported. All attributes are read-only.
+The limits are read once by the driver.
+
+in0_input +3.3V input voltage
+in1_input +5.0V input voltage
+in2_input +12.0V input voltage
+in3_input +5V Standby input voltage
+in4_input VBAT (on board battery)
+
+in[0-4]_min Minimum voltage limit
+in[0-4]_max Maximum voltage limit
+
+in0_label "MON_3_3V"
+in1_label "MON_5V"
+in2_label "MON_12V"
+in3_label "5V_STANDBY"
+in4_label "VBAT"
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 7e240a7c9ab1..8136e1fd30fd 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -313,6 +313,7 @@ Code Seq#(hex) Include File Comments
0xB1 00-1F PPPoX <mailto:mostrows@styx.uwaterloo.ca>
0xB3 00 linux/mmc/ioctl.h
0xC0 00-0F linux/usb/iowarrior.h
+0xCA 00-0F uapi/misc/cxl.h
0xCB 00-1F CBM serial IEC bus in development:
<mailto:michael.klein@puffin.lb.shuttle.de>
0xCD 01 linux/reiserfs_fs.h
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 41f7ec1fcf61..04e9f5505faa 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1323,7 +1323,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
Set number of hash buckets for inode cache.
ima_appraise= [IMA] appraise integrity measurements
- Format: { "off" | "enforce" | "fix" }
+ Format: { "off" | "enforce" | "fix" | "log" }
default: "enforce"
ima_appraise_tcb [IMA]
@@ -1723,6 +1723,49 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
lockd.nlm_udpport=M [NFS] Assign UDP port.
Format: <integer>
+ locktorture.nreaders_stress= [KNL]
+ Set the number of locking read-acquisition kthreads.
+ Defaults to being automatically set based on the
+ number of online CPUs.
+
+ locktorture.nwriters_stress= [KNL]
+ Set the number of locking write-acquisition kthreads.
+
+ locktorture.onoff_holdoff= [KNL]
+ Set time (s) after boot for CPU-hotplug testing.
+
+ locktorture.onoff_interval= [KNL]
+ Set time (s) between CPU-hotplug operations, or
+ zero to disable CPU-hotplug testing.
+
+ locktorture.shuffle_interval= [KNL]
+ Set task-shuffle interval (jiffies). Shuffling
+ tasks allows some CPUs to go into dyntick-idle
+ mode during the locktorture test.
+
+ locktorture.shutdown_secs= [KNL]
+ Set time (s) after boot for system shutdown. This
+ is useful for hands-off automated testing.
+
+ locktorture.stat_interval= [KNL]
+ Time (s) between statistics printk()s.
+
+ locktorture.stutter= [KNL]
+ Time (s) to stutter testing, for example,
+ specifying five seconds causes the test to run for
+ five seconds, wait for five seconds, and so on.
+ This tests the locking primitive's ability to
+ transition abruptly to and from idle.
+
+ locktorture.torture_runnable= [BOOT]
+ Start locktorture running at boot time.
+
+ locktorture.torture_type= [KNL]
+ Specify the locking implementation to test.
+
+ locktorture.verbose= [KNL]
+ Enable additional printk() statements.
+
logibm.irq= [HW,MOUSE] Logitech Bus Mouse Driver
Format: <irq>
@@ -2900,6 +2943,24 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
Lazy RCU callbacks are those which RCU can
prove do nothing more than free memory.
+ rcutorture.cbflood_inter_holdoff= [KNL]
+ Set holdoff time (jiffies) between successive
+ callback-flood tests.
+
+ rcutorture.cbflood_intra_holdoff= [KNL]
+ Set holdoff time (jiffies) between successive
+ bursts of callbacks within a given callback-flood
+ test.
+
+ rcutorture.cbflood_n_burst= [KNL]
+ Set the number of bursts making up a given
+ callback-flood test. Set this to zero to
+ disable callback-flood testing.
+
+ rcutorture.cbflood_n_per_burst= [KNL]
+ Set the number of callbacks to be registered
+ in a given burst of a callback-flood test.
+
rcutorture.fqs_duration= [KNL]
Set duration of force_quiescent_state bursts.
@@ -2939,7 +3000,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
Set time (s) between CPU-hotplug operations, or
zero to disable CPU-hotplug testing.
- rcutorture.rcutorture_runnable= [BOOT]
+ rcutorture.torture_runnable= [BOOT]
Start rcutorture running at boot time.
rcutorture.shuffle_interval= [KNL]
@@ -3001,6 +3062,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
rcupdate.rcu_cpu_stall_timeout= [KNL]
Set timeout for RCU CPU stall warning messages.
+ rcupdate.rcu_task_stall_timeout= [KNL]
+ Set timeout in jiffies for RCU task stall warning
+ messages. Disable with a value less than or equal
+ to zero.
+
rdinit= [KNL]
Format: <full_path>
Run specified binary instead of /init from the ramdisk,
diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt
index 4bbeca8483ed..4227ec2e3ab2 100644
--- a/Documentation/kprobes.txt
+++ b/Documentation/kprobes.txt
@@ -300,6 +300,7 @@ architectures:
- arm
- ppc
- mips
+- s390
3. Configuring Kprobes
diff --git a/Documentation/lockdep-design.txt b/Documentation/locking/lockdep-design.txt
index 5dbc99c04f6e..5dbc99c04f6e 100644
--- a/Documentation/lockdep-design.txt
+++ b/Documentation/locking/lockdep-design.txt
diff --git a/Documentation/lockstat.txt b/Documentation/locking/lockstat.txt
index 72d010689751..7428773a1e69 100644
--- a/Documentation/lockstat.txt
+++ b/Documentation/locking/lockstat.txt
@@ -12,7 +12,7 @@ Because things like lock contention can severely impact performance.
- HOW
Lockdep already has hooks in the lock functions and maps lock instances to
-lock classes. We build on that (see Documentation/lockdep-design.txt).
+lock classes. We build on that (see Documentation/locking/lockdep-design.txt).
The graph below shows the relation between the lock functions and the various
hooks therein.
diff --git a/Documentation/locking/locktorture.txt b/Documentation/locking/locktorture.txt
new file mode 100644
index 000000000000..619f2bb136a5
--- /dev/null
+++ b/Documentation/locking/locktorture.txt
@@ -0,0 +1,147 @@
+Kernel Lock Torture Test Operation
+
+CONFIG_LOCK_TORTURE_TEST
+
+The CONFIG_LOCK_TORTURE_TEST config option provides a kernel module
+that runs torture tests on core kernel locking primitives. The kernel
+module, 'locktorture', may be built after the fact on the running
+kernel to be tested, if desired. The tests periodically output status
+messages via printk(), which can be examined via dmesg (perhaps by
+grepping for "torture"). The test is started when the module is loaded,
+and stops when the module is unloaded. This program is based on how RCU
+is tortured, via rcutorture.
+
+This torture test consists of creating a number of kernel threads which
+acquire the lock and hold it for a specific amount of time, thus simulating
+different critical region behaviors. The amount of contention on the lock
+can be varied by enlarging this critical region hold time and/or by
+creating more kthreads.
+
+
+MODULE PARAMETERS
+
+This module has the following parameters:
+
+
+ ** Locktorture-specific **
+
+nwriters_stress Number of kernel threads that will stress exclusive lock
+ ownership (writers). The default value is twice the number
+ of online CPUs.
+
+nreaders_stress Number of kernel threads that will stress shared lock
+ ownership (readers). The default is the same as the number
+ of writers. If the user did not specify nwriters_stress,
+ then both readers and writers are set to the number of
+ online CPUs.
+
+torture_type Type of lock to torture. By default, only spinlocks will
+ be tortured. This module can torture the following locks,
+ with string values as follows:
+
+ o "lock_busted": Simulates a buggy lock implementation.
+
+ o "spin_lock": spin_lock() and spin_unlock() pairs.
+
+ o "spin_lock_irq": spin_lock_irq() and spin_unlock_irq()
+ pairs.
+
+ o "rw_lock": read/write lock() and unlock() rwlock pairs.
+
+ o "rw_lock_irq": read/write lock_irq() and unlock_irq()
+ rwlock pairs.
+
+ o "mutex_lock": mutex_lock() and mutex_unlock() pairs.
+
+ o "rwsem_lock": read/write down() and up() semaphore pairs.
+
+torture_runnable Start locktorture at boot time in the case where the
+ module is built into the kernel, otherwise wait for
+ torture_runnable to be set via sysfs before starting.
+ By default it will begin once the module is loaded.
+
+
+ ** Torture-framework (RCU + locking) **
+
+shutdown_secs The number of seconds to run the test before terminating
+ the test and powering off the system. The default is
+ zero, which disables test termination and system shutdown.
+ This capability is useful for automated testing.
+
+onoff_interval The number of seconds between each attempt to execute a
+ randomly selected CPU-hotplug operation. Defaults
+ to zero, which disables CPU hotplugging. In
+ CONFIG_HOTPLUG_CPU=n kernels, locktorture will silently
+ refuse to do any CPU-hotplug operations regardless of
+ what value is specified for onoff_interval.
+
+onoff_holdoff The number of seconds to wait until starting CPU-hotplug
+ operations. This would normally only be used when
+ locktorture was built into the kernel and started
+ automatically at boot time, in which case it is useful
+ in order to avoid confusing boot-time code with CPUs
+ coming and going. This parameter is only useful if
+ CONFIG_HOTPLUG_CPU is enabled.
+
+stat_interval Number of seconds between statistics-related printk()s.
+ By default, locktorture will report stats every 60 seconds.
+ Setting the interval to zero causes the statistics to
+ be printed -only- when the module is unloaded.
+
+stutter The length of time to run the test before pausing for this
+ same period of time. Defaults to "stutter=5", so as
+ to run and pause for (roughly) five-second intervals.
+ Specifying "stutter=0" causes the test to run continuously
+ without pausing, which is the old default behavior.
+
+shuffle_interval The number of seconds to keep the test threads affinitied
+ to a particular subset of the CPUs, defaults to 3 seconds.
+ Used in conjunction with test_no_idle_hz.
+
+verbose Enable verbose debugging printing, via printk(). Enabled
+ by default. This extra information is mostly related to
+ high-level errors and reports from the main 'torture'
+ framework.
+
+
+STATISTICS
+
+Statistics are printed in the following format:
+
+spin_lock-torture: Writes: Total: 93746064 Max/Min: 0/0 Fail: 0
+ (A) (B) (C) (D) (E)
+
+(A): Lock type that is being tortured -- torture_type parameter.
+
+(B): Number of writer lock acquisitions. If dealing with a read/write
+ primitive, a second "Reads" statistics line is printed.
+
+(C): Number of times the lock was acquired.
+
+(D): Min and max number of times threads failed to acquire the lock.
+
+(E): true/false values if there were errors acquiring the lock. This should
+ -only- be positive if there is a bug in the locking primitive's
+ implementation. Otherwise a lock should never fail (i.e., spin_lock()).
+ Of course, the same applies for (C), above. A dummy example of this is
+ the "lock_busted" type.
+
+USAGE
+
+The following script may be used to torture locks:
+
+ #!/bin/sh
+
+ modprobe locktorture
+ sleep 3600
+ rmmod locktorture
+ dmesg | grep torture:
+
+The output can be manually inspected for the error flag of "!!!".
+One could of course create a more elaborate script that automatically
+checked for such errors. The "rmmod" command forces a "SUCCESS",
+"FAILURE", or "RCU_HOTPLUG" indication to be printk()ed. The first
+two are self-explanatory, while the last indicates that while there
+were no locking failures, CPU-hotplug problems were detected.
+
+Also see: Documentation/RCU/torture.txt
diff --git a/Documentation/mutex-design.txt b/Documentation/locking/mutex-design.txt
index ee231ed09ec6..60c482df1a38 100644
--- a/Documentation/mutex-design.txt
+++ b/Documentation/locking/mutex-design.txt
@@ -145,9 +145,9 @@ Disadvantages
Unlike its original design and purpose, 'struct mutex' is larger than
most locks in the kernel. E.g: on x86-64 it is 40 bytes, almost twice
-as large as 'struct semaphore' (24 bytes) and 8 bytes shy of the
-'struct rw_semaphore' variant. Larger structure sizes mean more CPU
-cache and memory footprint.
+as large as 'struct semaphore' (24 bytes) and tied, along with rwsems,
+for the largest lock in the kernel. Larger structure sizes mean more
+CPU cache and memory footprint.
When to use mutexes
-------------------
diff --git a/Documentation/rt-mutex-design.txt b/Documentation/locking/rt-mutex-design.txt
index 8666070d3189..8666070d3189 100644
--- a/Documentation/rt-mutex-design.txt
+++ b/Documentation/locking/rt-mutex-design.txt
diff --git a/Documentation/rt-mutex.txt b/Documentation/locking/rt-mutex.txt
index 243393d882ee..243393d882ee 100644
--- a/Documentation/rt-mutex.txt
+++ b/Documentation/locking/rt-mutex.txt
diff --git a/Documentation/spinlocks.txt b/Documentation/locking/spinlocks.txt
index 97eaf5727178..ff35e40bdf5b 100644
--- a/Documentation/spinlocks.txt
+++ b/Documentation/locking/spinlocks.txt
@@ -105,9 +105,9 @@ never used in interrupt handlers, you can use the non-irq versions:
spin_unlock(&lock);
(and the equivalent read-write versions too, of course). The spinlock will
-guarantee the same kind of exclusive access, and it will be much faster.
+guarantee the same kind of exclusive access, and it will be much faster.
This is useful if you know that the data in question is only ever
-manipulated from a "process context", ie no interrupts involved.
+manipulated from a "process context", ie no interrupts involved.
The reasons you mustn't use these versions if you have interrupts that
play with the spinlock is that you can get deadlocks:
@@ -122,21 +122,21 @@ the other interrupt happens on another CPU, but it is _not_ ok if the
interrupt happens on the same CPU that already holds the lock, because the
lock will obviously never be released (because the interrupt is waiting
for the lock, and the lock-holder is interrupted by the interrupt and will
-not continue until the interrupt has been processed).
+not continue until the interrupt has been processed).
(This is also the reason why the irq-versions of the spinlocks only need
to disable the _local_ interrupts - it's ok to use spinlocks in interrupts
on other CPU's, because an interrupt on another CPU doesn't interrupt the
CPU that holds the lock, so the lock-holder can continue and eventually
-releases the lock).
+releases the lock).
Note that you can be clever with read-write locks and interrupts. For
example, if you know that the interrupt only ever gets a read-lock, then
you can use a non-irq version of read locks everywhere - because they
-don't block on each other (and thus there is no dead-lock wrt interrupts.
-But when you do the write-lock, you have to use the irq-safe version.
+don't block on each other (and thus there is no dead-lock wrt interrupts).
+But when you do the write-lock, you have to use the irq-safe version.
-For an example of being clever with rw-locks, see the "waitqueue_lock"
+For an example of being clever with rw-locks, see the "waitqueue_lock"
handling in kernel/sched/core.c - nothing ever _changes_ a wait-queue from
within an interrupt, they only read the queue in order to know whom to
wake up. So read-locks are safe (which is good: they are very common
diff --git a/Documentation/ww-mutex-design.txt b/Documentation/locking/ww-mutex-design.txt
index 8a112dc304c3..8a112dc304c3 100644
--- a/Documentation/ww-mutex-design.txt
+++ b/Documentation/locking/ww-mutex-design.txt
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index a4de88fb55f0..22a969cdd476 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -574,30 +574,14 @@ However, stores are not speculated. This means that ordering -is- provided
in the following example:
q = ACCESS_ONCE(a);
- if (ACCESS_ONCE(q)) {
- ACCESS_ONCE(b) = p;
- }
-
-Please note that ACCESS_ONCE() is not optional! Without the ACCESS_ONCE(),
-the compiler is within its rights to transform this example:
-
- q = a;
if (q) {
- b = p; /* BUG: Compiler can reorder!!! */
- do_something();
- } else {
- b = p; /* BUG: Compiler can reorder!!! */
- do_something_else();
+ ACCESS_ONCE(b) = p;
}
-into this, which of course defeats the ordering:
-
- b = p;
- q = a;
- if (q)
- do_something();
- else
- do_something_else();
+Please note that ACCESS_ONCE() is not optional! Without the
+ACCESS_ONCE(), the compiler might combine the load from 'a' with other
+loads from 'a', and the store to 'b' with other stores to 'b', with
+possible highly counterintuitive effects on ordering.
Worse yet, if the compiler is able to prove (say) that the value of
variable 'a' is always non-zero, it would be well within its rights
@@ -605,11 +589,12 @@ to optimize the original example by eliminating the "if" statement
as follows:
q = a;
- b = p; /* BUG: Compiler can reorder!!! */
- do_something();
+ b = p; /* BUG: Compiler and CPU can both reorder!!! */
+
+So don't leave out the ACCESS_ONCE().
-The solution is again ACCESS_ONCE() and barrier(), which preserves the
-ordering between the load from variable 'a' and the store to variable 'b':
+It is tempting to try to enforce ordering on identical stores on both
+branches of the "if" statement as follows:
q = ACCESS_ONCE(a);
if (q) {
@@ -622,18 +607,11 @@ ordering between the load from variable 'a' and the store to variable 'b':
do_something_else();
}
-The initial ACCESS_ONCE() is required to prevent the compiler from
-proving the value of 'a', and the pair of barrier() invocations are
-required to prevent the compiler from pulling the two identical stores
-to 'b' out from the legs of the "if" statement.
-
-It is important to note that control dependencies absolutely require a
-a conditional. For example, the following "optimized" version of
-the above example breaks ordering, which is why the barrier() invocations
-are absolutely required if you have identical stores in both legs of
-the "if" statement:
+Unfortunately, current compilers will transform this as follows at high
+optimization levels:
q = ACCESS_ONCE(a);
+ barrier();
ACCESS_ONCE(b) = p; /* BUG: No ordering vs. load from a!!! */
if (q) {
/* ACCESS_ONCE(b) = p; -- moved up, BUG!!! */
@@ -643,21 +621,36 @@ the "if" statement:
do_something_else();
}
-It is of course legal for the prior load to be part of the conditional,
-for example, as follows:
+Now there is no conditional between the load from 'a' and the store to
+'b', which means that the CPU is within its rights to reorder them:
+The conditional is absolutely required, and must be present in the
+assembly code even after all compiler optimizations have been applied.
+Therefore, if you need ordering in this example, you need explicit
+memory barriers, for example, smp_store_release():
- if (ACCESS_ONCE(a) > 0) {
- barrier();
- ACCESS_ONCE(b) = q / 2;
+ q = ACCESS_ONCE(a);
+ if (q) {
+ smp_store_release(&b, p);
do_something();
} else {
- barrier();
- ACCESS_ONCE(b) = q / 3;
+ smp_store_release(&b, p);
+ do_something_else();
+ }
+
+In contrast, without explicit memory barriers, two-legged-if control
+ordering is guaranteed only when the stores differ, for example:
+
+ q = ACCESS_ONCE(a);
+ if (q) {
+ ACCESS_ONCE(b) = p;
+ do_something();
+ } else {
+ ACCESS_ONCE(b) = r;
do_something_else();
}
-This will again ensure that the load from variable 'a' is ordered before the
-stores to variable 'b'.
+The initial ACCESS_ONCE() is still required to prevent the compiler from
+proving the value of 'a'.
In addition, you need to be careful what you do with the local variable 'q',
otherwise the compiler might be able to guess the value and again remove
@@ -665,12 +658,10 @@ the needed conditional. For example:
q = ACCESS_ONCE(a);
if (q % MAX) {
- barrier();
ACCESS_ONCE(b) = p;
do_something();
} else {
- barrier();
- ACCESS_ONCE(b) = p;
+ ACCESS_ONCE(b) = r;
do_something_else();
}
@@ -682,9 +673,12 @@ transform the above code into the following:
ACCESS_ONCE(b) = p;
do_something_else();
-This transformation loses the ordering between the load from variable 'a'
-and the store to variable 'b'. If you are relying on this ordering, you
-should do something like the following:
+Given this transformation, the CPU is not required to respect the ordering
+between the load from variable 'a' and the store to variable 'b'. It is
+tempting to add a barrier(), but this does not help. The conditional
+is gone, and the barrier won't bring it back. Therefore, if you are
+relying on this ordering, you should make sure that MAX is greater than
+one, perhaps as follows:
q = ACCESS_ONCE(a);
BUILD_BUG_ON(MAX <= 1); /* Order load from a with store to b. */
@@ -692,35 +686,45 @@ should do something like the following:
ACCESS_ONCE(b) = p;
do_something();
} else {
- ACCESS_ONCE(b) = p;
+ ACCESS_ONCE(b) = r;
do_something_else();
}
+Please note once again that the stores to 'b' differ. If they were
+identical, as noted earlier, the compiler could pull this store outside
+of the 'if' statement.
+
Finally, control dependencies do -not- provide transitivity. This is
-demonstrated by two related examples:
+demonstrated by two related examples, with the initial values of
+x and y both being zero:
CPU 0 CPU 1
===================== =====================
r1 = ACCESS_ONCE(x); r2 = ACCESS_ONCE(y);
- if (r1 >= 0) if (r2 >= 0)
+ if (r1 > 0) if (r2 > 0)
ACCESS_ONCE(y) = 1; ACCESS_ONCE(x) = 1;
assert(!(r1 == 1 && r2 == 1));
The above two-CPU example will never trigger the assert(). However,
if control dependencies guaranteed transitivity (which they do not),
-then adding the following two CPUs would guarantee a related assertion:
+then adding the following CPU would guarantee a related assertion:
- CPU 2 CPU 3
- ===================== =====================
- ACCESS_ONCE(x) = 2; ACCESS_ONCE(y) = 2;
+ CPU 2
+ =====================
+ ACCESS_ONCE(x) = 2;
+
+ assert(!(r1 == 2 && r2 == 1 && x == 2)); /* FAILS!!! */
- assert(!(r1 == 2 && r2 == 2 && x == 1 && y == 1)); /* FAILS!!! */
+But because control dependencies do -not- provide transitivity, the above
+assertion can fail after the combined three-CPU example completes. If you
+need the three-CPU example to provide ordering, you will need smp_mb()
+between the loads and stores in the CPU 0 and CPU 1 code fragments,
+that is, just before or just after the "if" statements.
-But because control dependencies do -not- provide transitivity, the
-above assertion can fail after the combined four-CPU example completes.
-If you need the four-CPU example to provide ordering, you will need
-smp_mb() between the loads and stores in the CPU 0 and CPU 1 code fragments.
+These two examples are the LB and WWC litmus tests from this paper:
+http://www.cl.cam.ac.uk/users/pes20/ppc-supplemental/test6.pdf and this
+site: https://www.cl.cam.ac.uk/~pes20/ppcmem/index.html.
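
As a hedged illustration, and not part of this patch, the three-CPU example with
the smp_mb() calls suggested above might be sketched as follows; the
cpu0()/cpu1()/cpu2() function names are purely illustrative:

    int x, y;
    int r1, r2;

    void cpu0(void)
    {
        r1 = ACCESS_ONCE(x);
        smp_mb();                       /* order the load from x ... */
        if (r1 > 0)
            ACCESS_ONCE(y) = 1;         /* ... against this store to y */
    }

    void cpu1(void)
    {
        r2 = ACCESS_ONCE(y);
        smp_mb();                       /* order the load from y ... */
        if (r2 > 0)
            ACCESS_ONCE(x) = 1;         /* ... against this store to x */
    }

    void cpu2(void)
    {
        ACCESS_ONCE(x) = 2;
    }

With the barriers in place, the combined outcome (r1 == 2 && r2 == 1 && x == 2)
discussed above can no longer occur.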
In summary:
diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt
index b1935f9ce081..58d08f8d8d80 100644
--- a/Documentation/networking/filter.txt
+++ b/Documentation/networking/filter.txt
@@ -700,11 +700,11 @@ Some core changes of the new internal format:
bpf_exit
If f2 is JITed and the pointer stored to '_f2'. The calls f1 -> f2 -> f3 and
- returns will be seamless. Without JIT, __sk_run_filter() interpreter needs to
+ returns will be seamless. Without JIT, __bpf_prog_run() interpreter needs to
be used to call into f2.
For practical reasons all eBPF programs have only one argument 'ctx' which is
- already placed into R1 (e.g. on __sk_run_filter() startup) and the programs
+ already placed into R1 (e.g. on __bpf_prog_run() startup) and the programs
can call kernel functions with up to 5 arguments. Calls with 6 or more arguments
are currently not supported, but these restrictions can be lifted if necessary
in the future.
diff --git a/Documentation/powerpc/00-INDEX b/Documentation/powerpc/00-INDEX
index a68784d0a1ee..6fd0e8bb8140 100644
--- a/Documentation/powerpc/00-INDEX
+++ b/Documentation/powerpc/00-INDEX
@@ -11,6 +11,8 @@ bootwrapper.txt
cpu_features.txt
- info on how we support a variety of CPUs with minimal compile-time
options.
+cxl.txt
+ - Overview of the CXL driver.
eeh-pci-error-recovery.txt
- info on PCI Bus EEH Error Recovery
firmware-assisted-dump.txt
diff --git a/Documentation/powerpc/cxl.txt b/Documentation/powerpc/cxl.txt
new file mode 100644
index 000000000000..2c71ecc519d9
--- /dev/null
+++ b/Documentation/powerpc/cxl.txt
@@ -0,0 +1,379 @@
+Coherent Accelerator Interface (CXL)
+====================================
+
+Introduction
+============
+
+ The coherent accelerator interface is designed to allow the
+ coherent connection of accelerators (FPGAs and other devices) to a
+ POWER system. These devices need to adhere to the Coherent
+ Accelerator Interface Architecture (CAIA).
+
+ IBM refers to this as the Coherent Accelerator Processor Interface
+ or CAPI. In the kernel it's referred to by the name CXL to avoid
+ confusion with the ISDN CAPI subsystem.
+
+ Coherent in this context means that the accelerator and CPUs can
+ both access system memory directly and with the same effective
+ addresses.
+
+
+Hardware overview
+=================
+
+ POWER8 FPGA
+ +----------+ +---------+
+ | | | |
+ | CPU | | AFU |
+ | | | |
+ | | | |
+ | | | |
+ +----------+ +---------+
+ | PHB | | |
+ | +------+ | PSL |
+ | | CAPP |<------>| |
+ +---+------+ PCIE +---------+
+
+ The POWER8 chip has a Coherently Attached Processor Proxy (CAPP)
+ unit which is part of the PCIe Host Bridge (PHB). This is managed
+ by Linux by calls into OPAL. Linux doesn't directly program the
+ CAPP.
+
+ The FPGA (or coherently attached device) consists of two parts.
+ The POWER Service Layer (PSL) and the Accelerator Function Unit
+ (AFU). The AFU is used to implement specific functionality behind
+ the PSL. The PSL, among other things, provides memory address
+ translation services to allow each AFU direct access to userspace
+ memory.
+
+ The AFU is the core part of the accelerator (e.g. the compression or
+ crypto function). The kernel has no knowledge of the function
+ of the AFU. Only userspace interacts directly with the AFU.
+
+ The PSL provides the translation and interrupt services that the
+ AFU needs. This is what the kernel interacts with. For example, if
+ the AFU needs to read a particular effective address, it sends
+ that address to the PSL, the PSL then translates it, fetches the
+ data from memory and returns it to the AFU. If the PSL has a
+ translation miss, it interrupts the kernel and the kernel services
+ the fault. The context in which this fault is serviced is based on
+ who owns that acceleration function.
+
+
+AFU Modes
+=========
+
+ There are two programming modes supported by the AFU: dedicated
+ and AFU directed. An AFU may support one or both modes.
+
+ When using dedicated mode only one MMU context is supported. In
+ this mode, only one userspace process can use the accelerator at
+ a time.
+
+ When using AFU directed mode, up to 16K simultaneous contexts can
+ be supported. This means up to 16K simultaneous userspace
+ applications may use the accelerator (although specific AFUs may
+ support fewer). In this mode, the AFU sends a 16 bit context ID
+ with each of its requests. This tells the PSL which context is
+ associated with each operation. If the PSL can't translate an
+ operation, the ID can also be accessed by the kernel so it can
+ determine the userspace context associated with an operation.
+
+
+MMIO space
+==========
+
+ A portion of the accelerator MMIO space can be directly mapped
+ from the AFU to userspace. Either the whole space can be mapped or
+ just a per context portion. The hardware is self-describing, so
+ the kernel can determine the offset and size of the per context
+ portion.
+
+
+Interrupts
+==========
+
+ AFUs may generate interrupts that are destined for userspace. These
+ are received by the kernel as hardware interrupts and passed on to
+ userspace by the read syscall documented below.
+
+ Data storage faults and error interrupts are handled by the kernel
+ driver.
+
+
+Work Element Descriptor (WED)
+=============================
+
+ The WED is a 64-bit parameter passed to the AFU when a context is
+ started. Its format is up to the AFU hence the kernel has no
+ knowledge of what it represents. Typically it will be the
+ effective address of a work queue or status block where the AFU
+ and userspace can share control and status information.
+
+
+
+
+User API
+========
+
+ For AFUs operating in AFU directed mode, two character device
+ files will be created. /dev/cxl/afu0.0m will correspond to a
+ master context and /dev/cxl/afu0.0s will correspond to a slave
+ context. Master contexts have access to the full MMIO space an
+ AFU provides. Slave contexts have access to only the per process
+ MMIO space an AFU provides.
+
+ For AFUs operating in dedicated process mode, the driver will
+ only create a single character device per AFU called
+ /dev/cxl/afu0.0d. This will have access to the entire MMIO space
+ that the AFU provides (like master contexts in AFU directed).
+
+ The types described below are defined in include/uapi/misc/cxl.h
+
+ The following file operations are supported on both slave and
+ master devices.
+
+
+open
+----
+
+ Opens the device and allocates a file descriptor to be used with
+ the rest of the API.
+
+ A dedicated mode AFU only has one context and only allows the
+ device to be opened once.
+
+ An AFU directed mode AFU can have many contexts; the device can be
+ opened once for each context that is available.
+
+ When all available contexts are allocated the open call will fail
+ and return -ENOSPC.
+
+ Note: IRQs need to be allocated for each context, which may limit
+ the number of contexts that can be created, and therefore
+ how many times the device can be opened. The POWER8 CAPP
+ supports 2040 IRQs and 3 are used by the kernel, so 2037 are
+ left. If 1 IRQ is needed per context, then only 2037
+ contexts can be allocated. If 4 IRQs are needed per context,
+ then only 2037/4 = 509 contexts can be allocated.
+
+
+ioctl
+-----
+
+ CXL_IOCTL_START_WORK:
+ Starts the AFU context and associates it with the current
+ process. Once this ioctl is successfully executed, all memory
+ mapped into this process is accessible to this AFU context
+ using the same effective addresses. No additional calls are
+ required to map/unmap memory. The AFU memory context will be
+ updated as userspace allocates and frees memory. This ioctl
+ returns once the AFU context is started.
+
+ Takes a pointer to a struct cxl_ioctl_start_work:
+
+ struct cxl_ioctl_start_work {
+ __u64 flags;
+ __u64 work_element_descriptor;
+ __u64 amr;
+ __s16 num_interrupts;
+ __s16 reserved1;
+ __s32 reserved2;
+ __u64 reserved3;
+ __u64 reserved4;
+ __u64 reserved5;
+ __u64 reserved6;
+ };
+
+ flags:
+ Indicates which optional fields in the structure are
+ valid.
+
+ work_element_descriptor:
+ The Work Element Descriptor (WED) is a 64-bit argument
+ defined by the AFU. Typically this is an effective
+ address pointing to an AFU specific structure
+ describing what work to perform.
+
+ amr:
+ Authority Mask Register (AMR), same as the powerpc
+ AMR. This field is only used by the kernel when the
+ corresponding CXL_START_WORK_AMR value is specified in
+ flags. If not specified the kernel will use a default
+ value of 0.
+
+ num_interrupts:
+ Number of userspace interrupts to request. This field
+ is only used by the kernel when the corresponding
+ CXL_START_WORK_NUM_IRQS value is specified in flags.
+ If not specified the minimum number required by the
+ AFU will be allocated. The min and max number can be
+ obtained from sysfs.
+
+ reserved fields:
+ For ABI padding and future extensions
+
+ CXL_IOCTL_GET_PROCESS_ELEMENT:
+ Get the current context id, also known as the process element.
+ The value is returned from the kernel as a __u32.
+
+
+mmap
+----
+
+ An AFU may have an MMIO space to facilitate communication with the
+ AFU. If it does, the MMIO space can be accessed via mmap. The size
+ and contents of this area are specific to the particular AFU. The
+ size can be discovered via sysfs.
+
+ In AFU directed mode, master contexts are allowed to map all of
+ the MMIO space and slave contexts are allowed to only map the per
+ process MMIO space associated with the context. In dedicated
+ process mode the entire MMIO space can always be mapped.
+
+ This mmap call must be done after the START_WORK ioctl.
+
+ Care should be taken when accessing MMIO space. Only 32 and 64-bit
+ accesses are supported by POWER8. Also, the AFU will be designed
+ with a specific endianness, so all MMIO accesses should consider
+ endianness (the endian(3) conversion helpers such as le64toh() and
+ be64toh() are recommended). These endian issues equally apply to
+ shared memory queues the WED may describe.
+
+
+read
+----
+
+ Reads events from the AFU. Blocks if no events are pending
+ (unless O_NONBLOCK is supplied). Returns -EIO in the case of an
+ unrecoverable error or if the card is removed.
+
+ read() will always return an integral number of events.
+
+ The buffer passed to read() must be at least 4K bytes.
+
+ The result of the read will be a buffer of one or more events,
+ each of which is of type struct cxl_event and of varying size.
+
+ struct cxl_event {
+ struct cxl_event_header header;
+ union {
+ struct cxl_event_afu_interrupt irq;
+ struct cxl_event_data_storage fault;
+ struct cxl_event_afu_error afu_error;
+ };
+ };
+
+ The struct cxl_event_header is defined as:
+
+ struct cxl_event_header {
+ __u16 type;
+ __u16 size;
+ __u16 process_element;
+ __u16 reserved1;
+ };
+
+ type:
+ This defines the type of event. The type determines how
+ the rest of the event is structured. These types are
+ described below and defined by enum cxl_event_type.
+
+ size:
+ This is the size of the event in bytes including the
+ struct cxl_event_header. The start of the next event can
+ be found at this offset from the start of the current
+ event.
+
+ process_element:
+ Context ID of the event.
+
+ reserved field:
+ For future extensions and padding.
+
+ If the event type is CXL_EVENT_AFU_INTERRUPT then the event
+ structure is defined as:
+
+ struct cxl_event_afu_interrupt {
+ __u16 flags;
+ __u16 irq; /* Raised AFU interrupt number */
+ __u32 reserved1;
+ };
+
+ flags:
+ These flags indicate which optional fields are present
+ in this struct. Currently all fields are mandatory.
+
+ irq:
+ The IRQ number sent by the AFU.
+
+ reserved field:
+ For future extensions and padding.
+
+ If the event type is CXL_EVENT_DATA_STORAGE then the event
+ structure is defined as:
+
+ struct cxl_event_data_storage {
+ __u16 flags;
+ __u16 reserved1;
+ __u32 reserved2;
+ __u64 addr;
+ __u64 dsisr;
+ __u64 reserved3;
+ };
+
+ flags:
+ These flags indicate which optional fields are present in
+ this struct. Currently all fields are mandatory.
+
+ addr:
+ The address that the AFU unsuccessfully attempted to
+ access. Valid accesses will be handled transparently by the
+ kernel but invalid accesses will generate this event.
+
+ dsisr:
+ This field gives information on the type of fault. It is a
+ copy of the DSISR from the PSL hardware when the address
+ fault occurred. The form of the DSISR is as defined in the
+ CAIA.
+
+ reserved fields:
+ For future extensions
+
+ If the event type is CXL_EVENT_AFU_ERROR then the event structure
+ is defined as:
+
+ struct cxl_event_afu_error {
+ __u16 flags;
+ __u16 reserved1;
+ __u32 reserved2;
+ __u64 error;
+ };
+
+ flags:
+ These flags indicate which optional fields are present in
+ this struct. Currently all fields are mandatory.
+
+ error:
+ Error status from the AFU. Defined by the AFU.
+
+ reserved fields:
+ For future extensions and padding
+
+Sysfs Class
+===========
+
+ A cxl sysfs class is added under /sys/class/cxl to facilitate
+ enumeration and tuning of the accelerators. Its layout is
+ described in Documentation/ABI/testing/sysfs-class-cxl
+
+Udev rules
+==========
+
+ The following udev rules could be used to create a symlink to the
+ most logical chardev to use in any programming mode (afuX.Yd for
+ dedicated, afuX.Ys for afu directed), since the API is virtually
+ identical for each:
+
+ SUBSYSTEM=="cxl", ATTRS{mode}=="dedicated_process", SYMLINK="cxl/%b"
+ SUBSYSTEM=="cxl", ATTRS{mode}=="afu_directed", \
+ KERNEL=="afu[0-9]*.[0-9]*s", SYMLINK="cxl/%b"
diff --git a/Documentation/scheduler/sched-deadline.txt b/Documentation/scheduler/sched-deadline.txt
index 18adc92a6b3b..21461a0441c1 100644
--- a/Documentation/scheduler/sched-deadline.txt
+++ b/Documentation/scheduler/sched-deadline.txt
@@ -15,6 +15,8 @@ CONTENTS
5. Tasks CPU affinity
5.1 SCHED_DEADLINE and cpusets HOWTO
6. Future plans
+ A. Test suite
+ B. Minimal main()
0. WARNING
@@ -38,24 +40,25 @@ CONTENTS
==================
SCHED_DEADLINE uses three parameters, named "runtime", "period", and
- "deadline" to schedule tasks. A SCHED_DEADLINE task is guaranteed to receive
+ "deadline", to schedule tasks. A SCHED_DEADLINE task should receive
"runtime" microseconds of execution time every "period" microseconds, and
these "runtime" microseconds are available within "deadline" microseconds
from the beginning of the period. In order to implement this behaviour,
every time the task wakes up, the scheduler computes a "scheduling deadline"
consistent with the guarantee (using the CBS[2,3] algorithm). Tasks are then
scheduled using EDF[1] on these scheduling deadlines (the task with the
- smallest scheduling deadline is selected for execution). Notice that this
- guaranteed is respected if a proper "admission control" strategy (see Section
- "4. Bandwidth management") is used.
+ earliest scheduling deadline is selected for execution). Notice that the
+ task actually receives "runtime" time units within "deadline" if a proper
+ "admission control" strategy (see Section "4. Bandwidth management") is used
+ (clearly, if the system is overloaded this guarantee cannot be respected).
Summing up, the CBS[2,3] algorithms assigns scheduling deadlines to tasks so
that each task runs for at most its runtime every period, avoiding any
interference between different tasks (bandwidth isolation), while the EDF[1]
- algorithm selects the task with the smallest scheduling deadline as the one
- to be executed first. Thanks to this feature, also tasks that do not
- strictly comply with the "traditional" real-time task model (see Section 3)
- can effectively use the new policy.
+ algorithm selects the task with the earliest scheduling deadline as the one
+ to be executed next. Thanks to this feature, tasks that do not strictly comply
+ with the "traditional" real-time task model (see Section 3) can effectively
+ use the new policy.
In more details, the CBS algorithm assigns scheduling deadlines to
tasks in the following way:
@@ -64,45 +67,45 @@ CONTENTS
"deadline", and "period" parameters;
- The state of the task is described by a "scheduling deadline", and
- a "current runtime". These two parameters are initially set to 0;
+ a "remaining runtime". These two parameters are initially set to 0;
- When a SCHED_DEADLINE task wakes up (becomes ready for execution),
the scheduler checks if
- current runtime runtime
- ---------------------------------- > ----------------
- scheduling deadline - current time period
+ remaining runtime runtime
+ ---------------------------------- > ---------
+ scheduling deadline - current time period
then, if the scheduling deadline is smaller than the current time, or
this condition is verified, the scheduling deadline and the
- current budget are re-initialised as
+ remaining runtime are re-initialised as
scheduling deadline = current time + deadline
- current runtime = runtime
+ remaining runtime = runtime
- otherwise, the scheduling deadline and the current runtime are
+ otherwise, the scheduling deadline and the remaining runtime are
left unchanged;
- When a SCHED_DEADLINE task executes for an amount of time t, its
- current runtime is decreased as
+ remaining runtime is decreased as
- current runtime = current runtime - t
+ remaining runtime = remaining runtime - t
(technically, the runtime is decreased at every tick, or when the
task is descheduled / preempted);
- - When the current runtime becomes less or equal than 0, the task is
+ - When the remaining runtime becomes less than or equal to 0, the task is
said to be "throttled" (also known as "depleted" in real-time literature)
and cannot be scheduled until its scheduling deadline. The "replenishment
time" for this task (see next item) is set to be equal to the current
value of the scheduling deadline;
- When the current time is equal to the replenishment time of a
- throttled task, the scheduling deadline and the current runtime are
+ throttled task, the scheduling deadline and the remaining runtime are
updated as
scheduling deadline = scheduling deadline + period
- current runtime = current runtime + runtime
+ remaining runtime = remaining runtime + runtime
3. Scheduling Real-Time Tasks
@@ -134,6 +137,50 @@ CONTENTS
A real-time task can be periodic with period P if r_{j+1} = r_j + P, or
sporadic with minimum inter-arrival time P is r_{j+1} >= r_j + P. Finally,
d_j = r_j + D, where D is the task's relative deadline.
+ The utilisation of a real-time task is defined as the ratio between its
+ WCET and its period (or minimum inter-arrival time), and represents
+ the fraction of CPU time needed to execute the task.
+
+ If the total utilisation sum_i(WCET_i/P_i) is larger than M (with M equal
+ to the number of CPUs), then the scheduler is unable to respect all the
+ deadlines.
+ Note that total utilisation is defined as the sum of the utilisations
+ WCET_i/P_i over all the real-time tasks in the system. When considering
+ multiple real-time tasks, the parameters of the i-th task are indicated
+ with the "_i" suffix.
+ Moreover, if the total utilisation is larger than M, then the real-time
+ tasks risk starving the non real-time tasks.
+ If, instead, the total utilisation is smaller than M, then non real-time
+ tasks will not be starved and the system might be able to respect all the
+ deadlines.
+ As a matter of fact, in this case it is possible to provide an upper bound
+ for tardiness (defined as the maximum between 0 and the difference
+ between the finishing time of a job and its absolute deadline).
+ More precisely, it can be proven that using a global EDF scheduler the
+ maximum tardiness of each task is smaller than or equal to
+ ((M − 1) · WCET_max − WCET_min)/(M − (M − 2) · U_max) + WCET_max
+ where WCET_max = max_i{WCET_i} is the maximum WCET, WCET_min=min_i{WCET_i}
+ is the minimum WCET, and U_max = max_i{WCET_i/P_i} is the maximum utilisation.
+
+ If M=1 (uniprocessor system), or in case of partitioned scheduling (each
+ real-time task is statically assigned to one and only one CPU), it is
+ possible to formally check if all the deadlines are respected.
+ If D_i = P_i for all tasks, then EDF is able to respect all the deadlines
+ of all the tasks executing on a CPU if and only if the total utilisation
+ of the tasks running on such a CPU is smaller than or equal to 1.
+ If D_i != P_i for some task, then it is possible to define the density of
+ a task as WCET_i/min{D_i,P_i}, and EDF is able to respect all the deadlines
+ of all the tasks running on a CPU if the sum sum_i WCET_i/min{D_i,P_i} of the
+ densities of the tasks running on such a CPU is smaller than or equal to 1
+ (notice that this condition is only sufficient, and not necessary).
+
+ On multiprocessor systems with global EDF scheduling (non partitioned
+ systems), a sufficient test for schedulability cannot be based on the
+ utilisations (it can be shown that task sets with utilisations slightly
+ larger than 1 can miss deadlines regardless of the number of CPUs M).
+ However, as previously stated, enforcing that the total utilisation is smaller
+ than M is enough to guarantee that non real-time tasks are not starved and
+ that the tardiness of real-time tasks has an upper bound.
SCHED_DEADLINE can be used to schedule real-time tasks guaranteeing that
the jobs' deadlines of a task are respected. In order to do this, a task
@@ -147,6 +194,8 @@ CONTENTS
and the absolute deadlines (d_j) coincide, so a proper admission control
allows to respect the jobs' absolute deadlines for this task (this is what is
called "hard schedulability property" and is an extension of Lemma 1 of [2]).
+ Notice that if runtime > deadline the admission control will surely reject
+ this task, as it is not possible to respect its temporal constraints.
References:
1 - C. L. Liu and J. W. Layland. Scheduling algorithms for multiprogram-
@@ -156,46 +205,57 @@ CONTENTS
Real-Time Systems. Proceedings of the 19th IEEE Real-time Systems
Symposium, 1998. http://retis.sssup.it/~giorgio/paps/1998/rtss98-cbs.pdf
3 - L. Abeni. Server Mechanisms for Multimedia Applications. ReTiS Lab
- Technical Report. http://xoomer.virgilio.it/lucabe72/pubs/tr-98-01.ps
+ Technical Report. http://disi.unitn.it/~abeni/tr-98-01.pdf
4. Bandwidth management
=======================
- In order for the -deadline scheduling to be effective and useful, it is
- important to have some method to keep the allocation of the available CPU
- bandwidth to the tasks under control.
- This is usually called "admission control" and if it is not performed at all,
+ As previously mentioned, in order for -deadline scheduling to be
+ effective and useful (that is, to be able to provide "runtime" time units
+ within "deadline"), it is important to have some method to keep the allocation
+ of the available fractions of CPU time to the various tasks under control.
+ This is usually called "admission control" and if it is not performed, then
no guarantee can be given on the actual scheduling of the -deadline tasks.
- Since when RT-throttling has been introduced each task group has a bandwidth
- associated, calculated as a certain amount of runtime over a period.
- Moreover, to make it possible to manipulate such bandwidth, readable/writable
- controls have been added to both procfs (for system wide settings) and cgroupfs
- (for per-group settings).
- Therefore, the same interface is being used for controlling the bandwidth
- distrubution to -deadline tasks.
-
- However, more discussion is needed in order to figure out how we want to manage
- SCHED_DEADLINE bandwidth at the task group level. Therefore, SCHED_DEADLINE
- uses (for now) a less sophisticated, but actually very sensible, mechanism to
- ensure that a certain utilization cap is not overcome per each root_domain.
-
- Another main difference between deadline bandwidth management and RT-throttling
+ As already stated in Section 3, a necessary condition to be respected to
+ correctly schedule a set of real-time tasks is that the total utilisation
+ is smaller than M. When talking about -deadline tasks, this requires that
+ the sum of the ratio between runtime and period for all tasks is smaller
+ than M. Notice that the ratio runtime/period is equivalent to the utilisation
+ of a "traditional" real-time task, and is also often referred to as
+ "bandwidth".
+ The interface used to control the CPU bandwidth that can be allocated
+ to -deadline tasks is similar to the one already used for -rt
+ tasks with real-time group scheduling (a.k.a. RT-throttling - see
+ Documentation/scheduler/sched-rt-group.txt), and is based on readable/
+ writable control files located in procfs (for system wide settings).
+ Notice that per-group settings (controlled through cgroupfs) are still not
+ defined for -deadline tasks, because more discussion is needed in order to
+ figure out how we want to manage SCHED_DEADLINE bandwidth at the task group
+ level.
+
+ A main difference between deadline bandwidth management and RT-throttling
is that -deadline tasks have bandwidth on their own (while -rt ones don't!),
- and thus we don't need an higher level throttling mechanism to enforce the
- desired bandwidth.
+ and thus we don't need a higher level throttling mechanism to enforce the
+ desired bandwidth. In other words, this means that interface parameters are
+ only used at admission control time (i.e., when the user calls
+ sched_setattr()). Scheduling is then performed considering actual tasks'
+ parameters, so that CPU bandwidth is allocated to SCHED_DEADLINE tasks
+ respecting their needs in terms of granularity. Therefore, using this simple
+ interface we can put a cap on total utilization of -deadline tasks (i.e.,
+ \Sum (runtime_i / period_i) < global_dl_utilization_cap).
4.1 System wide settings
------------------------
The system wide settings are configured under the /proc virtual file system.
- For now the -rt knobs are used for dl admission control and the -deadline
- runtime is accounted against the -rt runtime. We realise that this isn't
- entirely desirable; however, it is better to have a small interface for now,
- and be able to change it easily later. The ideal situation (see 5.) is to run
- -rt tasks from a -deadline server; in which case the -rt bandwidth is a direct
- subset of dl_bw.
+ For now the -rt knobs are used for -deadline admission control and the
+ -deadline runtime is accounted against the -rt runtime. We realise that this
+ isn't entirely desirable; however, it is better to have a small interface for
+ now, and be able to change it easily later. The ideal situation (see 5.) is to
+ run -rt tasks from a -deadline server; in which case the -rt bandwidth is a
+ direct subset of dl_bw.
This means that, for a root_domain comprising M CPUs, -deadline tasks
can be created while the sum of their bandwidths stays below:
@@ -231,8 +291,16 @@ CONTENTS
950000. With rt_period equal to 1000000, by default, it means that -deadline
tasks can use at most 95%, multiplied by the number of CPUs that compose the
root_domain, for each root_domain.
+ This means that non -deadline tasks will receive at least 5% of the CPU time,
+ and that -deadline tasks will receive their runtime with a guaranteed
+ worst-case delay with respect to the "deadline" parameter. If "deadline" = "period"
+ and the cpuset mechanism is used to implement partitioned scheduling (see
+ Section 5), then this simple setting of the bandwidth management is able to
+ deterministically guarantee that -deadline tasks will receive their runtime
+ in a period.
- A -deadline task cannot fork.
+ Finally, notice that in order not to jeopardize the admission control a
+ -deadline task cannot fork.
5. Tasks CPU affinity
=====================
@@ -279,3 +347,179 @@ CONTENTS
throttling patches [https://lkml.org/lkml/2010/2/23/239] but we still are in
the preliminary phases of the merge and we really seek feedback that would
help us decide on the direction it should take.
+
+Appendix A. Test suite
+======================
+
+ The SCHED_DEADLINE policy can be easily tested using two applications that
+ are part of a wider Linux Scheduler validation suite. The suite is
+ available as a GitHub repository: https://github.com/scheduler-tools.
+
+ The first testing application is called rt-app and can be used to
+ start multiple threads with specific parameters. rt-app supports
+ SCHED_{OTHER,FIFO,RR,DEADLINE} scheduling policies and their related
+ parameters (e.g., niceness, priority, runtime/deadline/period). rt-app
+ is a valuable tool, as it can be used to synthetically recreate certain
+ workloads (maybe mimicking real use-cases) and evaluate how the scheduler
+ behaves under such workloads. In this way, results are easily reproducible.
+ rt-app is available at: https://github.com/scheduler-tools/rt-app.
+
+ Thread parameters can be specified from the command line, with something like
+ this:
+
+ # rt-app -t 100000:10000:d -t 150000:20000:f:10 -D5
+
+ The above creates 2 threads. The first one, scheduled by SCHED_DEADLINE,
+ executes for 10ms every 100ms. The second one, scheduled at SCHED_FIFO
+ priority 10, executes for 20ms every 150ms. The test will run for a total
+ of 5 seconds.
+
+ More interestingly, configurations can be described with a json file that
+ can be passed as input to rt-app with something like this:
+
+ # rt-app my_config.json
+
+ The parameters that can be specified with the second method are a superset
+ of the command line options. Please refer to rt-app documentation for more
+ details (<rt-app-sources>/doc/*.json).
+
+ The second testing application is a modification of schedtool, called
+ schedtool-dl, which can be used to setup SCHED_DEADLINE parameters for a
+ certain pid/application. schedtool-dl is available at:
+ https://github.com/scheduler-tools/schedtool-dl.git.
+
+ The usage is straightforward:
+
+ # schedtool -E -t 10000000:100000000 -e ./my_cpuhog_app
+
+ With this, my_cpuhog_app is put to run inside a SCHED_DEADLINE reservation
+ of 10ms every 100ms (note that parameters are expressed in microseconds).
+ You can also use schedtool to create a reservation for an already running
+ application, given that you know its pid:
+
+ # schedtool -E -t 10000000:100000000 my_app_pid
+
+Appendix B. Minimal main()
+==========================
+
+ We provide in what follows a simple (ugly) self-contained code snippet
+ showing how SCHED_DEADLINE reservations can be created by a real-time
+ application developer.
+
+ #define _GNU_SOURCE
+ #include <unistd.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <string.h>
+ #include <time.h>
+ #include <linux/unistd.h>
+ #include <linux/kernel.h>
+ #include <linux/types.h>
+ #include <sys/syscall.h>
+ #include <pthread.h>
+
+ #define gettid() syscall(__NR_gettid)
+
+ #define SCHED_DEADLINE 6
+
+ /* XXX use the proper syscall numbers */
+ #ifdef __x86_64__
+ #define __NR_sched_setattr 314
+ #define __NR_sched_getattr 315
+ #endif
+
+ #ifdef __i386__
+ #define __NR_sched_setattr 351
+ #define __NR_sched_getattr 352
+ #endif
+
+ #ifdef __arm__
+ #define __NR_sched_setattr 380
+ #define __NR_sched_getattr 381
+ #endif
+
+ static volatile int done;
+
+ struct sched_attr {
+ __u32 size;
+
+ __u32 sched_policy;
+ __u64 sched_flags;
+
+ /* SCHED_NORMAL, SCHED_BATCH */
+ __s32 sched_nice;
+
+ /* SCHED_FIFO, SCHED_RR */
+ __u32 sched_priority;
+
+ /* SCHED_DEADLINE (nsec) */
+ __u64 sched_runtime;
+ __u64 sched_deadline;
+ __u64 sched_period;
+ };
+
+ int sched_setattr(pid_t pid,
+ const struct sched_attr *attr,
+ unsigned int flags)
+ {
+ return syscall(__NR_sched_setattr, pid, attr, flags);
+ }
+
+ int sched_getattr(pid_t pid,
+ struct sched_attr *attr,
+ unsigned int size,
+ unsigned int flags)
+ {
+ return syscall(__NR_sched_getattr, pid, attr, size, flags);
+ }
+
+ void *run_deadline(void *data)
+ {
+ struct sched_attr attr;
+ int x = 0;
+ int ret;
+ unsigned int flags = 0;
+
+ printf("deadline thread started [%ld]\n", gettid());
+
+ attr.size = sizeof(attr);
+ attr.sched_flags = 0;
+ attr.sched_nice = 0;
+ attr.sched_priority = 0;
+
+ /* This creates a 10ms/30ms reservation */
+ attr.sched_policy = SCHED_DEADLINE;
+ attr.sched_runtime = 10 * 1000 * 1000;
+ attr.sched_period = attr.sched_deadline = 30 * 1000 * 1000;
+
+ ret = sched_setattr(0, &attr, flags);
+ if (ret < 0) {
+ done = 0;
+ perror("sched_setattr");
+ exit(-1);
+ }
+
+ while (!done) {
+ x++;
+ }
+
+ printf("deadline thread dies [%ld]\n", gettid());
+ return NULL;
+ }
+
+ int main (int argc, char **argv)
+ {
+ pthread_t thread;
+
+ printf("main thread [%ld]\n", gettid());
+
+ pthread_create(&thread, NULL, run_deadline, NULL);
+
+ sleep(10);
+
+ done = 1;
+ pthread_join(thread, NULL);
+
+ printf("main dies [%ld]\n", gettid());
+ return 0;
+ }
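
As a hedged companion to the snippet above (not part of this patch), the
sched_getattr() wrapper it defines can be used to read the parameters back,
for example:

    void print_deadline_params(pid_t pid)
    {
        struct sched_attr attr;
        unsigned int flags = 0;

        if (sched_getattr(pid, &attr, sizeof(attr), flags) < 0) {
            perror("sched_getattr");
            return;
        }

        printf("policy %u runtime %llu deadline %llu period %llu\n",
               attr.sched_policy,
               (unsigned long long)attr.sched_runtime,
               (unsigned long long)attr.sched_deadline,
               (unsigned long long)attr.sched_period);
    }

Calling print_deadline_params(gettid()) from run_deadline() after the
sched_setattr() call would show the reservation that was just set up.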
diff --git a/Documentation/security/keys.txt b/Documentation/security/keys.txt
index 8727c194ca16..821c936e1a63 100644
--- a/Documentation/security/keys.txt
+++ b/Documentation/security/keys.txt
@@ -888,11 +888,11 @@ payload contents" for more information.
const char *callout_info);
This is used to request a key or keyring with a description that matches
- the description specified according to the key type's match function. This
- permits approximate matching to occur. If callout_string is not NULL, then
- /sbin/request-key will be invoked in an attempt to obtain the key from
- userspace. In that case, callout_string will be passed as an argument to
- the program.
+ the description specified according to the key type's match_preparse()
+ method. This permits approximate matching to occur. If callout_string is
+ not NULL, then /sbin/request-key will be invoked in an attempt to obtain
+ the key from userspace. In that case, callout_string will be passed as an
+ argument to the program.
Should the function fail error ENOKEY, EKEYEXPIRED or EKEYREVOKED will be
returned.
@@ -1170,7 +1170,7 @@ The structure has a number of fields, some of which are mandatory:
The method should return 0 if successful or a negative error code
otherwise.
-
+
(*) void (*free_preparse)(struct key_preparsed_payload *prep);
This method is only required if the preparse() method is provided,
@@ -1225,16 +1225,55 @@ The structure has a number of fields, some of which are mandatory:
It is safe to sleep in this method.
- (*) int (*match)(const struct key *key, const void *desc);
+ (*) int (*match_preparse)(struct key_match_data *match_data);
+
+ This method is optional. It is called when a key search is about to be
+ performed. It is given the following structure:
- This method is called to match a key against a description. It should
- return non-zero if the two match, zero if they don't.
+ struct key_match_data {
+ bool (*cmp)(const struct key *key,
+ const struct key_match_data *match_data);
+ const void *raw_data;
+ void *preparsed;
+ unsigned lookup_type;
+ };
- This method should not need to lock the key in any way. The type and
- description can be considered invariant, and the payload should not be
- accessed (the key may not yet be instantiated).
+ On entry, raw_data will be pointing to the criteria to be used in matching
+ a key by the caller and should not be modified. (*cmp)() will be pointing
+ to the default matcher function (which does an exact description match
+ against raw_data) and lookup_type will be set to indicate a direct lookup.
- It is not safe to sleep in this method; the caller may hold spinlocks.
+ The following lookup_type values are available:
+
+ [*] KEYRING_SEARCH_LOOKUP_DIRECT - A direct lookup hashes the type and
+ description to narrow down the search to a small number of keys.
+
+ [*] KEYRING_SEARCH_LOOKUP_ITERATE - An iterative lookup walks all the
+ keys in the keyring until one is matched. This must be used for any
+ search that's not doing a simple direct match on the key description.
+
+ The method may set cmp to point to a function of its choice that does some
+ other form of match, may set lookup_type to KEYRING_SEARCH_LOOKUP_ITERATE
+ and may attach something to the preparsed pointer for use by (*cmp)().
+ (*cmp)() should return true if a key matches and false otherwise.
+
+ If preparsed is set, it may be necessary to use the match_free() method to
+ clean it up.
+
+ The method should return 0 if successful or a negative error code
+ otherwise.
+
+ It is permitted to sleep in this method, but (*cmp)() may not sleep as
+ locks will be held over it.
+
+ If match_preparse() is not provided, keys of this type will be matched
+ exactly by their description.
+
+
+ (*) void (*match_free)(struct key_match_data *match_data);
+
+ This method is optional. If given, it is called to clean up
+ match_data->preparsed after a successful call to match_preparse().
(*) void (*revoke)(struct key *key);
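
As a hedged sketch, not part of this patch, a key type might override the
comparison via the match_preparse() method described above roughly as follows;
the prefix-matching policy and the mytype_* names are purely illustrative:

    #include <linux/key-type.h>
    #include <linux/string.h>

    /* Illustrative policy: match keys whose description starts with the
     * caller's search string. */
    static bool mytype_cmp(const struct key *key,
                           const struct key_match_data *match_data)
    {
        const char *prefix = match_data->raw_data;

        return strncmp(key->description, prefix, strlen(prefix)) == 0;
    }

    static int mytype_match_preparse(struct key_match_data *match_data)
    {
        /* Not a plain description match, so the keyring must be
         * iterated rather than searched by hash. */
        match_data->lookup_type = KEYRING_SEARCH_LOOKUP_ITERATE;
        match_data->cmp = mytype_cmp;
        return 0;
    }

    static struct key_type key_type_mytype = {
        .name           = "mytype",
        .match_preparse = mytype_match_preparse,
        /* instantiate(), describe(), destroy() etc. omitted; the type
         * would be registered elsewhere with register_key_type(). */
    };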
diff --git a/Documentation/video4linux/vivid.txt b/Documentation/video4linux/vivid.txt
new file mode 100644
index 000000000000..eeb11a28e4fc
--- /dev/null
+++ b/Documentation/video4linux/vivid.txt
@@ -0,0 +1,1111 @@
+vivid: Virtual Video Test Driver
+================================
+
+This driver emulates video4linux hardware of various types: video capture, video
+output, vbi capture and output, radio receivers and transmitters and a software
+defined radio receiver. In addition a simple framebuffer device is available for
+testing capture and output overlays.
+
+Up to 64 vivid instances can be created, each with up to 16 inputs and 16 outputs.
+
+Each input can be a webcam, TV capture device, S-Video capture device or an HDMI
+capture device. Each output can be an S-Video output device or an HDMI output
+device.
+
+These inputs and outputs behave exactly as real hardware devices would. This
+allows you to use this driver as a test input for application development, since
+you can test the various features without requiring special hardware.
+
+This document describes the features implemented by this driver:
+
+- Support for read()/write(), MMAP, USERPTR and DMABUF streaming I/O.
+- A large list of test patterns and variations thereof
+- Working brightness, contrast, saturation and hue controls
+- Support for the alpha color component
+- Full colorspace support, including limited/full RGB range
+- All possible control types are present
+- Support for various pixel aspect ratios and video aspect ratios
+- Error injection to test what happens if errors occur
+- Supports crop/compose/scale in any combination for both input and output
+- Can emulate up to 4K resolutions
+- All Field settings are supported for testing interlaced capturing
+- Supports all standard YUV and RGB formats, including two multiplanar YUV formats
+- Raw and Sliced VBI capture and output support
+- Radio receiver and transmitter support, including RDS support
+- Software defined radio (SDR) support
+- Capture and output overlay support
+
+These features will be described in more detail below.
+
+
+Table of Contents
+-----------------
+
+Section 1: Configuring the driver
+Section 2: Video Capture
+Section 2.1: Webcam Input
+Section 2.2: TV and S-Video Inputs
+Section 2.3: HDMI Input
+Section 3: Video Output
+Section 3.1: S-Video Output
+Section 3.2: HDMI Output
+Section 4: VBI Capture
+Section 5: VBI Output
+Section 6: Radio Receiver
+Section 7: Radio Transmitter
+Section 8: Software Defined Radio Receiver
+Section 9: Controls
+Section 9.1: User Controls - Test Controls
+Section 9.2: User Controls - Video Capture
+Section 9.3: User Controls - Audio
+Section 9.4: Vivid Controls
+Section 9.4.1: Test Pattern Controls
+Section 9.4.2: Capture Feature Selection Controls
+Section 9.4.3: Output Feature Selection Controls
+Section 9.4.4: Error Injection Controls
+Section 9.4.5: VBI Raw Capture Controls
+Section 9.5: Digital Video Controls
+Section 9.6: FM Radio Receiver Controls
+Section 9.7: FM Radio Modulator
+Section 10: Video, VBI and RDS Looping
+Section 10.1: Video and Sliced VBI looping
+Section 10.2: Radio & RDS Looping
+Section 11: Cropping, Composing, Scaling
+Section 12: Formats
+Section 13: Capture Overlay
+Section 14: Output Overlay
+Section 15: Some Future Improvements
+
+
+Section 1: Configuring the driver
+---------------------------------
+
+By default the driver will create a single instance that has a video capture
+device with webcam, TV, S-Video and HDMI inputs, a video output device with
+S-Video and HDMI outputs, one vbi capture device, one vbi output device, one
+radio receiver device, one radio transmitter device and one SDR device.
+
+The number of instances, devices, video inputs and outputs and their types are
+all configurable using the following module options:
+
+n_devs: number of driver instances to create. By default set to 1. Up to 64
+ instances can be created.
+
+node_types: which devices should each driver instance create. An array of
+ hexadecimal values, one for each instance. The default is 0x1d3d.
+ Each value is a bitmask with the following meaning:
+ bit 0: Video Capture node
+ bit 2-3: VBI Capture node: 0 = none, 1 = raw vbi, 2 = sliced vbi, 3 = both
+ bit 4: Radio Receiver node
+ bit 5: Software Defined Radio Receiver node
+ bit 8: Video Output node
+ bit 10-11: VBI Output node: 0 = none, 1 = raw vbi, 2 = sliced vbi, 3 = both
+ bit 12: Radio Transmitter node
+ bit 16: Framebuffer for testing overlays
+
+ So to create four instances, the first two with just one video capture
+ device, the second two with just one video output device you would pass
+ these module options to vivid:
+
+ n_devs=4 node_types=0x1,0x1,0x100,0x100
+
+num_inputs: the number of inputs, one for each instance. By default 4 inputs
+ are created for each video capture device. At most 16 inputs can be created,
+ and there must be at least one.
+
+input_types: the input types for each instance, the default is 0xe4. This defines
+ what the type of each input is when the inputs are created for each driver
+ instance. This is a hexadecimal value with up to 16 pairs of bits, each
+ pair gives the type and bits 0-1 map to input 0, bits 2-3 map to input 1,
+ 30-31 map to input 15. Each pair of bits has the following meaning:
+
+ 00: this is a webcam input
+ 01: this is a TV tuner input
+ 10: this is an S-Video input
+ 11: this is an HDMI input
+
+ So to create a video capture device with 8 inputs where input 0 is a TV
+ tuner, inputs 1-3 are S-Video inputs and inputs 4-7 are HDMI inputs you
+ would use the following module options:
+
+ num_inputs=8 input_types=0xffa9
+
+num_outputs: the number of outputs, one for each instance. By default 2 outputs
+ are created for each video output device. At most 16 outputs can be
+ created, and there must be at least one.
+
+output_types: the output types for each instance, the default is 0x02. This defines
+ what the type of each output is when the outputs are created for each
+ driver instance. This is a hexadecimal value with up to 16 bits, each bit
+ gives the type and bit 0 maps to output 0, bit 1 maps to output 1, bit
+ 15 maps to output 15. The meaning of each bit is as follows:
+
+ 0: this is an S-Video output
+ 1: this is an HDMI output
+
+ So to create a video output device with 8 outputs where outputs 0-3 are
+ S-Video outputs and outputs 4-7 are HDMI outputs you would use the
+ following module options:
+
+ num_outputs=8 output_types=0xf0
+
+vid_cap_nr: give the desired videoX start number for each video capture device.
+ The default is -1 which will just take the first free number. This allows
+ you to map capture video nodes to specific videoX device nodes. Example:
+
+ n_devs=4 vid_cap_nr=2,4,6,8
+
+ This will attempt to assign /dev/video2 for the video capture device of
+ the first vivid instance, video4 for the next up to video8 for the last
+ instance. If it can't succeed, then it will just take the next free
+ number.
+
+vid_out_nr: give the desired videoX start number for each video output device.
+ The default is -1 which will just take the first free number.
+
+vbi_cap_nr: give the desired vbiX start number for each vbi capture device.
+ The default is -1 which will just take the first free number.
+
+vbi_out_nr: give the desired vbiX start number for each vbi output device.
+ The default is -1 which will just take the first free number.
+
+radio_rx_nr: give the desired radioX start number for each radio receiver device.
+ The default is -1 which will just take the first free number.
+
+radio_tx_nr: give the desired radioX start number for each radio transmitter
+ device. The default is -1 which will just take the first free number.
+
+sdr_cap_nr: give the desired swradioX start number for each SDR capture device.
+ The default is -1 which will just take the first free number.
+
+ccs_cap_mode: specify the allowed video capture crop/compose/scaling combination
+ for each driver instance. Video capture devices can have any combination
+ of cropping, composing and scaling capabilities and this will tell the
+ vivid driver which of those it should emulate. By default the user can
+ select this through controls.
+
+ The value is either -1 (controlled by the user) or a set of three bits,
+ each enabling (1) or disabling (0) one of the features:
+
+ bit 0: Enable crop support. Cropping will take only part of the
+ incoming picture.
+ bit 1: Enable compose support. Composing will copy the incoming
+ picture into a larger buffer.
+ bit 2: Enable scaling support. Scaling can scale the incoming
+ picture. The scaler of the vivid driver can enlarge up
+ or down to four times the original size. The scaler is
+ very simple and low-quality. Simplicity and speed were
+ key, not quality.
+
+ Note that this value is ignored by webcam inputs: those enumerate
+ discrete framesizes and that is incompatible with cropping, composing
+ or scaling.
+
+ccs_out_mode: specify the allowed video output crop/compose/scaling combination
+ for each driver instance. Video output devices can have any combination
+ of cropping, composing and scaling capabilities and this will tell the
+ vivid driver which of those it should emulate. By default the user can
+ select this through controls.
+
+ The value is either -1 (controlled by the user) or a set of three bits,
+ each enabling (1) or disabling (0) one of the features:
+
+ bit 0: Enable crop support. Cropping will take only part of the
+ outgoing buffer.
+ bit 1: Enable compose support. Composing will copy the incoming
+ buffer into a larger picture frame.
+ bit 2: Enable scaling support. Scaling can scale the incoming
+ buffer. The scaler of the vivid driver can enlarge up
+ or down to four times the original size. The scaler is
+ very simple and low-quality. Simplicity and speed were
+ key, not quality.
+
+multiplanar: select whether each device instance supports multi-planar formats,
+ and thus the V4L2 multi-planar API. By default the first device instance
+ is single-planar, the second multi-planar, and it keeps alternating.
+
+ This module option can override that for each instance. Values are:
+
+ 0: use alternating single and multi-planar devices.
+ 1: this is a single-planar instance.
+ 2: this is a multi-planar instance.
+
+vivid_debug: enable driver debugging info
+
+no_error_inj: if set disable the error injecting controls. This option is
+ needed in order to run a tool like v4l2-compliance. Tools like that
+ exercise all controls including a control like 'Disconnect' which
+ emulates a USB disconnect, making the device inaccessible, so all
+ subsequent v4l2-compliance tests will fail.
+
+ There may be other situations as well where you want to disable the
+ error injection support of vivid. When this option is set, then the
+ controls that select crop, compose and scale behavior are also
+ removed. Unless overridden by ccs_cap_mode and/or ccs_out_mode the
+ driver will default to enabling crop, compose and scaling.
+
+Taken together, all these module options allow you to precisely customize
+the driver behavior and test your application with all sorts of permutations.
+It is also very suitable for emulating hardware that is not yet available, e.g.
+when developing software for a new upcoming device.
+
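
As a hedged illustration, not part of this patch, a vivid node can be exercised
like any other V4L2 device; the sketch below assumes the first capture node was
registered as /dev/video0:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/videodev2.h>

    int main(void)
    {
        struct v4l2_capability cap;
        int fd = open("/dev/video0", O_RDWR);

        if (fd < 0) {
            perror("open /dev/video0");
            return 1;
        }

        /* Query and print the driver and card names. */
        memset(&cap, 0, sizeof(cap));
        if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0)
            printf("driver: %s, card: %s\n",
                   (const char *)cap.driver, (const char *)cap.card);

        close(fd);
        return 0;
    }

Run against a vivid instance, this should report "vivid" as the driver name.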
+
+Section 2: Video Capture
+------------------------
+
+This is probably the most frequently used feature. The video capture device
+can be configured by using the module options num_inputs, input_types and
+ccs_cap_mode (see section 1 for more detailed information), but by default
+four inputs are configured: a webcam, a TV tuner, an S-Video and an HDMI
+input, one input for each input type. Those are described in more detail
+below.
+
+Special attention has been given to the rate at which new frames become
+available. The jitter will be around 1 jiffie (that depends on the HZ
+configuration of your kernel, so usually 1/100, 1/250 or 1/1000 of a second),
+but the long-term behavior exactly follows the framerate. So a
+framerate of 59.94 Hz is really different from 60 Hz. If the framerate
+exceeds your kernel's HZ value, then you will get dropped frames, but the
+frame/field sequence counting will keep track of that so the sequence
+count will skip whenever frames are dropped.
+
+
+Section 2.1: Webcam Input
+-------------------------
+
+The webcam input supports three framesizes: 320x180, 640x360 and 1280x720. It
+supports frames per second settings of 10, 15, 25, 30, 50 and 60 fps. Which ones
+are available depends on the chosen framesize: the larger the framesize, the
+lower the maximum frames per second.
+
+The initially selected colorspace when you switch to the webcam input will be
+sRGB.
+
+
+Section 2.2: TV and S-Video Inputs
+----------------------------------
+
+The only difference between the TV and S-Video input is that the TV has a
+tuner. Otherwise they behave identically.
+
+These inputs support audio inputs as well: one TV and one Line-In. They
+both support all TV standards. If the standard is queried, then the Vivid
+controls 'Standard Signal Mode' and 'Standard' determine what
+the result will be.
+
+These inputs support all combinations of the field setting. Special care has
+been taken to faithfully reproduce how fields are handled for the different
+TV standards. This is particularly noticeable when generating a horizontally
+moving image so the temporal effect of using interlaced formats becomes clearly
+visible. For 50 Hz standards the top field is the oldest and the bottom field
+is the newest in time. For 60 Hz standards that is reversed: the bottom field
+is the oldest and the top field is the newest in time.
+
+When you start capturing in V4L2_FIELD_ALTERNATE mode the first buffer will
+contain the top field for 50 Hz standards and the bottom field for 60 Hz
+standards. This is what capture hardware does as well.
+
+Finally, for PAL/SECAM standards the first half of the top line contains noise.
+This simulates the Wide Screen Signal that is commonly placed there.
+
+The initially selected colorspace when you switch to the TV or S-Video input
+will be SMPTE-170M.
+
+The pixel aspect ratio will depend on the TV standard. The video aspect ratio
+can be selected through the 'Standard Aspect Ratio' Vivid control.
+Choices are '4x3', '16x9' which will give letterboxed widescreen video and
+'16x9 Anamorphic' which will give full screen squashed anamorphic widescreen
+video that will need to be scaled accordingly.
+
+The TV 'tuner' supports a frequency range of 44-958 MHz. Channels are available
+every 6 MHz, starting from 49.25 MHz. For each channel the generated image
+will be in color for the +/- 0.25 MHz around it, and in grayscale for
++/- 1 MHz around the channel. Beyond that it is just noise. The VIDIOC_G_TUNER
+ioctl will return 100% signal strength for +/- 0.25 MHz and 50% for +/- 1 MHz.
+It will also return correct afc values to show whether the frequency is too
+low or too high.
+
+The audio subchannels that are returned are MONO for the +/- 1 MHz range around
+a valid channel frequency. When the frequency is within +/- 0.25 MHz of the
+channel it will return one of MONO, STEREO, MONO | SAP (for NTSC) or
+LANG1 | LANG2 (for others), or STEREO | SAP.
+
+Which one is returned depends on the chosen channel, each next valid channel
+will cycle through the possible audio subchannel combinations. This allows
+you to test the various combinations by just switching channels.
+
+Finally, for these inputs the v4l2_timecode struct is filled in in the
+dequeued v4l2_buffer struct.
+
+
+Section 2.3: HDMI Input
+-----------------------
+
+The HDMI input supports all CEA-861 and DMT timings, both progressive and
+interlaced, for pixelclock frequencies between 25 and 600 MHz. The field
+mode for interlaced formats is always V4L2_FIELD_ALTERNATE. For HDMI the
+field order is always top field first, and when you start capturing an
+interlaced format you will receive the top field first.
+
+The initially selected colorspace when you switch to the HDMI input or
+select an HDMI timing is based on the format resolution: for resolutions
+less than or equal to 720x576 the colorspace is set to SMPTE-170M, for
+others it is set to REC-709 (CEA-861 timings) or sRGB (VESA DMT timings).
+
+The pixel aspect ratio will depend on the HDMI timing: for 720x480 it is
+set as for the NTSC TV standard, for 720x576 it is set as for the PAL TV
+standard, and for all others a 1:1 pixel aspect ratio is returned.
+
+The video aspect ratio can be selected through the 'DV Timings Aspect Ratio'
+Vivid control. Choices are 'Source Width x Height' (just use the
+same ratio as the chosen format), '4x3' or '16x9', either of which can
+result in pillarboxed or letterboxed video.
+
+For HDMI inputs it is possible to set the EDID. By default a simple EDID
+is provided. You can only set the EDID for HDMI inputs. Internally, however,
+the EDID is shared between all HDMI inputs.
+
+No interpretation is done of the EDID data.
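+
+A rough sketch of reading that EDID back with VIDIOC_G_EDID (assuming
+/dev/video0 with the HDMI input selected; the first call only queries the
+number of available blocks, the second one fetches them):
+
+    #include <fcntl.h>
+    #include <stdio.h>
+    #include <string.h>
+    #include <sys/ioctl.h>
+    #include <unistd.h>
+    #include <linux/videodev2.h>
+
+    int main(void)
+    {
+        static unsigned char data[256 * 128];  /* at most 256 blocks */
+        struct v4l2_edid edid;
+        int fd = open("/dev/video0", O_RDWR);
+
+        if (fd < 0)
+            return 1;
+        memset(&edid, 0, sizeof(edid));  /* pad 0 = the HDMI input, assumed */
+        if (ioctl(fd, VIDIOC_G_EDID, &edid) == 0 && edid.blocks) {
+            if (edid.blocks > 256)
+                edid.blocks = 256;
+            edid.start_block = 0;
+            edid.edid = data;
+            if (ioctl(fd, VIDIOC_G_EDID, &edid) == 0)
+                printf("read %u EDID block(s)\n", edid.blocks);
+        }
+        close(fd);
+        return 0;
+    }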
+
+
+Section 3: Video Output
+-----------------------
+
+The video output device can be configured by using the module options
+num_outputs, output_types and ccs_out_mode (see section 1 for more detailed
+information), but by default two outputs are configured: an S-Video and an
+HDMI output, one for each output type. Those are described in more detail
+below.
+
+As with video capture, the framerate is exact in the long term.
+
+
+Section 3.1: S-Video Output
+---------------------------
+
+This output supports audio outputs as well: "Line-Out 1" and "Line-Out 2".
+The S-Video output supports all TV standards.
+
+This output supports all combinations of the field setting.
+
+The initially selected colorspace when you switch to the S-Video output
+will be SMPTE-170M.
+
+
+Section 3.2: HDMI Output
+------------------------
+
+The HDMI output supports all CEA-861 and DMT timings, both progressive and
+interlaced, for pixelclock frequencies between 25 and 600 MHz. The field
+mode for interlaced formats is always V4L2_FIELD_ALTERNATE.
+
+The initially selected colorspace when you switch to the HDMI output or
+select an HDMI timing is based on the format resolution: for resolutions
+less than or equal to 720x576 the colorspace is set to SMPTE-170M, for
+others it is set to REC-709 (CEA-861 timings) or sRGB (VESA DMT timings).
+
+The pixel aspect ratio will depend on the HDMI timing: for 720x480 it is
+set as for the NTSC TV standard, for 720x576 it is set as for the PAL TV
+standard, and for all others a 1:1 pixel aspect ratio is returned.
+
+An HDMI output has a valid EDID which can be obtained through VIDIOC_G_EDID.
+
+
+Section 4: VBI Capture
+----------------------
+
+There are three types of VBI capture devices: those that only support raw
+(undecoded) VBI, those that only support sliced (decoded) VBI and those that
+support both. This is determined by the node_types module option. In all
+cases the driver will generate valid VBI data: for 60 Hz standards it will
+generate Closed Caption and XDS data. The closed caption stream will
+alternate between "Hello world!" and "Closed captions test" every second.
+The XDS stream will give the current time once a minute. For 50 Hz standards
+it will generate the Wide Screen Signal which is based on the actual Video
+Aspect Ratio control setting and teletext pages 100-159, one page per frame.
+
+The VBI device only works for the S-Video and TV inputs; it will return
+an error if the current input is a webcam or HDMI.
+
+
+Section 5: VBI Output
+---------------------
+
+There are three types of VBI output devices: those that only support raw
+(undecoded) VBI, those that only support sliced (decoded) VBI and those that
+support both. This is determined by the node_types module option.
+
+The sliced VBI output supports the Wide Screen Signal and the teletext signal
+for 50 Hz standards and Closed Captioning + XDS for 60 Hz standards.
+
+The VBI device only works for the S-Video output; it will return
+an error if the current output is HDMI.
+
+
+Section 6: Radio Receiver
+-------------------------
+
+The radio receiver emulates an FM/AM/SW receiver. The FM band also supports RDS.
+The frequency ranges are:
+
+ FM: 64 MHz - 108 MHz
+ AM: 520 kHz - 1710 kHz
+ SW: 2300 kHz - 26.1 MHz
+
+Valid channels are emulated every 1 MHz for FM and every 100 kHz for AM and SW.
+The signal strength decreases the further the frequency is from the valid
+frequency until it becomes 0% at +/- 50 kHz (FM) or 5 kHz (AM/SW) from the
+ideal frequency. The initial frequency when the driver is loaded is set to
+95 MHz.
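+
+A minimal sketch of tuning to one of these emulated stations and reading the
+signal strength; /dev/radio0 is an assumption, as is the usual 62.5 Hz unit
+for radio frequencies (V4L2_TUNER_CAP_LOW):
+
+    #include <fcntl.h>
+    #include <stdio.h>
+    #include <string.h>
+    #include <sys/ioctl.h>
+    #include <unistd.h>
+    #include <linux/videodev2.h>
+
+    int main(void)
+    {
+        struct v4l2_frequency f;
+        struct v4l2_tuner t;
+        int fd = open("/dev/radio0", O_RDWR);
+
+        if (fd < 0)
+            return 1;
+        memset(&f, 0, sizeof(f));
+        f.type = V4L2_TUNER_RADIO;
+        f.frequency = 95 * 16000;  /* 95 MHz: 16000 units of 62.5 Hz per MHz */
+        if (ioctl(fd, VIDIOC_S_FREQUENCY, &f) < 0)
+            perror("VIDIOC_S_FREQUENCY");
+        memset(&t, 0, sizeof(t));
+        if (ioctl(fd, VIDIOC_G_TUNER, &t) == 0)
+            printf("signal: %d/65535\n", t.signal);
+        close(fd);
+        return 0;
+    }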
+
+The FM receiver supports RDS as well, both using 'Block I/O' and 'Controls'
+modes. In the 'Controls' mode the RDS information is stored in read-only
+controls. These controls are updated every time the frequency is changed,
+or when the tuner status is requested. The Block I/O method uses the read()
+interface to pass the RDS blocks on to the application for decoding.
+
+The RDS signal is 'detected' for +/- 12.5 kHz around the channel frequency,
+and the further the frequency is away from the valid frequency the more RDS
+errors are randomly introduced into the block I/O stream, up to 50% of all
+blocks if you are +/- 12.5 kHz from the channel frequency. All four error
+types occur in equal proportions: blocks marked 'CORRECTED', blocks marked
+'ERROR', blocks marked 'INVALID' and dropped blocks.
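+
+A sketch of how the error bits can be inspected in Block I/O mode (assuming
+/dev/radio0 and that the 'RDS Rx I/O Mode' control has been set to
+"Block I/O"); dropped blocks obviously cannot be observed directly:
+
+    #include <fcntl.h>
+    #include <stdio.h>
+    #include <unistd.h>
+    #include <linux/videodev2.h>
+
+    int main(void)
+    {
+        struct v4l2_rds_data blk[64];
+        int fd = open("/dev/radio0", O_RDONLY);
+        ssize_t n, i;
+
+        if (fd < 0)
+            return 1;
+        /* read() returns a multiple of sizeof(struct v4l2_rds_data) */
+        n = read(fd, blk, sizeof(blk));
+        for (i = 0; i < n / (ssize_t)sizeof(blk[0]); i++) {
+            if (blk[i].block & V4L2_RDS_BLOCK_ERROR)
+                printf("block %zd: uncorrectable\n", i);
+            else if (blk[i].block & V4L2_RDS_BLOCK_CORRECTED)
+                printf("block %zd: corrected\n", i);
+        }
+        close(fd);
+        return 0;
+    }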
+
+The generated RDS stream contains all the standard fields of a 0B group,
+plus radio text and the current time.
+
+The receiver supports HW frequency seek, either in Bounded mode, Wrap Around
+mode or both, which is configurable with the "Radio HW Seek Mode" control.
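+
+Triggering a hardware seek could look like the sketch below (again assuming
+/dev/radio0; a spacing of 0 leaves the seek step up to the driver):
+
+    #include <fcntl.h>
+    #include <stdio.h>
+    #include <string.h>
+    #include <sys/ioctl.h>
+    #include <unistd.h>
+    #include <linux/videodev2.h>
+
+    int main(void)
+    {
+        struct v4l2_hw_freq_seek seek;
+        int fd = open("/dev/radio0", O_RDWR);
+
+        if (fd < 0)
+            return 1;
+        memset(&seek, 0, sizeof(seek));
+        seek.type = V4L2_TUNER_RADIO;
+        seek.seek_upward = 1;
+        seek.wrap_around = 1;  /* honored depending on "Radio HW Seek Mode" */
+        if (ioctl(fd, VIDIOC_S_HW_FREQ_SEEK, &seek) < 0)
+            perror("VIDIOC_S_HW_FREQ_SEEK");
+        close(fd);
+        return 0;
+    }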
+
+
+Section 7: Radio Transmitter
+----------------------------
+
+The radio transmitter emulates an FM/AM/SW transmitter. The FM band also supports RDS.
+The frequency ranges are:
+
+ FM: 64 MHz - 108 MHz
+ AM: 520 kHz - 1710 kHz
+ SW: 2300 kHz - 26.1 MHz
+
+The initial frequency when the driver is loaded is 95.5 MHz.
+
+The FM transmitter supports RDS as well, both using 'Block I/O' and 'Controls'
+modes. In the 'Controls' mode the transmitted RDS information is configured
+using controls, and in 'Block I/O' mode the blocks are passed to the driver
+using write().
+
+
+Section 8: Software Defined Radio Receiver
+------------------------------------------
+
+The SDR receiver has three frequency bands for the ADC tuner:
+
+ - 300 kHz
+ - 900 kHz - 2800 kHz
+ - 3200 kHz
+
+The RF tuner supports 50 MHz - 2000 MHz.
+
+The generated data contains the In-phase and Quadrature components of a
+1 kHz tone that has an amplitude of sqrt(2).
+
+
+Section 9: Controls
+-------------------
+
+Different devices support different controls. The sections below will describe
+each control and which devices support them.
+
+
+Section 9.1: User Controls - Test Controls
+------------------------------------------
+
+The Button, Boolean, Integer 32 Bits, Integer 64 Bits, Menu, String, Bitmask and
+Integer Menu are controls that represent all possible control types. The Menu
+control and the Integer Menu control both have 'holes' in their menu list,
+meaning that one or more menu items return EINVAL when VIDIOC_QUERYMENU is called.
+Both menu controls also have a non-zero minimum control value. These features
+allow you to check if your application can handle such things correctly.
+These controls are supported for every device type.
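+
+A sketch of how an application can enumerate such a menu control while coping
+with the holes: VIDIOC_QUERYMENU simply fails with EINVAL for the missing
+items and those indices are skipped. The v4l2_queryctrl struct is assumed to
+have been filled in by VIDIOC_QUERYCTRL beforehand:
+
+    #include <stdio.h>
+    #include <string.h>
+    #include <sys/ioctl.h>
+    #include <linux/videodev2.h>
+
+    /* Print the valid items of a menu or integer menu control, skipping the
+       'holes' for which VIDIOC_QUERYMENU fails with EINVAL. */
+    static void print_menu(int fd, const struct v4l2_queryctrl *qc)
+    {
+        struct v4l2_querymenu qm;
+        __s32 i;
+
+        for (i = qc->minimum; i <= qc->maximum; i++) {
+            memset(&qm, 0, sizeof(qm));
+            qm.id = qc->id;
+            qm.index = i;
+            if (ioctl(fd, VIDIOC_QUERYMENU, &qm) < 0)
+                continue;  /* a hole in the menu */
+            if (qc->type == V4L2_CTRL_TYPE_INTEGER_MENU)
+                printf("%d: %lld\n", i, (long long)qm.value);
+            else
+                printf("%d: %s\n", i, (const char *)qm.name);
+        }
+    }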
+
+
+Section 9.2: User Controls - Video Capture
+------------------------------------------
+
+The following controls are specific to video capture.
+
+The Brightness, Contrast, Saturation and Hue controls actually work and are
+standard. There is one special feature with the Brightness control: each
+video input has its own brightness value, so changing input will restore
+the brightness for that input. In addition, each video input uses a different
+brightness range (minimum and maximum control values). Switching inputs will
+cause a control event to be sent with the V4L2_EVENT_CTRL_CH_RANGE flag set.
+This allows you to test controls that can change their range.
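+
+A sketch of how such a range change can be observed (assuming /dev/video0;
+the input switch itself is assumed to happen elsewhere, e.g. via
+VIDIOC_S_INPUT from another process or thread):
+
+    #include <fcntl.h>
+    #include <stdio.h>
+    #include <string.h>
+    #include <sys/ioctl.h>
+    #include <sys/select.h>
+    #include <unistd.h>
+    #include <linux/videodev2.h>
+
+    int main(void)
+    {
+        struct v4l2_event_subscription sub;
+        struct v4l2_event ev;
+        fd_set efds;
+        int fd = open("/dev/video0", O_RDWR);
+
+        if (fd < 0)
+            return 1;
+        memset(&sub, 0, sizeof(sub));
+        sub.type = V4L2_EVENT_CTRL;
+        sub.id = V4L2_CID_BRIGHTNESS;
+        if (ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0)
+            perror("VIDIOC_SUBSCRIBE_EVENT");
+        /* control events are signalled as an exception condition */
+        FD_ZERO(&efds);
+        FD_SET(fd, &efds);
+        if (select(fd + 1, NULL, NULL, &efds, NULL) > 0 &&
+            ioctl(fd, VIDIOC_DQEVENT, &ev) == 0 &&
+            (ev.u.ctrl.changes & V4L2_EVENT_CTRL_CH_RANGE))
+            printf("brightness range is now %d..%d\n",
+                   ev.u.ctrl.minimum, ev.u.ctrl.maximum);
+        close(fd);
+        return 0;
+    }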
+
+The 'Gain, Automatic' and Gain controls can be used to test volatile controls:
+if 'Gain, Automatic' is set, then the Gain control is volatile and changes
+constantly. If 'Gain, Automatic' is cleared, then the Gain control is a normal
+control.
+
+The 'Horizontal Flip' and 'Vertical Flip' controls can be used to flip the
+image. These combine with the 'Sensor Flipped Horizontally/Vertically' Vivid
+controls.
+
+The 'Alpha Component' control can be used to set the alpha component for
+formats containing an alpha channel.
+
+
+Section 9.3: User Controls - Audio
+----------------------------------
+
+The following controls are specific to video capture and output and radio
+receivers and transmitters.
+
+The 'Volume' and 'Mute' audio controls are typical for such devices to
+control the volume and mute the audio. They don't actually do anything in
+the vivid driver.
+
+
+Section 9.4: Vivid Controls
+---------------------------
+
+These vivid custom controls govern the image generation, error injection, etc.
+
+
+Section 9.4.1: Test Pattern Controls
+------------------------------------
+
+The Test Pattern Controls are all specific to video capture.
+
+Test Pattern: selects which test pattern to use. Use the CSC Colorbar for
+ testing colorspace conversions: the colors used in that test pattern
+ map to valid colors in all colorspaces. The colorspace conversion
+ is disabled for the other test patterns.
+
+OSD Text Mode: selects whether the text superimposed on the
+ test pattern should be shown, and if so, whether only counters should
+ be displayed or the full text.
+
+Horizontal Movement: selects whether the test pattern should
+ move to the left or right and at what speed.
+
+Vertical Movement: does the same for the vertical direction.
+
+Show Border: show a two-pixel wide border at the edge of the actual image,
+ excluding letter or pillarboxing.
+
+Show Square: show a square in the middle of the image. If the image is
+ displayed with the correct pixel and image aspect ratio corrections,
+ then the width and height of the square on the monitor should be
+ the same.
+
+Insert SAV Code in Image: adds a SAV (Start of Active Video) code to the image.
+ This can be used to check if such codes in the image are inadvertently
+ interpreted instead of being ignored.
+
+Insert EAV Code in Image: does the same for the EAV (End of Active Video) code.
+
+
+Section 9.4.2: Capture Feature Selection Controls
+-------------------------------------------------
+
+These controls are all specific to video capture.
+
+Sensor Flipped Horizontally: the image is flipped horizontally and the
+ V4L2_IN_ST_HFLIP input status flag is set. This emulates the case where
+ a sensor is for example mounted upside down.
+
+Sensor Flipped Vertically: the image is flipped vertically and the
+ V4L2_IN_ST_VFLIP input status flag is set. This emulates the case where
+ a sensor is for example mounted upside down.
+
+Standard Aspect Ratio: selects if the image aspect ratio as used for the TV or
+ S-Video input should be 4x3, 16x9 or anamorphic widescreen. This may
+ introduce letterboxing.
+
+DV Timings Aspect Ratio: selects if the image aspect ratio as used for the HDMI
+ input should be the same as the source width and height ratio, or if
+ it should be 4x3 or 16x9. This may introduce letter or pillarboxing.
+
+Timestamp Source: selects when the timestamp for each buffer is taken.
+
+Colorspace: selects which colorspace should be used when generating the image.
+ This only applies if the CSC Colorbar test pattern is selected,
+ otherwise the test pattern will go through unconverted (except for
+ the so-called 'Transfer Function' corrections and the R'G'B' to Y'CbCr
+ conversion). This behavior is also what you want, since a 75% Colorbar
+ should really have 75% signal intensity and should not be affected
+ by colorspace conversions.
+
+	Changing the colorspace will result in a V4L2_EVENT_SOURCE_CHANGE
+	event being sent since it emulates a detected colorspace change.
+
+Limited RGB Range (16-235): selects if the RGB range of the HDMI source should
+ be limited or full range. This combines with the Digital Video 'Rx RGB
+ Quantization Range' control and can be used to test what happens if
+ a source provides you with the wrong quantization range information.
+ See the description of that control for more details.
+
+Apply Alpha To Red Only: apply the alpha channel as set by the 'Alpha Component'
+ user control to the red color of the test pattern only.
+
+Enable Capture Cropping: enables crop support. This control is only present if
+ the ccs_cap_mode module option is set to the default value of -1 and if
+ the no_error_inj module option is set to 0 (the default).
+
+Enable Capture Composing: enables composing support. This control is only
+ present if the ccs_cap_mode module option is set to the default value of
+ -1 and if the no_error_inj module option is set to 0 (the default).
+
+Enable Capture Scaler: enables support for a scaler (maximum 4 times upscaling
+ and downscaling). This control is only present if the ccs_cap_mode
+ module option is set to the default value of -1 and if the no_error_inj
+ module option is set to 0 (the default).
+
+Maximum EDID Blocks: determines how many EDID blocks the driver supports.
+ Note that the vivid driver does not actually interpret new EDID
+ data, it just stores it. It allows for up to 256 EDID blocks
+ which is the maximum supported by the standard.
+
+Fill Percentage of Frame: can be used to draw only the top X percent
+ of the image. Since each frame has to be drawn by the driver, this
+ demands a lot of the CPU. For large resolutions this becomes
+ problematic. By drawing only part of the image this CPU load can
+ be reduced.
+
+
+Section 9.4.3: Output Feature Selection Controls
+------------------------------------------------
+
+These controls are all specific to video output.
+
+Enable Output Cropping: enables crop support. This control is only present if
+ the ccs_out_mode module option is set to the default value of -1 and if
+ the no_error_inj module option is set to 0 (the default).
+
+Enable Output Composing: enables composing support. This control is only
+ present if the ccs_out_mode module option is set to the default value of
+ -1 and if the no_error_inj module option is set to 0 (the default).
+
+Enable Output Scaler: enables support for a scaler (maximum 4 times upscaling
+ and downscaling). This control is only present if the ccs_out_mode
+ module option is set to the default value of -1 and if the no_error_inj
+ module option is set to 0 (the default).
+
+
+Section 9.4.4: Error Injection Controls
+---------------------------------------
+
+The following two controls are only valid for video and vbi capture.
+
+Standard Signal Mode: selects the behavior of VIDIOC_QUERYSTD: what should
+ it return?
+
+	Changing this control will result in a V4L2_EVENT_SOURCE_CHANGE
+	event being sent since it emulates a changed input condition (e.g. a cable
+ was plugged in or out).
+
+Standard: selects the standard that VIDIOC_QUERYSTD should return if the
+ previous control is set to "Selected Standard".
+
+	Changing this control will result in a V4L2_EVENT_SOURCE_CHANGE
+	event being sent since it emulates a changed input standard.
+
+
+The following two controls are only valid for video capture.
+
+DV Timings Signal Mode: selects the behavior of VIDIOC_QUERY_DV_TIMINGS: what
+ should it return?
+
+	Changing this control will result in a V4L2_EVENT_SOURCE_CHANGE
+	event being sent since it emulates a changed input condition (e.g. a cable
+ was plugged in or out).
+
+DV Timings: selects the timings the VIDIOC_QUERY_DV_TIMINGS should return
+ if the previous control is set to "Selected DV Timings".
+
+	Changing this control will result in a V4L2_EVENT_SOURCE_CHANGE
+	event being sent since it emulates changed input timings.
+
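+A sketch of the ioctls that these controls influence (assuming /dev/video0;
+VIDIOC_QUERYSTD is only meaningful for the S-Video/TV input and
+VIDIOC_QUERY_DV_TIMINGS only for the HDMI input):
+
+    #include <fcntl.h>
+    #include <stdio.h>
+    #include <string.h>
+    #include <sys/ioctl.h>
+    #include <unistd.h>
+    #include <linux/videodev2.h>
+
+    int main(void)
+    {
+        struct v4l2_dv_timings timings;
+        v4l2_std_id std;
+        int fd = open("/dev/video0", O_RDWR);
+
+        if (fd < 0)
+            return 1;
+        if (ioctl(fd, VIDIOC_QUERYSTD, &std) == 0)
+            printf("detected standard mask: 0x%llx\n",
+                   (unsigned long long)std);
+        memset(&timings, 0, sizeof(timings));
+        if (ioctl(fd, VIDIOC_QUERY_DV_TIMINGS, &timings) == 0)
+            printf("detected timings: %ux%u%s\n",
+                   timings.bt.width, timings.bt.height,
+                   timings.bt.interlaced ? "i" : "p");
+        close(fd);
+        return 0;
+    }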
+
+The following controls are only present if the no_error_inj module option
+is set to 0 (the default). These controls are valid for video and vbi
+capture and output streams and for the SDR capture device except for the
+Disconnect control which is valid for all devices.
+
+Wrap Sequence Number: test what happens when you wrap the sequence number in
+ struct v4l2_buffer around.
+
+Wrap Timestamp: test what happens when you wrap the timestamp in struct
+ v4l2_buffer around.
+
+Percentage of Dropped Buffers: sets the percentage of buffers that
+ are never returned by the driver (i.e., they are dropped).
+
+Disconnect: emulates a USB disconnect. The device will act as if it has
+ been disconnected. Only after all open filehandles to the device
+ node have been closed will the device become 'connected' again.
+
+Inject V4L2_BUF_FLAG_ERROR: when pressed, the next frame returned by
+ the driver will have the error flag set (i.e. the frame is marked
+ corrupt).
+
+Inject VIDIOC_REQBUFS Error: when pressed, the next REQBUFS or CREATE_BUFS
+ ioctl call will fail with an error. To be precise: the videobuf2
+ queue_setup() op will return -EINVAL.
+
+Inject VIDIOC_QBUF Error: when pressed, the next VIDIOC_QBUF or
+ VIDIOC_PREPARE_BUFFER ioctl call will fail with an error. To be
+ precise: the videobuf2 buf_prepare() op will return -EINVAL.
+
+Inject VIDIOC_STREAMON Error: when pressed, the next VIDIOC_STREAMON ioctl
+ call will fail with an error. To be precise: the videobuf2
+ start_streaming() op will return -EINVAL.
+
+Inject Fatal Streaming Error: when pressed, the streaming core will be
+	marked as having suffered a fatal error; the only way to recover
+ from that is to stop streaming. To be precise: the videobuf2
+ vb2_queue_error() function is called.
+
+
+Section 9.4.5: VBI Raw Capture Controls
+---------------------------------------
+
+Interlaced VBI Format: if set, then the raw VBI data will be interlaced instead
+	of being provided grouped by field.
+
+
+Section 9.5: Digital Video Controls
+-----------------------------------
+
+Rx RGB Quantization Range: sets the RGB quantization detection of the HDMI
+ input. This combines with the Vivid 'Limited RGB Range (16-235)'
+ control and can be used to test what happens if a source provides
+ you with the wrong quantization range information. This can be tested
+ by selecting an HDMI input, setting this control to Full or Limited
+ range and selecting the opposite in the 'Limited RGB Range (16-235)'
+ control. The effect is easy to see if the 'Gray Ramp' test pattern
+ is selected.
+
+Tx RGB Quantization Range: sets the RGB quantization detection of the HDMI
+ output. It is currently not used for anything in vivid, but most HDMI
+ transmitters would typically have this control.
+
+Transmit Mode: sets the transmit mode of the HDMI output to HDMI or DVI-D. This
+	affects the reported colorspace since DVI-D outputs will always use
+ sRGB.
+
+
+Section 9.6: FM Radio Receiver Controls
+---------------------------------------
+
+RDS Reception: set if the RDS receiver should be enabled.
+
+RDS Program Type:
+RDS PS Name:
+RDS Radio Text:
+RDS Traffic Announcement:
+RDS Traffic Program:
+RDS Music: these are all read-only controls. If RDS Rx I/O Mode is set to
+ "Block I/O", then they are inactive as well. If RDS Rx I/O Mode is set
+ to "Controls", then these controls report the received RDS data. Note
+ that the vivid implementation of this is pretty basic: they are only
+ updated when you set a new frequency or when you get the tuner status
+ (VIDIOC_G_TUNER).
+
+Radio HW Seek Mode: can be one of "Bounded", "Wrap Around" or "Both". This
+	determines whether VIDIOC_S_HW_FREQ_SEEK is bounded by the frequency
+	range, wraps around, or whether that behavior is selectable by the user.
+
+Radio Programmable HW Seek: if set, then the user can provide the lower and
+ upper bound of the HW Seek. Otherwise the frequency range boundaries
+ will be used.
+
+Generate RBDS Instead of RDS: if set, then generate RBDS (the US variant of
+ RDS) data instead of RDS (European-style RDS). This affects only the
+	PI and PTY codes.
+
+RDS Rx I/O Mode: this can be "Block I/O" where the RDS blocks have to be read()
+ by the application, or "Controls" where the RDS data is provided by
+ the RDS controls mentioned above.
+
+
+Section 9.7: FM Radio Modulator Controls
+----------------------------------------
+
+RDS Program ID:
+RDS Program Type:
+RDS PS Name:
+RDS Radio Text:
+RDS Stereo:
+RDS Artificial Head:
+RDS Compressed:
+RDS Dynamic PTY:
+RDS Traffic Announcement:
+RDS Traffic Program:
+RDS Music: these are all controls that set the RDS data that is transmitted by
+ the FM modulator.
+
+RDS Tx I/O Mode: this can be "Block I/O" where the application has to use write()
+ to pass the RDS blocks to the driver, or "Controls" where the RDS data is
+ provided by the RDS controls mentioned above.
+
+
+Section 10: Video, VBI and RDS Looping
+--------------------------------------
+
+The vivid driver supports looping of video output to video input, VBI output
+to VBI input and RDS output to RDS input. For video/VBI looping this emulates
+a cable hooked up between the output and input connector. So video
+and VBI looping is only supported between S-Video and HDMI inputs and outputs.
+VBI is only valid for S-Video as it makes no sense for HDMI.
+
+Since radio is wireless this looping always happens if the radio receiver
+frequency is close to the radio transmitter frequency. In that case the radio
+transmitter will 'override' the emulated radio stations.
+
+Looping is currently supported only between devices created by the same
+vivid driver instance.
+
+
+Section 10.1: Video and Sliced VBI looping
+------------------------------------------
+
+The way to enable video/VBI looping is currently fairly crude. A 'Loop Video'
+control is available in the "Vivid" control class of the video
+output and VBI output devices. When checked the video looping will be enabled.
+Once enabled, any S-Video or HDMI input will show a static test pattern
+until the video output has started. At that time the video output will be
+looped to the video input provided that:
+
+- the input type matches the output type. So the HDMI input cannot receive
+ video from the S-Video output.
+
+- the video resolution of the video input must match that of the video output.
+ So it is not possible to loop a 50 Hz (720x576) S-Video output to a 60 Hz
+ (720x480) S-Video input, or a 720p60 HDMI output to a 1080p30 input.
+
+- the pixel formats must be identical on both sides. Otherwise the driver would
+ have to do pixel format conversion as well, and that's taking things too far.
+
+- the field settings must be identical on both sides. Same reason as above:
+  requiring the driver to convert from one field format to another complicates
+ matters too much. This also prohibits capturing with 'Field Top' or 'Field
+ Bottom' when the output video is set to 'Field Alternate'. This combination,
+ while legal, became too complicated to support. Both sides have to be 'Field
+ Alternate' for this to work. Also note that for this specific case the
+ sequence and field counting in struct v4l2_buffer on the capture side may not
+ be 100% accurate.
+
+- on the input side the "Standard Signal Mode" for the S-Video input or the
+ "DV Timings Signal Mode" for the HDMI input should be configured so that a
+ valid signal is passed to the video input.
+
+The framerates do not have to match, although this might change in the future.
+
+By default you will see the OSD text superimposed on top of the looped video.
+This can be turned off by changing the "OSD Text Mode" control of the video
+capture device.
+
+For VBI looping to work all of the above must be valid and in addition the vbi
+output must be configured for sliced VBI. The VBI capture side can be configured
+for either raw or sliced VBI. Note that at the moment only CC/XDS (60 Hz formats)
+and WSS (50 Hz formats) VBI data is looped. Teletext VBI data is not looped.
+
+
+Section 10.2: Radio & RDS Looping
+---------------------------------
+
+As mentioned in section 6 the radio receiver emulates stations at regular
+frequency intervals. Depending on the frequency of the radio receiver a
+signal strength value is calculated (this is returned by VIDIOC_G_TUNER).
+However, it will also look at the frequency set by the radio transmitter and
+if that results in a higher signal strength, then the settings of the radio
+transmitter will be used as if it were a valid station. This also includes
+the RDS data (if any) that the transmitter 'transmits'. This is received
+faithfully on the receiver side. Note that when the driver is loaded the
+frequencies of the radio receiver and transmitter are not identical, so
+initially no looping takes place.
+
+
+Section 11: Cropping, Composing, Scaling
+----------------------------------------
+
+This driver supports cropping, composing and scaling in any combination. Normally
+which features are supported can be selected through the Vivid controls,
+but it is also possible to hardcode it when the module is loaded through the
+ccs_cap_mode and ccs_out_mode module options. See section 1 for the details of
+these module options.
+
+This allows you to test your application for all these variations.
+
+Note that the webcam input never supports cropping, composing or scaling. That
+only applies to the TV/S-Video/HDMI inputs and outputs. The reason is that
+webcams, including this virtual implementation, normally use
+VIDIOC_ENUM_FRAMESIZES to list a set of discrete framesizes that they support.
+That does not combine with cropping, composing or scaling. This is
+primarily a limitation of the V4L2 API which is carefully reproduced here.
+
+The minimum and maximum resolutions that the scaler can achieve are 16x16 and
+(4096 * 4) x (2160 * 4), but it can only scale up or down by a factor of 4 or
+less. So for a source resolution of 1280x720 the minimum the scaler can do is
+320x180 and the maximum is 5120x2880. You can play around with this using the
+qv4l2 test tool and you will see these dependencies.
+
+This driver also supports larger 'bytesperline' settings, something that
+VIDIOC_S_FMT allows but that few drivers implement.
+
+The scaler is a simple scaler that uses the Coarse Bresenham algorithm. It's
+designed for speed and simplicity, not quality.
+
+If the combination of crop, compose and scaling allows it, then it is possible
+to change crop and compose rectangles on the fly.
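+
+For example, changing the capture crop rectangle on the fly could look like
+this sketch using the selection API (assuming /dev/video0 and the
+single-planar API):
+
+    #include <fcntl.h>
+    #include <stdio.h>
+    #include <string.h>
+    #include <sys/ioctl.h>
+    #include <unistd.h>
+    #include <linux/videodev2.h>
+
+    int main(void)
+    {
+        struct v4l2_selection sel;
+        int fd = open("/dev/video0", O_RDWR);
+
+        if (fd < 0)
+            return 1;
+        memset(&sel, 0, sizeof(sel));
+        sel.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+        sel.target = V4L2_SEL_TGT_CROP;
+        sel.r.left = 0;
+        sel.r.top = 0;
+        sel.r.width = 640;
+        sel.r.height = 360;
+        if (ioctl(fd, VIDIOC_S_SELECTION, &sel) < 0)
+            perror("VIDIOC_S_SELECTION");
+        else
+            printf("crop set to %ux%u@%d,%d\n", sel.r.width, sel.r.height,
+                   sel.r.left, sel.r.top);
+        close(fd);
+        return 0;
+    }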
+
+
+Section 12: Formats
+-------------------
+
+The driver supports all the regular packed YUYV formats, 16, 24 and 32 bit
+packed RGB formats and two multiplanar formats (one luma and one chroma plane).
+
+The alpha component can be set through the 'Alpha Component' User control
+for those formats that support it. If the 'Apply Alpha To Red Only' control
+is set, then the alpha component is only used for the color red and set to
+0 otherwise.
+
+The driver has to be configured to support the multiplanar formats. By default
+the first driver instance is single-planar, the second is multi-planar, and it
+keeps alternating. This can be changed by setting the multiplanar module option,
+see section 1 for more details on that option.
+
+If the driver instance is using the multiplanar formats/API, then the first
+single planar format (YUYV) and the multiplanar NV16M and NV61M formats
+will have a plane that has a non-zero data_offset of 128 bytes. It is rare for
+data_offset to be non-zero, so this is a useful feature for testing applications.
+
+Video output will also honor any data_offset that the application set.
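+
+A sketch of how a multiplanar application is expected to take data_offset
+into account when accessing a dequeued buffer (the helper names are just for
+illustration, 'mapped' is the mmap()ed start of plane 'p'):
+
+    #include <stddef.h>
+    #include <linux/videodev2.h>
+
+    /* Start of the image payload inside plane 'p' of a dequeued buffer. */
+    static void *plane_payload(const struct v4l2_buffer *buf, unsigned int p,
+                               void *mapped)
+    {
+        return (unsigned char *)mapped + buf->m.planes[p].data_offset;
+    }
+
+    /* Number of payload bytes in plane 'p', excluding the data_offset bytes. */
+    static size_t plane_payload_size(const struct v4l2_buffer *buf,
+                                     unsigned int p)
+    {
+        return buf->m.planes[p].bytesused - buf->m.planes[p].data_offset;
+    }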
+
+
+Section 13: Capture Overlay
+---------------------------
+
+Note: capture overlay support is implemented primarily to test the existing
+V4L2 capture overlay API. In practice few if any GPUs support such overlays
+anymore, nor are they generally needed these days since modern hardware
+is so much more capable. By setting flag 0x10000 in the node_types module
+option the vivid driver will create a simple framebuffer device that can be
+used for testing this API. Whether this API should be used for new drivers is
+questionable.
+
+This driver has support for a destructive capture overlay with bitmap clipping
+and list clipping (up to 16 rectangles) capabilities. Overlays are not
+supported for multiplanar formats. It also honors the struct v4l2_window field
+setting: if it is set to FIELD_TOP or FIELD_BOTTOM and the capture setting is
+FIELD_ALTERNATE, then only the top or bottom fields will be copied to the overlay.
+
+The overlay only works if you are also capturing at that same time. This is a
+vivid limitation since it copies from a buffer to the overlay instead of
+filling the overlay directly. And if you are not capturing, then no buffers
+are available to fill.
+
+In addition, the pixelformat of the capture format and that of the framebuffer
+must be the same for the overlay to work. Otherwise VIDIOC_OVERLAY will return
+an error.
+
+In order to really see what is going on you will need to create two vivid
+instances: the first with a framebuffer enabled. You configure the capture
+overlay of the second instance to use the framebuffer of the first, then
+you start capturing in the second instance. For the first instance you setup
+the output overlay for the video output, turn on video looping and capture
+to see the blended framebuffer overlay that's being written to by the second
+instance. This setup would require the following commands:
+
+ $ sudo modprobe vivid n_devs=2 node_types=0x10101,0x1 multiplanar=1,1
+ $ v4l2-ctl -d1 --find-fb
+ /dev/fb1 is the framebuffer associated with base address 0x12800000
+ $ sudo v4l2-ctl -d2 --set-fbuf fb=1
+ $ v4l2-ctl -d1 --set-fbuf fb=1
+ $ v4l2-ctl -d0 --set-fmt-video=pixelformat='AR15'
+ $ v4l2-ctl -d1 --set-fmt-video-out=pixelformat='AR15'
+ $ v4l2-ctl -d2 --set-fmt-video=pixelformat='AR15'
+ $ v4l2-ctl -d0 -i2
+ $ v4l2-ctl -d2 -i2
+ $ v4l2-ctl -d2 -c horizontal_movement=4
+ $ v4l2-ctl -d1 --overlay=1
+ $ v4l2-ctl -d1 -c loop_video=1
+ $ v4l2-ctl -d2 --stream-mmap --overlay=1
+
+And from another console:
+
+ $ v4l2-ctl -d1 --stream-out-mmap
+
+And yet another console:
+
+ $ qv4l2
+
+and start streaming.
+
+As you can see, this is not for the faint of heart...
+
+
+Section 14: Output Overlay
+--------------------------
+
+Note: output overlays are primarily implemented in order to test the existing
+V4L2 output overlay API. Whether this API should be used for new drivers is
+questionable.
+
+This driver has support for an output overlay and is capable of:
+
+ - bitmap clipping
+ - list clipping (up to 16 rectangles)
+ - chromakey
+ - source chromakey
+ - global alpha
+ - local alpha
+ - local inverse alpha
+
+Output overlays are not supported for multiplanar formats. In addition, the
+pixelformat of the capture format and that of the framebuffer must be the
+same for the overlay to work. Otherwise VIDIOC_OVERLAY will return an error.
+
+Output overlays only work if the driver has been configured to create a
+framebuffer by setting flag 0x10000 in the node_types module option. The
+created framebuffer has a size of 720x576 and supports ARGB 1:5:5:5 and
+RGB 5:6:5.
+
+In order to see the effects of the various clipping, chromakeying or alpha
+processing capabilities you need to turn on video looping and see the results
+on the capture side. The use of the clipping, chromakeying or alpha processing
+capabilities will slow down the video loop considerably as a lot of checks have
+to be done per pixel.
+
+
+Section 15: Some Future Improvements
+------------------------------------
+
+Just as a reminder and in no particular order:
+
+- Add a virtual alsa driver to test audio
+- Add virtual sub-devices and media controller support
+- Some support for testing compressed video
+- Add support to loop raw VBI output to raw VBI input
+- Add support to loop teletext sliced VBI output to VBI input
+- Fix sequence/field numbering when looping of video with alternate fields
+- Add support for V4L2_CID_BG_COLOR for video outputs
+- Add ARGB888 overlay support: better testing of the alpha channel
+- Add custom DV timings support
+- Add support for V4L2_DV_FL_REDUCED_FPS
+- Improve pixel aspect support in the tpg code by passing a real v4l2_fract
+- Use per-queue locks and/or per-device locks to improve throughput
+- Add support to loop from a specific output to a specific input across
+ vivid instances
+- Add support for VIDIOC_EXPBUF once support for that has been added to vb2
+- The SDR radio should use the same 'frequencies' for stations as the normal
+ radio receiver, and give back noise if the frequency doesn't match up with
+ a station frequency
+- Improve the sine generation of the SDR radio.
+- Make a thread for the RDS generation, that would help in particular for the
+ "Controls" RDS Rx I/O Mode as the read-only RDS controls could be updated
+ in real-time.
diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
index afe68ddbe6a4..052ee643a32e 100644
--- a/Documentation/x86/x86_64/mm.txt
+++ b/Documentation/x86/x86_64/mm.txt
@@ -5,7 +5,7 @@ Virtual memory map with 4 level page tables:
0000000000000000 - 00007fffffffffff (=47 bits) user space, different per mm
hole caused by [48:63] sign extension
-ffff800000000000 - ffff80ffffffffff (=40 bits) guard hole
+ffff800000000000 - ffff87ffffffffff (=43 bits) guard hole, reserved for hypervisor
ffff880000000000 - ffffc7ffffffffff (=64 TB) direct mapping of all phys. memory
ffffc80000000000 - ffffc8ffffffffff (=40 bits) hole
ffffc90000000000 - ffffe8ffffffffff (=45 bits) vmalloc/ioremap space
diff --git a/MAINTAINERS b/MAINTAINERS
index 40d4796886c9..f413abff3807 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -734,7 +734,6 @@ F: net/appletalk/
APPLIED MICRO (APM) X-GENE SOC ETHERNET DRIVER
M: Iyappan Subramanian <isubramanian@apm.com>
M: Keyur Chudgar <kchudgar@apm.com>
-M: Ravi Patel <rapatel@apm.com>
S: Supported
F: drivers/net/ethernet/apm/xgene/
F: Documentation/devicetree/bindings/net/apm-xgene-enet.txt
@@ -2761,6 +2760,18 @@ W: http://www.chelsio.com
S: Supported
F: drivers/net/ethernet/chelsio/cxgb4vf/
+CXL (IBM Coherent Accelerator Processor Interface CAPI) DRIVER
+M: Ian Munsie <imunsie@au1.ibm.com>
+M: Michael Neuling <mikey@neuling.org>
+L: linuxppc-dev@lists.ozlabs.org
+S: Supported
+F: drivers/misc/cxl/
+F: include/misc/cxl.h
+F: include/uapi/misc/cxl.h
+F: Documentation/powerpc/cxl.txt
+F: Documentation/ABI/testing/sysfs-class-cxl
+
STMMAC ETHERNET DRIVER
M: Giuseppe Cavallaro <peppe.cavallaro@st.com>
L: netdev@vger.kernel.org
@@ -4233,6 +4244,16 @@ L: linuxppc-dev@lists.ozlabs.org
S: Odd Fixes
F: drivers/tty/hvc/
+HACKRF MEDIA DRIVER
+M: Antti Palosaari <crope@iki.fi>
+L: linux-media@vger.kernel.org
+W: http://linuxtv.org/
+W: http://palosaari.fi/linux/
+Q: http://patchwork.linuxtv.org/project/linux-media/list/
+T: git git://linuxtv.org/anttip/media_tree.git
+S: Maintained
+F: drivers/media/usb/hackrf/
+
HARDWARE MONITORING
M: Jean Delvare <jdelvare@suse.de>
M: Guenter Roeck <linux@roeck-us.net>
@@ -5131,7 +5152,7 @@ W: http://palosaari.fi/linux/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
T: git git://linuxtv.org/anttip/media_tree.git
S: Maintained
-F: drivers/media/tuners/tuner_it913x*
+F: drivers/media/tuners/it913x*
IVTV VIDEO4LINUX DRIVER
M: Andy Walls <awalls@md.metrocast.net>
@@ -5659,8 +5680,8 @@ M: Ingo Molnar <mingo@redhat.com>
L: linux-kernel@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git core/locking
S: Maintained
-F: Documentation/lockdep*.txt
-F: Documentation/lockstat.txt
+F: Documentation/locking/lockdep*.txt
+F: Documentation/locking/lockstat.txt
F: include/linux/lockdep.h
F: kernel/locking/
@@ -6606,10 +6627,9 @@ S: Maintained
F: drivers/mmc/host/omap.c
OMAP HS MMC SUPPORT
-M: Balaji T K <balajitk@ti.com>
L: linux-mmc@vger.kernel.org
L: linux-omap@vger.kernel.org
-S: Maintained
+S: Orphan
F: drivers/mmc/host/omap_hsmmc.c
OMAP RANDOM NUMBER GENERATOR SUPPORT
@@ -7364,6 +7384,14 @@ T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/usb/pwc/*
+PWM FAN DRIVER
+M: Kamil Debski <k.debski@samsung.com>
+L: lm-sensors@lm-sensors.org
+S: Supported
+F: Documentation/devicetree/bindings/hwmon/pwm-fan.txt
+F: Documentation/hwmon/pwm-fan
+F: drivers/hwmon/pwm-fan.c
+
PWM SUBSYSTEM
M: Thierry Reding <thierry.reding@gmail.com>
L: linux-pwm@vger.kernel.org
@@ -8170,6 +8198,8 @@ F: drivers/mmc/host/sdhci-pltfm.[ch]
SECURE COMPUTING
M: Kees Cook <keescook@chromium.org>
+R: Andy Lutomirski <luto@amacapital.net>
+R: Will Drewry <wad@chromium.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git seccomp
S: Supported
F: kernel/seccomp.c
@@ -8664,6 +8694,14 @@ F: include/sound/dmaengine_pcm.h
F: sound/core/pcm_dmaengine.c
F: sound/soc/soc-generic-dmaengine-pcm.c
+SP2 MEDIA DRIVER
+M: Olli Salonen <olli.salonen@iki.fi>
+L: linux-media@vger.kernel.org
+W: http://linuxtv.org/
+Q: http://patchwork.linuxtv.org/project/linux-media/list/
+S: Maintained
+F: drivers/media/dvb-frontends/sp2*
+
SPARC + UltraSPARC (sparc/sparc64)
M: "David S. Miller" <davem@davemloft.net>
L: sparclinux@vger.kernel.org
@@ -9367,6 +9405,14 @@ T: git git://linuxtv.org/media_tree.git
S: Odd fixes
F: drivers/media/usb/tm6000/
+TW68 VIDEO4LINUX DRIVER
+M: Hans Verkuil <hverkuil@xs4all.nl>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+W: http://linuxtv.org
+S: Odd Fixes
+F: drivers/media/pci/tw68/
+
TPM DEVICE DRIVER
M: Peter Huewe <peterhuewe@gmx.de>
M: Ashley Lai <ashley@ashleylai.com>
@@ -9388,6 +9434,7 @@ F: include/*/ftrace.h
F: include/linux/trace*.h
F: include/trace/
F: kernel/trace/
+F: tools/testing/selftests/ftrace/
TRIVIAL PATCHES
M: Jiri Kosina <trivial@kernel.org>
@@ -10235,6 +10282,15 @@ S: Supported
F: drivers/block/xen-blkback/*
F: drivers/block/xen*
+XEN PVSCSI DRIVERS
+M: Juergen Gross <jgross@suse.com>
+L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
+L: linux-scsi@vger.kernel.org
+S: Supported
+F: drivers/scsi/xen-scsifront.c
+F: drivers/xen/xen-scsiback.c
+F: include/xen/interface/io/vscsiif.h
+
XEN SWIOTLB SUBSYSTEM
M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
diff --git a/arch/Kconfig b/arch/Kconfig
index 0eae9df35b88..05d7a8a458d5 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -323,6 +323,17 @@ config HAVE_ARCH_SECCOMP_FILTER
results in the system call being skipped immediately.
- seccomp syscall wired up
+ For best performance, an arch should use seccomp_phase1 and
+ seccomp_phase2 directly. It should call seccomp_phase1 for all
+ syscalls if TIF_SECCOMP is set, but seccomp_phase1 does not
+ need to be called from a ptrace-safe context. It must then
+ call seccomp_phase2 if seccomp_phase1 returns anything other
+ than SECCOMP_PHASE1_OK or SECCOMP_PHASE1_SKIP.
+
+ As an additional optimization, an arch may provide seccomp_data
+ directly to seccomp_phase1; this avoids multiple calls
+ to the syscall_xyz helpers for every syscall.
+
config SECCOMP_FILTER
def_bool y
depends on HAVE_ARCH_SECCOMP_FILTER && SECCOMP && NET
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index ed60a1ee1ed3..8f8eafbedd7c 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -17,8 +17,8 @@
#define ATOMIC_INIT(i) { (i) }
#define ATOMIC64_INIT(i) { (i) }
-#define atomic_read(v) (*(volatile int *)&(v)->counter)
-#define atomic64_read(v) (*(volatile long *)&(v)->counter)
+#define atomic_read(v) ACCESS_ONCE((v)->counter)
+#define atomic64_read(v) ACCESS_ONCE((v)->counter)
#define atomic_set(v,i) ((v)->counter = (i))
#define atomic64_set(v,i) ((v)->counter = (i))
@@ -29,145 +29,92 @@
* branch back to restart the operation.
*/
-static __inline__ void atomic_add(int i, atomic_t * v)
-{
- unsigned long temp;
- __asm__ __volatile__(
- "1: ldl_l %0,%1\n"
- " addl %0,%2,%0\n"
- " stl_c %0,%1\n"
- " beq %0,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- :"=&r" (temp), "=m" (v->counter)
- :"Ir" (i), "m" (v->counter));
-}
-
-static __inline__ void atomic64_add(long i, atomic64_t * v)
-{
- unsigned long temp;
- __asm__ __volatile__(
- "1: ldq_l %0,%1\n"
- " addq %0,%2,%0\n"
- " stq_c %0,%1\n"
- " beq %0,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- :"=&r" (temp), "=m" (v->counter)
- :"Ir" (i), "m" (v->counter));
-}
-
-static __inline__ void atomic_sub(int i, atomic_t * v)
-{
- unsigned long temp;
- __asm__ __volatile__(
- "1: ldl_l %0,%1\n"
- " subl %0,%2,%0\n"
- " stl_c %0,%1\n"
- " beq %0,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- :"=&r" (temp), "=m" (v->counter)
- :"Ir" (i), "m" (v->counter));
+#define ATOMIC_OP(op) \
+static __inline__ void atomic_##op(int i, atomic_t * v) \
+{ \
+ unsigned long temp; \
+ __asm__ __volatile__( \
+ "1: ldl_l %0,%1\n" \
+ " " #op "l %0,%2,%0\n" \
+ " stl_c %0,%1\n" \
+ " beq %0,2f\n" \
+ ".subsection 2\n" \
+ "2: br 1b\n" \
+ ".previous" \
+ :"=&r" (temp), "=m" (v->counter) \
+ :"Ir" (i), "m" (v->counter)); \
+} \
+
+#define ATOMIC_OP_RETURN(op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ long temp, result; \
+ smp_mb(); \
+ __asm__ __volatile__( \
+ "1: ldl_l %0,%1\n" \
+ " " #op "l %0,%3,%2\n" \
+ " " #op "l %0,%3,%0\n" \
+ " stl_c %0,%1\n" \
+ " beq %0,2f\n" \
+ ".subsection 2\n" \
+ "2: br 1b\n" \
+ ".previous" \
+ :"=&r" (temp), "=m" (v->counter), "=&r" (result) \
+ :"Ir" (i), "m" (v->counter) : "memory"); \
+ smp_mb(); \
+ return result; \
}
-static __inline__ void atomic64_sub(long i, atomic64_t * v)
-{
- unsigned long temp;
- __asm__ __volatile__(
- "1: ldq_l %0,%1\n"
- " subq %0,%2,%0\n"
- " stq_c %0,%1\n"
- " beq %0,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- :"=&r" (temp), "=m" (v->counter)
- :"Ir" (i), "m" (v->counter));
-}
-
-
-/*
- * Same as above, but return the result value
- */
-static inline int atomic_add_return(int i, atomic_t *v)
-{
- long temp, result;
- smp_mb();
- __asm__ __volatile__(
- "1: ldl_l %0,%1\n"
- " addl %0,%3,%2\n"
- " addl %0,%3,%0\n"
- " stl_c %0,%1\n"
- " beq %0,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- :"=&r" (temp), "=m" (v->counter), "=&r" (result)
- :"Ir" (i), "m" (v->counter) : "memory");
- smp_mb();
- return result;
+#define ATOMIC64_OP(op) \
+static __inline__ void atomic64_##op(long i, atomic64_t * v) \
+{ \
+ unsigned long temp; \
+ __asm__ __volatile__( \
+ "1: ldq_l %0,%1\n" \
+ " " #op "q %0,%2,%0\n" \
+ " stq_c %0,%1\n" \
+ " beq %0,2f\n" \
+ ".subsection 2\n" \
+ "2: br 1b\n" \
+ ".previous" \
+ :"=&r" (temp), "=m" (v->counter) \
+ :"Ir" (i), "m" (v->counter)); \
+} \
+
+#define ATOMIC64_OP_RETURN(op) \
+static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
+{ \
+ long temp, result; \
+ smp_mb(); \
+ __asm__ __volatile__( \
+ "1: ldq_l %0,%1\n" \
+ " " #op "q %0,%3,%2\n" \
+ " " #op "q %0,%3,%0\n" \
+ " stq_c %0,%1\n" \
+ " beq %0,2f\n" \
+ ".subsection 2\n" \
+ "2: br 1b\n" \
+ ".previous" \
+ :"=&r" (temp), "=m" (v->counter), "=&r" (result) \
+ :"Ir" (i), "m" (v->counter) : "memory"); \
+ smp_mb(); \
+ return result; \
}
-static __inline__ long atomic64_add_return(long i, atomic64_t * v)
-{
- long temp, result;
- smp_mb();
- __asm__ __volatile__(
- "1: ldq_l %0,%1\n"
- " addq %0,%3,%2\n"
- " addq %0,%3,%0\n"
- " stq_c %0,%1\n"
- " beq %0,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- :"=&r" (temp), "=m" (v->counter), "=&r" (result)
- :"Ir" (i), "m" (v->counter) : "memory");
- smp_mb();
- return result;
-}
+#define ATOMIC_OPS(opg) \
+ ATOMIC_OP(opg) \
+ ATOMIC_OP_RETURN(opg) \
+ ATOMIC64_OP(opg) \
+ ATOMIC64_OP_RETURN(opg)
-static __inline__ long atomic_sub_return(int i, atomic_t * v)
-{
- long temp, result;
- smp_mb();
- __asm__ __volatile__(
- "1: ldl_l %0,%1\n"
- " subl %0,%3,%2\n"
- " subl %0,%3,%0\n"
- " stl_c %0,%1\n"
- " beq %0,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- :"=&r" (temp), "=m" (v->counter), "=&r" (result)
- :"Ir" (i), "m" (v->counter) : "memory");
- smp_mb();
- return result;
-}
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
-static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
-{
- long temp, result;
- smp_mb();
- __asm__ __volatile__(
- "1: ldq_l %0,%1\n"
- " subq %0,%3,%2\n"
- " subq %0,%3,%0\n"
- " stq_c %0,%1\n"
- " beq %0,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- :"=&r" (temp), "=m" (v->counter), "=&r" (result)
- :"Ir" (i), "m" (v->counter) : "memory");
- smp_mb();
- return result;
-}
+#undef ATOMIC_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 1402fcc11c2c..f9c732e18284 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -446,7 +446,8 @@ struct procfs_args {
* unhappy with OSF UFS. [CHECKME]
*/
static int
-osf_ufs_mount(const char *dirname, struct ufs_args __user *args, int flags)
+osf_ufs_mount(const char __user *dirname,
+ struct ufs_args __user *args, int flags)
{
int retval;
struct cdfs_args tmp;
@@ -466,7 +467,8 @@ osf_ufs_mount(const char *dirname, struct ufs_args __user *args, int flags)
}
static int
-osf_cdfs_mount(const char *dirname, struct cdfs_args __user *args, int flags)
+osf_cdfs_mount(const char __user *dirname,
+ struct cdfs_args __user *args, int flags)
{
int retval;
struct cdfs_args tmp;
@@ -486,7 +488,8 @@ osf_cdfs_mount(const char *dirname, struct cdfs_args __user *args, int flags)
}
static int
-osf_procfs_mount(const char *dirname, struct procfs_args __user *args, int flags)
+osf_procfs_mount(const char __user *dirname,
+ struct procfs_args __user *args, int flags)
{
struct procfs_args tmp;
@@ -500,28 +503,22 @@ SYSCALL_DEFINE4(osf_mount, unsigned long, typenr, const char __user *, path,
int, flag, void __user *, data)
{
int retval;
- struct filename *name;
- name = getname(path);
- retval = PTR_ERR(name);
- if (IS_ERR(name))
- goto out;
switch (typenr) {
case 1:
- retval = osf_ufs_mount(name->name, data, flag);
+ retval = osf_ufs_mount(path, data, flag);
break;
case 6:
- retval = osf_cdfs_mount(name->name, data, flag);
+ retval = osf_cdfs_mount(path, data, flag);
break;
case 9:
- retval = osf_procfs_mount(name->name, data, flag);
+ retval = osf_procfs_mount(path, data, flag);
break;
default:
retval = -EINVAL;
printk("osf_mount(%ld, %x)\n", typenr, flag);
}
- putname(name);
- out:
+
return retval;
}
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 83f03ca6caf6..173f303a868f 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -25,79 +25,36 @@
#define atomic_set(v, i) (((v)->counter) = (i))
-static inline void atomic_add(int i, atomic_t *v)
-{
- unsigned int temp;
-
- __asm__ __volatile__(
- "1: llock %0, [%1] \n"
- " add %0, %0, %2 \n"
- " scond %0, [%1] \n"
- " bnz 1b \n"
- : "=&r"(temp) /* Early clobber, to prevent reg reuse */
- : "r"(&v->counter), "ir"(i)
- : "cc");
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
- unsigned int temp;
-
- __asm__ __volatile__(
- "1: llock %0, [%1] \n"
- " sub %0, %0, %2 \n"
- " scond %0, [%1] \n"
- " bnz 1b \n"
- : "=&r"(temp)
- : "r"(&v->counter), "ir"(i)
- : "cc");
-}
-
-/* add and also return the new value */
-static inline int atomic_add_return(int i, atomic_t *v)
-{
- unsigned int temp;
-
- __asm__ __volatile__(
- "1: llock %0, [%1] \n"
- " add %0, %0, %2 \n"
- " scond %0, [%1] \n"
- " bnz 1b \n"
- : "=&r"(temp)
- : "r"(&v->counter), "ir"(i)
- : "cc");
-
- return temp;
-}
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
- unsigned int temp;
-
- __asm__ __volatile__(
- "1: llock %0, [%1] \n"
- " sub %0, %0, %2 \n"
- " scond %0, [%1] \n"
- " bnz 1b \n"
- : "=&r"(temp)
- : "r"(&v->counter), "ir"(i)
- : "cc");
-
- return temp;
-}
-
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
- unsigned int temp;
-
- __asm__ __volatile__(
- "1: llock %0, [%1] \n"
- " bic %0, %0, %2 \n"
- " scond %0, [%1] \n"
- " bnz 1b \n"
- : "=&r"(temp)
- : "r"(addr), "ir"(mask)
- : "cc");
+#define ATOMIC_OP(op, c_op, asm_op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ unsigned int temp; \
+ \
+ __asm__ __volatile__( \
+ "1: llock %0, [%1] \n" \
+ " " #asm_op " %0, %0, %2 \n" \
+ " scond %0, [%1] \n" \
+ " bnz 1b \n" \
+ : "=&r"(temp) /* Early clobber, to prevent reg reuse */ \
+ : "r"(&v->counter), "ir"(i) \
+ : "cc"); \
+} \
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ unsigned int temp; \
+ \
+ __asm__ __volatile__( \
+ "1: llock %0, [%1] \n" \
+ " " #asm_op " %0, %0, %2 \n" \
+ " scond %0, [%1] \n" \
+ " bnz 1b \n" \
+ : "=&r"(temp) \
+ : "r"(&v->counter), "ir"(i) \
+ : "cc"); \
+ \
+ return temp; \
}
#else /* !CONFIG_ARC_HAS_LLSC */
@@ -126,6 +83,7 @@ static inline void atomic_set(atomic_t *v, int i)
v->counter = i;
atomic_ops_unlock(flags);
}
+
#endif
/*
@@ -133,62 +91,46 @@ static inline void atomic_set(atomic_t *v, int i)
* Locking would change to irq-disabling only (UP) and spinlocks (SMP)
*/
-static inline void atomic_add(int i, atomic_t *v)
-{
- unsigned long flags;
-
- atomic_ops_lock(flags);
- v->counter += i;
- atomic_ops_unlock(flags);
+#define ATOMIC_OP(op, c_op, asm_op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ \
+ atomic_ops_lock(flags); \
+ v->counter c_op i; \
+ atomic_ops_unlock(flags); \
}
-static inline void atomic_sub(int i, atomic_t *v)
-{
- unsigned long flags;
-
- atomic_ops_lock(flags);
- v->counter -= i;
- atomic_ops_unlock(flags);
+#define ATOMIC_OP_RETURN(op, c_op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ unsigned long temp; \
+ \
+ atomic_ops_lock(flags); \
+ temp = v->counter; \
+ temp c_op i; \
+ v->counter = temp; \
+ atomic_ops_unlock(flags); \
+ \
+ return temp; \
}
-static inline int atomic_add_return(int i, atomic_t *v)
-{
- unsigned long flags;
- unsigned long temp;
-
- atomic_ops_lock(flags);
- temp = v->counter;
- temp += i;
- v->counter = temp;
- atomic_ops_unlock(flags);
-
- return temp;
-}
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
- unsigned long flags;
- unsigned long temp;
-
- atomic_ops_lock(flags);
- temp = v->counter;
- temp -= i;
- v->counter = temp;
- atomic_ops_unlock(flags);
+#endif /* !CONFIG_ARC_HAS_LLSC */
- return temp;
-}
+#define ATOMIC_OPS(op, c_op, asm_op) \
+ ATOMIC_OP(op, c_op, asm_op) \
+ ATOMIC_OP_RETURN(op, c_op, asm_op)
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
- unsigned long flags;
+ATOMIC_OPS(add, +=, add)
+ATOMIC_OPS(sub, -=, sub)
+ATOMIC_OP(and, &=, and)
- atomic_ops_lock(flags);
- *addr &= ~mask;
- atomic_ops_unlock(flags);
-}
+#define atomic_clear_mask(mask, v) atomic_and(~(mask), (v))
-#endif /* !CONFIG_ARC_HAS_LLSC */
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
/**
* __atomic_add_unless - add unless the number is a given value
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 18f392f8b744..89c4b5ccc68d 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1779,7 +1779,7 @@ config XEN_DOM0
depends on XEN
config XEN
- bool "Xen guest support on ARM (EXPERIMENTAL)"
+ bool "Xen guest support on ARM"
depends on ARM && AEABI && OF
depends on CPU_V7 && !CPU_V6
depends on !GENERIC_ATOMIC64
diff --git a/arch/arm/boot/dts/exynos5420-arndale-octa.dts b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
index 70a559cf1a3d..4f2df61c1cfc 100644
--- a/arch/arm/boot/dts/exynos5420-arndale-octa.dts
+++ b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
@@ -69,7 +69,8 @@
samsung,dw-mshc-ddr-timing = <1 2>;
pinctrl-names = "default";
pinctrl-0 = <&sd2_clk &sd2_cmd &sd2_cd &sd2_bus4>;
- vmmc-supply = <&ldo10_reg>;
+ vmmc-supply = <&ldo19_reg>;
+ vqmmc-supply = <&ldo13_reg>;
bus-width = <4>;
cap-sd-highspeed;
};
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 3040359094d9..e22c11970b7b 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -27,7 +27,7 @@
* strex/ldrex monitor on some implementations. The reason we can use it for
* atomic_set() is the clrex or dummy strex done on every exception return.
*/
-#define atomic_read(v) (*(volatile int *)&(v)->counter)
+#define atomic_read(v) ACCESS_ONCE((v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))
#if __LINUX_ARM_ARCH__ >= 6
@@ -37,84 +37,47 @@
* store exclusive to ensure that these are atomic. We may loop
* to ensure that the update happens.
*/
-static inline void atomic_add(int i, atomic_t *v)
-{
- unsigned long tmp;
- int result;
-
- prefetchw(&v->counter);
- __asm__ __volatile__("@ atomic_add\n"
-"1: ldrex %0, [%3]\n"
-" add %0, %0, %4\n"
-" strex %1, %0, [%3]\n"
-" teq %1, #0\n"
-" bne 1b"
- : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
- : "r" (&v->counter), "Ir" (i)
- : "cc");
-}
-static inline int atomic_add_return(int i, atomic_t *v)
-{
- unsigned long tmp;
- int result;
-
- smp_mb();
- prefetchw(&v->counter);
-
- __asm__ __volatile__("@ atomic_add_return\n"
-"1: ldrex %0, [%3]\n"
-" add %0, %0, %4\n"
-" strex %1, %0, [%3]\n"
-" teq %1, #0\n"
-" bne 1b"
- : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
- : "r" (&v->counter), "Ir" (i)
- : "cc");
-
- smp_mb();
-
- return result;
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
- unsigned long tmp;
- int result;
-
- prefetchw(&v->counter);
- __asm__ __volatile__("@ atomic_sub\n"
-"1: ldrex %0, [%3]\n"
-" sub %0, %0, %4\n"
-" strex %1, %0, [%3]\n"
-" teq %1, #0\n"
-" bne 1b"
- : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
- : "r" (&v->counter), "Ir" (i)
- : "cc");
-}
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
- unsigned long tmp;
- int result;
-
- smp_mb();
- prefetchw(&v->counter);
-
- __asm__ __volatile__("@ atomic_sub_return\n"
-"1: ldrex %0, [%3]\n"
-" sub %0, %0, %4\n"
-" strex %1, %0, [%3]\n"
-" teq %1, #0\n"
-" bne 1b"
- : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
- : "r" (&v->counter), "Ir" (i)
- : "cc");
-
- smp_mb();
-
- return result;
+#define ATOMIC_OP(op, c_op, asm_op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ unsigned long tmp; \
+ int result; \
+ \
+ prefetchw(&v->counter); \
+ __asm__ __volatile__("@ atomic_" #op "\n" \
+"1: ldrex %0, [%3]\n" \
+" " #asm_op " %0, %0, %4\n" \
+" strex %1, %0, [%3]\n" \
+" teq %1, #0\n" \
+" bne 1b" \
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
+ : "r" (&v->counter), "Ir" (i) \
+ : "cc"); \
+} \
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ unsigned long tmp; \
+ int result; \
+ \
+ smp_mb(); \
+ prefetchw(&v->counter); \
+ \
+ __asm__ __volatile__("@ atomic_" #op "_return\n" \
+"1: ldrex %0, [%3]\n" \
+" " #asm_op " %0, %0, %4\n" \
+" strex %1, %0, [%3]\n" \
+" teq %1, #0\n" \
+" bne 1b" \
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
+ : "r" (&v->counter), "Ir" (i) \
+ : "cc"); \
+ \
+ smp_mb(); \
+ \
+ return result; \
}
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
@@ -174,33 +137,29 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
#error SMP not supported on pre-ARMv6 CPUs
#endif
-static inline int atomic_add_return(int i, atomic_t *v)
-{
- unsigned long flags;
- int val;
-
- raw_local_irq_save(flags);
- val = v->counter;
- v->counter = val += i;
- raw_local_irq_restore(flags);
-
- return val;
-}
-#define atomic_add(i, v) (void) atomic_add_return(i, v)
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
- unsigned long flags;
- int val;
-
- raw_local_irq_save(flags);
- val = v->counter;
- v->counter = val -= i;
- raw_local_irq_restore(flags);
-
- return val;
+#define ATOMIC_OP(op, c_op, asm_op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ v->counter c_op i; \
+ raw_local_irq_restore(flags); \
+} \
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ int val; \
+ \
+ raw_local_irq_save(flags); \
+ v->counter c_op i; \
+ val = v->counter; \
+ raw_local_irq_restore(flags); \
+ \
+ return val; \
}
-#define atomic_sub(i, v) (void) atomic_sub_return(i, v)
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
@@ -228,6 +187,17 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
#endif /* __LINUX_ARM_ARCH__ */
+#define ATOMIC_OPS(op, c_op, asm_op) \
+ ATOMIC_OP(op, c_op, asm_op) \
+ ATOMIC_OP_RETURN(op, c_op, asm_op)
+
+ATOMIC_OPS(add, +=, add)
+ATOMIC_OPS(sub, -=, sub)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_inc(v) atomic_add(1, v)
@@ -300,89 +270,60 @@ static inline void atomic64_set(atomic64_t *v, long long i)
}
#endif
-static inline void atomic64_add(long long i, atomic64_t *v)
-{
- long long result;
- unsigned long tmp;
-
- prefetchw(&v->counter);
- __asm__ __volatile__("@ atomic64_add\n"
-"1: ldrexd %0, %H0, [%3]\n"
-" adds %Q0, %Q0, %Q4\n"
-" adc %R0, %R0, %R4\n"
-" strexd %1, %0, %H0, [%3]\n"
-" teq %1, #0\n"
-" bne 1b"
- : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
- : "r" (&v->counter), "r" (i)
- : "cc");
-}
-
-static inline long long atomic64_add_return(long long i, atomic64_t *v)
-{
- long long result;
- unsigned long tmp;
-
- smp_mb();
- prefetchw(&v->counter);
-
- __asm__ __volatile__("@ atomic64_add_return\n"
-"1: ldrexd %0, %H0, [%3]\n"
-" adds %Q0, %Q0, %Q4\n"
-" adc %R0, %R0, %R4\n"
-" strexd %1, %0, %H0, [%3]\n"
-" teq %1, #0\n"
-" bne 1b"
- : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
- : "r" (&v->counter), "r" (i)
- : "cc");
-
- smp_mb();
-
- return result;
-}
-
-static inline void atomic64_sub(long long i, atomic64_t *v)
-{
- long long result;
- unsigned long tmp;
-
- prefetchw(&v->counter);
- __asm__ __volatile__("@ atomic64_sub\n"
-"1: ldrexd %0, %H0, [%3]\n"
-" subs %Q0, %Q0, %Q4\n"
-" sbc %R0, %R0, %R4\n"
-" strexd %1, %0, %H0, [%3]\n"
-" teq %1, #0\n"
-" bne 1b"
- : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
- : "r" (&v->counter), "r" (i)
- : "cc");
+#define ATOMIC64_OP(op, op1, op2) \
+static inline void atomic64_##op(long long i, atomic64_t *v) \
+{ \
+ long long result; \
+ unsigned long tmp; \
+ \
+ prefetchw(&v->counter); \
+ __asm__ __volatile__("@ atomic64_" #op "\n" \
+"1: ldrexd %0, %H0, [%3]\n" \
+" " #op1 " %Q0, %Q0, %Q4\n" \
+" " #op2 " %R0, %R0, %R4\n" \
+" strexd %1, %0, %H0, [%3]\n" \
+" teq %1, #0\n" \
+" bne 1b" \
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
+ : "r" (&v->counter), "r" (i) \
+ : "cc"); \
+} \
+
+#define ATOMIC64_OP_RETURN(op, op1, op2) \
+static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
+{ \
+ long long result; \
+ unsigned long tmp; \
+ \
+ smp_mb(); \
+ prefetchw(&v->counter); \
+ \
+ __asm__ __volatile__("@ atomic64_" #op "_return\n" \
+"1: ldrexd %0, %H0, [%3]\n" \
+" " #op1 " %Q0, %Q0, %Q4\n" \
+" " #op2 " %R0, %R0, %R4\n" \
+" strexd %1, %0, %H0, [%3]\n" \
+" teq %1, #0\n" \
+" bne 1b" \
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
+ : "r" (&v->counter), "r" (i) \
+ : "cc"); \
+ \
+ smp_mb(); \
+ \
+ return result; \
}
-static inline long long atomic64_sub_return(long long i, atomic64_t *v)
-{
- long long result;
- unsigned long tmp;
-
- smp_mb();
- prefetchw(&v->counter);
-
- __asm__ __volatile__("@ atomic64_sub_return\n"
-"1: ldrexd %0, %H0, [%3]\n"
-" subs %Q0, %Q0, %Q4\n"
-" sbc %R0, %R0, %R4\n"
-" strexd %1, %0, %H0, [%3]\n"
-" teq %1, #0\n"
-" bne 1b"
- : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
- : "r" (&v->counter), "r" (i)
- : "cc");
+#define ATOMIC64_OPS(op, op1, op2) \
+ ATOMIC64_OP(op, op1, op2) \
+ ATOMIC64_OP_RETURN(op, op1, op2)
- smp_mb();
+ATOMIC64_OPS(add, adds, adc)
+ATOMIC64_OPS(sub, subs, sbc)
- return result;
-}
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
long long new)
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index c45b61a4b4a5..85738b200023 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -265,22 +265,6 @@ extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
struct dma_attrs *attrs);
-static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
-{
- DEFINE_DMA_ATTRS(attrs);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
- return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
-}
-
-static inline void dma_free_writecombine(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle)
-{
- DEFINE_DMA_ATTRS(attrs);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
- return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
-}
-
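
The removed write-combine helpers were only thin wrappers around the DMA-attrs API, so the same allocation can still be expressed directly; a minimal sketch (the helper name here is made up, not a kernel API):

static void *example_alloc_wc(struct device *dev, size_t size,
                              dma_addr_t *handle)
{
        DEFINE_DMA_ATTRS(attrs);

        dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
        return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, &attrs);
}

The matching free goes through dma_free_attrs() with the same attrs, exactly as the deleted dma_free_writecombine() did.
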
/*
* This can be called during early boot to increase the size of the atomic
* coherent DMA pool above the default value of 256KiB. It must be called
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index a0a691d1cbee..fe972a2f3df3 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -114,18 +114,13 @@ void soft_restart(unsigned long addr)
BUG();
}
-static void null_restart(enum reboot_mode reboot_mode, const char *cmd)
-{
-}
-
/*
* Function pointers to optional machine specific functions
*/
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
-void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd) = null_restart;
-EXPORT_SYMBOL_GPL(arm_pm_restart);
+void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
/*
* This is our default idle handler.
@@ -230,7 +225,10 @@ void machine_restart(char *cmd)
local_irq_disable();
smp_send_stop();
- arm_pm_restart(reboot_mode, cmd);
+ if (arm_pm_restart)
+ arm_pm_restart(reboot_mode, cmd);
+ else
+ do_kernel_restart(cmd);
/* Give a grace period for failure to restart of 1s */
mdelay(1000);
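
With arm_pm_restart now left NULL by default, machine_restart() falls through to do_kernel_restart(), which runs the generic restart-handler notifier chain. A hedged sketch of how a platform driver would hook into that path (register_restart_handler() is the generic API; the reset hardware poked in the callback is hypothetical):

#include <linux/notifier.h>
#include <linux/reboot.h>

static int example_restart_handler(struct notifier_block *nb,
                                   unsigned long mode, void *cmd)
{
        /* kick the (hypothetical) board reset hardware here */
        return NOTIFY_DONE;
}

static struct notifier_block example_restart_nb = {
        .notifier_call  = example_restart_handler,
        .priority       = 128,  /* conventional default priority */
};

/* from the driver's probe(): register_restart_handler(&example_restart_nb); */
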
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 0c27ed6f3f23..5e772a21ab97 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -933,8 +933,13 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
current_thread_info()->syscall = scno;
/* Do the secure computing check first; failures should be fast. */
- if (secure_computing(scno) == -1)
+#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
+ if (secure_computing() == -1)
return -1;
+#else
+ /* XXX: remove this once OABI gets fixed */
+ secure_computing_strict(scno);
+#endif
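
secure_computing() is the filter-capable entry point: it may return -1 to tell syscall_trace_enter() to abort the syscall, whereas secure_computing_strict() is legacy mode-1 seccomp, which kills the task on anything outside the small read/write/exit/sigreturn whitelist. A hedged userspace sketch (not part of this patch) of the kind of filter the first path enforces, here denying getpid() with EPERM and allowing everything else:

#include <stddef.h>
#include <errno.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

static int install_filter(void)
{
        struct sock_filter filter[] = {
                /* load the syscall number */
                BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
                         offsetof(struct seccomp_data, nr)),
                /* getpid() -> EPERM, anything else -> allow */
                BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getpid, 0, 1),
                BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ERRNO | EPERM),
                BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
        };
        struct sock_fprog prog = {
                .len = sizeof(filter) / sizeof(filter[0]),
                .filter = filter,
        };

        if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
                return -1;
        return prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
}
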
if (test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index e35d880f9773..89cfdd6e50cb 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -42,7 +42,7 @@
*/
static DEFINE_PER_CPU(unsigned long, cpu_scale);
-unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
+unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
return per_cpu(cpu_scale, cpu);
}
@@ -166,7 +166,7 @@ static void update_cpu_capacity(unsigned int cpu)
set_capacity_scale(cpu, cpu_capacity(cpu) / middle_capacity);
printk(KERN_INFO "CPU%u: update cpu_capacity %lu\n",
- cpu, arch_scale_freq_capacity(NULL, cpu));
+ cpu, arch_scale_cpu_capacity(NULL, cpu));
}
#else
diff --git a/arch/arm/mach-shmobile/board-koelsch.c b/arch/arm/mach-shmobile/board-koelsch.c
index b7d5bc7659cd..126a8b4ec491 100644
--- a/arch/arm/mach-shmobile/board-koelsch.c
+++ b/arch/arm/mach-shmobile/board-koelsch.c
@@ -331,7 +331,6 @@ SDHI_REGULATOR(2, RCAR_GP_PIN(7, 19), RCAR_GP_PIN(2, 26));
static struct sh_mobile_sdhi_info sdhi0_info __initdata = {
.tmio_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
MMC_CAP_POWER_OFF_CARD,
- .tmio_caps2 = MMC_CAP2_NO_MULTI_READ,
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT,
};
@@ -344,7 +343,6 @@ static struct resource sdhi0_resources[] __initdata = {
static struct sh_mobile_sdhi_info sdhi1_info __initdata = {
.tmio_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
MMC_CAP_POWER_OFF_CARD,
- .tmio_caps2 = MMC_CAP2_NO_MULTI_READ,
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT,
};
@@ -357,7 +355,6 @@ static struct resource sdhi1_resources[] __initdata = {
static struct sh_mobile_sdhi_info sdhi2_info __initdata = {
.tmio_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
MMC_CAP_POWER_OFF_CARD,
- .tmio_caps2 = MMC_CAP2_NO_MULTI_READ,
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT |
TMIO_MMC_WRPROTECT_DISABLE,
};
diff --git a/arch/arm/mach-shmobile/board-lager.c b/arch/arm/mach-shmobile/board-lager.c
index e1d8215da0b0..f5a98e2942b3 100644
--- a/arch/arm/mach-shmobile/board-lager.c
+++ b/arch/arm/mach-shmobile/board-lager.c
@@ -630,7 +630,6 @@ static void __init lager_add_rsnd_device(void)
static struct sh_mobile_sdhi_info sdhi0_info __initdata = {
.tmio_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
MMC_CAP_POWER_OFF_CARD,
- .tmio_caps2 = MMC_CAP2_NO_MULTI_READ,
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT |
TMIO_MMC_WRPROTECT_DISABLE,
};
@@ -644,7 +643,6 @@ static struct resource sdhi0_resources[] __initdata = {
static struct sh_mobile_sdhi_info sdhi2_info __initdata = {
.tmio_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
MMC_CAP_POWER_OFF_CARD,
- .tmio_caps2 = MMC_CAP2_NO_MULTI_READ,
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT |
TMIO_MMC_WRPROTECT_DISABLE,
};
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index c49ca4c738bb..ac9afde76dea 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -349,7 +349,7 @@ config XEN_DOM0
depends on XEN
config XEN
- bool "Xen guest support on ARM64 (EXPERIMENTAL)"
+ bool "Xen guest support on ARM64"
depends on ARM64 && OF
select SWIOTLB_XEN
help
diff --git a/arch/arm64/boot/dts/apm-mustang.dts b/arch/arm64/boot/dts/apm-mustang.dts
index f64900052f4e..8eb6d94c7851 100644
--- a/arch/arm64/boot/dts/apm-mustang.dts
+++ b/arch/arm64/boot/dts/apm-mustang.dts
@@ -40,3 +40,7 @@
&menet {
status = "ok";
};
+
+&xgenet {
+ status = "ok";
+};
diff --git a/arch/arm64/boot/dts/apm-storm.dtsi b/arch/arm64/boot/dts/apm-storm.dtsi
index 4f6d04d52cca..87d3205e98d5 100644
--- a/arch/arm64/boot/dts/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm-storm.dtsi
@@ -176,6 +176,16 @@
clock-output-names = "menetclk";
};
+ xge0clk: xge0clk@1f61c000 {
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ reg = <0x0 0x1f61c000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ csr-mask = <0x3>;
+ clock-output-names = "xge0clk";
+ };
+
sataphy1clk: sataphy1clk@1f21c000 {
compatible = "apm,xgene-device-clock";
#clock-cells = <1>;
@@ -585,7 +595,8 @@
interrupts = <0x0 0x3c 0x4>;
dma-coherent;
clocks = <&menetclk 0>;
- local-mac-address = [00 01 73 00 00 01];
+ /* mac address will be overwritten by the bootloader */
+ local-mac-address = [00 00 00 00 00 00];
phy-connection-type = "rgmii";
phy-handle = <&menetphy>;
mdio {
@@ -600,12 +611,26 @@
};
};
+ xgenet: ethernet@1f610000 {
+ compatible = "apm,xgene-enet";
+ status = "disabled";
+ reg = <0x0 0x1f610000 0x0 0xd100>,
+ <0x0 0x1f600000 0x0 0X400>,
+ <0x0 0x18000000 0x0 0X200>;
+ reg-names = "enet_csr", "ring_csr", "ring_cmd";
+ interrupts = <0x0 0x60 0x4>;
+ dma-coherent;
+ clocks = <&xge0clk 0>;
+ /* mac address will be overwritten by the bootloader */
+ local-mac-address = [00 00 00 00 00 00];
+ phy-connection-type = "xgmii";
+ };
+
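
The xgenet node stays "disabled" in the SoC dtsi and is switched to "ok" per board, as apm-mustang.dts does above. On the driver side it is picked up by an ordinary compatible-string match; a hedged sketch (identifier names are illustrative, not taken from the real X-Gene driver):

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int example_enet_probe(struct platform_device *pdev)
{
        dev_info(&pdev->dev, "bound to an apm,xgene-enet node\n");
        return 0;
}

static const struct of_device_id example_enet_of_match[] = {
        { .compatible = "apm,xgene-enet" },
        { },
};
MODULE_DEVICE_TABLE(of, example_enet_of_match);

static struct platform_driver example_enet_driver = {
        .driver = {
                .name           = "xgene-enet-example",
                .of_match_table = example_enet_of_match,
        },
        .probe = example_enet_probe,
};
module_platform_driver(example_enet_driver);
MODULE_LICENSE("GPL");
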
rng: rng@10520000 {
compatible = "apm,xgene-rng";
reg = <0x0 0x10520000 0x0 0x100>;
interrupts = <0x0 0x41 0x4>;
clocks = <&rngpkaclk 0>;
};
-
};
};
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 65f1569ac96e..7047051ded40 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -35,7 +35,7 @@
* strex/ldrex monitor on some implementations. The reason we can use it for
* atomic_set() is the clrex or dummy strex done on every exception return.
*/
-#define atomic_read(v) (*(volatile int *)&(v)->counter)
+#define atomic_read(v) ACCESS_ONCE((v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))
/*
@@ -43,69 +43,51 @@
* store exclusive to ensure that these are atomic. We may loop
* to ensure that the update happens.
*/
-static inline void atomic_add(int i, atomic_t *v)
-{
- unsigned long tmp;
- int result;
-
- asm volatile("// atomic_add\n"
-"1: ldxr %w0, %2\n"
-" add %w0, %w0, %w3\n"
-" stxr %w1, %w0, %2\n"
-" cbnz %w1, 1b"
- : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i));
-}
-
-static inline int atomic_add_return(int i, atomic_t *v)
-{
- unsigned long tmp;
- int result;
-
- asm volatile("// atomic_add_return\n"
-"1: ldxr %w0, %2\n"
-" add %w0, %w0, %w3\n"
-" stlxr %w1, %w0, %2\n"
-" cbnz %w1, 1b"
- : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i)
- : "memory");
-
- smp_mb();
- return result;
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
- unsigned long tmp;
- int result;
- asm volatile("// atomic_sub\n"
-"1: ldxr %w0, %2\n"
-" sub %w0, %w0, %w3\n"
-" stxr %w1, %w0, %2\n"
-" cbnz %w1, 1b"
- : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i));
+#define ATOMIC_OP(op, asm_op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ unsigned long tmp; \
+ int result; \
+ \
+ asm volatile("// atomic_" #op "\n" \
+"1: ldxr %w0, %2\n" \
+" " #asm_op " %w0, %w0, %w3\n" \
+" stxr %w1, %w0, %2\n" \
+" cbnz %w1, 1b" \
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
+ : "Ir" (i)); \
+} \
+
+#define ATOMIC_OP_RETURN(op, asm_op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ unsigned long tmp; \
+ int result; \
+ \
+ asm volatile("// atomic_" #op "_return\n" \
+"1: ldxr %w0, %2\n" \
+" " #asm_op " %w0, %w0, %w3\n" \
+" stlxr %w1, %w0, %2\n" \
+" cbnz %w1, 1b" \
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
+ : "Ir" (i) \
+ : "memory"); \
+ \
+ smp_mb(); \
+ return result; \
}
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
- unsigned long tmp;
- int result;
+#define ATOMIC_OPS(op, asm_op) \
+ ATOMIC_OP(op, asm_op) \
+ ATOMIC_OP_RETURN(op, asm_op)
- asm volatile("// atomic_sub_return\n"
-"1: ldxr %w0, %2\n"
-" sub %w0, %w0, %w3\n"
-" stlxr %w1, %w0, %2\n"
-" cbnz %w1, 1b"
- : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i)
- : "memory");
+ATOMIC_OPS(add, add)
+ATOMIC_OPS(sub, sub)
- smp_mb();
- return result;
-}
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
@@ -157,72 +139,53 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
*/
#define ATOMIC64_INIT(i) { (i) }
-#define atomic64_read(v) (*(volatile long *)&(v)->counter)
+#define atomic64_read(v) ACCESS_ONCE((v)->counter)
#define atomic64_set(v,i) (((v)->counter) = (i))
-static inline void atomic64_add(u64 i, atomic64_t *v)
-{
- long result;
- unsigned long tmp;
-
- asm volatile("// atomic64_add\n"
-"1: ldxr %0, %2\n"
-" add %0, %0, %3\n"
-" stxr %w1, %0, %2\n"
-" cbnz %w1, 1b"
- : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i));
+#define ATOMIC64_OP(op, asm_op) \
+static inline void atomic64_##op(long i, atomic64_t *v) \
+{ \
+ long result; \
+ unsigned long tmp; \
+ \
+ asm volatile("// atomic64_" #op "\n" \
+"1: ldxr %0, %2\n" \
+" " #asm_op " %0, %0, %3\n" \
+" stxr %w1, %0, %2\n" \
+" cbnz %w1, 1b" \
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
+ : "Ir" (i)); \
+} \
+
+#define ATOMIC64_OP_RETURN(op, asm_op) \
+static inline long atomic64_##op##_return(long i, atomic64_t *v) \
+{ \
+ long result; \
+ unsigned long tmp; \
+ \
+ asm volatile("// atomic64_" #op "_return\n" \
+"1: ldxr %0, %2\n" \
+" " #asm_op " %0, %0, %3\n" \
+" stlxr %w1, %0, %2\n" \
+" cbnz %w1, 1b" \
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
+ : "Ir" (i) \
+ : "memory"); \
+ \
+ smp_mb(); \
+ return result; \
}
-static inline long atomic64_add_return(long i, atomic64_t *v)
-{
- long result;
- unsigned long tmp;
+#define ATOMIC64_OPS(op, asm_op) \
+ ATOMIC64_OP(op, asm_op) \
+ ATOMIC64_OP_RETURN(op, asm_op)
- asm volatile("// atomic64_add_return\n"
-"1: ldxr %0, %2\n"
-" add %0, %0, %3\n"
-" stlxr %w1, %0, %2\n"
-" cbnz %w1, 1b"
- : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i)
- : "memory");
+ATOMIC64_OPS(add, add)
+ATOMIC64_OPS(sub, sub)
- smp_mb();
- return result;
-}
-
-static inline void atomic64_sub(u64 i, atomic64_t *v)
-{
- long result;
- unsigned long tmp;
-
- asm volatile("// atomic64_sub\n"
-"1: ldxr %0, %2\n"
-" sub %0, %0, %3\n"
-" stxr %w1, %0, %2\n"
-" cbnz %w1, 1b"
- : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i));
-}
-
-static inline long atomic64_sub_return(long i, atomic64_t *v)
-{
- long result;
- unsigned long tmp;
-
- asm volatile("// atomic64_sub_return\n"
-"1: ldxr %0, %2\n"
-" sub %0, %0, %3\n"
-" stlxr %w1, %0, %2\n"
-" cbnz %w1, 1b"
- : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i)
- : "memory");
-
- smp_mb();
- return result;
-}
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
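
Two details of the arm64 conversion are worth spelling out: atomic_read()/atomic64_read() now go through ACCESS_ONCE() rather than an open-coded volatile cast, and only the *_return variants are ordered (store-release stlxr plus smp_mb()), while the void atomic_add()/atomic_sub() use plain stxr with no barrier. For reference, ACCESS_ONCE() is conventionally defined in <linux/compiler.h> (not part of this patch) as:

#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

/* so the new atomic_read(v) is equivalent to the old open-coded
 * (*(volatile int *)&(v)->counter), just spelled through the common helper. */
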
static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
{
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 89f41f7d27dd..c3065dbc4fa2 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -72,7 +72,6 @@ void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);
void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
-EXPORT_SYMBOL_GPL(arm_pm_restart);
/*
* This is our default idle handler.
@@ -154,6 +153,8 @@ void machine_restart(char *cmd)
/* Now call the architecture specific reboot code. */
if (arm_pm_restart)
arm_pm_restart(reboot_mode, cmd);
+ else
+ do_kernel_restart(cmd);
/*
* Whoops - the architecture was unable to reboot.
diff --git a/arch/avr32/include/asm/atomic.h b/arch/avr32/include/asm/atomic.h
index 0780f3f2415b..2d07ce1c5327 100644
--- a/arch/avr32/include/asm/atomic.h
+++ b/arch/avr32/include/asm/atomic.h
@@ -19,33 +19,46 @@
#define ATOMIC_INIT(i) { (i) }
-#define atomic_read(v) (*(volatile int *)&(v)->counter)
+#define atomic_read(v) ACCESS_ONCE((v)->counter)
#define atomic_set(v, i) (((v)->counter) = i)
+#define ATOMIC_OP_RETURN(op, asm_op, asm_con) \
+static inline int __atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ int result; \
+ \
+ asm volatile( \
+ "/* atomic_" #op "_return */\n" \
+ "1: ssrf 5\n" \
+ " ld.w %0, %2\n" \
+ " " #asm_op " %0, %3\n" \
+ " stcond %1, %0\n" \
+ " brne 1b" \
+ : "=&r" (result), "=o" (v->counter) \
+ : "m" (v->counter), #asm_con (i) \
+ : "cc"); \
+ \
+ return result; \
+}
+
+ATOMIC_OP_RETURN(sub, sub, rKs21)
+ATOMIC_OP_RETURN(add, add, r)
+
+#undef ATOMIC_OP_RETURN
+
/*
- * atomic_sub_return - subtract the atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
+ * The reason for using sub with the signed 21-bit immediate limit appears to
+ * be that it needs one register less than the add instruction, which instead
+ * takes two registers but can add full 32-bit values.
*
- * Atomically subtracts @i from @v. Returns the resulting value.
+ * Both instructions are 32-bit encodings; the 16-bit encodings are no use
+ * here because their immediate field is only 4 bits wide.
+ *
+ * sub 32-bit, type IV, takes a register and subtracts a 21-bit immediate.
+ * add 32-bit, type II, adds two register values together.
*/
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
- int result;
-
- asm volatile(
- "/* atomic_sub_return */\n"
- "1: ssrf 5\n"
- " ld.w %0, %2\n"
- " sub %0, %3\n"
- " stcond %1, %0\n"
- " brne 1b"
- : "=&r"(result), "=o"(v->counter)
- : "m"(v->counter), "rKs21"(i)
- : "cc");
-
- return result;
-}
+#define IS_21BIT_CONST(i) \
+ (__builtin_constant_p(i) && ((i) >= -1048575) && ((i) <= 1048576))
/*
* atomic_add_return - add integer to atomic variable
@@ -56,51 +69,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
*/
static inline int atomic_add_return(int i, atomic_t *v)
{
- int result;
-
- if (__builtin_constant_p(i) && (i >= -1048575) && (i <= 1048576))
- result = atomic_sub_return(-i, v);
- else
- asm volatile(
- "/* atomic_add_return */\n"
- "1: ssrf 5\n"
- " ld.w %0, %1\n"
- " add %0, %3\n"
- " stcond %2, %0\n"
- " brne 1b"
- : "=&r"(result), "=o"(v->counter)
- : "m"(v->counter), "r"(i)
- : "cc", "memory");
+ if (IS_21BIT_CONST(i))
+ return __atomic_sub_return(-i, v);
- return result;
+ return __atomic_add_return(i, v);
}
/*
- * atomic_sub_unless - sub unless the number is a given value
+ * atomic_sub_return - subtract the atomic variable
+ * @i: integer value to subtract
* @v: pointer of type atomic_t
- * @a: the amount to subtract from v...
- * @u: ...unless v is equal to u.
*
- * Atomically subtract @a from @v, so long as it was not @u.
- * Returns the old value of @v.
-*/
-static inline void atomic_sub_unless(atomic_t *v, int a, int u)
+ * Atomically subtracts @i from @v. Returns the resulting value.
+ */
+static inline int atomic_sub_return(int i, atomic_t *v)
{
- int tmp;
+ if (IS_21BIT_CONST(i))
+ return __atomic_sub_return(i, v);
- asm volatile(
- "/* atomic_sub_unless */\n"
- "1: ssrf 5\n"
- " ld.w %0, %2\n"
- " cp.w %0, %4\n"
- " breq 1f\n"
- " sub %0, %3\n"
- " stcond %1, %0\n"
- " brne 1b\n"
- "1:"
- : "=&r"(tmp), "=o"(v->counter)
- : "m"(v->counter), "rKs21"(a), "rKs21"(u)
- : "cc", "memory");
+ return __atomic_add_return(-i, v);
}
/*
@@ -116,9 +103,21 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
int tmp, old = atomic_read(v);
- if (__builtin_constant_p(a) && (a >= -1048575) && (a <= 1048576))
- atomic_sub_unless(v, -a, u);
- else {
+ if (IS_21BIT_CONST(a)) {
+ asm volatile(
+ "/* __atomic_sub_unless */\n"
+ "1: ssrf 5\n"
+ " ld.w %0, %2\n"
+ " cp.w %0, %4\n"
+ " breq 1f\n"
+ " sub %0, %3\n"
+ " stcond %1, %0\n"
+ " brne 1b\n"
+ "1:"
+ : "=&r"(tmp), "=o"(v->counter)
+ : "m"(v->counter), "rKs21"(-a), "rKs21"(u)
+ : "cc", "memory");
+ } else {
asm volatile(
"/* __atomic_add_unless */\n"
"1: ssrf 5\n"
@@ -137,6 +136,8 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
return old;
}
+#undef IS_21BIT_CONST
+
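
Taken together, the IS_21BIT_CONST() check makes both public entry points pick whichever private helper has the cheaper encoding. A sketch of how calls resolve under the wrappers above:

static void example(atomic_t *v, int n)
{
        atomic_add_return(4, v);  /* constant, in range -> __atomic_sub_return(-4, v) */
        atomic_sub_return(4, v);  /* constant, in range -> __atomic_sub_return(4, v)  */
        atomic_add_return(n, v);  /* not a constant     -> __atomic_add_return(n, v)  */
        atomic_sub_return(n, v);  /* not a constant     -> __atomic_add_return(-n, v) */
}
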
/*
* atomic_sub_if_positive - conditionally subtract integer from atomic variable
* @i: integer value to subtract
diff --git a/arch/cris/arch-v10/drivers/sync_serial.c b/arch/cris/arch-v10/drivers/sync_serial.c
index 29eb02ab3f25..0f3983241e60 100644
--- a/arch/cris/arch-v10/drivers/sync_serial.c
+++ b/arch/cris/arch-v10/drivers/sync_serial.c
@@ -1086,7 +1086,6 @@ static ssize_t sync_serial_write(struct file *file, const char *buf,
}
local_irq_restore(flags);
schedule();
- set_current_state(TASK_RUNNING);
remove_wait_queue(&port->out_wait_q, &wait);
if (signal_pending(current))
return -EINTR;
diff --git a/arch/cris/arch-v32/drivers/sync_serial.c b/arch/cris/arch-v32/drivers/sync_serial.c
index bbb806b68838..5a149134cfb5 100644
--- a/arch/cris/arch-v32/drivers/sync_serial.c
+++ b/arch/cris/arch-v32/drivers/sync_serial.c
@@ -1089,7 +1089,6 @@ static ssize_t sync_serial_write(struct file *file, const char *buf,
}
schedule();
- set_current_state(TASK_RUNNING);
remove_wait_queue(&port->out_wait_q, &wait);
if (signal_pending(current))
diff --git a/arch/cris/include/asm/atomic.h b/arch/cris/include/asm/atomic.h
index aa429baebaf9..279766a70664 100644
--- a/arch/cris/include/asm/atomic.h
+++ b/arch/cris/include/asm/atomic.h
@@ -17,48 +17,41 @@
#define ATOMIC_INIT(i) { (i) }
-#define atomic_read(v) (*(volatile int *)&(v)->counter)
+#define atomic_read(v) ACCESS_ONCE((v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))
/* These should be written in asm but we do it in C for now. */
-static inline void atomic_add(int i, volatile atomic_t *v)
-{
- unsigned long flags;
- cris_atomic_save(v, flags);
- v->counter += i;
- cris_atomic_restore(v, flags);
+#define ATOMIC_OP(op, c_op) \
+static inline void atomic_##op(int i, volatile atomic_t *v) \
+{ \
+ unsigned long flags; \
+ cris_atomic_save(v, flags); \
+ v->counter c_op i; \
+ cris_atomic_restore(v, flags); \
+} \
+
+#define ATOMIC_OP_RETURN(op, c_op) \
+static inline int atomic_##op##_return(int i, volatile atomic_t *v) \
+{ \
+ unsigned long flags; \
+ int retval; \
+ cris_atomic_save(v, flags); \
+ retval = (v->counter c_op i); \
+ cris_atomic_restore(v, flags); \
+ return retval; \
}
-static inline void atomic_sub(int i, volatile atomic_t *v)
-{
- unsigned long flags;
- cris_atomic_save(v, flags);
- v->counter -= i;
- cris_atomic_restore(v, flags);
-}
+#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)
-static inline int atomic_add_return(int i, volatile atomic_t *v)
-{
- unsigned long flags;
- int retval;
- cris_atomic_save(v, flags);
- retval = (v->counter += i);
- cris_atomic_restore(v, flags);
- return retval;
-}
+ATOMIC_OPS(add, +=)
+ATOMIC_OPS(sub, -=)
-#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
-static inline int atomic_sub_return(int i, volatile atomic_t *v)
-{
- unsigned long flags;
- int retval;
- cris_atomic_save(v, flags);
- retval = (v->counter -= i);
- cris_atomic_restore(v, flags);
- return retval;
-}
+#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
static inline int atomic_sub_and_test(int i, volatile atomic_t *v)
{
diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
index f6c3a1690101..102190a61d65 100644
--- a/arch/frv/include/asm/atomic.h
+++ b/arch/frv/include/asm/atomic.h
@@ -31,7 +31,7 @@
*/
#define ATOMIC_INIT(i) { (i) }
-#define atomic_read(v) (*(volatile int *)&(v)->counter)
+#define atomic_read(v) ACCESS_ONCE((v)->counter)
#define atomic_set(v, i) (((v)->counter) = (i))
#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
index de916b11bff5..93d07025f183 100644
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -94,41 +94,47 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return __oldval;
}
-static inline int atomic_add_return(int i, atomic_t *v)
-{
- int output;
-
- __asm__ __volatile__ (
- "1: %0 = memw_locked(%1);\n"
- " %0 = add(%0,%2);\n"
- " memw_locked(%1,P3)=%0;\n"
- " if !P3 jump 1b;\n"
- : "=&r" (output)
- : "r" (&v->counter), "r" (i)
- : "memory", "p3"
- );
- return output;
-
+#define ATOMIC_OP(op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ int output; \
+ \
+ __asm__ __volatile__ ( \
+ "1: %0 = memw_locked(%1);\n" \
+ " %0 = "#op "(%0,%2);\n" \
+ " memw_locked(%1,P3)=%0;\n" \
+ " if !P3 jump 1b;\n" \
+ : "=&r" (output) \
+ : "r" (&v->counter), "r" (i) \
+ : "memory", "p3" \
+ ); \
+} \
+
+#define ATOMIC_OP_RETURN(op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ int output; \
+ \
+ __asm__ __volatile__ ( \
+ "1: %0 = memw_locked(%1);\n" \
+ " %0 = "#op "(%0,%2);\n" \
+ " memw_locked(%1,P3)=%0;\n" \
+ " if !P3 jump 1b;\n" \
+ : "=&r" (output) \
+ : "r" (&v->counter), "r" (i) \
+ : "memory", "p3" \
+ ); \
+ return output; \
}
-#define atomic_add(i, v) atomic_add_return(i, (v))
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
- int output;
- __asm__ __volatile__ (
- "1: %0 = memw_locked(%1);\n"
- " %0 = sub(%0,%2);\n"
- " memw_locked(%1,P3)=%0\n"
- " if !P3 jump 1b;\n"
- : "=&r" (output)
- : "r" (&v->counter), "r" (i)
- : "memory", "p3"
- );
- return output;
-}
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
-#define atomic_sub(i, v) atomic_sub_return(i, (v))
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
/**
* __atomic_add_unless - add unless the number is a given value
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 0f8bf48dadf3..0bf03501fe5c 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -21,68 +21,100 @@
#define ATOMIC_INIT(i) { (i) }
#define ATOMIC64_INIT(i) { (i) }
-#define atomic_read(v) (*(volatile int *)&(v)->counter)
-#define atomic64_read(v) (*(volatile long *)&(v)->counter)
+#define atomic_read(v) ACCESS_ONCE((v)->counter)
+#define atomic64_read(v) ACCESS_ONCE((v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))
#define atomic64_set(v,i) (((v)->counter) = (i))
-static __inline__ int
-ia64_atomic_add (int i, atomic_t *v)
-{
- __s32 old, new;
- CMPXCHG_BUGCHECK_DECL
-
- do {
- CMPXCHG_BUGCHECK(v);
- old = atomic_read(v);
- new = old + i;
- } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
- return new;
+#define ATOMIC_OP(op, c_op) \
+static __inline__ int \
+ia64_atomic_##op (int i, atomic_t *v) \
+{ \
+ __s32 old, new; \
+ CMPXCHG_BUGCHECK_DECL \
+ \
+ do { \
+ CMPXCHG_BUGCHECK(v); \
+ old = atomic_read(v); \
+ new = old c_op i; \
+ } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
+ return new; \
}
-static __inline__ long
-ia64_atomic64_add (__s64 i, atomic64_t *v)
-{
- __s64 old, new;
- CMPXCHG_BUGCHECK_DECL
-
- do {
- CMPXCHG_BUGCHECK(v);
- old = atomic64_read(v);
- new = old + i;
- } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
- return new;
-}
+ATOMIC_OP(add, +)
+ATOMIC_OP(sub, -)
-static __inline__ int
-ia64_atomic_sub (int i, atomic_t *v)
-{
- __s32 old, new;
- CMPXCHG_BUGCHECK_DECL
-
- do {
- CMPXCHG_BUGCHECK(v);
- old = atomic_read(v);
- new = old - i;
- } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
- return new;
-}
+#undef ATOMIC_OP
-static __inline__ long
-ia64_atomic64_sub (__s64 i, atomic64_t *v)
-{
- __s64 old, new;
- CMPXCHG_BUGCHECK_DECL
-
- do {
- CMPXCHG_BUGCHECK(v);
- old = atomic64_read(v);
- new = old - i;
- } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
- return new;
+#define atomic_add_return(i,v) \
+({ \
+ int __ia64_aar_i = (i); \
+ (__builtin_constant_p(i) \
+ && ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \
+ || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \
+ || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \
+ || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
+ ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
+ : ia64_atomic_add(__ia64_aar_i, v); \
+})
+
+#define atomic_sub_return(i,v) \
+({ \
+ int __ia64_asr_i = (i); \
+ (__builtin_constant_p(i) \
+ && ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \
+ || (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \
+ || (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \
+ || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
+ ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
+ : ia64_atomic_sub(__ia64_asr_i, v); \
+})
+
+#define ATOMIC64_OP(op, c_op) \
+static __inline__ long \
+ia64_atomic64_##op (__s64 i, atomic64_t *v) \
+{ \
+ __s64 old, new; \
+ CMPXCHG_BUGCHECK_DECL \
+ \
+ do { \
+ CMPXCHG_BUGCHECK(v); \
+ old = atomic64_read(v); \
+ new = old c_op i; \
+ } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
+ return new; \
}
+ATOMIC64_OP(add, +)
+ATOMIC64_OP(sub, -)
+
+#undef ATOMIC64_OP
+
+#define atomic64_add_return(i,v) \
+({ \
+ long __ia64_aar_i = (i); \
+ (__builtin_constant_p(i) \
+ && ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \
+ || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \
+ || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \
+ || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
+ ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
+ : ia64_atomic64_add(__ia64_aar_i, v); \
+})
+
+#define atomic64_sub_return(i,v) \
+({ \
+ long __ia64_asr_i = (i); \
+ (__builtin_constant_p(i) \
+ && ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \
+ || (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \
+ || (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \
+ || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
+ ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
+ : ia64_atomic64_sub(__ia64_asr_i, v); \
+})
+
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
@@ -123,30 +155,6 @@ static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-#define atomic_add_return(i,v) \
-({ \
- int __ia64_aar_i = (i); \
- (__builtin_constant_p(i) \
- && ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \
- || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \
- || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \
- || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
- ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
- : ia64_atomic_add(__ia64_aar_i, v); \
-})
-
-#define atomic64_add_return(i,v) \
-({ \
- long __ia64_aar_i = (i); \
- (__builtin_constant_p(i) \
- && ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \
- || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \
- || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \
- || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
- ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
- : ia64_atomic64_add(__ia64_aar_i, v); \
-})
-
/*
* Atomically add I to V and return TRUE if the resulting value is
* negative.
@@ -163,30 +171,6 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
return atomic64_add_return(i, v) < 0;
}
-#define atomic_sub_return(i,v) \
-({ \
- int __ia64_asr_i = (i); \
- (__builtin_constant_p(i) \
- && ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \
- || (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \
- || (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \
- || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
- ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
- : ia64_atomic_sub(__ia64_asr_i, v); \
-})
-
-#define atomic64_sub_return(i,v) \
-({ \
- long __ia64_asr_i = (i); \
- (__builtin_constant_p(i) \
- && ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \
- || (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \
- || (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \
- || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
- ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
- : ia64_atomic64_sub(__ia64_asr_i, v); \
-})
-
#define atomic_dec_return(v) atomic_sub_return(1, (v))
#define atomic_inc_return(v) atomic_add_return(1, (v))
#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
@@ -199,13 +183,13 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)
-#define atomic_add(i,v) atomic_add_return((i), (v))
-#define atomic_sub(i,v) atomic_sub_return((i), (v))
+#define atomic_add(i,v) (void)atomic_add_return((i), (v))
+#define atomic_sub(i,v) (void)atomic_sub_return((i), (v))
#define atomic_inc(v) atomic_add(1, (v))
#define atomic_dec(v) atomic_sub(1, (v))
-#define atomic64_add(i,v) atomic64_add_return((i), (v))
-#define atomic64_sub(i,v) atomic64_sub_return((i), (v))
+#define atomic64_add(i,v) (void)atomic64_add_return((i), (v))
+#define atomic64_sub(i,v) (void)atomic64_sub_return((i), (v))
#define atomic64_inc(v) atomic64_add(1, (v))
#define atomic64_dec(v) atomic64_sub(1, (v))
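
The ia64 wrappers keep the long-standing ia64_fetch_and_add() fast path: when the delta is a compile-time constant the instruction can encode (±1, ±4, ±8, ±16), the cmpxchg loop is bypassed entirely. A sketch of how calls resolve under the macros above:

static void example(atomic_t *v, int n)
{
        atomic_add_return(8, v);  /* constant in {±1,±4,±8,±16} -> ia64_fetch_and_add(8, &v->counter)  */
        atomic_sub_return(4, v);  /* likewise                   -> ia64_fetch_and_add(-4, &v->counter) */
        atomic_add_return(5, v);  /* any other value            -> ia64_atomic_add(5, v) cmpxchg loop  */
        atomic_add_return(n, v);  /* not a constant             -> ia64_atomic_add(n, v) cmpxchg loop  */
}
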
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index c7367130ab14..ce53c50d0ba4 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -19,7 +19,6 @@
#include <asm/ptrace.h>
#include <asm/ustack.h>
-#define __ARCH_WANT_UNLOCKED_CTXSW
#define ARCH_HAS_PREFETCH_SWITCH_STACK
#define IA64_NUM_PHYS_STACK_REG 96
diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h
index 8ad0ed4182a5..31bb74adba08 100644
--- a/arch/m32r/include/asm/atomic.h
+++ b/arch/m32r/include/asm/atomic.h
@@ -28,7 +28,7 @@
*
* Atomically reads the value of @v.
*/
-#define atomic_read(v) (*(volatile int *)&(v)->counter)
+#define atomic_read(v) ACCESS_ONCE((v)->counter)
/**
* atomic_set - set atomic variable
@@ -39,85 +39,64 @@
*/
#define atomic_set(v,i) (((v)->counter) = (i))
-/**
- * atomic_add_return - add integer to atomic variable and return it
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v and return (@i + @v).
- */
-static __inline__ int atomic_add_return(int i, atomic_t *v)
-{
- unsigned long flags;
- int result;
-
- local_irq_save(flags);
- __asm__ __volatile__ (
- "# atomic_add_return \n\t"
- DCACHE_CLEAR("%0", "r4", "%1")
- M32R_LOCK" %0, @%1; \n\t"
- "add %0, %2; \n\t"
- M32R_UNLOCK" %0, @%1; \n\t"
- : "=&r" (result)
- : "r" (&v->counter), "r" (i)
- : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
- , "r4"
-#endif /* CONFIG_CHIP_M32700_TS1 */
- );
- local_irq_restore(flags);
-
- return result;
+#define __ATOMIC_CLOBBER , "r4"
+#else
+#define __ATOMIC_CLOBBER
+#endif
+
+#define ATOMIC_OP(op) \
+static __inline__ void atomic_##op(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ int result; \
+ \
+ local_irq_save(flags); \
+ __asm__ __volatile__ ( \
+ "# atomic_" #op " \n\t" \
+ DCACHE_CLEAR("%0", "r4", "%1") \
+ M32R_LOCK" %0, @%1; \n\t" \
+ #op " %0, %2; \n\t" \
+ M32R_UNLOCK" %0, @%1; \n\t" \
+ : "=&r" (result) \
+ : "r" (&v->counter), "r" (i) \
+ : "memory" \
+ __ATOMIC_CLOBBER \
+ ); \
+ local_irq_restore(flags); \
+} \
+
+#define ATOMIC_OP_RETURN(op) \
+static __inline__ int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ int result; \
+ \
+ local_irq_save(flags); \
+ __asm__ __volatile__ ( \
+ "# atomic_" #op "_return \n\t" \
+ DCACHE_CLEAR("%0", "r4", "%1") \
+ M32R_LOCK" %0, @%1; \n\t" \
+ #op " %0, %2; \n\t" \
+ M32R_UNLOCK" %0, @%1; \n\t" \
+ : "=&r" (result) \
+ : "r" (&v->counter), "r" (i) \
+ : "memory" \
+ __ATOMIC_CLOBBER \
+ ); \
+ local_irq_restore(flags); \
+ \
+ return result; \
}
-/**
- * atomic_sub_return - subtract integer from atomic variable and return it
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and return (@v - @i).
- */
-static __inline__ int atomic_sub_return(int i, atomic_t *v)
-{
- unsigned long flags;
- int result;
-
- local_irq_save(flags);
- __asm__ __volatile__ (
- "# atomic_sub_return \n\t"
- DCACHE_CLEAR("%0", "r4", "%1")
- M32R_LOCK" %0, @%1; \n\t"
- "sub %0, %2; \n\t"
- M32R_UNLOCK" %0, @%1; \n\t"
- : "=&r" (result)
- : "r" (&v->counter), "r" (i)
- : "memory"
-#ifdef CONFIG_CHIP_M32700_TS1
- , "r4"
-#endif /* CONFIG_CHIP_M32700_TS1 */
- );
- local_irq_restore(flags);
-
- return result;
-}
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
-/**
- * atomic_add - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v.
- */
-#define atomic_add(i,v) ((void) atomic_add_return((i), (v)))
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
-/**
- * atomic_sub - subtract the atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v.
- */
-#define atomic_sub(i,v) ((void) atomic_sub_return((i), (v)))
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
/**
* atomic_sub_and_test - subtract value from variable and test result
@@ -151,9 +130,7 @@ static __inline__ int atomic_inc_return(atomic_t *v)
: "=&r" (result)
: "r" (&v->counter)
: "memory"
-#ifdef CONFIG_CHIP_M32700_TS1
- , "r4"
-#endif /* CONFIG_CHIP_M32700_TS1 */
+ __ATOMIC_CLOBBER
);
local_irq_restore(flags);
@@ -181,9 +158,7 @@ static __inline__ int atomic_dec_return(atomic_t *v)
: "=&r" (result)
: "r" (&v->counter)
: "memory"
-#ifdef CONFIG_CHIP_M32700_TS1
- , "r4"
-#endif /* CONFIG_CHIP_M32700_TS1 */
+ __ATOMIC_CLOBBER
);
local_irq_restore(flags);
@@ -280,9 +255,7 @@ static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *addr)
: "=&r" (tmp)
: "r" (addr), "r" (~mask)
: "memory"
-#ifdef CONFIG_CHIP_M32700_TS1
- , "r5"
-#endif /* CONFIG_CHIP_M32700_TS1 */
+ __ATOMIC_CLOBBER
);
local_irq_restore(flags);
}
@@ -302,9 +275,7 @@ static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *addr)
: "=&r" (tmp)
: "r" (addr), "r" (mask)
: "memory"
-#ifdef CONFIG_CHIP_M32700_TS1
- , "r5"
-#endif /* CONFIG_CHIP_M32700_TS1 */
+ __ATOMIC_CLOBBER
);
local_irq_restore(flags);
}
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index 55695212a2ae..e85f047fb072 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -17,7 +17,7 @@
#define ATOMIC_INIT(i) { (i) }
-#define atomic_read(v) (*(volatile int *)&(v)->counter)
+#define atomic_read(v) ACCESS_ONCE((v)->counter)
#define atomic_set(v, i) (((v)->counter) = i)
/*
@@ -30,16 +30,57 @@
#define ASM_DI "di"
#endif
-static inline void atomic_add(int i, atomic_t *v)
-{
- __asm__ __volatile__("addl %1,%0" : "+m" (*v) : ASM_DI (i));
+#define ATOMIC_OP(op, c_op, asm_op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ __asm__ __volatile__(#asm_op "l %1,%0" : "+m" (*v) : ASM_DI (i));\
+} \
+
+#ifdef CONFIG_RMW_INSNS
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ int t, tmp; \
+ \
+ __asm__ __volatile__( \
+ "1: movel %2,%1\n" \
+ " " #asm_op "l %3,%1\n" \
+ " casl %2,%1,%0\n" \
+ " jne 1b" \
+ : "+m" (*v), "=&d" (t), "=&d" (tmp) \
+ : "g" (i), "2" (atomic_read(v))); \
+ return t; \
}
-static inline void atomic_sub(int i, atomic_t *v)
-{
- __asm__ __volatile__("subl %1,%0" : "+m" (*v) : ASM_DI (i));
+#else
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
+static inline int atomic_##op##_return(int i, atomic_t * v) \
+{ \
+ unsigned long flags; \
+ int t; \
+ \
+ local_irq_save(flags); \
+ t = (v->counter c_op i); \
+ local_irq_restore(flags); \
+ \
+ return t; \
}
+#endif /* CONFIG_RMW_INSNS */
+
+#define ATOMIC_OPS(op, c_op, asm_op) \
+ ATOMIC_OP(op, c_op, asm_op) \
+ ATOMIC_OP_RETURN(op, c_op, asm_op)
+
+ATOMIC_OPS(add, +=, add)
+ATOMIC_OPS(sub, -=, sub)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
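
Under CONFIG_RMW_INSNS the generated *_return functions are the classic compare-and-swap retry loop built on casl; the non-RMW fallback simply updates the counter with interrupts disabled. In portable C the casl loop amounts to roughly this (a sketch, not the generated code):

static inline int example_add_return(int i, atomic_t *v)
{
        int old, new;

        do {
                old = atomic_read(v);
                new = old + i;
        } while (atomic_cmpxchg(v, old, new) != old);

        return new;
}
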
static inline void atomic_inc(atomic_t *v)
{
__asm__ __volatile__("addql #1,%0" : "+m" (*v));
@@ -76,67 +117,11 @@ static inline int atomic_inc_and_test(atomic_t *v)
#ifdef CONFIG_RMW_INSNS
-static inline int atomic_add_return(int i, atomic_t *v)
-{
- int t, tmp;
-
- __asm__ __volatile__(
- "1: movel %2,%1\n"
- " addl %3,%1\n"
- " casl %2,%1,%0\n"
- " jne 1b"
- : "+m" (*v), "=&d" (t), "=&d" (tmp)
- : "g" (i), "2" (atomic_read(v)));
- return t;
-}
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
- int t, tmp;
-
- __asm__ __volatile__(
- "1: movel %2,%1\n"
- " subl %3,%1\n"
- " casl %2,%1,%0\n"
- " jne 1b"
- : "+m" (*v), "=&d" (t), "=&d" (tmp)
- : "g" (i), "2" (atomic_read(v)));
- return t;
-}
-
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#else /* !CONFIG_RMW_INSNS */
-static inline int atomic_add_return(int i, atomic_t * v)
-{
- unsigned long flags;
- int t;
-
- local_irq_save(flags);
- t = atomic_read(v);
- t += i;
- atomic_set(v, t);
- local_irq_restore(flags);
-
- return t;
-}
-
-static inline int atomic_sub_return(int i, atomic_t * v)
-{
- unsigned long flags;
- int t;
-
- local_irq_save(flags);
- t = atomic_read(v);
- t -= i;
- atomic_set(v, t);
- local_irq_restore(flags);
-
- return t;
-}
-
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
unsigned long flags;
diff --git a/arch/metag/include/asm/atomic_lnkget.h b/arch/metag/include/asm/atomic_lnkget.h
index d2e60a18986c..948d8688643c 100644
--- a/arch/metag/include/asm/atomic_lnkget.h
+++ b/arch/metag/include/asm/atomic_lnkget.h
@@ -27,85 +27,56 @@ static inline int atomic_read(const atomic_t *v)
return temp;
}
-static inline void atomic_add(int i, atomic_t *v)
-{
- int temp;
-
- asm volatile (
- "1: LNKGETD %0, [%1]\n"
- " ADD %0, %0, %2\n"
- " LNKSETD [%1], %0\n"
- " DEFR %0, TXSTAT\n"
- " ANDT %0, %0, #HI(0x3f000000)\n"
- " CMPT %0, #HI(0x02000000)\n"
- " BNZ 1b\n"
- : "=&d" (temp)
- : "da" (&v->counter), "bd" (i)
- : "cc");
+#define ATOMIC_OP(op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ int temp; \
+ \
+ asm volatile ( \
+ "1: LNKGETD %0, [%1]\n" \
+ " " #op " %0, %0, %2\n" \
+ " LNKSETD [%1], %0\n" \
+ " DEFR %0, TXSTAT\n" \
+ " ANDT %0, %0, #HI(0x3f000000)\n" \
+ " CMPT %0, #HI(0x02000000)\n" \
+ " BNZ 1b\n" \
+ : "=&d" (temp) \
+ : "da" (&v->counter), "bd" (i) \
+ : "cc"); \
+} \
+
+#define ATOMIC_OP_RETURN(op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ int result, temp; \
+ \
+ smp_mb(); \
+ \
+ asm volatile ( \
+ "1: LNKGETD %1, [%2]\n" \
+ " " #op " %1, %1, %3\n" \
+ " LNKSETD [%2], %1\n" \
+ " DEFR %0, TXSTAT\n" \
+ " ANDT %0, %0, #HI(0x3f000000)\n" \
+ " CMPT %0, #HI(0x02000000)\n" \
+ " BNZ 1b\n" \
+ : "=&d" (temp), "=&da" (result) \
+ : "da" (&v->counter), "bd" (i) \
+ : "cc"); \
+ \
+ smp_mb(); \
+ \
+ return result; \
}
-static inline void atomic_sub(int i, atomic_t *v)
-{
- int temp;
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
- asm volatile (
- "1: LNKGETD %0, [%1]\n"
- " SUB %0, %0, %2\n"
- " LNKSETD [%1], %0\n"
- " DEFR %0, TXSTAT\n"
- " ANDT %0, %0, #HI(0x3f000000)\n"
- " CMPT %0, #HI(0x02000000)\n"
- " BNZ 1b\n"
- : "=&d" (temp)
- : "da" (&v->counter), "bd" (i)
- : "cc");
-}
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
-static inline int atomic_add_return(int i, atomic_t *v)
-{
- int result, temp;
-
- smp_mb();
-
- asm volatile (
- "1: LNKGETD %1, [%2]\n"
- " ADD %1, %1, %3\n"
- " LNKSETD [%2], %1\n"
- " DEFR %0, TXSTAT\n"
- " ANDT %0, %0, #HI(0x3f000000)\n"
- " CMPT %0, #HI(0x02000000)\n"
- " BNZ 1b\n"
- : "=&d" (temp), "=&da" (result)
- : "da" (&v->counter), "bd" (i)
- : "cc");
-
- smp_mb();
-
- return result;
-}
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
- int result, temp;
-
- smp_mb();
-
- asm volatile (
- "1: LNKGETD %1, [%2]\n"
- " SUB %1, %1, %3\n"
- " LNKSETD [%2], %1\n"
- " DEFR %0, TXSTAT\n"
- " ANDT %0, %0, #HI(0x3f000000)\n"
- " CMPT %0, #HI(0x02000000)\n"
- " BNZ 1b\n"
- : "=&d" (temp), "=&da" (result)
- : "da" (&v->counter), "bd" (i)
- : "cc");
-
- smp_mb();
-
- return result;
-}
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
diff --git a/arch/metag/include/asm/atomic_lock1.h b/arch/metag/include/asm/atomic_lock1.h
index e578955e674b..f5d5898c1020 100644
--- a/arch/metag/include/asm/atomic_lock1.h
+++ b/arch/metag/include/asm/atomic_lock1.h
@@ -37,55 +37,41 @@ static inline int atomic_set(atomic_t *v, int i)
return i;
}
-static inline void atomic_add(int i, atomic_t *v)
-{
- unsigned long flags;
-
- __global_lock1(flags);
- fence();
- v->counter += i;
- __global_unlock1(flags);
+#define ATOMIC_OP(op, c_op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ \
+ __global_lock1(flags); \
+ fence(); \
+ v->counter c_op i; \
+ __global_unlock1(flags); \
+} \
+
+#define ATOMIC_OP_RETURN(op, c_op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ unsigned long result; \
+ unsigned long flags; \
+ \
+ __global_lock1(flags); \
+ result = v->counter; \
+ result c_op i; \
+ fence(); \
+ v->counter = result; \
+ __global_unlock1(flags); \
+ \
+ return result; \
}
-static inline void atomic_sub(int i, atomic_t *v)
-{
- unsigned long flags;
+#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)
- __global_lock1(flags);
- fence();
- v->counter -= i;
- __global_unlock1(flags);
-}
-
-static inline int atomic_add_return(int i, atomic_t *v)
-{
- unsigned long result;
- unsigned long flags;
+ATOMIC_OPS(add, +=)
+ATOMIC_OPS(sub, -=)
- __global_lock1(flags);
- result = v->counter;
- result += i;
- fence();
- v->counter = result;
- __global_unlock1(flags);
-
- return result;
-}
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
- unsigned long result;
- unsigned long flags;
-
- __global_lock1(flags);
- result = v->counter;
- result -= i;
- fence();
- v->counter = result;
- __global_unlock1(flags);
-
- return result;
-}
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 37b2befe651a..6dd6bfc607e9 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -29,7 +29,7 @@
*
* Atomically reads the value of @v.
*/
-#define atomic_read(v) (*(volatile int *)&(v)->counter)
+#define atomic_read(v) ACCESS_ONCE((v)->counter)
/*
* atomic_set - set atomic variable
@@ -40,195 +40,103 @@
*/
#define atomic_set(v, i) ((v)->counter = (i))
-/*
- * atomic_add - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v.
- */
-static __inline__ void atomic_add(int i, atomic_t * v)
-{
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- int temp;
-
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- "1: ll %0, %1 # atomic_add \n"
- " addu %0, %2 \n"
- " sc %0, %1 \n"
- " beqzl %0, 1b \n"
- " .set mips0 \n"
- : "=&r" (temp), "+m" (v->counter)
- : "Ir" (i));
- } else if (kernel_uses_llsc) {
- int temp;
-
- do {
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- " ll %0, %1 # atomic_add \n"
- " addu %0, %2 \n"
- " sc %0, %1 \n"
- " .set mips0 \n"
- : "=&r" (temp), "+m" (v->counter)
- : "Ir" (i));
- } while (unlikely(!temp));
- } else {
- unsigned long flags;
-
- raw_local_irq_save(flags);
- v->counter += i;
- raw_local_irq_restore(flags);
- }
-}
-
-/*
- * atomic_sub - subtract the atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v.
- */
-static __inline__ void atomic_sub(int i, atomic_t * v)
-{
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- int temp;
-
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- "1: ll %0, %1 # atomic_sub \n"
- " subu %0, %2 \n"
- " sc %0, %1 \n"
- " beqzl %0, 1b \n"
- " .set mips0 \n"
- : "=&r" (temp), "+m" (v->counter)
- : "Ir" (i));
- } else if (kernel_uses_llsc) {
- int temp;
-
- do {
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- " ll %0, %1 # atomic_sub \n"
- " subu %0, %2 \n"
- " sc %0, %1 \n"
- " .set mips0 \n"
- : "=&r" (temp), "+m" (v->counter)
- : "Ir" (i));
- } while (unlikely(!temp));
- } else {
- unsigned long flags;
-
- raw_local_irq_save(flags);
- v->counter -= i;
- raw_local_irq_restore(flags);
- }
-}
-
-/*
- * Same as above, but return the result value
- */
-static __inline__ int atomic_add_return(int i, atomic_t * v)
-{
- int result;
-
- smp_mb__before_llsc();
-
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- int temp;
-
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- "1: ll %1, %2 # atomic_add_return \n"
- " addu %0, %1, %3 \n"
- " sc %0, %2 \n"
- " beqzl %0, 1b \n"
- " addu %0, %1, %3 \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "+m" (v->counter)
- : "Ir" (i));
- } else if (kernel_uses_llsc) {
- int temp;
-
- do {
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- " ll %1, %2 # atomic_add_return \n"
- " addu %0, %1, %3 \n"
- " sc %0, %2 \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "+m" (v->counter)
- : "Ir" (i));
- } while (unlikely(!result));
-
- result = temp + i;
- } else {
- unsigned long flags;
-
- raw_local_irq_save(flags);
- result = v->counter;
- result += i;
- v->counter = result;
- raw_local_irq_restore(flags);
- }
-
- smp_llsc_mb();
-
- return result;
+#define ATOMIC_OP(op, c_op, asm_op) \
+static __inline__ void atomic_##op(int i, atomic_t * v) \
+{ \
+ if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ int temp; \
+ \
+ __asm__ __volatile__( \
+ " .set arch=r4000 \n" \
+ "1: ll %0, %1 # atomic_" #op " \n" \
+ " " #asm_op " %0, %2 \n" \
+ " sc %0, %1 \n" \
+ " beqzl %0, 1b \n" \
+ " .set mips0 \n" \
+ : "=&r" (temp), "+m" (v->counter) \
+ : "Ir" (i)); \
+ } else if (kernel_uses_llsc) { \
+ int temp; \
+ \
+ do { \
+ __asm__ __volatile__( \
+ " .set arch=r4000 \n" \
+ " ll %0, %1 # atomic_" #op "\n" \
+ " " #asm_op " %0, %2 \n" \
+ " sc %0, %1 \n" \
+ " .set mips0 \n" \
+ : "=&r" (temp), "+m" (v->counter) \
+ : "Ir" (i)); \
+ } while (unlikely(!temp)); \
+ } else { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ v->counter c_op i; \
+ raw_local_irq_restore(flags); \
+ } \
+} \
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
+static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
+{ \
+ int result; \
+ \
+ smp_mb__before_llsc(); \
+ \
+ if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ int temp; \
+ \
+ __asm__ __volatile__( \
+ " .set arch=r4000 \n" \
+ "1: ll %1, %2 # atomic_" #op "_return \n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " sc %0, %2 \n" \
+ " beqzl %0, 1b \n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " .set mips0 \n" \
+ : "=&r" (result), "=&r" (temp), "+m" (v->counter) \
+ : "Ir" (i)); \
+ } else if (kernel_uses_llsc) { \
+ int temp; \
+ \
+ do { \
+ __asm__ __volatile__( \
+ " .set arch=r4000 \n" \
+ " ll %1, %2 # atomic_" #op "_return \n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " sc %0, %2 \n" \
+ " .set mips0 \n" \
+ : "=&r" (result), "=&r" (temp), "+m" (v->counter) \
+ : "Ir" (i)); \
+ } while (unlikely(!result)); \
+ \
+ result = temp; result c_op i; \
+ } else { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ result = v->counter; \
+ result c_op i; \
+ v->counter = result; \
+ raw_local_irq_restore(flags); \
+ } \
+ \
+ smp_llsc_mb(); \
+ \
+ return result; \
}
-static __inline__ int atomic_sub_return(int i, atomic_t * v)
-{
- int result;
+#define ATOMIC_OPS(op, c_op, asm_op) \
+ ATOMIC_OP(op, c_op, asm_op) \
+ ATOMIC_OP_RETURN(op, c_op, asm_op)
- smp_mb__before_llsc();
+ATOMIC_OPS(add, +=, addu)
+ATOMIC_OPS(sub, -=, subu)
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- int temp;
-
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- "1: ll %1, %2 # atomic_sub_return \n"
- " subu %0, %1, %3 \n"
- " sc %0, %2 \n"
- " beqzl %0, 1b \n"
- " subu %0, %1, %3 \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter)
- : "memory");
-
- result = temp - i;
- } else if (kernel_uses_llsc) {
- int temp;
-
- do {
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- " ll %1, %2 # atomic_sub_return \n"
- " subu %0, %1, %3 \n"
- " sc %0, %2 \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "+m" (v->counter)
- : "Ir" (i));
- } while (unlikely(!result));
-
- result = temp - i;
- } else {
- unsigned long flags;
-
- raw_local_irq_save(flags);
- result = v->counter;
- result -= i;
- v->counter = result;
- raw_local_irq_restore(flags);
- }
-
- smp_llsc_mb();
-
- return result;
-}
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
/*
* atomic_sub_if_positive - conditionally subtract integer from atomic variable
@@ -398,7 +306,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
* @v: pointer of type atomic64_t
*
*/
-#define atomic64_read(v) (*(volatile long *)&(v)->counter)
+#define atomic64_read(v) ACCESS_ONCE((v)->counter)
/*
* atomic64_set - set atomic variable
@@ -407,195 +315,104 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
*/
#define atomic64_set(v, i) ((v)->counter = (i))
-/*
- * atomic64_add - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic64_t
- *
- * Atomically adds @i to @v.
- */
-static __inline__ void atomic64_add(long i, atomic64_t * v)
-{
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- long temp;
-
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- "1: lld %0, %1 # atomic64_add \n"
- " daddu %0, %2 \n"
- " scd %0, %1 \n"
- " beqzl %0, 1b \n"
- " .set mips0 \n"
- : "=&r" (temp), "+m" (v->counter)
- : "Ir" (i));
- } else if (kernel_uses_llsc) {
- long temp;
-
- do {
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- " lld %0, %1 # atomic64_add \n"
- " daddu %0, %2 \n"
- " scd %0, %1 \n"
- " .set mips0 \n"
- : "=&r" (temp), "+m" (v->counter)
- : "Ir" (i));
- } while (unlikely(!temp));
- } else {
- unsigned long flags;
-
- raw_local_irq_save(flags);
- v->counter += i;
- raw_local_irq_restore(flags);
- }
+#define ATOMIC64_OP(op, c_op, asm_op) \
+static __inline__ void atomic64_##op(long i, atomic64_t * v) \
+{ \
+ if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ long temp; \
+ \
+ __asm__ __volatile__( \
+ " .set arch=r4000 \n" \
+ "1: lld %0, %1 # atomic64_" #op " \n" \
+ " " #asm_op " %0, %2 \n" \
+ " scd %0, %1 \n" \
+ " beqzl %0, 1b \n" \
+ " .set mips0 \n" \
+ : "=&r" (temp), "+m" (v->counter) \
+ : "Ir" (i)); \
+ } else if (kernel_uses_llsc) { \
+ long temp; \
+ \
+ do { \
+ __asm__ __volatile__( \
+ " .set arch=r4000 \n" \
+ " lld %0, %1 # atomic64_" #op "\n" \
+ " " #asm_op " %0, %2 \n" \
+ " scd %0, %1 \n" \
+ " .set mips0 \n" \
+ : "=&r" (temp), "+m" (v->counter) \
+ : "Ir" (i)); \
+ } while (unlikely(!temp)); \
+ } else { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ v->counter c_op i; \
+ raw_local_irq_restore(flags); \
+ } \
+} \
+
+#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
+static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
+{ \
+ long result; \
+ \
+ smp_mb__before_llsc(); \
+ \
+ if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ long temp; \
+ \
+ __asm__ __volatile__( \
+ " .set arch=r4000 \n" \
+ "1: lld %1, %2 # atomic64_" #op "_return\n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " scd %0, %2 \n" \
+ " beqzl %0, 1b \n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " .set mips0 \n" \
+ : "=&r" (result), "=&r" (temp), "+m" (v->counter) \
+ : "Ir" (i)); \
+ } else if (kernel_uses_llsc) { \
+ long temp; \
+ \
+ do { \
+ __asm__ __volatile__( \
+ " .set arch=r4000 \n" \
+ " lld %1, %2 # atomic64_" #op "_return\n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " scd %0, %2 \n" \
+ " .set mips0 \n" \
+ : "=&r" (result), "=&r" (temp), "=m" (v->counter) \
+ : "Ir" (i), "m" (v->counter) \
+ : "memory"); \
+ } while (unlikely(!result)); \
+ \
+ result = temp; result c_op i; \
+ } else { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ result = v->counter; \
+ result c_op i; \
+ v->counter = result; \
+ raw_local_irq_restore(flags); \
+ } \
+ \
+ smp_llsc_mb(); \
+ \
+ return result; \
}
-/*
- * atomic64_sub - subtract the atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic64_t
- *
- * Atomically subtracts @i from @v.
- */
-static __inline__ void atomic64_sub(long i, atomic64_t * v)
-{
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- long temp;
-
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- "1: lld %0, %1 # atomic64_sub \n"
- " dsubu %0, %2 \n"
- " scd %0, %1 \n"
- " beqzl %0, 1b \n"
- " .set mips0 \n"
- : "=&r" (temp), "+m" (v->counter)
- : "Ir" (i));
- } else if (kernel_uses_llsc) {
- long temp;
-
- do {
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- " lld %0, %1 # atomic64_sub \n"
- " dsubu %0, %2 \n"
- " scd %0, %1 \n"
- " .set mips0 \n"
- : "=&r" (temp), "+m" (v->counter)
- : "Ir" (i));
- } while (unlikely(!temp));
- } else {
- unsigned long flags;
-
- raw_local_irq_save(flags);
- v->counter -= i;
- raw_local_irq_restore(flags);
- }
-}
-
-/*
- * Same as above, but return the result value
- */
-static __inline__ long atomic64_add_return(long i, atomic64_t * v)
-{
- long result;
+#define ATOMIC64_OPS(op, c_op, asm_op) \
+ ATOMIC64_OP(op, c_op, asm_op) \
+ ATOMIC64_OP_RETURN(op, c_op, asm_op)
- smp_mb__before_llsc();
+ATOMIC64_OPS(add, +=, daddu)
+ATOMIC64_OPS(sub, -=, dsubu)
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- long temp;
-
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- "1: lld %1, %2 # atomic64_add_return \n"
- " daddu %0, %1, %3 \n"
- " scd %0, %2 \n"
- " beqzl %0, 1b \n"
- " daddu %0, %1, %3 \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "+m" (v->counter)
- : "Ir" (i));
- } else if (kernel_uses_llsc) {
- long temp;
-
- do {
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- " lld %1, %2 # atomic64_add_return \n"
- " daddu %0, %1, %3 \n"
- " scd %0, %2 \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter)
- : "memory");
- } while (unlikely(!result));
-
- result = temp + i;
- } else {
- unsigned long flags;
-
- raw_local_irq_save(flags);
- result = v->counter;
- result += i;
- v->counter = result;
- raw_local_irq_restore(flags);
- }
-
- smp_llsc_mb();
-
- return result;
-}
-
-static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
-{
- long result;
-
- smp_mb__before_llsc();
-
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- long temp;
-
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- "1: lld %1, %2 # atomic64_sub_return \n"
- " dsubu %0, %1, %3 \n"
- " scd %0, %2 \n"
- " beqzl %0, 1b \n"
- " dsubu %0, %1, %3 \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter)
- : "memory");
- } else if (kernel_uses_llsc) {
- long temp;
-
- do {
- __asm__ __volatile__(
- " .set arch=r4000 \n"
- " lld %1, %2 # atomic64_sub_return \n"
- " dsubu %0, %1, %3 \n"
- " scd %0, %2 \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter)
- : "memory");
- } while (unlikely(!result));
-
- result = temp - i;
- } else {
- unsigned long flags;
-
- raw_local_irq_save(flags);
- result = v->counter;
- result -= i;
- v->counter = result;
- raw_local_irq_restore(flags);
- }
-
- smp_llsc_mb();
-
- return result;
-}
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
/*
* atomic64_sub_if_positive - conditionally subtract integer from atomic variable
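The lld/scd pair in the MIPS macros above forms a retry loop: scd (store-conditional) only succeeds if the location was untouched since the linked load, and beqzl / the do-while retries otherwise. A rough portable sketch of the same idea, using a generic compare-and-swap in place of LL/SC (illustrative only, not the kernel's implementation):

static long atomic64_add_return_sketch(long i, long *counter)
{
	long old, new;

	do {
		old = *counter;		/* corresponds to lld (load-linked) */
		new = old + i;		/* corresponds to daddu             */
		/* scd succeeds only if *counter still holds old; otherwise retry */
	} while (!__sync_bool_compare_and_swap(counter, old, new));

	return new;
}
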
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 05f08438a7c4..f1df4cb4a286 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -397,12 +397,6 @@ unsigned long get_wchan(struct task_struct *p);
#define ARCH_HAS_PREFETCHW
#define prefetchw(x) __builtin_prefetch((x), 1, 1)
-/*
- * See Documentation/scheduler/sched-arch.txt; prevents deadlock on SMP
- * systems.
- */
-#define __ARCH_WANT_UNLOCKED_CTXSW
-
#endif
#endif /* _ASM_PROCESSOR_H */
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 645b3c4fcfba..f7aac5b57b4b 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -770,7 +770,7 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
long ret = 0;
user_exit();
- if (secure_computing(syscall) == -1)
+ if (secure_computing() == -1)
return -1;
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h
index cadeb1e2cdfc..5be655e83e70 100644
--- a/arch/mn10300/include/asm/atomic.h
+++ b/arch/mn10300/include/asm/atomic.h
@@ -33,7 +33,6 @@
* @v: pointer of type atomic_t
*
* Atomically reads the value of @v. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
*/
#define atomic_read(v) (ACCESS_ONCE((v)->counter))
@@ -43,102 +42,62 @@
* @i: required value
*
* Atomically sets the value of @v to @i. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
*/
#define atomic_set(v, i) (((v)->counter) = (i))
-/**
- * atomic_add_return - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v and returns the result
- * Note that the guaranteed useful range of an atomic_t is only 24 bits.
- */
-static inline int atomic_add_return(int i, atomic_t *v)
-{
- int retval;
-#ifdef CONFIG_SMP
- int status;
-
- asm volatile(
- "1: mov %4,(_AAR,%3) \n"
- " mov (_ADR,%3),%1 \n"
- " add %5,%1 \n"
- " mov %1,(_ADR,%3) \n"
- " mov (_ADR,%3),%0 \n" /* flush */
- " mov (_ASR,%3),%0 \n"
- " or %0,%0 \n"
- " bne 1b \n"
- : "=&r"(status), "=&r"(retval), "=m"(v->counter)
- : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
- : "memory", "cc");
-
-#else
- unsigned long flags;
+#define ATOMIC_OP(op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ int retval, status; \
+ \
+ asm volatile( \
+ "1: mov %4,(_AAR,%3) \n" \
+ " mov (_ADR,%3),%1 \n" \
+ " " #op " %5,%1 \n" \
+ " mov %1,(_ADR,%3) \n" \
+ " mov (_ADR,%3),%0 \n" /* flush */ \
+ " mov (_ASR,%3),%0 \n" \
+ " or %0,%0 \n" \
+ " bne 1b \n" \
+ : "=&r"(status), "=&r"(retval), "=m"(v->counter) \
+ : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i) \
+ : "memory", "cc"); \
+}
- flags = arch_local_cli_save();
- retval = v->counter;
- retval += i;
- v->counter = retval;
- arch_local_irq_restore(flags);
-#endif
- return retval;
+#define ATOMIC_OP_RETURN(op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ int retval, status; \
+ \
+ asm volatile( \
+ "1: mov %4,(_AAR,%3) \n" \
+ " mov (_ADR,%3),%1 \n" \
+ " " #op " %5,%1 \n" \
+ " mov %1,(_ADR,%3) \n" \
+ " mov (_ADR,%3),%0 \n" /* flush */ \
+ " mov (_ASR,%3),%0 \n" \
+ " or %0,%0 \n" \
+ " bne 1b \n" \
+ : "=&r"(status), "=&r"(retval), "=m"(v->counter) \
+ : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i) \
+ : "memory", "cc"); \
+ return retval; \
}
-/**
- * atomic_sub_return - subtract integer from atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns the result
- * Note that the guaranteed useful range of an atomic_t is only 24 bits.
- */
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
- int retval;
-#ifdef CONFIG_SMP
- int status;
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
- asm volatile(
- "1: mov %4,(_AAR,%3) \n"
- " mov (_ADR,%3),%1 \n"
- " sub %5,%1 \n"
- " mov %1,(_ADR,%3) \n"
- " mov (_ADR,%3),%0 \n" /* flush */
- " mov (_ASR,%3),%0 \n"
- " or %0,%0 \n"
- " bne 1b \n"
- : "=&r"(status), "=&r"(retval), "=m"(v->counter)
- : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
- : "memory", "cc");
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
-#else
- unsigned long flags;
- flags = arch_local_cli_save();
- retval = v->counter;
- retval -= i;
- v->counter = retval;
- arch_local_irq_restore(flags);
-#endif
- return retval;
-}
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
static inline int atomic_add_negative(int i, atomic_t *v)
{
return atomic_add_return(i, v) < 0;
}
-static inline void atomic_add(int i, atomic_t *v)
-{
- atomic_add_return(i, v);
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
- atomic_sub_return(i, v);
-}
-
static inline void atomic_inc(atomic_t *v)
{
atomic_add_return(1, v);
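For reference, this is roughly what ATOMIC_OP_RETURN(add) expands to on mn10300 once the preprocessor has run (abridged sketch based on the macro body in the hunk above; the #op stringization pastes the "add" mnemonic straight into the asm template):

static inline int atomic_add_return(int i, atomic_t *v)
{
	int retval, status;

	asm volatile(
		"1:	mov %4,(_AAR,%3)	\n"
		"	mov (_ADR,%3),%1	\n"
		"	add %5,%1		\n"	/* #op expands to "add" */
		"	mov %1,(_ADR,%3)	\n"
		"	mov (_ADR,%3),%0	\n"	/* flush */
		"	mov (_ASR,%3),%0	\n"
		"	or %0,%0		\n"
		"	bne 1b			\n"
		: "=&r"(status), "=&r"(retval), "=m"(v->counter)
		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
		: "memory", "cc");

	return retval;
}
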
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 0be2db2c7d44..226f8ca993f6 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -55,24 +55,7 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
* are atomic, so a reader never sees inconsistent values.
*/
-/* It's possible to reduce all atomic operations to either
- * __atomic_add_return, atomic_set and atomic_read (the latter
- * is there only for consistency).
- */
-
-static __inline__ int __atomic_add_return(int i, atomic_t *v)
-{
- int ret;
- unsigned long flags;
- _atomic_spin_lock_irqsave(v, flags);
-
- ret = (v->counter += i);
-
- _atomic_spin_unlock_irqrestore(v, flags);
- return ret;
-}
-
-static __inline__ void atomic_set(atomic_t *v, int i)
+static __inline__ void atomic_set(atomic_t *v, int i)
{
unsigned long flags;
_atomic_spin_lock_irqsave(v, flags);
@@ -84,7 +67,7 @@ static __inline__ void atomic_set(atomic_t *v, int i)
static __inline__ int atomic_read(const atomic_t *v)
{
- return (*(volatile int *)&(v)->counter);
+ return ACCESS_ONCE((v)->counter);
}
/* exported interface */
@@ -115,16 +98,43 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
return c;
}
+#define ATOMIC_OP(op, c_op) \
+static __inline__ void atomic_##op(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ \
+ _atomic_spin_lock_irqsave(v, flags); \
+ v->counter c_op i; \
+ _atomic_spin_unlock_irqrestore(v, flags); \
+} \
+
+#define ATOMIC_OP_RETURN(op, c_op) \
+static __inline__ int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ int ret; \
+ \
+ _atomic_spin_lock_irqsave(v, flags); \
+ ret = (v->counter c_op i); \
+ _atomic_spin_unlock_irqrestore(v, flags); \
+ \
+ return ret; \
+}
+
+#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)
+
+ATOMIC_OPS(add, +=)
+ATOMIC_OPS(sub, -=)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
-#define atomic_add(i,v) ((void)(__atomic_add_return( (i),(v))))
-#define atomic_sub(i,v) ((void)(__atomic_add_return(-((int) (i)),(v))))
-#define atomic_inc(v) ((void)(__atomic_add_return( 1,(v))))
-#define atomic_dec(v) ((void)(__atomic_add_return( -1,(v))))
+#define atomic_inc(v) (atomic_add( 1,(v)))
+#define atomic_dec(v) (atomic_add( -1,(v)))
-#define atomic_add_return(i,v) (__atomic_add_return( (i),(v)))
-#define atomic_sub_return(i,v) (__atomic_add_return(-(i),(v)))
-#define atomic_inc_return(v) (__atomic_add_return( 1,(v)))
-#define atomic_dec_return(v) (__atomic_add_return( -1,(v)))
+#define atomic_inc_return(v) (atomic_add_return( 1,(v)))
+#define atomic_dec_return(v) (atomic_add_return( -1,(v)))
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
@@ -148,18 +158,37 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
#define ATOMIC64_INIT(i) { (i) }
-static __inline__ s64
-__atomic64_add_return(s64 i, atomic64_t *v)
-{
- s64 ret;
- unsigned long flags;
- _atomic_spin_lock_irqsave(v, flags);
+#define ATOMIC64_OP(op, c_op) \
+static __inline__ void atomic64_##op(s64 i, atomic64_t *v) \
+{ \
+ unsigned long flags; \
+ \
+ _atomic_spin_lock_irqsave(v, flags); \
+ v->counter c_op i; \
+ _atomic_spin_unlock_irqrestore(v, flags); \
+} \
+
+#define ATOMIC64_OP_RETURN(op, c_op) \
+static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v) \
+{ \
+ unsigned long flags; \
+ s64 ret; \
+ \
+ _atomic_spin_lock_irqsave(v, flags); \
+ ret = (v->counter c_op i); \
+ _atomic_spin_unlock_irqrestore(v, flags); \
+ \
+ return ret; \
+}
- ret = (v->counter += i);
+#define ATOMIC64_OPS(op, c_op) ATOMIC64_OP(op, c_op) ATOMIC64_OP_RETURN(op, c_op)
- _atomic_spin_unlock_irqrestore(v, flags);
- return ret;
-}
+ATOMIC64_OPS(add, +=)
+ATOMIC64_OPS(sub, -=)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
@@ -175,18 +204,14 @@ atomic64_set(atomic64_t *v, s64 i)
static __inline__ s64
atomic64_read(const atomic64_t *v)
{
- return (*(volatile long *)&(v)->counter);
+ return ACCESS_ONCE((v)->counter);
}
-#define atomic64_add(i,v) ((void)(__atomic64_add_return( ((s64)(i)),(v))))
-#define atomic64_sub(i,v) ((void)(__atomic64_add_return(-((s64)(i)),(v))))
-#define atomic64_inc(v) ((void)(__atomic64_add_return( 1,(v))))
-#define atomic64_dec(v) ((void)(__atomic64_add_return( -1,(v))))
+#define atomic64_inc(v) (atomic64_add( 1,(v)))
+#define atomic64_dec(v) (atomic64_add( -1,(v)))
-#define atomic64_add_return(i,v) (__atomic64_add_return( ((s64)(i)),(v)))
-#define atomic64_sub_return(i,v) (__atomic64_add_return(-((s64)(i)),(v)))
-#define atomic64_inc_return(v) (__atomic64_add_return( 1,(v)))
-#define atomic64_dec_return(v) (__atomic64_add_return( -1,(v)))
+#define atomic64_inc_return(v) (atomic64_add_return( 1,(v)))
+#define atomic64_dec_return(v) (atomic64_add_return( -1,(v)))
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
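The parisc variants protect a plain C update with the hashed per-variable spinlock declared earlier in this header; as a sketch, ATOMIC_OP(add, +=) expands to roughly:

static __inline__ void atomic_add(int i, atomic_t *v)
{
	unsigned long flags;

	_atomic_spin_lock_irqsave(v, flags);
	v->counter += i;		/* c_op expands to += */
	_atomic_spin_unlock_irqrestore(v, flags);
}
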
diff --git a/arch/parisc/include/uapi/asm/signal.h b/arch/parisc/include/uapi/asm/signal.h
index f5645d6a89f2..10df7079f4cd 100644
--- a/arch/parisc/include/uapi/asm/signal.h
+++ b/arch/parisc/include/uapi/asm/signal.h
@@ -8,12 +8,12 @@
#define SIGTRAP 5
#define SIGABRT 6
#define SIGIOT 6
-#define SIGEMT 7
+#define SIGSTKFLT 7
#define SIGFPE 8
#define SIGKILL 9
#define SIGBUS 10
#define SIGSEGV 11
-#define SIGSYS 12 /* Linux doesn't use this */
+#define SIGXCPU 12
#define SIGPIPE 13
#define SIGALRM 14
#define SIGTERM 15
@@ -32,16 +32,12 @@
#define SIGTTIN 27
#define SIGTTOU 28
#define SIGURG 29
-#define SIGLOST 30 /* Linux doesn't use this either */
-#define SIGUNUSED 31
-#define SIGRESERVE SIGUNUSED
-
-#define SIGXCPU 33
-#define SIGXFSZ 34
-#define SIGSTKFLT 36
+#define SIGXFSZ 30
+#define SIGUNUSED 31
+#define SIGSYS 31 /* Linux doesn't use this */
/* These should not be considered constants from userland. */
-#define SIGRTMIN 37
+#define SIGRTMIN 32
#define SIGRTMAX _NSIG /* it's 44 under HP/UX */
/*
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 4bc7b62fb4b6..88eace4e28c3 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -147,6 +147,7 @@ config PPC
select ARCH_USE_CMPXCHG_LOCKREF if PPC64
select HAVE_ARCH_AUDITSYSCALL
select ARCH_SUPPORTS_ATOMIC_RMW
+ select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN
config GENERIC_CSUM
def_bool CPU_LITTLE_ENDIAN
@@ -182,7 +183,7 @@ config SCHED_OMIT_FRAME_POINTER
config ARCH_MAY_HAVE_PC_FDC
bool
- default !PPC_PSERIES || PCI
+ default PCI
config PPC_OF
def_bool y
@@ -287,6 +288,10 @@ config PPC_EMULATE_SSTEP
bool
default y if KPROBES || UPROBES || XMON || HAVE_HW_BREAKPOINT
+config ZONE_DMA32
+ bool
+ default y if PPC64
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
@@ -603,6 +608,10 @@ config PPC_SUBPAGE_PROT
to set access permissions (read/write, readonly, or no access)
on the 4k subpages of each 64k page.
+config PPC_COPRO_BASE
+ bool
+ default n
+
config SCHED_SMT
bool "SMT (Hyperthreading) scheduler support"
depends on PPC64 && SMP
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 5687e299d0a5..132d9c681d6a 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -135,6 +135,7 @@ CFLAGS-$(CONFIG_POWER4_CPU) += $(call cc-option,-mcpu=power4)
CFLAGS-$(CONFIG_POWER5_CPU) += $(call cc-option,-mcpu=power5)
CFLAGS-$(CONFIG_POWER6_CPU) += $(call cc-option,-mcpu=power6)
CFLAGS-$(CONFIG_POWER7_CPU) += $(call cc-option,-mcpu=power7)
+CFLAGS-$(CONFIG_POWER8_CPU) += $(call cc-option,-mcpu=power8)
# Altivec option not allowed with e500mc64 in GCC.
ifeq ($(CONFIG_ALTIVEC),y)
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index ccc25eddbcb8..8a5bc1cfc6aa 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -389,7 +389,12 @@ $(obj)/zImage: $(addprefix $(obj)/, $(image-y))
$(obj)/zImage.initrd: $(addprefix $(obj)/, $(initrd-y))
@rm -f $@; ln $< $@
+# Only install the vmlinux
install: $(CONFIGURE) $(addprefix $(obj)/, $(image-y))
+ sh -x $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" vmlinux System.map "$(INSTALL_PATH)"
+
+# Install the vmlinux and other built boot targets.
+zInstall: $(CONFIGURE) $(addprefix $(obj)/, $(image-y))
sh -x $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" vmlinux System.map "$(INSTALL_PATH)" $^
# anything not in $(targets)
diff --git a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi
index 97479f0ce630..aecee9690a88 100644
--- a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi
@@ -410,7 +410,7 @@
/include/ "qoriq-gpio-3.dtsi"
/include/ "qoriq-usb2-mph-0.dtsi"
usb0: usb@210000 {
- compatible = "fsl-usb2-mph-v2.4", "fsl-usb2-mph";
+ compatible = "fsl-usb2-mph-v2.5", "fsl-usb2-mph";
fsl,iommu-parent = <&pamu1>;
fsl,liodn-reg = <&guts 0x520>; /* USB1LIODNR */
phy_type = "utmi";
@@ -418,7 +418,7 @@
};
/include/ "qoriq-usb2-dr-0.dtsi"
usb1: usb@211000 {
- compatible = "fsl-usb2-dr-v2.4", "fsl-usb2-dr";
+ compatible = "fsl-usb2-dr-v2.5", "fsl-usb2-dr";
fsl,iommu-parent = <&pamu1>;
fsl,liodn-reg = <&guts 0x524>; /* USB1LIODNR */
dr_mode = "host";
diff --git a/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi b/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
index a3d582e0361a..7e2fc7cdce48 100644
--- a/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
@@ -498,13 +498,13 @@
/include/ "qoriq-gpio-3.dtsi"
/include/ "qoriq-usb2-mph-0.dtsi"
usb0: usb@210000 {
- compatible = "fsl-usb2-mph-v2.4", "fsl-usb2-mph";
+ compatible = "fsl-usb2-mph-v2.5", "fsl-usb2-mph";
phy_type = "utmi";
port0;
};
/include/ "qoriq-usb2-dr-0.dtsi"
usb1: usb@211000 {
- compatible = "fsl-usb2-dr-v2.4", "fsl-usb2-dr";
+ compatible = "fsl-usb2-dr-v2.5", "fsl-usb2-dr";
dr_mode = "host";
phy_type = "utmi";
};
diff --git a/arch/powerpc/boot/dts/t1040rdb.dts b/arch/powerpc/boot/dts/t1040rdb.dts
new file mode 100644
index 000000000000..79a0bed04c1a
--- /dev/null
+++ b/arch/powerpc/boot/dts/t1040rdb.dts
@@ -0,0 +1,48 @@
+/*
+ * T1040RDB Device Tree Source
+ *
+ * Copyright 2014 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "fsl/t104xsi-pre.dtsi"
+/include/ "t104xrdb.dtsi"
+
+/ {
+ model = "fsl,T1040RDB";
+ compatible = "fsl,T1040RDB";
+ ifc: localbus@ffe124000 {
+ cpld@3,0 {
+ compatible = "fsl,t1040rdb-cpld";
+ };
+ };
+};
+
+/include/ "fsl/t1040si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/t1042rdb.dts b/arch/powerpc/boot/dts/t1042rdb.dts
new file mode 100644
index 000000000000..738c23790e94
--- /dev/null
+++ b/arch/powerpc/boot/dts/t1042rdb.dts
@@ -0,0 +1,48 @@
+/*
+ * T1042RDB Device Tree Source
+ *
+ * Copyright 2014 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "fsl/t104xsi-pre.dtsi"
+/include/ "t104xrdb.dtsi"
+
+/ {
+ model = "fsl,T1042RDB";
+ compatible = "fsl,T1042RDB";
+ ifc: localbus@ffe124000 {
+ cpld@3,0 {
+ compatible = "fsl,t1042rdb-cpld";
+ };
+ };
+};
+
+/include/ "fsl/t1042si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/t1042rdb_pi.dts b/arch/powerpc/boot/dts/t1042rdb_pi.dts
new file mode 100644
index 000000000000..634f751fa6d3
--- /dev/null
+++ b/arch/powerpc/boot/dts/t1042rdb_pi.dts
@@ -0,0 +1,57 @@
+/*
+ * T1042RDB_PI Device Tree Source
+ *
+ * Copyright 2014 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "fsl/t104xsi-pre.dtsi"
+/include/ "t104xrdb.dtsi"
+
+/ {
+ model = "fsl,T1042RDB_PI";
+ compatible = "fsl,T1042RDB_PI";
+ ifc: localbus@ffe124000 {
+ cpld@3,0 {
+ compatible = "fsl,t1042rdb_pi-cpld";
+ };
+ };
+ soc: soc@ffe000000 {
+ i2c@118000 {
+ rtc@68 {
+ compatible = "dallas,ds1337";
+ reg = <0x68>;
+ interrupts = <0x2 0x1 0 0>;
+ };
+ };
+ };
+};
+
+/include/ "fsl/t1042si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/t104xrdb.dtsi b/arch/powerpc/boot/dts/t104xrdb.dtsi
new file mode 100644
index 000000000000..1cf0f3c5f7e5
--- /dev/null
+++ b/arch/powerpc/boot/dts/t104xrdb.dtsi
@@ -0,0 +1,156 @@
+/*
+ * T1040RDB/T1042RDB Device Tree Source
+ *
+ * Copyright 2014 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/ {
+
+ ifc: localbus@ffe124000 {
+ reg = <0xf 0xfe124000 0 0x2000>;
+ ranges = <0 0 0xf 0xe8000000 0x08000000
+ 2 0 0xf 0xff800000 0x00010000
+ 3 0 0xf 0xffdf0000 0x00008000>;
+
+ nor@0,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x0 0x0 0x8000000>;
+ bank-width = <2>;
+ device-width = <1>;
+ };
+
+ nand@2,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,ifc-nand";
+ reg = <0x2 0x0 0x10000>;
+ };
+
+ cpld@3,0 {
+ reg = <3 0 0x300>;
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ };
+
+ dcsr: dcsr@f00000000 {
+ ranges = <0x00000000 0xf 0x00000000 0x01072000>;
+ };
+
+ soc: soc@ffe000000 {
+ ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
+ reg = <0xf 0xfe000000 0 0x00001000>;
+
+ spi@110000 {
+ flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "micron,n25q512a";
+ reg = <0>;
+ spi-max-frequency = <10000000>; /* input clock */
+ };
+ };
+
+ i2c@118100 {
+ pca9546@77 {
+ compatible = "nxp,pca9546";
+ reg = <0x77>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
+ };
+
+ pci0: pcie@ffe240000 {
+ reg = <0xf 0xfe240000 0 0x10000>;
+ ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x10000000
+ 0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>;
+ pcie@0 {
+ ranges = <0x02000000 0 0xe0000000
+ 0x02000000 0 0xe0000000
+ 0 0x10000000
+
+ 0x01000000 0 0x00000000
+ 0x01000000 0 0x00000000
+ 0 0x00010000>;
+ };
+ };
+
+ pci1: pcie@ffe250000 {
+ reg = <0xf 0xfe250000 0 0x10000>;
+ ranges = <0x02000000 0x0 0xe0000000 0xc 0x10000000 0x0 0x10000000
+ 0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>;
+ pcie@0 {
+ ranges = <0x02000000 0 0xe0000000
+ 0x02000000 0 0xe0000000
+ 0 0x10000000
+
+ 0x01000000 0 0x00000000
+ 0x01000000 0 0x00000000
+ 0 0x00010000>;
+ };
+ };
+
+ pci2: pcie@ffe260000 {
+ reg = <0xf 0xfe260000 0 0x10000>;
+ ranges = <0x02000000 0 0xe0000000 0xc 0x20000000 0 0x10000000
+ 0x01000000 0 0x00000000 0xf 0xf8020000 0 0x00010000>;
+ pcie@0 {
+ ranges = <0x02000000 0 0xe0000000
+ 0x02000000 0 0xe0000000
+ 0 0x10000000
+
+ 0x01000000 0 0x00000000
+ 0x01000000 0 0x00000000
+ 0 0x00010000>;
+ };
+ };
+
+ pci3: pcie@ffe270000 {
+ reg = <0xf 0xfe270000 0 0x10000>;
+ ranges = <0x02000000 0 0xe0000000 0xc 0x30000000 0 0x10000000
+ 0x01000000 0 0x00000000 0xf 0xf8030000 0 0x00010000>;
+ pcie@0 {
+ ranges = <0x02000000 0 0xe0000000
+ 0x02000000 0 0xe0000000
+ 0 0x10000000
+
+ 0x01000000 0 0x00000000
+ 0x01000000 0 0x00000000
+ 0 0x00010000>;
+ };
+ };
+};
diff --git a/arch/powerpc/configs/cell_defconfig b/arch/powerpc/configs/cell_defconfig
index 45fd06cdc3e8..7a7b3c879f96 100644
--- a/arch/powerpc/configs/cell_defconfig
+++ b/arch/powerpc/configs/cell_defconfig
@@ -18,6 +18,7 @@ CONFIG_OPROFILE=m
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_PPC_POWERNV is not set
# CONFIG_PPC_PSERIES is not set
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_PS3=y
diff --git a/arch/powerpc/configs/celleb_defconfig b/arch/powerpc/configs/celleb_defconfig
index 77d7bf3ca2ac..acccbfde8a50 100644
--- a/arch/powerpc/configs/celleb_defconfig
+++ b/arch/powerpc/configs/celleb_defconfig
@@ -15,6 +15,7 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_PPC_POWERNV is not set
# CONFIG_PPC_PSERIES is not set
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_CELLEB=y
diff --git a/arch/powerpc/configs/corenet32_smp_defconfig b/arch/powerpc/configs/corenet32_smp_defconfig
index 6a3c58adf253..688e9e4d29a1 100644
--- a/arch/powerpc/configs/corenet32_smp_defconfig
+++ b/arch/powerpc/configs/corenet32_smp_defconfig
@@ -165,6 +165,8 @@ CONFIG_NFS_FS=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_850=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=m
CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig
index 269d6e47c67d..6db97e4414b2 100644
--- a/arch/powerpc/configs/corenet64_smp_defconfig
+++ b/arch/powerpc/configs/corenet64_smp_defconfig
@@ -50,7 +50,6 @@ CONFIG_NET_IPIP=y
CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
-CONFIG_ARPD=y
CONFIG_INET_ESP=y
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
@@ -60,33 +59,17 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_MTD=y
-CONFIG_MTD_OF_PARTS=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLKDEVS=y
CONFIG_MTD_BLOCK=y
CONFIG_FTL=y
CONFIG_MTD_CFI=y
-CONFIG_MTD_GEN_PROBE=y
-CONFIG_MTD_MAP_BANK_WIDTH_1=y
-CONFIG_MTD_MAP_BANK_WIDTH_2=y
-CONFIG_MTD_MAP_BANK_WIDTH_4=y
-CONFIG_MTD_CFI_I1=y
-CONFIG_MTD_CFI_I2=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_M25P80=y
-CONFIG_MTD_CFI_UTIL=y
-CONFIG_MTD_NAND_ECC=y
CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_IDS=y
CONFIG_MTD_NAND_FSL_ELBC=y
CONFIG_MTD_NAND_FSL_IFC=y
CONFIG_MTD_UBI=y
-CONFIG_MTD_UBI_WL_THRESHOLD=4096
-CONFIG_MTD_UBI_BEB_RESERVE=1
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
@@ -102,6 +85,7 @@ CONFIG_INPUT_FF_MEMLESS=m
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
CONFIG_SERIO_LIBPS2=y
+CONFIG_PPC_EPAPR_HV_BYTECHAN=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_MANY_PORTS=y
@@ -115,7 +99,6 @@ CONFIG_SPI_GPIO=y
CONFIG_SPI_FSL_SPI=y
CONFIG_SPI_FSL_ESPI=y
# CONFIG_HWMON is not set
-CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_USB_HID=m
CONFIG_USB=y
CONFIG_USB_MON=y
@@ -124,14 +107,17 @@ CONFIG_USB_EHCI_FSL=y
CONFIG_USB_STORAGE=y
CONFIG_MMC=y
CONFIG_MMC_SDHCI=y
+CONFIG_EDAC=y
+CONFIG_EDAC_MM_EDAC=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1307=y
CONFIG_RTC_DRV_DS1374=y
CONFIG_RTC_DRV_DS3232=y
-CONFIG_EDAC=y
-CONFIG_EDAC_MM_EDAC=y
CONFIG_DMADEVICES=y
CONFIG_FSL_DMA=y
+CONFIG_VIRT_DRIVERS=y
+CONFIG_FSL_HV_MANAGER=y
+CONFIG_FSL_CORENET_CF=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
CONFIG_ISO9660_FS=m
@@ -144,35 +130,24 @@ CONFIG_NTFS_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y
-CONFIG_MISC_FILESYSTEMS=y
CONFIG_JFFS2_FS=y
CONFIG_JFFS2_FS_DEBUG=1
-CONFIG_JFFS2_FS_WRITEBUFFER=y
-CONFIG_JFFS2_ZLIB=y
-CONFIG_JFFS2_RTIME=y
CONFIG_UBIFS_FS=y
-CONFIG_UBIFS_FS_XATTR=y
-CONFIG_UBIFS_FS_LZO=y
-CONFIG_UBIFS_FS_ZLIB=y
CONFIG_NFS_FS=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_850=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=m
CONFIG_CRC_T10DIF=y
-CONFIG_CRC16=y
-CONFIG_ZLIB_DEFLATE=y
-CONFIG_LZO_COMPRESS=y
-CONFIG_LZO_DECOMPRESS=y
-CONFIG_CRYPTO_DEFLATE=y
-CONFIG_CRYPTO_LZO=y
+CONFIG_DEBUG_INFO=y
CONFIG_FRAME_WARN=1024
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_SHIRQ=y
CONFIG_DETECT_HUNG_TASK=y
-CONFIG_DEBUG_INFO=y
CONFIG_CRYPTO_NULL=y
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_MD4=y
@@ -180,4 +155,3 @@ CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_DEV_FSL_CAAM=y
-CONFIG_FSL_CORENET_CF=y
diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig
index 7594c5ac6481..6fab06f7f411 100644
--- a/arch/powerpc/configs/g5_defconfig
+++ b/arch/powerpc/configs/g5_defconfig
@@ -16,6 +16,7 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_PPC_POWERNV is not set
# CONFIG_PPC_PSERIES is not set
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
diff --git a/arch/powerpc/configs/maple_defconfig b/arch/powerpc/configs/maple_defconfig
index c8b6a9ddb21b..fbd9e4163311 100644
--- a/arch/powerpc/configs/maple_defconfig
+++ b/arch/powerpc/configs/maple_defconfig
@@ -16,6 +16,7 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_PPC_POWERNV is not set
# CONFIG_PPC_PSERIES is not set
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_MAPLE=y
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index fa1bfd37f1ec..d2c415489f72 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -213,7 +213,6 @@ CONFIG_RTC_DRV_DS1307=y
CONFIG_RTC_DRV_DS1374=y
CONFIG_RTC_DRV_DS3232=y
CONFIG_RTC_DRV_CMOS=y
-CONFIG_RTC_DRV_DS1307=y
CONFIG_DMADEVICES=y
CONFIG_FSL_DMA=y
# CONFIG_NET_DMA is not set
@@ -227,6 +226,9 @@ CONFIG_UDF_FS=m
CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=y
CONFIG_NTFS_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_ISO8859_1=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index 0b452ebd8b3d..87460083dbc7 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -214,7 +214,6 @@ CONFIG_RTC_DRV_DS1307=y
CONFIG_RTC_DRV_DS1374=y
CONFIG_RTC_DRV_DS3232=y
CONFIG_RTC_DRV_CMOS=y
-CONFIG_RTC_DRV_DS1307=y
CONFIG_DMADEVICES=y
CONFIG_FSL_DMA=y
# CONFIG_NET_DMA is not set
@@ -228,6 +227,9 @@ CONFIG_UDF_FS=m
CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=y
CONFIG_NTFS_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_ISO8859_1=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y
diff --git a/arch/powerpc/configs/mpc86xx_defconfig b/arch/powerpc/configs/mpc86xx_defconfig
index 35595ea74ff4..fc58aa8a89e4 100644
--- a/arch/powerpc/configs/mpc86xx_defconfig
+++ b/arch/powerpc/configs/mpc86xx_defconfig
@@ -145,6 +145,9 @@ CONFIG_UDF_FS=m
CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=y
CONFIG_NTFS_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_ISO8859_1=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_ADFS_FS=m
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig
index e5e7838af008..3e72c8c06a0d 100644
--- a/arch/powerpc/configs/pasemi_defconfig
+++ b/arch/powerpc/configs/pasemi_defconfig
@@ -14,6 +14,7 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
CONFIG_MAC_PARTITION=y
+# CONFIG_PPC_POWERNV is not set
# CONFIG_PPC_PSERIES is not set
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_PASEMI=y
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 36518870e6b2..20bc5e2d368d 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -50,6 +50,7 @@ CONFIG_HZ_100=y
CONFIG_BINFMT_MISC=m
CONFIG_PPC_TRANSACTIONAL_MEM=y
CONFIG_KEXEC=y
+CONFIG_CRASH_DUMP=y
CONFIG_IRQ_ALL_CPUS=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_SCHED_SMT=y
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 28992d012926..512d2782b043 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -26,76 +26,53 @@ static __inline__ void atomic_set(atomic_t *v, int i)
__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}
-static __inline__ void atomic_add(int a, atomic_t *v)
-{
- int t;
-
- __asm__ __volatile__(
-"1: lwarx %0,0,%3 # atomic_add\n\
- add %0,%2,%0\n"
- PPC405_ERR77(0,%3)
-" stwcx. %0,0,%3 \n\
- bne- 1b"
- : "=&r" (t), "+m" (v->counter)
- : "r" (a), "r" (&v->counter)
- : "cc");
+#define ATOMIC_OP(op, asm_op) \
+static __inline__ void atomic_##op(int a, atomic_t *v) \
+{ \
+ int t; \
+ \
+ __asm__ __volatile__( \
+"1: lwarx %0,0,%3 # atomic_" #op "\n" \
+ #asm_op " %0,%2,%0\n" \
+ PPC405_ERR77(0,%3) \
+" stwcx. %0,0,%3 \n" \
+" bne- 1b\n" \
+ : "=&r" (t), "+m" (v->counter) \
+ : "r" (a), "r" (&v->counter) \
+ : "cc"); \
+} \
+
+#define ATOMIC_OP_RETURN(op, asm_op) \
+static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
+{ \
+ int t; \
+ \
+ __asm__ __volatile__( \
+ PPC_ATOMIC_ENTRY_BARRIER \
+"1: lwarx %0,0,%2 # atomic_" #op "_return\n" \
+ #asm_op " %0,%1,%0\n" \
+ PPC405_ERR77(0,%2) \
+" stwcx. %0,0,%2 \n" \
+" bne- 1b\n" \
+ PPC_ATOMIC_EXIT_BARRIER \
+ : "=&r" (t) \
+ : "r" (a), "r" (&v->counter) \
+ : "cc", "memory"); \
+ \
+ return t; \
}
-static __inline__ int atomic_add_return(int a, atomic_t *v)
-{
- int t;
+#define ATOMIC_OPS(op, asm_op) ATOMIC_OP(op, asm_op) ATOMIC_OP_RETURN(op, asm_op)
- __asm__ __volatile__(
- PPC_ATOMIC_ENTRY_BARRIER
-"1: lwarx %0,0,%2 # atomic_add_return\n\
- add %0,%1,%0\n"
- PPC405_ERR77(0,%2)
-" stwcx. %0,0,%2 \n\
- bne- 1b"
- PPC_ATOMIC_EXIT_BARRIER
- : "=&r" (t)
- : "r" (a), "r" (&v->counter)
- : "cc", "memory");
+ATOMIC_OPS(add, add)
+ATOMIC_OPS(sub, subf)
- return t;
-}
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-static __inline__ void atomic_sub(int a, atomic_t *v)
-{
- int t;
-
- __asm__ __volatile__(
-"1: lwarx %0,0,%3 # atomic_sub\n\
- subf %0,%2,%0\n"
- PPC405_ERR77(0,%3)
-" stwcx. %0,0,%3 \n\
- bne- 1b"
- : "=&r" (t), "+m" (v->counter)
- : "r" (a), "r" (&v->counter)
- : "cc");
-}
-
-static __inline__ int atomic_sub_return(int a, atomic_t *v)
-{
- int t;
-
- __asm__ __volatile__(
- PPC_ATOMIC_ENTRY_BARRIER
-"1: lwarx %0,0,%2 # atomic_sub_return\n\
- subf %0,%1,%0\n"
- PPC405_ERR77(0,%2)
-" stwcx. %0,0,%2 \n\
- bne- 1b"
- PPC_ATOMIC_EXIT_BARRIER
- : "=&r" (t)
- : "r" (a), "r" (&v->counter)
- : "cc", "memory");
-
- return t;
-}
-
static __inline__ void atomic_inc(atomic_t *v)
{
int t;
@@ -289,71 +266,50 @@ static __inline__ void atomic64_set(atomic64_t *v, long i)
__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}
-static __inline__ void atomic64_add(long a, atomic64_t *v)
-{
- long t;
-
- __asm__ __volatile__(
-"1: ldarx %0,0,%3 # atomic64_add\n\
- add %0,%2,%0\n\
- stdcx. %0,0,%3 \n\
- bne- 1b"
- : "=&r" (t), "+m" (v->counter)
- : "r" (a), "r" (&v->counter)
- : "cc");
+#define ATOMIC64_OP(op, asm_op) \
+static __inline__ void atomic64_##op(long a, atomic64_t *v) \
+{ \
+ long t; \
+ \
+ __asm__ __volatile__( \
+"1: ldarx %0,0,%3 # atomic64_" #op "\n" \
+ #asm_op " %0,%2,%0\n" \
+" stdcx. %0,0,%3 \n" \
+" bne- 1b\n" \
+ : "=&r" (t), "+m" (v->counter) \
+ : "r" (a), "r" (&v->counter) \
+ : "cc"); \
}
-static __inline__ long atomic64_add_return(long a, atomic64_t *v)
-{
- long t;
-
- __asm__ __volatile__(
- PPC_ATOMIC_ENTRY_BARRIER
-"1: ldarx %0,0,%2 # atomic64_add_return\n\
- add %0,%1,%0\n\
- stdcx. %0,0,%2 \n\
- bne- 1b"
- PPC_ATOMIC_EXIT_BARRIER
- : "=&r" (t)
- : "r" (a), "r" (&v->counter)
- : "cc", "memory");
-
- return t;
+#define ATOMIC64_OP_RETURN(op, asm_op) \
+static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
+{ \
+ long t; \
+ \
+ __asm__ __volatile__( \
+ PPC_ATOMIC_ENTRY_BARRIER \
+"1: ldarx %0,0,%2 # atomic64_" #op "_return\n" \
+ #asm_op " %0,%1,%0\n" \
+" stdcx. %0,0,%2 \n" \
+" bne- 1b\n" \
+ PPC_ATOMIC_EXIT_BARRIER \
+ : "=&r" (t) \
+ : "r" (a), "r" (&v->counter) \
+ : "cc", "memory"); \
+ \
+ return t; \
}
-#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
-
-static __inline__ void atomic64_sub(long a, atomic64_t *v)
-{
- long t;
-
- __asm__ __volatile__(
-"1: ldarx %0,0,%3 # atomic64_sub\n\
- subf %0,%2,%0\n\
- stdcx. %0,0,%3 \n\
- bne- 1b"
- : "=&r" (t), "+m" (v->counter)
- : "r" (a), "r" (&v->counter)
- : "cc");
-}
+#define ATOMIC64_OPS(op, asm_op) ATOMIC64_OP(op, asm_op) ATOMIC64_OP_RETURN(op, asm_op)
-static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
-{
- long t;
+ATOMIC64_OPS(add, add)
+ATOMIC64_OPS(sub, subf)
- __asm__ __volatile__(
- PPC_ATOMIC_ENTRY_BARRIER
-"1: ldarx %0,0,%2 # atomic64_sub_return\n\
- subf %0,%1,%0\n\
- stdcx. %0,0,%2 \n\
- bne- 1b"
- PPC_ATOMIC_EXIT_BARRIER
- : "=&r" (t)
- : "r" (a), "r" (&v->counter)
- : "cc", "memory");
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
- return t;
-}
+#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
static __inline__ void atomic64_inc(atomic64_t *v)
{
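As on the other architectures, the powerpc macros only parameterize the arithmetic instruction. ATOMIC_OP(sub, subf), for instance, expands to the lwarx/stwcx. retry loop sketched below (abridged; the PPC405_ERR77 workaround is omitted). stwcx. fails, and bne- loops, if the reservation set by lwarx was lost, giving the same retry structure as the MIPS lld/scd pair.

static __inline__ void atomic_sub(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_sub\n"
"	subf	%0,%2,%0\n"		/* asm_op expands to subf */
"	stwcx.	%0,0,%3 \n"
"	bne-	1b\n"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}
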
diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h
index 3eb53d741070..3a39283333c3 100644
--- a/arch/powerpc/include/asm/bug.h
+++ b/arch/powerpc/include/asm/bug.h
@@ -133,7 +133,6 @@ extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
extern void bad_page_fault(struct pt_regs *, unsigned long, int);
extern void _exception(int, struct pt_regs *, int, unsigned long);
extern void die(const char *, struct pt_regs *, long);
-extern void print_backtrace(unsigned long *);
#endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/copro.h b/arch/powerpc/include/asm/copro.h
new file mode 100644
index 000000000000..ce216df31381
--- /dev/null
+++ b/arch/powerpc/include/asm/copro.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2014 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_POWERPC_COPRO_H
+#define _ASM_POWERPC_COPRO_H
+
+struct copro_slb
+{
+ u64 esid, vsid;
+};
+
+int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
+ unsigned long dsisr, unsigned *flt);
+
+int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb);
+
+
+#ifdef CONFIG_PPC_COPRO_BASE
+void copro_flush_all_slbs(struct mm_struct *mm);
+#else
+static inline void copro_flush_all_slbs(struct mm_struct *mm) {}
+#endif
+#endif /* _ASM_POWERPC_COPRO_H */
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
index 607559ab271f..6c840ceab820 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -32,6 +32,8 @@ static inline void setup_cputime_one_jiffy(void) { }
typedef u64 __nocast cputime_t;
typedef u64 __nocast cputime64_t;
+#define cmpxchg_cputime(ptr, old, new) cmpxchg(ptr, old, new)
+
#ifdef __KERNEL__
/*
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 150866b2a3fe..894d538f3567 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -135,6 +135,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
extern int dma_set_mask(struct device *dev, u64 dma_mask);
extern int __dma_set_mask(struct device *dev, u64 dma_mask);
+extern u64 __dma_get_required_mask(struct device *dev);
#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index 9983c3d26bca..3b260efbfbf9 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -146,6 +146,11 @@ static inline struct pci_dev *eeh_dev_to_pci_dev(struct eeh_dev *edev)
return edev ? edev->pdev : NULL;
}
+static inline struct eeh_pe *eeh_dev_to_pe(struct eeh_dev* edev)
+{
+ return edev ? edev->pe : NULL;
+}
+
/* Return values from eeh_ops::next_error */
enum {
EEH_NEXT_ERR_NONE = 0,
@@ -167,6 +172,7 @@ enum {
#define EEH_OPT_ENABLE 1 /* EEH enable */
#define EEH_OPT_THAW_MMIO 2 /* MMIO enable */
#define EEH_OPT_THAW_DMA 3 /* DMA enable */
+#define EEH_OPT_FREEZE_PE 4 /* Freeze PE */
#define EEH_STATE_UNAVAILABLE (1 << 0) /* State unavailable */
#define EEH_STATE_NOT_SUPPORT (1 << 1) /* EEH not supported */
#define EEH_STATE_RESET_ACTIVE (1 << 2) /* Active reset */
@@ -198,6 +204,8 @@ struct eeh_ops {
int (*wait_state)(struct eeh_pe *pe, int max_wait);
int (*get_log)(struct eeh_pe *pe, int severity, char *drv_log, unsigned long len);
int (*configure_bridge)(struct eeh_pe *pe);
+ int (*err_inject)(struct eeh_pe *pe, int type, int func,
+ unsigned long addr, unsigned long mask);
int (*read_config)(struct device_node *dn, int where, int size, u32 *val);
int (*write_config)(struct device_node *dn, int where, int size, u32 val);
int (*next_error)(struct eeh_pe **pe);
@@ -269,8 +277,7 @@ void eeh_dev_phb_init_dynamic(struct pci_controller *phb);
int eeh_init(void);
int __init eeh_ops_register(struct eeh_ops *ops);
int __exit eeh_ops_unregister(const char *name);
-unsigned long eeh_check_failure(const volatile void __iomem *token,
- unsigned long val);
+int eeh_check_failure(const volatile void __iomem *token);
int eeh_dev_check_failure(struct eeh_dev *edev);
void eeh_addr_cache_build(void);
void eeh_add_device_early(struct device_node *);
@@ -279,6 +286,8 @@ void eeh_add_device_late(struct pci_dev *);
void eeh_add_device_tree_late(struct pci_bus *);
void eeh_add_sysfs_files(struct pci_bus *);
void eeh_remove_device(struct pci_dev *);
+int eeh_unfreeze_pe(struct eeh_pe *pe, bool sw_state);
+int eeh_pe_reset_and_recover(struct eeh_pe *pe);
int eeh_dev_open(struct pci_dev *pdev);
void eeh_dev_release(struct pci_dev *pdev);
struct eeh_pe *eeh_iommu_group_to_pe(struct iommu_group *group);
@@ -321,9 +330,9 @@ static inline void *eeh_dev_init(struct device_node *dn, void *data)
static inline void eeh_dev_phb_init_dynamic(struct pci_controller *phb) { }
-static inline unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val)
+static inline int eeh_check_failure(const volatile void __iomem *token)
{
- return val;
+ return 0;
}
#define eeh_dev_check_failure(x) (0)
@@ -354,7 +363,7 @@ static inline u8 eeh_readb(const volatile void __iomem *addr)
{
u8 val = in_8(addr);
if (EEH_POSSIBLE_ERROR(val, u8))
- return eeh_check_failure(addr, val);
+ eeh_check_failure(addr);
return val;
}
@@ -362,7 +371,7 @@ static inline u16 eeh_readw(const volatile void __iomem *addr)
{
u16 val = in_le16(addr);
if (EEH_POSSIBLE_ERROR(val, u16))
- return eeh_check_failure(addr, val);
+ eeh_check_failure(addr);
return val;
}
@@ -370,7 +379,7 @@ static inline u32 eeh_readl(const volatile void __iomem *addr)
{
u32 val = in_le32(addr);
if (EEH_POSSIBLE_ERROR(val, u32))
- return eeh_check_failure(addr, val);
+ eeh_check_failure(addr);
return val;
}
@@ -378,7 +387,7 @@ static inline u64 eeh_readq(const volatile void __iomem *addr)
{
u64 val = in_le64(addr);
if (EEH_POSSIBLE_ERROR(val, u64))
- return eeh_check_failure(addr, val);
+ eeh_check_failure(addr);
return val;
}
@@ -386,7 +395,7 @@ static inline u16 eeh_readw_be(const volatile void __iomem *addr)
{
u16 val = in_be16(addr);
if (EEH_POSSIBLE_ERROR(val, u16))
- return eeh_check_failure(addr, val);
+ eeh_check_failure(addr);
return val;
}
@@ -394,7 +403,7 @@ static inline u32 eeh_readl_be(const volatile void __iomem *addr)
{
u32 val = in_be32(addr);
if (EEH_POSSIBLE_ERROR(val, u32))
- return eeh_check_failure(addr, val);
+ eeh_check_failure(addr);
return val;
}
@@ -402,7 +411,7 @@ static inline u64 eeh_readq_be(const volatile void __iomem *addr)
{
u64 val = in_be64(addr);
if (EEH_POSSIBLE_ERROR(val, u64))
- return eeh_check_failure(addr, val);
+ eeh_check_failure(addr);
return val;
}
@@ -416,7 +425,7 @@ static inline void eeh_memcpy_fromio(void *dest, const
* were copied. Check all four bytes.
*/
if (n >= 4 && EEH_POSSIBLE_ERROR(*((u32 *)(dest + n - 4)), u32))
- eeh_check_failure(src, *((u32 *)(dest + n - 4)));
+ eeh_check_failure(src);
}
/* in-string eeh macros */
@@ -425,7 +434,7 @@ static inline void eeh_readsb(const volatile void __iomem *addr, void * buf,
{
_insb(addr, buf, ns);
if (EEH_POSSIBLE_ERROR((*(((u8*)buf)+ns-1)), u8))
- eeh_check_failure(addr, *(u8*)buf);
+ eeh_check_failure(addr);
}
static inline void eeh_readsw(const volatile void __iomem *addr, void * buf,
@@ -433,7 +442,7 @@ static inline void eeh_readsw(const volatile void __iomem *addr, void * buf,
{
_insw(addr, buf, ns);
if (EEH_POSSIBLE_ERROR((*(((u16*)buf)+ns-1)), u16))
- eeh_check_failure(addr, *(u16*)buf);
+ eeh_check_failure(addr);
}
static inline void eeh_readsl(const volatile void __iomem *addr, void * buf,
@@ -441,7 +450,7 @@ static inline void eeh_readsl(const volatile void __iomem *addr, void * buf,
{
_insl(addr, buf, nl);
if (EEH_POSSIBLE_ERROR((*(((u32*)buf)+nl-1)), u32))
- eeh_check_failure(addr, *(u32*)buf);
+ eeh_check_failure(addr);
}
#endif /* CONFIG_PPC64 */
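With the new eeh_check_failure() signature, callers no longer receive a substitute value; the accessor returns whatever was read and invokes the check purely for its side effect of detecting and reporting a frozen PE. A caller outside these helpers would now look something like this (my_read_reg is a hypothetical example, not a kernel function):

static u32 my_read_reg(void __iomem *regs)	/* hypothetical helper */
{
	u32 val = in_le32(regs);

	/* All-ones data may mean the device is frozen; flag it via EEH. */
	if (EEH_POSSIBLE_ERROR(val, u32))
		eeh_check_failure(regs);	/* return value no longer carries data */

	return val;
}
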
diff --git a/arch/powerpc/include/asm/hydra.h b/arch/powerpc/include/asm/hydra.h
index 5b0c98bd46ab..1cb39c96d155 100644
--- a/arch/powerpc/include/asm/hydra.h
+++ b/arch/powerpc/include/asm/hydra.h
@@ -95,7 +95,6 @@ extern volatile struct Hydra __iomem *Hydra;
#define HYDRA_INT_SPARE 19
extern int hydra_init(void);
-extern void macio_adb_init(void);
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index 41f13cec8a8f..e8e3a0a04eb0 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -31,11 +31,6 @@ extern atomic_t ppc_n_lost_interrupts;
extern irq_hw_number_t virq_to_hw(unsigned int virq);
-/**
- * irq_early_init - Init irq remapping subsystem
- */
-extern void irq_early_init(void);
-
static __inline__ int irq_canonicalize(int irq)
{
return irq;
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index 16d7e33d35e9..19c36cba37c4 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -81,7 +81,6 @@ extern void default_machine_crash_shutdown(struct pt_regs *regs);
extern int crash_shutdown_register(crash_shutdown_t handler);
extern int crash_shutdown_unregister(crash_shutdown_t handler);
-extern void machine_kexec_simple(struct kimage *image);
extern void crash_kexec_secondary(struct pt_regs *regs);
extern int overlaps_crashkernel(unsigned long start, unsigned long size);
extern void reserve_crashkernel(void);
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 3af721633618..307347f8ddbd 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -328,8 +328,6 @@ extern struct machdep_calls *machine_id;
extern void probe_machine(void);
-extern char cmd_line[COMMAND_LINE_SIZE];
-
#ifdef CONFIG_PPC_PMAC
/*
* Power macintoshes have either a CUDA, PMU or SMU controlling
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index d76514487d6f..aeebc94b2bce 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -190,6 +190,13 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
#ifndef __ASSEMBLY__
+static inline int slb_vsid_shift(int ssize)
+{
+ if (ssize == MMU_SEGSIZE_256M)
+ return SLB_VSID_SHIFT;
+ return SLB_VSID_SHIFT_1T;
+}
+
static inline int segment_shift(int ssize)
{
if (ssize == MMU_SEGSIZE_256M)
@@ -317,6 +324,7 @@ extern int __hash_page_64K(unsigned long ea, unsigned long access,
unsigned int local, int ssize);
struct mm_struct;
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
+extern int hash_page_mm(struct mm_struct *mm, unsigned long ea, unsigned long access, unsigned long trap);
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap);
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
pte_t *ptep, unsigned long trap, int local, int ssize,
@@ -342,6 +350,8 @@ extern void hash_failure_debug(unsigned long ea, unsigned long access,
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
unsigned long pstart, unsigned long prot,
int psize, int ssize);
+int htab_remove_mapping(unsigned long vstart, unsigned long vend,
+ int psize, int ssize);
extern void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 86055e598269..9124b0ede1fc 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -135,6 +135,7 @@ struct opal_sg_list {
#define OPAL_FLASH_MANAGE 77
#define OPAL_FLASH_UPDATE 78
#define OPAL_RESYNC_TIMEBASE 79
+#define OPAL_CHECK_TOKEN 80
#define OPAL_DUMP_INIT 81
#define OPAL_DUMP_INFO 82
#define OPAL_DUMP_READ 83
@@ -146,7 +147,9 @@ struct opal_sg_list {
#define OPAL_GET_PARAM 89
#define OPAL_SET_PARAM 90
#define OPAL_DUMP_RESEND 91
+#define OPAL_PCI_SET_PHB_CXL_MODE 93
#define OPAL_DUMP_INFO2 94
+#define OPAL_PCI_ERR_INJECT 96
#define OPAL_PCI_EEH_FREEZE_SET 97
#define OPAL_HANDLE_HMI 98
#define OPAL_REGISTER_DUMP_REGION 101
@@ -199,6 +202,35 @@ enum OpalPciErrorSeverity {
OPAL_EEH_SEV_INF = 5
};
+enum OpalErrinjectType {
+ OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR = 0,
+ OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR64 = 1,
+};
+
+enum OpalErrinjectFunc {
+ /* IOA bus specific errors */
+ OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_ADDR = 0,
+ OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_DATA = 1,
+ OPAL_ERR_INJECT_FUNC_IOA_LD_IO_ADDR = 2,
+ OPAL_ERR_INJECT_FUNC_IOA_LD_IO_DATA = 3,
+ OPAL_ERR_INJECT_FUNC_IOA_LD_CFG_ADDR = 4,
+ OPAL_ERR_INJECT_FUNC_IOA_LD_CFG_DATA = 5,
+ OPAL_ERR_INJECT_FUNC_IOA_ST_MEM_ADDR = 6,
+ OPAL_ERR_INJECT_FUNC_IOA_ST_MEM_DATA = 7,
+ OPAL_ERR_INJECT_FUNC_IOA_ST_IO_ADDR = 8,
+ OPAL_ERR_INJECT_FUNC_IOA_ST_IO_DATA = 9,
+ OPAL_ERR_INJECT_FUNC_IOA_ST_CFG_ADDR = 10,
+ OPAL_ERR_INJECT_FUNC_IOA_ST_CFG_DATA = 11,
+ OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_ADDR = 12,
+ OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_DATA = 13,
+ OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_MASTER = 14,
+ OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_TARGET = 15,
+ OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_ADDR = 16,
+ OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_DATA = 17,
+ OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_MASTER = 18,
+ OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_TARGET = 19,
+};
+
enum OpalShpcAction {
OPAL_SHPC_GET_LINK_STATE = 0,
OPAL_SHPC_GET_SLOT_STATE = 1
@@ -356,9 +388,12 @@ enum OpalM64EnableAction {
};
enum OpalPciResetScope {
- OPAL_PHB_COMPLETE = 1, OPAL_PCI_LINK = 2, OPAL_PHB_ERROR = 3,
- OPAL_PCI_HOT_RESET = 4, OPAL_PCI_FUNDAMENTAL_RESET = 5,
- OPAL_PCI_IODA_TABLE_RESET = 6,
+ OPAL_RESET_PHB_COMPLETE = 1,
+ OPAL_RESET_PCI_LINK = 2,
+ OPAL_RESET_PHB_ERROR = 3,
+ OPAL_RESET_PCI_HOT = 4,
+ OPAL_RESET_PCI_FUNDAMENTAL = 5,
+ OPAL_RESET_PCI_IODA_TABLE = 6
};
enum OpalPciReinitScope {
@@ -819,6 +854,8 @@ int64_t opal_pci_eeh_freeze_clear(uint64_t phb_id, uint64_t pe_number,
uint64_t eeh_action_token);
int64_t opal_pci_eeh_freeze_set(uint64_t phb_id, uint64_t pe_number,
uint64_t eeh_action_token);
+int64_t opal_pci_err_inject(uint64_t phb_id, uint32_t pe_no, uint32_t type,
+ uint32_t func, uint64_t addr, uint64_t mask);
int64_t opal_pci_shpc(uint64_t phb_id, uint64_t shpc_action, uint8_t *state);
@@ -887,6 +924,7 @@ int64_t opal_pci_next_error(uint64_t phb_id, __be64 *first_frozen_pe,
__be16 *pci_error_type, __be16 *severity);
int64_t opal_pci_poll(uint64_t phb_id);
int64_t opal_return_cpu(void);
+int64_t opal_check_token(uint64_t token);
int64_t opal_reinit_cpus(uint64_t flags);
int64_t opal_xscom_read(uint32_t gcid, uint64_t pcb_addr, __be64 *val);
@@ -924,6 +962,7 @@ int64_t opal_sensor_read(uint32_t sensor_hndl, int token, __be32 *sensor_data);
int64_t opal_handle_hmi(void);
int64_t opal_register_dump_region(uint32_t id, uint64_t start, uint64_t end);
int64_t opal_unregister_dump_region(uint32_t id);
+int64_t opal_pci_set_phb_cxl_mode(uint64_t phb_id, uint64_t mode, uint64_t pe_number);
/* Internal functions */
extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
index 88693cef4f3d..d908a46d05c0 100644
--- a/arch/powerpc/include/asm/page_64.h
+++ b/arch/powerpc/include/asm/page_64.h
@@ -42,20 +42,40 @@
typedef unsigned long pte_basic_t;
-static __inline__ void clear_page(void *addr)
+static inline void clear_page(void *addr)
{
- unsigned long lines, line_size;
-
- line_size = ppc64_caches.dline_size;
- lines = ppc64_caches.dlines_per_page;
-
- __asm__ __volatile__(
+ unsigned long iterations;
+ unsigned long onex, twox, fourx, eightx;
+
+ iterations = ppc64_caches.dlines_per_page / 8;
+
+ /*
+ * Some versions of gcc use multiply instructions to
+ * calculate the offsets, so let's give it a hand to
+ * do better.
+ */
+ onex = ppc64_caches.dline_size;
+ twox = onex << 1;
+ fourx = onex << 2;
+ eightx = onex << 3;
+
+ asm volatile(
"mtctr %1 # clear_page\n\
-1: dcbz 0,%0\n\
- add %0,%0,%3\n\
+ .balign 16\n\
+1: dcbz 0,%0\n\
+ dcbz %3,%0\n\
+ dcbz %4,%0\n\
+ dcbz %5,%0\n\
+ dcbz %6,%0\n\
+ dcbz %7,%0\n\
+ dcbz %8,%0\n\
+ dcbz %9,%0\n\
+ add %0,%0,%10\n\
bdnz+ 1b"
- : "=r" (addr)
- : "r" (lines), "0" (addr), "r" (line_size)
+ : "=&r" (addr)
+ : "r" (iterations), "0" (addr), "b" (onex), "b" (twox),
+ "b" (twox+onex), "b" (fourx), "b" (fourx+onex),
+ "b" (twox+fourx), "b" (eightx-onex), "r" (eightx)
: "ctr", "memory");
}
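The rewritten clear_page() above zeroes eight cache lines per loop iteration, with the dcbz offsets precomputed as shifts of the line size so gcc emits adds instead of multiplies. A minimal user-space sketch of the same stride pattern; the line and page sizes below are assumptions rather than values read from ppc64_caches, and memset stands in for dcbz:

#include <string.h>

#define LINE_SIZE	128UL		/* assumed dcache line size */
#define PAGE_BYTES	(64UL * 1024)	/* assumed 64K page */

static void clear_page_sketch(void *addr)
{
	unsigned long iterations = (PAGE_BYTES / LINE_SIZE) / 8;
	char *p = addr;

	while (iterations--) {
		/* eight consecutive lines per iteration, like the eight dcbz */
		for (int i = 0; i < 8; i++)
			memset(p + i * LINE_SIZE, 0, LINE_SIZE);
		p += 8 * LINE_SIZE;
	}
}

int main(void)
{
	static char page[PAGE_BYTES];

	clear_page_sketch(page);
	return 0;
}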
@@ -104,7 +124,6 @@ extern unsigned long slice_get_unmapped_area(unsigned long addr,
extern unsigned int get_slice_psize(struct mm_struct *mm,
unsigned long addr);
-extern void slice_init_context(struct mm_struct *mm, unsigned int psize);
extern void slice_set_user_psize(struct mm_struct *mm, unsigned int psize);
extern void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
unsigned long len, unsigned int psize);
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index 47edde8c3556..945e47adf7db 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -8,8 +8,6 @@
#include <linux/threads.h>
#include <asm/io.h> /* For sub-arch specific PPC_PIN_SIZE */
-extern unsigned long va_to_phys(unsigned long address);
-extern pte_t *va_to_pte(unsigned long address);
extern unsigned long ioremap_bot;
#ifdef CONFIG_44x
@@ -50,10 +48,10 @@ extern int icache_44x_need_flush;
#define FIRST_USER_ADDRESS 0
#define pte_ERROR(e) \
- printk("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
+ pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
- printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+ pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/*
* This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
diff --git a/arch/powerpc/include/asm/pgtable-ppc64-4k.h b/arch/powerpc/include/asm/pgtable-ppc64-4k.h
index 12798c9d4b4b..7b935683f268 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64-4k.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64-4k.h
@@ -64,7 +64,7 @@
(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
#define pud_ERROR(e) \
- printk("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
+ pr_err("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
/*
* On all 4K setups, remap_4k_pfn() equates to remap_pfn_range() */
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 7b3d54fae46f..ae153c40ab7c 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -328,11 +328,11 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)
#define pte_ERROR(e) \
- printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+ pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
- printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
+ pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
- printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+ pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/* Encode and de-code a swap entry */
#define __swp_type(entry) (((entry).val >> 1) & 0x3f)
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index f60d4ea8b50c..316f9a5da173 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -4,6 +4,7 @@
#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
+#include <linux/mmzone.h>
#include <asm/processor.h> /* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
@@ -248,6 +249,8 @@ extern unsigned long empty_zero_page[];
extern pgd_t swapper_pg_dir[];
+void limit_zone_pfn(enum zone_type zone, unsigned long max_pfn);
+int dma_pfn_limit_to_zone(u64 pfn_limit);
extern void paging_init(void);
/*
diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h
index 12c32c5f533d..67859edbf8fd 100644
--- a/arch/powerpc/include/asm/plpar_wrappers.h
+++ b/arch/powerpc/include/asm/plpar_wrappers.h
@@ -273,7 +273,7 @@ static inline long plpar_set_mode(unsigned long mflags, unsigned long resource,
static inline long enable_reloc_on_exceptions(void)
{
/* mflags = 3: Exceptions at 0xC000000000004000 */
- return plpar_set_mode(3, 3, 0, 0);
+ return plpar_set_mode(3, H_SET_MODE_RESOURCE_ADDR_TRANS_MODE, 0, 0);
}
/*
@@ -284,7 +284,7 @@ static inline long enable_reloc_on_exceptions(void)
* returns H_SUCCESS.
*/
static inline long disable_reloc_on_exceptions(void) {
- return plpar_set_mode(0, 3, 0, 0);
+ return plpar_set_mode(0, H_SET_MODE_RESOURCE_ADDR_TRANS_MODE, 0, 0);
}
/*
@@ -297,7 +297,7 @@ static inline long disable_reloc_on_exceptions(void) {
static inline long enable_big_endian_exceptions(void)
{
/* mflags = 0: big endian exceptions */
- return plpar_set_mode(0, 4, 0, 0);
+ return plpar_set_mode(0, H_SET_MODE_RESOURCE_LE, 0, 0);
}
/*
@@ -310,17 +310,17 @@ static inline long enable_big_endian_exceptions(void)
static inline long enable_little_endian_exceptions(void)
{
/* mflags = 1: little endian exceptions */
- return plpar_set_mode(1, 4, 0, 0);
+ return plpar_set_mode(1, H_SET_MODE_RESOURCE_LE, 0, 0);
}
static inline long plapr_set_ciabr(unsigned long ciabr)
{
- return plpar_set_mode(0, 1, ciabr, 0);
+ return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_CIABR, ciabr, 0);
}
static inline long plapr_set_watchpoint0(unsigned long dawr0, unsigned long dawrx0)
{
- return plpar_set_mode(0, 2, dawr0, dawrx0);
+ return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_DAWR, dawr0, dawrx0);
}
#endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */
diff --git a/arch/powerpc/include/asm/pnv-pci.h b/arch/powerpc/include/asm/pnv-pci.h
new file mode 100644
index 000000000000..f09a22fa1bd7
--- /dev/null
+++ b/arch/powerpc/include/asm/pnv-pci.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2014 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_PNV_PCI_H
+#define _ASM_PNV_PCI_H
+
+#include <linux/pci.h>
+#include <misc/cxl.h>
+
+int pnv_phb_to_cxl(struct pci_dev *dev);
+int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq,
+ unsigned int virq);
+int pnv_cxl_alloc_hwirqs(struct pci_dev *dev, int num);
+void pnv_cxl_release_hwirqs(struct pci_dev *dev, int hwirq, int num);
+int pnv_cxl_get_irq_count(struct pci_dev *dev);
+struct device_node *pnv_pci_to_phb_node(struct pci_dev *dev);
+
+#ifdef CONFIG_CXL_BASE
+int pnv_cxl_alloc_hwirq_ranges(struct cxl_irq_ranges *irqs,
+ struct pci_dev *dev, int num);
+void pnv_cxl_release_hwirq_ranges(struct cxl_irq_ranges *irqs,
+ struct pci_dev *dev);
+#endif
+
+#endif
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index 74b79f07f041..7f436ba1b56f 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -76,8 +76,6 @@ void of_parse_dma_window(struct device_node *dn, const __be32 *dma_window,
unsigned long *busno, unsigned long *phys,
unsigned long *size);
-extern void kdump_move_device_tree(void);
-
extern void of_instantiate_rtc(void);
extern int of_get_ibm_chip_id(struct device_node *np);
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 0c0505956a29..fe3f9488f321 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -947,7 +947,7 @@
* 32-bit 8xx:
* - SPRG0 scratch for exception vectors
* - SPRG1 scratch for exception vectors
- * - SPRG2 apparently unused but initialized
+ * - SPRG2 scratch for exception vectors
*
*/
#ifdef CONFIG_PPC64
@@ -1057,6 +1057,7 @@
#ifdef CONFIG_8xx
#define SPRN_SPRG_SCRATCH0 SPRN_SPRG0
#define SPRN_SPRG_SCRATCH1 SPRN_SPRG1
+#define SPRN_SPRG_SCRATCH2 SPRN_SPRG2
#endif
diff --git a/arch/powerpc/include/asm/rio.h b/arch/powerpc/include/asm/rio.h
index b1d2deceeedb..ec800f28fec5 100644
--- a/arch/powerpc/include/asm/rio.h
+++ b/arch/powerpc/include/asm/rio.h
@@ -13,7 +13,6 @@
#ifndef ASM_PPC_RIO_H
#define ASM_PPC_RIO_H
-extern void platform_rio_init(void);
#ifdef CONFIG_FSL_RIO
extern int fsl_rio_mcheck_exception(struct pt_regs *);
#else
diff --git a/arch/powerpc/include/asm/spu.h b/arch/powerpc/include/asm/spu.h
index 37b7ca39ec9f..a6e6e2bf9d15 100644
--- a/arch/powerpc/include/asm/spu.h
+++ b/arch/powerpc/include/asm/spu.h
@@ -27,6 +27,8 @@
#include <linux/workqueue.h>
#include <linux/device.h>
#include <linux/mutex.h>
+#include <asm/reg.h>
+#include <asm/copro.h>
#define LS_SIZE (256 * 1024)
#define LS_ADDR_MASK (LS_SIZE - 1)
@@ -277,9 +279,6 @@ void spu_remove_dev_attr(struct device_attribute *attr);
int spu_add_dev_attr_group(struct attribute_group *attrs);
void spu_remove_dev_attr_group(struct attribute_group *attrs);
-int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
- unsigned long dsisr, unsigned *flt);
-
/*
* Notifier blocks:
*
diff --git a/arch/powerpc/include/asm/sstep.h b/arch/powerpc/include/asm/sstep.h
index f593b0f9b627..d3a42cc45a82 100644
--- a/arch/powerpc/include/asm/sstep.h
+++ b/arch/powerpc/include/asm/sstep.h
@@ -25,3 +25,65 @@ struct pt_regs;
/* Emulate instructions that cause a transfer of control. */
extern int emulate_step(struct pt_regs *regs, unsigned int instr);
+
+enum instruction_type {
+ COMPUTE, /* arith/logical/CR op, etc. */
+ LOAD,
+ LOAD_MULTI,
+ LOAD_FP,
+ LOAD_VMX,
+ LOAD_VSX,
+ STORE,
+ STORE_MULTI,
+ STORE_FP,
+ STORE_VMX,
+ STORE_VSX,
+ LARX,
+ STCX,
+ BRANCH,
+ MFSPR,
+ MTSPR,
+ CACHEOP,
+ BARRIER,
+ SYSCALL,
+ MFMSR,
+ MTMSR,
+ RFI,
+ INTERRUPT,
+ UNKNOWN
+};
+
+#define INSTR_TYPE_MASK 0x1f
+
+/* Load/store flags, ORed in with type */
+#define SIGNEXT 0x20
+#define UPDATE 0x40 /* matches bit in opcode 31 instructions */
+#define BYTEREV 0x80
+
+/* Cacheop values, ORed in with type */
+#define CACHEOP_MASK 0x700
+#define DCBST 0
+#define DCBF 0x100
+#define DCBTST 0x200
+#define DCBT 0x300
+#define ICBI 0x400
+
+/* Size field in type word */
+#define SIZE(n) ((n) << 8)
+#define GETSIZE(w) ((w) >> 8)
+
+#define MKOP(t, f, s) ((t) | (f) | SIZE(s))
+
+struct instruction_op {
+ int type;
+ int reg;
+ unsigned long val;
+ /* For LOAD/STORE/LARX/STCX */
+ unsigned long ea;
+ int update_reg;
+ /* For MFSPR */
+ int spr;
+};
+
+extern int analyse_instr(struct instruction_op *op, struct pt_regs *regs,
+ unsigned int instr);
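The new instruction_op type word packs everything into a single int: the low five bits hold the instruction_type value, bits 5-7 hold the load/store flags, and for loads and stores the access size sits from bit 8 upward via SIZE()/GETSIZE(). A small stand-alone check of that packing, with the macro values copied from the header above and only the first two enum values spelled out:

#include <assert.h>
#include <stdio.h>

#define INSTR_TYPE_MASK	0x1f
#define SIGNEXT		0x20
#define SIZE(n)		((n) << 8)
#define GETSIZE(w)	((w) >> 8)
#define MKOP(t, f, s)	((t) | (f) | SIZE(s))

enum { COMPUTE, LOAD };			/* first two instruction_type values */

int main(void)
{
	int op = MKOP(LOAD, SIGNEXT, 4);	/* a sign-extending 4-byte load, e.g. lwa */

	assert((op & INSTR_TYPE_MASK) == LOAD);
	assert(op & SIGNEXT);
	assert(GETSIZE(op) == 4);
	printf("op=%#x type=%d size=%d\n", op, op & INSTR_TYPE_MASK, GETSIZE(op));
	return 0;
}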
diff --git a/arch/powerpc/include/asm/tsi108.h b/arch/powerpc/include/asm/tsi108.h
index f8b60793b7a9..d531d9e173ef 100644
--- a/arch/powerpc/include/asm/tsi108.h
+++ b/arch/powerpc/include/asm/tsi108.h
@@ -84,10 +84,6 @@
extern u32 tsi108_pci_cfg_base;
/* Exported functions */
-extern int tsi108_bridge_init(struct pci_controller *hose, uint phys_csr_base);
-extern unsigned long tsi108_get_mem_size(void);
-extern unsigned long tsi108_get_cpu_clk(void);
-extern unsigned long tsi108_get_sdc_clk(void);
extern int tsi108_direct_write_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 val);
extern int tsi108_direct_read_config(struct pci_bus *bus, unsigned int devfn,
diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h
index b51fba10e733..78f2675f2aac 100644
--- a/arch/powerpc/include/asm/udbg.h
+++ b/arch/powerpc/include/asm/udbg.h
@@ -52,7 +52,6 @@ extern void __init udbg_init_44x_as1(void);
extern void __init udbg_init_40x_realmode(void);
extern void __init udbg_init_cpm(void);
extern void __init udbg_init_usbgecko(void);
-extern void __init udbg_init_wsp(void);
extern void __init udbg_init_memcons(void);
extern void __init udbg_init_ehv_bc(void);
extern void __init udbg_init_ps3gelic(void);
diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h
index 9a5c928bb3c6..5b3a903adae6 100644
--- a/arch/powerpc/include/asm/word-at-a-time.h
+++ b/arch/powerpc/include/asm/word-at-a-time.h
@@ -42,32 +42,65 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct
#else
+#ifdef CONFIG_64BIT
+
+/* unused */
struct word_at_a_time {
- const unsigned long one_bits, high_bits;
};
-#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+#define WORD_AT_A_TIME_CONSTANTS { }
-#ifdef CONFIG_64BIT
+/* This will give us 0xff for a NULL char and 0x00 elsewhere */
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
+{
+ unsigned long ret;
+ unsigned long zero = 0;
-/* Alan Modra's little-endian strlen tail for 64-bit */
-#define create_zero_mask(mask) (mask)
+ asm("cmpb %0,%1,%2" : "=r" (ret) : "r" (a), "r" (zero));
+ *bits = ret;
-static inline unsigned long find_zero(unsigned long mask)
+ return ret;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
+{
+ return bits;
+}
+
+/* Alan Modra's little-endian strlen tail for 64-bit */
+static inline unsigned long create_zero_mask(unsigned long bits)
{
unsigned long leading_zero_bits;
long trailing_zero_bit_mask;
- asm ("addi %1,%2,-1\n\t"
- "andc %1,%1,%2\n\t"
- "popcntd %0,%1"
- : "=r" (leading_zero_bits), "=&r" (trailing_zero_bit_mask)
- : "r" (mask));
- return leading_zero_bits >> 3;
+ asm("addi %1,%2,-1\n\t"
+ "andc %1,%1,%2\n\t"
+ "popcntd %0,%1"
+ : "=r" (leading_zero_bits), "=&r" (trailing_zero_bit_mask)
+ : "r" (bits));
+
+ return leading_zero_bits;
+}
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+ return mask >> 3;
+}
+
+/* This assumes that we never ask for an all 1s bitmask */
+static inline unsigned long zero_bytemask(unsigned long mask)
+{
+ return (1UL << mask) - 1;
}
#else /* 32-bit case */
+struct word_at_a_time {
+ const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
/*
* This is largely generic for little-endian machines, but the
* optimal byte mask counting is probably going to be something
@@ -96,8 +129,6 @@ static inline unsigned long find_zero(unsigned long mask)
return count_masked_bytes(mask);
}
-#endif
-
/* Return nonzero if it has a zero */
static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
{
@@ -114,6 +145,59 @@ static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits,
/* The mask we created is directly usable as a bytemask */
#define zero_bytemask(mask) (mask)
+#endif /* CONFIG_64BIT */
+
+#endif /* __BIG_ENDIAN__ */
+
+/*
+ * We use load_unaligned_zeropad() in a selftest, which builds a userspace
+ * program. Some linker scripts seem to discard the .fixup section, so allow
+ * the test code to use a different section name.
+ */
+#ifndef FIXUP_SECTION
+#define FIXUP_SECTION ".fixup"
+#endif
+
+static inline unsigned long load_unaligned_zeropad(const void *addr)
+{
+ unsigned long ret, offset, tmp;
+
+ asm(
+ "1: " PPC_LL "%[ret], 0(%[addr])\n"
+ "2:\n"
+ ".section " FIXUP_SECTION ",\"ax\"\n"
+ "3: "
+#ifdef __powerpc64__
+ "clrrdi %[tmp], %[addr], 3\n\t"
+ "clrlsldi %[offset], %[addr], 61, 3\n\t"
+ "ld %[ret], 0(%[tmp])\n\t"
+#ifdef __BIG_ENDIAN__
+ "sld %[ret], %[ret], %[offset]\n\t"
+#else
+ "srd %[ret], %[ret], %[offset]\n\t"
#endif
+#else
+ "clrrwi %[tmp], %[addr], 2\n\t"
+ "clrlslwi %[offset], %[addr], 30, 3\n\t"
+ "lwz %[ret], 0(%[tmp])\n\t"
+#ifdef __BIG_ENDIAN__
+ "slw %[ret], %[ret], %[offset]\n\t"
+#else
+ "srw %[ret], %[ret], %[offset]\n\t"
+#endif
+#endif
+ "b 2b\n"
+ ".previous\n"
+ ".section __ex_table,\"a\"\n\t"
+ PPC_LONG_ALIGN "\n\t"
+ PPC_LONG "1b,3b\n"
+ ".previous"
+ : [tmp] "=&b" (tmp), [offset] "=&r" (offset), [ret] "=&r" (ret)
+ : [addr] "b" (addr), "m" (*(unsigned long *)addr));
+
+ return ret;
+}
+
+#undef FIXUP_SECTION
#endif /* _ASM_WORD_AT_A_TIME_H */
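The little-endian 64-bit helpers above lean on cmpb: it yields 0xff in every byte position where the source byte is zero, after which create_zero_mask()/find_zero() count the trailing zero bits of that mask and shift by three to get the index of the first zero byte. A portable emulation of that flow; it assumes a little-endian host, and the cmpb stand-in plus the builtin popcount are substitutes for the actual instructions:

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Stand-in for cmpb against zero: 0xff wherever the byte of 'a' is 0. */
static uint64_t cmpb_zero(uint64_t a)
{
	uint64_t ret = 0;

	for (int i = 0; i < 8; i++)
		if (((a >> (8 * i)) & 0xff) == 0)
			ret |= 0xffULL << (8 * i);
	return ret;
}

/* Index of the first zero byte of a little-endian word, or 8 if none:
 * isolate the bits below the lowest set bit of the cmpb mask, count
 * them, and shift by 3 - the create_zero_mask()/find_zero() trick. */
static unsigned long first_zero_byte(uint64_t word)
{
	uint64_t bits = cmpb_zero(word);
	uint64_t trailing = (bits - 1) & ~bits;

	return (unsigned long)__builtin_popcountll(trailing) >> 3;
}

int main(void)
{
	uint64_t w;

	memcpy(&w, "abc\0defg", 8);	/* little-endian load of 8 bytes */
	assert(first_zero_byte(w) == 3);
	return 0;
}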
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index 282d43a0c855..0d050ea37a04 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
@@ -29,6 +29,7 @@
/* Native ICP */
#ifdef CONFIG_PPC_ICP_NATIVE
extern int icp_native_init(void);
+extern void icp_native_flush_interrupt(void);
#else
static inline int icp_native_init(void) { return -ENODEV; }
#endif
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 670c312d914e..502cf69b6c89 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -93,6 +93,9 @@ obj-$(CONFIG_PPC32) += entry_32.o setup_32.o
obj-$(CONFIG_PPC64) += dma-iommu.o iommu.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_MODULES) += ppc_ksyms.o
+ifeq ($(CONFIG_PPC32),y)
+obj-$(CONFIG_MODULES) += ppc_ksyms_32.o
+endif
obj-$(CONFIG_BOOTX_TEXT) += btext.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_KPROBES) += kprobes.o
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 7a13f378ca2c..c78e6dac4d7d 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -13,6 +13,7 @@
#include <linux/crash_dump.h>
#include <linux/bootmem.h>
+#include <linux/io.h>
#include <linux/memblock.h>
#include <asm/code-patching.h>
#include <asm/kdump.h>
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index bd1a2aba599f..735979764cd4 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -106,10 +106,14 @@ int __init swiotlb_setup_bus_notifier(void)
return 0;
}
-void swiotlb_detect_4g(void)
+void __init swiotlb_detect_4g(void)
{
- if ((memblock_end_of_DRAM() - 1) > 0xffffffff)
+ if ((memblock_end_of_DRAM() - 1) > 0xffffffff) {
ppc_swiotlb_enable = 1;
+#ifdef CONFIG_ZONE_DMA32
+ limit_zone_pfn(ZONE_DMA32, (1ULL << 32) >> PAGE_SHIFT);
+#endif
+ }
}
static int __init swiotlb_late_init(void)
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index ee78f6e49d64..adac9dc54aee 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -15,6 +15,7 @@
#include <asm/vio.h>
#include <asm/bug.h>
#include <asm/machdep.h>
+#include <asm/swiotlb.h>
/*
* Generic direct DMA implementation
@@ -25,6 +26,18 @@
* default the offset is PCI_DRAM_OFFSET.
*/
+static u64 __maybe_unused get_pfn_limit(struct device *dev)
+{
+ u64 pfn = (dev->coherent_dma_mask >> PAGE_SHIFT) + 1;
+ struct dev_archdata __maybe_unused *sd = &dev->archdata;
+
+#ifdef CONFIG_SWIOTLB
+ if (sd->max_direct_dma_addr && sd->dma_ops == &swiotlb_dma_ops)
+ pfn = min_t(u64, pfn, sd->max_direct_dma_addr >> PAGE_SHIFT);
+#endif
+
+ return pfn;
+}
void *dma_direct_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
@@ -40,6 +53,26 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
#else
struct page *page;
int node = dev_to_node(dev);
+ u64 pfn = get_pfn_limit(dev);
+ int zone;
+
+ zone = dma_pfn_limit_to_zone(pfn);
+ if (zone < 0) {
+ dev_err(dev, "%s: No suitable zone for pfn %#llx\n",
+ __func__, pfn);
+ return NULL;
+ }
+
+ switch (zone) {
+ case ZONE_DMA:
+ flag |= GFP_DMA;
+ break;
+#ifdef CONFIG_ZONE_DMA32
+ case ZONE_DMA32:
+ flag |= GFP_DMA32;
+ break;
+#endif
+ }
/* ignore region specifiers */
flag &= ~(__GFP_HIGHMEM);
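In the hunk above, get_pfn_limit() converts the device's coherent DMA mask into an exclusive upper page-frame bound, and dma_pfn_limit_to_zone() - only declared in the pgtable.h hunk earlier in this patch, its body is not shown here - maps that bound onto the GFP_DMA/GFP_DMA32/normal cases of the switch. A rough stand-alone sketch of the intended mapping; the zone cut-offs, the 64K PAGE_SHIFT and the pfn_limit_to_zone() body are illustrative guesses, not the kernel's implementation:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	16	/* assumed 64K pages */

enum { ZONE_DMA, ZONE_DMA32, ZONE_NORMAL, MAX_NR_ZONES };

/* Walk the zones top-down and return the highest one whose last pfn
 * still fits under the device's limit; -1 mirrors the dev_err path. */
static int pfn_limit_to_zone(uint64_t pfn_limit, uint64_t total_pfns)
{
	uint64_t zone_top[MAX_NR_ZONES];
	int i;

	zone_top[ZONE_DMA]    = 1ULL << (31 - PAGE_SHIFT);	/* illustrative */
	zone_top[ZONE_DMA32]  = 1ULL << (32 - PAGE_SHIFT);	/* 4GB boundary */
	zone_top[ZONE_NORMAL] = total_pfns;

	for (i = MAX_NR_ZONES - 1; i >= 0; i--)
		if (zone_top[i] <= pfn_limit)
			return i;
	return -1;
}

int main(void)
{
	uint64_t total_pfns = 1ULL << (37 - PAGE_SHIFT);	/* pretend 128GB RAM */
	uint64_t mask = 0xffffffffULL;				/* 32-bit coherent mask */
	uint64_t pfn = (mask >> PAGE_SHIFT) + 1;		/* as in get_pfn_limit() */

	/* Prints zone 1 (ZONE_DMA32), i.e. the GFP_DMA32 case above. */
	printf("pfn limit %#llx -> zone %d\n",
	       (unsigned long long)pfn, pfn_limit_to_zone(pfn, total_pfns));
	return 0;
}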
@@ -202,6 +235,7 @@ int __dma_set_mask(struct device *dev, u64 dma_mask)
*dev->dma_mask = dma_mask;
return 0;
}
+
int dma_set_mask(struct device *dev, u64 dma_mask)
{
if (ppc_md.dma_set_mask)
@@ -210,13 +244,10 @@ int dma_set_mask(struct device *dev, u64 dma_mask)
}
EXPORT_SYMBOL(dma_set_mask);
-u64 dma_get_required_mask(struct device *dev)
+u64 __dma_get_required_mask(struct device *dev)
{
struct dma_map_ops *dma_ops = get_dma_ops(dev);
- if (ppc_md.dma_get_required_mask)
- return ppc_md.dma_get_required_mask(dev);
-
if (unlikely(dma_ops == NULL))
return 0;
@@ -225,6 +256,14 @@ u64 dma_get_required_mask(struct device *dev)
return DMA_BIT_MASK(8 * sizeof(dma_addr_t));
}
+
+u64 dma_get_required_mask(struct device *dev)
+{
+ if (ppc_md.dma_get_required_mask)
+ return ppc_md.dma_get_required_mask(dev);
+
+ return __dma_get_required_mask(dev);
+}
EXPORT_SYMBOL_GPL(dma_get_required_mask);
static int __init dma_init(void)
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 59a64f8dc85f..d543e4179c18 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -117,7 +117,7 @@ static DEFINE_MUTEX(eeh_dev_mutex);
* not dynamically alloced, so that it ends up in RMO where RTAS
* can access it.
*/
-#define EEH_PCI_REGS_LOG_LEN 4096
+#define EEH_PCI_REGS_LOG_LEN 8192
static unsigned char pci_regs_buf[EEH_PCI_REGS_LOG_LEN];
/*
@@ -148,16 +148,12 @@ static int __init eeh_setup(char *str)
}
__setup("eeh=", eeh_setup);
-/**
- * eeh_gather_pci_data - Copy assorted PCI config space registers to buff
- * @edev: device to report data for
- * @buf: point to buffer in which to log
- * @len: amount of room in buffer
- *
- * This routine captures assorted PCI configuration space data,
- * and puts them into a buffer for RTAS error logging.
+/*
+ * This routine captures assorted PCI configuration space data
+ * for the indicated PCI device, and puts them into a buffer
+ * for RTAS error logging.
*/
-static size_t eeh_gather_pci_data(struct eeh_dev *edev, char *buf, size_t len)
+static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len)
{
struct device_node *dn = eeh_dev_to_of_node(edev);
u32 cfg;
@@ -255,6 +251,19 @@ static size_t eeh_gather_pci_data(struct eeh_dev *edev, char *buf, size_t len)
return n;
}
+static void *eeh_dump_pe_log(void *data, void *flag)
+{
+ struct eeh_pe *pe = data;
+ struct eeh_dev *edev, *tmp;
+ size_t *plen = flag;
+
+ eeh_pe_for_each_dev(pe, edev, tmp)
+ *plen += eeh_dump_dev_log(edev, pci_regs_buf + *plen,
+ EEH_PCI_REGS_LOG_LEN - *plen);
+
+ return NULL;
+}
+
/**
* eeh_slot_error_detail - Generate combined log including driver log and error log
* @pe: EEH PE
@@ -268,7 +277,6 @@ static size_t eeh_gather_pci_data(struct eeh_dev *edev, char *buf, size_t len)
void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
{
size_t loglen = 0;
- struct eeh_dev *edev, *tmp;
/*
* When the PHB is fenced or dead, it's pointless to collect
@@ -286,10 +294,7 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
eeh_pe_restore_bars(pe);
pci_regs_buf[0] = 0;
- eeh_pe_for_each_dev(pe, edev, tmp) {
- loglen += eeh_gather_pci_data(edev, pci_regs_buf + loglen,
- EEH_PCI_REGS_LOG_LEN - loglen);
- }
+ eeh_pe_traverse(pe, eeh_dump_pe_log, &loglen);
}
eeh_ops->get_log(pe, severity, pci_regs_buf, loglen);
@@ -410,7 +415,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
}
dn = eeh_dev_to_of_node(edev);
dev = eeh_dev_to_pci_dev(edev);
- pe = edev->pe;
+ pe = eeh_dev_to_pe(edev);
/* Access to IO BARs might get this far and still not want checking. */
if (!pe) {
@@ -542,17 +547,16 @@ EXPORT_SYMBOL_GPL(eeh_dev_check_failure);
/**
* eeh_check_failure - Check if all 1's data is due to EEH slot freeze
- * @token: I/O token, should be address in the form 0xA....
- * @val: value, should be all 1's (XXX why do we need this arg??)
+ * @token: I/O address
*
- * Check for an EEH failure at the given token address. Call this
+ * Check for an EEH failure at the given I/O address. Call this
* routine if the result of a read was all 0xff's and you want to
- * find out if this is due to an EEH slot freeze event. This routine
+ * find out if this is due to an EEH slot freeze event. This routine
* will query firmware for the EEH status.
*
* Note this routine is safe to call in an interrupt context.
*/
-unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val)
+int eeh_check_failure(const volatile void __iomem *token)
{
unsigned long addr;
struct eeh_dev *edev;
@@ -562,13 +566,11 @@ unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned lon
edev = eeh_addr_cache_get_dev(addr);
if (!edev) {
eeh_stats.no_device++;
- return val;
+ return 0;
}
- eeh_dev_check_failure(edev);
- return val;
+ return eeh_dev_check_failure(edev);
}
-
EXPORT_SYMBOL(eeh_check_failure);
@@ -582,25 +584,51 @@ EXPORT_SYMBOL(eeh_check_failure);
*/
int eeh_pci_enable(struct eeh_pe *pe, int function)
{
- int rc, flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
+ int active_flag, rc;
/*
* pHyp doesn't allow to enable IO or DMA on unfrozen PE.
* Also, it's pointless to enable them on unfrozen PE. So
- * we have the check here.
+ * we have to check before enabling IO or DMA.
*/
- if (function == EEH_OPT_THAW_MMIO ||
- function == EEH_OPT_THAW_DMA) {
+ switch (function) {
+ case EEH_OPT_THAW_MMIO:
+ active_flag = EEH_STATE_MMIO_ACTIVE;
+ break;
+ case EEH_OPT_THAW_DMA:
+ active_flag = EEH_STATE_DMA_ACTIVE;
+ break;
+ case EEH_OPT_DISABLE:
+ case EEH_OPT_ENABLE:
+ case EEH_OPT_FREEZE_PE:
+ active_flag = 0;
+ break;
+ default:
+ pr_warn("%s: Invalid function %d\n",
+ __func__, function);
+ return -EINVAL;
+ }
+
+ /*
+ * Check if IO or DMA has been enabled before
+ * enabling them.
+ */
+ if (active_flag) {
rc = eeh_ops->get_state(pe, NULL);
if (rc < 0)
return rc;
- /* Needn't to enable or already enabled */
- if ((rc == EEH_STATE_NOT_SUPPORT) ||
- ((rc & flags) == flags))
+ /* Needn't enable it at all */
+ if (rc == EEH_STATE_NOT_SUPPORT)
+ return 0;
+
+ /* It's already enabled */
+ if (rc & active_flag)
return 0;
}
+
+ /* Issue the request */
rc = eeh_ops->set_option(pe, function);
if (rc)
pr_warn("%s: Unexpected state change %d on "
@@ -608,17 +636,17 @@ int eeh_pci_enable(struct eeh_pe *pe, int function)
__func__, function, pe->phb->global_number,
pe->addr, rc);
- rc = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
- if (rc <= 0)
- return rc;
+ /* Check if the request is finished successfully */
+ if (active_flag) {
+ rc = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
+ if (rc <= 0)
+ return rc;
- if ((function == EEH_OPT_THAW_MMIO) &&
- (rc & EEH_STATE_MMIO_ENABLED))
- return 0;
+ if (rc & active_flag)
+ return 0;
- if ((function == EEH_OPT_THAW_DMA) &&
- (rc & EEH_STATE_DMA_ENABLED))
- return 0;
+ return -EIO;
+ }
return rc;
}
@@ -634,7 +662,7 @@ int eeh_pci_enable(struct eeh_pe *pe, int function)
int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);
- struct eeh_pe *pe = edev->pe;
+ struct eeh_pe *pe = eeh_dev_to_pe(edev);
if (!pe) {
pr_err("%s: No PE found on PCI device %s\n",
@@ -645,14 +673,18 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat
switch (state) {
case pcie_deassert_reset:
eeh_ops->reset(pe, EEH_RESET_DEACTIVATE);
+ eeh_pe_state_clear(pe, EEH_PE_RESET);
break;
case pcie_hot_reset:
+ eeh_pe_state_mark(pe, EEH_PE_RESET);
eeh_ops->reset(pe, EEH_RESET_HOT);
break;
case pcie_warm_reset:
+ eeh_pe_state_mark(pe, EEH_PE_RESET);
eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL);
break;
default:
+ eeh_pe_state_clear(pe, EEH_PE_RESET);
return -EINVAL;
};
@@ -1141,6 +1173,85 @@ void eeh_remove_device(struct pci_dev *dev)
edev->mode &= ~EEH_DEV_SYSFS;
}
+int eeh_unfreeze_pe(struct eeh_pe *pe, bool sw_state)
+{
+ int ret;
+
+ ret = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
+ if (ret) {
+ pr_warn("%s: Failure %d enabling IO on PHB#%x-PE#%x\n",
+ __func__, ret, pe->phb->global_number, pe->addr);
+ return ret;
+ }
+
+ ret = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);
+ if (ret) {
+ pr_warn("%s: Failure %d enabling DMA on PHB#%x-PE#%x\n",
+ __func__, ret, pe->phb->global_number, pe->addr);
+ return ret;
+ }
+
+ /* Clear software isolated state */
+ if (sw_state && (pe->state & EEH_PE_ISOLATED))
+ eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
+
+ return ret;
+}
+
+
+static struct pci_device_id eeh_reset_ids[] = {
+ { PCI_DEVICE(0x19a2, 0x0710) }, /* Emulex, BE */
+ { PCI_DEVICE(0x10df, 0xe220) }, /* Emulex, Lancer */
+ { 0 }
+};
+
+static int eeh_pe_change_owner(struct eeh_pe *pe)
+{
+ struct eeh_dev *edev, *tmp;
+ struct pci_dev *pdev;
+ struct pci_device_id *id;
+ int flags, ret;
+
+ /* Check PE state */
+ flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
+ ret = eeh_ops->get_state(pe, NULL);
+ if (ret < 0 || ret == EEH_STATE_NOT_SUPPORT)
+ return 0;
+
+ /* Unfrozen PE, nothing to do */
+ if ((ret & flags) == flags)
+ return 0;
+
+ /* Frozen PE, check if it needs PE level reset */
+ eeh_pe_for_each_dev(pe, edev, tmp) {
+ pdev = eeh_dev_to_pci_dev(edev);
+ if (!pdev)
+ continue;
+
+ for (id = &eeh_reset_ids[0]; id->vendor != 0; id++) {
+ if (id->vendor != PCI_ANY_ID &&
+ id->vendor != pdev->vendor)
+ continue;
+ if (id->device != PCI_ANY_ID &&
+ id->device != pdev->device)
+ continue;
+ if (id->subvendor != PCI_ANY_ID &&
+ id->subvendor != pdev->subsystem_vendor)
+ continue;
+ if (id->subdevice != PCI_ANY_ID &&
+ id->subdevice != pdev->subsystem_device)
+ continue;
+
+ goto reset;
+ }
+ }
+
+ return eeh_unfreeze_pe(pe, true);
+
+reset:
+ return eeh_pe_reset_and_recover(pe);
+}
+
/**
* eeh_dev_open - Increase count of pass through devices for PE
* @pdev: PCI device
@@ -1153,6 +1264,7 @@ void eeh_remove_device(struct pci_dev *dev)
int eeh_dev_open(struct pci_dev *pdev)
{
struct eeh_dev *edev;
+ int ret = -ENODEV;
mutex_lock(&eeh_dev_mutex);
@@ -1165,6 +1277,16 @@ int eeh_dev_open(struct pci_dev *pdev)
if (!edev || !edev->pe)
goto out;
+ /*
+ * The PE might have been put into frozen state, but we
+ * didn't detect that yet. The passed through PCI devices
+ * in frozen PE won't work properly. Clear the frozen state
+ * in advance.
+ */
+ ret = eeh_pe_change_owner(edev->pe);
+ if (ret)
+ goto out;
+
/* Increase PE's pass through count */
atomic_inc(&edev->pe->pass_dev_cnt);
mutex_unlock(&eeh_dev_mutex);
@@ -1172,7 +1294,7 @@ int eeh_dev_open(struct pci_dev *pdev)
return 0;
out:
mutex_unlock(&eeh_dev_mutex);
- return -ENODEV;
+ return ret;
}
EXPORT_SYMBOL_GPL(eeh_dev_open);
@@ -1202,6 +1324,7 @@ void eeh_dev_release(struct pci_dev *pdev)
/* Decrease PE's pass through count */
atomic_dec(&edev->pe->pass_dev_cnt);
WARN_ON(atomic_read(&edev->pe->pass_dev_cnt) < 0);
+ eeh_pe_change_owner(edev->pe);
out:
mutex_unlock(&eeh_dev_mutex);
}
@@ -1281,8 +1404,10 @@ int eeh_pe_set_option(struct eeh_pe *pe, int option)
*/
switch (option) {
case EEH_OPT_ENABLE:
- if (eeh_enabled())
+ if (eeh_enabled()) {
+ ret = eeh_pe_change_owner(pe);
break;
+ }
ret = -EIO;
break;
case EEH_OPT_DISABLE:
@@ -1294,7 +1419,7 @@ int eeh_pe_set_option(struct eeh_pe *pe, int option)
break;
}
- ret = eeh_ops->set_option(pe, option);
+ ret = eeh_pci_enable(pe, option);
break;
default:
pr_debug("%s: Option %d out of range (%d, %d)\n",
@@ -1345,6 +1470,36 @@ int eeh_pe_get_state(struct eeh_pe *pe)
}
EXPORT_SYMBOL_GPL(eeh_pe_get_state);
+static int eeh_pe_reenable_devices(struct eeh_pe *pe)
+{
+ struct eeh_dev *edev, *tmp;
+ struct pci_dev *pdev;
+ int ret = 0;
+
+ /* Restore config space */
+ eeh_pe_restore_bars(pe);
+
+ /*
+ * Reenable PCI devices as the devices passed
+ * through are always enabled before the reset.
+ */
+ eeh_pe_for_each_dev(pe, edev, tmp) {
+ pdev = eeh_dev_to_pci_dev(edev);
+ if (!pdev)
+ continue;
+
+ ret = pci_reenable_device(pdev);
+ if (ret) {
+ pr_warn("%s: Failure %d reenabling %s\n",
+ __func__, ret, pci_name(pdev));
+ return ret;
+ }
+ }
+
+ /* The PE is still in frozen state */
+ return eeh_unfreeze_pe(pe, true);
+}
+
/**
* eeh_pe_reset - Issue PE reset according to specified type
* @pe: EEH PE
@@ -1368,23 +1523,22 @@ int eeh_pe_reset(struct eeh_pe *pe, int option)
switch (option) {
case EEH_RESET_DEACTIVATE:
ret = eeh_ops->reset(pe, option);
+ eeh_pe_state_clear(pe, EEH_PE_RESET);
if (ret)
break;
- /*
- * The PE is still in frozen state and we need to clear
- * that. It's good to clear frozen state after deassert
- * to avoid messy IO access during reset, which might
- * cause recursive frozen PE.
- */
- ret = eeh_ops->set_option(pe, EEH_OPT_THAW_MMIO);
- if (!ret)
- ret = eeh_ops->set_option(pe, EEH_OPT_THAW_DMA);
- if (!ret)
- eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
+ ret = eeh_pe_reenable_devices(pe);
break;
case EEH_RESET_HOT:
case EEH_RESET_FUNDAMENTAL:
+ /*
+ * Proactively freeze the PE to drop all MMIO access
+ * during reset, which should be avoided since it always
+ * causes a recursive EEH error.
+ */
+ eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
+
+ eeh_pe_state_mark(pe, EEH_PE_RESET);
ret = eeh_ops->reset(pe, option);
break;
default:
@@ -1413,9 +1567,6 @@ int eeh_pe_configure(struct eeh_pe *pe)
if (!pe)
return -ENODEV;
- /* Restore config space for the affected devices */
- eeh_pe_restore_bars(pe);
-
return ret;
}
EXPORT_SYMBOL_GPL(eeh_pe_configure);
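One detail worth noting from the eeh_pe_change_owner() hunk above: the quirk table walk treats PCI_ANY_ID fields as wildcards, and the two Emulex entries are built with PCI_DEVICE(), which leaves subvendor/subdevice as PCI_ANY_ID, so any subsystem variant of those adapters matches. A compact stand-alone version of that match, using cut-down structs in place of struct pci_dev and struct pci_device_id:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PCI_ANY_ID	(~0U)

struct id  { uint32_t vendor, device, subvendor, subdevice; };
struct dev { uint32_t vendor, device, subsystem_vendor, subsystem_device; };

static const struct id reset_ids[] = {
	{ 0x19a2, 0x0710, PCI_ANY_ID, PCI_ANY_ID },	/* Emulex, BE */
	{ 0x10df, 0xe220, PCI_ANY_ID, PCI_ANY_ID },	/* Emulex, Lancer */
	{ 0 }
};

static bool needs_full_reset(const struct dev *pdev)
{
	const struct id *id;

	for (id = &reset_ids[0]; id->vendor != 0; id++) {
		if (id->vendor != PCI_ANY_ID && id->vendor != pdev->vendor)
			continue;
		if (id->device != PCI_ANY_ID && id->device != pdev->device)
			continue;
		if (id->subvendor != PCI_ANY_ID &&
		    id->subvendor != pdev->subsystem_vendor)
			continue;
		if (id->subdevice != PCI_ANY_ID &&
		    id->subdevice != pdev->subsystem_device)
			continue;
		return true;	/* matched: take the reset-and-recover path */
	}
	return false;		/* no match: just unfreeze the PE */
}

int main(void)
{
	struct dev be    = { 0x19a2, 0x0710, 0x1014, 0x0420 };
	struct dev other = { 0x8086, 0x1234, 0, 0 };

	printf("BE adapter: %d, other device: %d\n",
	       needs_full_reset(&be), needs_full_reset(&other));
	return 0;
}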
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 6a0dcee8e931..3fd514f8e4b2 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -180,6 +180,22 @@ static bool eeh_dev_removed(struct eeh_dev *edev)
return false;
}
+static void *eeh_dev_save_state(void *data, void *userdata)
+{
+ struct eeh_dev *edev = data;
+ struct pci_dev *pdev;
+
+ if (!edev)
+ return NULL;
+
+ pdev = eeh_dev_to_pci_dev(edev);
+ if (!pdev)
+ return NULL;
+
+ pci_save_state(pdev);
+ return NULL;
+}
+
/**
* eeh_report_error - Report pci error to each device driver
* @data: eeh device
@@ -303,6 +319,22 @@ static void *eeh_report_reset(void *data, void *userdata)
return NULL;
}
+static void *eeh_dev_restore_state(void *data, void *userdata)
+{
+ struct eeh_dev *edev = data;
+ struct pci_dev *pdev;
+
+ if (!edev)
+ return NULL;
+
+ pdev = eeh_dev_to_pci_dev(edev);
+ if (!pdev)
+ return NULL;
+
+ pci_restore_state(pdev);
+ return NULL;
+}
+
/**
* eeh_report_resume - Tell device to resume normal operations
* @data: eeh device
@@ -450,38 +482,82 @@ static void *eeh_pe_detach_dev(void *data, void *userdata)
static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
{
struct eeh_pe *pe = (struct eeh_pe *)data;
- int i, rc;
+ bool *clear_sw_state = flag;
+ int i, rc = 1;
- for (i = 0; i < 3; i++) {
- rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
- if (rc)
- continue;
- rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);
- if (!rc)
- break;
- }
+ for (i = 0; rc && i < 3; i++)
+ rc = eeh_unfreeze_pe(pe, clear_sw_state);
- /* The PE has been isolated, clear it */
+ /* Stop immediately on any errors */
if (rc) {
- pr_warn("%s: Can't clear frozen PHB#%x-PE#%x (%d)\n",
- __func__, pe->phb->global_number, pe->addr, rc);
+ pr_warn("%s: Failure %d unfreezing PHB#%x-PE#%x\n",
+ __func__, rc, pe->phb->global_number, pe->addr);
return (void *)pe;
}
return NULL;
}
-static int eeh_clear_pe_frozen_state(struct eeh_pe *pe)
+static int eeh_clear_pe_frozen_state(struct eeh_pe *pe,
+ bool clear_sw_state)
{
void *rc;
- rc = eeh_pe_traverse(pe, __eeh_clear_pe_frozen_state, NULL);
+ rc = eeh_pe_traverse(pe, __eeh_clear_pe_frozen_state, &clear_sw_state);
if (!rc)
eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
return rc ? -EIO : 0;
}
+int eeh_pe_reset_and_recover(struct eeh_pe *pe)
+{
+ int result, ret;
+
+ /* Bail if the PE is being recovered */
+ if (pe->state & EEH_PE_RECOVERING)
+ return 0;
+
+ /* Put the PE into recovery mode */
+ eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
+
+ /* Save states */
+ eeh_pe_dev_traverse(pe, eeh_dev_save_state, NULL);
+
+ /* Report error */
+ eeh_pe_dev_traverse(pe, eeh_report_error, &result);
+
+ /* Issue reset */
+ eeh_pe_state_mark(pe, EEH_PE_RESET);
+ ret = eeh_reset_pe(pe);
+ if (ret) {
+ eeh_pe_state_clear(pe, EEH_PE_RECOVERING | EEH_PE_RESET);
+ return ret;
+ }
+ eeh_pe_state_clear(pe, EEH_PE_RESET);
+
+ /* Unfreeze the PE */
+ ret = eeh_clear_pe_frozen_state(pe, true);
+ if (ret) {
+ eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
+ return ret;
+ }
+
+ /* Notify completion of reset */
+ eeh_pe_dev_traverse(pe, eeh_report_reset, &result);
+
+ /* Restore device state */
+ eeh_pe_dev_traverse(pe, eeh_dev_restore_state, NULL);
+
+ /* Resume */
+ eeh_pe_dev_traverse(pe, eeh_report_resume, NULL);
+
+ /* Clear recovery mode */
+ eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
+
+ return 0;
+}
+
/**
* eeh_reset_device - Perform actual reset of a pci slot
* @pe: EEH PE
@@ -540,7 +616,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
eeh_pe_state_clear(pe, EEH_PE_RESET);
/* Clear frozen state */
- rc = eeh_clear_pe_frozen_state(pe);
+ rc = eeh_clear_pe_frozen_state(pe, false);
if (rc)
return rc;
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index 00e3844525a6..53dd0915e690 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -428,7 +428,7 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev)
}
/* Remove the EEH device */
- pe = edev->pe;
+ pe = eeh_dev_to_pe(edev);
edev->pe = NULL;
list_del(&edev->list);
@@ -584,6 +584,8 @@ static void *__eeh_pe_state_clear(void *data, void *flag)
{
struct eeh_pe *pe = (struct eeh_pe *)data;
int state = *((int *)flag);
+ struct eeh_dev *edev, *tmp;
+ struct pci_dev *pdev;
/* Keep the state of permanently removed PE intact */
if ((pe->freeze_count > EEH_MAX_ALLOWED_FREEZES) &&
@@ -592,9 +594,22 @@ static void *__eeh_pe_state_clear(void *data, void *flag)
pe->state &= ~state;
- /* Clear check count since last isolation */
- if (state & EEH_PE_ISOLATED)
- pe->check_count = 0;
+ /*
+ * Special treatment on clearing isolated state. Clear
+ * check count since last isolation and put all affected
+ * devices to normal state.
+ */
+ if (!(state & EEH_PE_ISOLATED))
+ return NULL;
+
+ pe->check_count = 0;
+ eeh_pe_for_each_dev(pe, edev, tmp) {
+ pdev = eeh_dev_to_pci_dev(edev);
+ if (!pdev)
+ continue;
+
+ pdev->error_state = pci_channel_io_normal;
+ }
return NULL;
}
diff --git a/arch/powerpc/kernel/eeh_sysfs.c b/arch/powerpc/kernel/eeh_sysfs.c
index e2595ba4b720..f19b1e5cb060 100644
--- a/arch/powerpc/kernel/eeh_sysfs.c
+++ b/arch/powerpc/kernel/eeh_sysfs.c
@@ -54,6 +54,43 @@ EEH_SHOW_ATTR(eeh_mode, mode, "0x%x");
EEH_SHOW_ATTR(eeh_config_addr, config_addr, "0x%x");
EEH_SHOW_ATTR(eeh_pe_config_addr, pe_config_addr, "0x%x");
+static ssize_t eeh_pe_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev);
+ int state;
+
+ if (!edev || !edev->pe)
+ return -ENODEV;
+
+ state = eeh_ops->get_state(edev->pe, NULL);
+ return sprintf(buf, "0x%08x 0x%08x\n",
+ state, edev->pe->state);
+}
+
+static ssize_t eeh_pe_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev);
+
+ if (!edev || !edev->pe)
+ return -ENODEV;
+
+ /* Nothing to do if it's not frozen */
+ if (!(edev->pe->state & EEH_PE_ISOLATED))
+ return count;
+
+ if (eeh_unfreeze_pe(edev->pe, true))
+ return -EIO;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(eeh_pe_state);
+
void eeh_sysfs_add_device(struct pci_dev *pdev)
{
struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev);
@@ -68,9 +105,10 @@ void eeh_sysfs_add_device(struct pci_dev *pdev)
rc += device_create_file(&pdev->dev, &dev_attr_eeh_mode);
rc += device_create_file(&pdev->dev, &dev_attr_eeh_config_addr);
rc += device_create_file(&pdev->dev, &dev_attr_eeh_pe_config_addr);
+ rc += device_create_file(&pdev->dev, &dev_attr_eeh_pe_state);
if (rc)
- printk(KERN_WARNING "EEH: Unable to create sysfs entries\n");
+ pr_warn("EEH: Unable to create sysfs entries\n");
else if (edev)
edev->mode |= EEH_DEV_SYSFS;
}
@@ -92,6 +130,7 @@ void eeh_sysfs_remove_device(struct pci_dev *pdev)
device_remove_file(&pdev->dev, &dev_attr_eeh_mode);
device_remove_file(&pdev->dev, &dev_attr_eeh_config_addr);
device_remove_file(&pdev->dev, &dev_attr_eeh_pe_config_addr);
+ device_remove_file(&pdev->dev, &dev_attr_eeh_pe_state);
if (edev)
edev->mode &= ~EEH_DEV_SYSFS;
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 7ee876d2adb5..fafff8dbd5d9 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -104,12 +104,15 @@ turn_on_mmu:
* task's thread_struct.
*/
#define EXCEPTION_PROLOG \
- mtspr SPRN_SPRG_SCRATCH0,r10; \
- mtspr SPRN_SPRG_SCRATCH1,r11; \
- mfcr r10; \
+ EXCEPTION_PROLOG_0; \
EXCEPTION_PROLOG_1; \
EXCEPTION_PROLOG_2
+#define EXCEPTION_PROLOG_0 \
+ mtspr SPRN_SPRG_SCRATCH0,r10; \
+ mtspr SPRN_SPRG_SCRATCH1,r11; \
+ mfcr r10
+
#define EXCEPTION_PROLOG_1 \
mfspr r11,SPRN_SRR1; /* check whether user or kernel */ \
andi. r11,r11,MSR_PR; \
@@ -145,6 +148,14 @@ turn_on_mmu:
SAVE_2GPRS(7, r11)
/*
+ * Exception exit code.
+ */
+#define EXCEPTION_EPILOG_0 \
+ mtcr r10; \
+ mfspr r10,SPRN_SPRG_SCRATCH0; \
+ mfspr r11,SPRN_SPRG_SCRATCH1
+
+/*
* Note: code which follows this uses cr0.eq (set if from kernel),
* r11, r12 (SRR0), and r9 (SRR1).
*
@@ -293,16 +304,8 @@ InstructionTLBMiss:
#ifdef CONFIG_8xx_CPU6
stw r3, 8(r0)
#endif
- DO_8xx_CPU6(0x3f80, r3)
- mtspr SPRN_M_TW, r10 /* Save a couple of working registers */
- mfcr r10
-#ifdef CONFIG_8xx_CPU6
- stw r10, 0(r0)
- stw r11, 4(r0)
-#else
- mtspr SPRN_DAR, r10
- mtspr SPRN_SPRG2, r11
-#endif
+ EXCEPTION_PROLOG_0
+ mtspr SPRN_SPRG_SCRATCH2, r10
mfspr r10, SPRN_SRR0 /* Get effective address of fault */
#ifdef CONFIG_8xx_CPU15
addi r11, r10, 0x1000
@@ -359,18 +362,11 @@ InstructionTLBMiss:
mtspr SPRN_MI_RPN, r10 /* Update TLB entry */
/* Restore registers */
-#ifndef CONFIG_8xx_CPU6
- mfspr r10, SPRN_DAR
- mtcr r10
- mtspr SPRN_DAR, r11 /* Tag DAR */
- mfspr r11, SPRN_SPRG2
-#else
- lwz r11, 0(r0)
- mtcr r11
- lwz r11, 4(r0)
+#ifdef CONFIG_8xx_CPU6
lwz r3, 8(r0)
#endif
- mfspr r10, SPRN_M_TW
+ mfspr r10, SPRN_SPRG_SCRATCH2
+ EXCEPTION_EPILOG_0
rfi
2:
mfspr r11, SPRN_SRR1
@@ -381,19 +377,11 @@ InstructionTLBMiss:
mtspr SPRN_SRR1, r11
/* Restore registers */
-#ifndef CONFIG_8xx_CPU6
- mfspr r10, SPRN_DAR
- mtcr r10
- li r11, 0x00f0
- mtspr SPRN_DAR, r11 /* Tag DAR */
- mfspr r11, SPRN_SPRG2
-#else
- lwz r11, 0(r0)
- mtcr r11
- lwz r11, 4(r0)
+#ifdef CONFIG_8xx_CPU6
lwz r3, 8(r0)
#endif
- mfspr r10, SPRN_M_TW
+ mfspr r10, SPRN_SPRG_SCRATCH2
+ EXCEPTION_EPILOG_0
b InstructionAccess
. = 0x1200
@@ -401,16 +389,8 @@ DataStoreTLBMiss:
#ifdef CONFIG_8xx_CPU6
stw r3, 8(r0)
#endif
- DO_8xx_CPU6(0x3f80, r3)
- mtspr SPRN_M_TW, r10 /* Save a couple of working registers */
- mfcr r10
-#ifdef CONFIG_8xx_CPU6
- stw r10, 0(r0)
- stw r11, 4(r0)
-#else
- mtspr SPRN_DAR, r10
- mtspr SPRN_SPRG2, r11
-#endif
+ EXCEPTION_PROLOG_0
+ mtspr SPRN_SPRG_SCRATCH2, r10
mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */
/* If we are faulting a kernel address, we have to use the
@@ -483,19 +463,12 @@ DataStoreTLBMiss:
mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
/* Restore registers */
-#ifndef CONFIG_8xx_CPU6
- mfspr r10, SPRN_DAR
- mtcr r10
- mtspr SPRN_DAR, r11 /* Tag DAR */
- mfspr r11, SPRN_SPRG2
-#else
- mtspr SPRN_DAR, r11 /* Tag DAR */
- lwz r11, 0(r0)
- mtcr r11
- lwz r11, 4(r0)
+#ifdef CONFIG_8xx_CPU6
lwz r3, 8(r0)
#endif
- mfspr r10, SPRN_M_TW
+ mtspr SPRN_DAR, r11 /* Tag DAR */
+ mfspr r10, SPRN_SPRG_SCRATCH2
+ EXCEPTION_EPILOG_0
rfi
/* This is an instruction TLB error on the MPC8xx. This could be due
@@ -507,35 +480,18 @@ InstructionTLBError:
b InstructionAccess
/* This is the data TLB error on the MPC8xx. This could be due to
- * many reasons, including a dirty update to a pte. We can catch that
- * one here, but anything else is an error. First, we track down the
- * Linux pte. If it is valid, write access is allowed, but the
- * page dirty bit is not set, we will set it and reload the TLB. For
- * any other case, we bail out to a higher level function that can
- * handle it.
+ * many reasons, including a dirty update to a pte. We bail out to
+ * a higher level function that can handle it.
*/
. = 0x1400
DataTLBError:
-#ifdef CONFIG_8xx_CPU6
- stw r3, 8(r0)
-#endif
- DO_8xx_CPU6(0x3f80, r3)
- mtspr SPRN_M_TW, r10 /* Save a couple of working registers */
- mfcr r10
- stw r10, 0(r0)
- stw r11, 4(r0)
+ EXCEPTION_PROLOG_0
- mfspr r10, SPRN_DAR
- cmpwi cr0, r10, 0x00f0
+ mfspr r11, SPRN_DAR
+ cmpwi cr0, r11, 0x00f0
beq- FixupDAR /* must be a buggy dcbX, icbi insn. */
-DARFixed:/* Return from dcbx instruction bug workaround, r10 holds value of DAR */
- mfspr r10, SPRN_M_TW /* Restore registers */
- lwz r11, 0(r0)
- mtcr r11
- lwz r11, 4(r0)
-#ifdef CONFIG_8xx_CPU6
- lwz r3, 8(r0)
-#endif
+DARFixed:/* Return from dcbx instruction bug workaround */
+ EXCEPTION_EPILOG_0
b DataAccess
EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
@@ -559,11 +515,15 @@ DARFixed:/* Return from dcbx instruction bug workaround, r10 holds value of DAR
/* This is the procedure to calculate the data EA for buggy dcbx,dcbi instructions
* by decoding the registers used by the dcbx instruction and adding them.
- * DAR is set to the calculated address and r10 also holds the EA on exit.
+ * DAR is set to the calculated address.
*/
/* define if you don't want to use self modifying code */
#define NO_SELF_MODIFYING_CODE
FixupDAR:/* Entry point for dcbx workaround. */
+#ifdef CONFIG_8xx_CPU6
+ stw r3, 8(r0)
+#endif
+ mtspr SPRN_SPRG_SCRATCH2, r10
/* fetch instruction from memory. */
mfspr r10, SPRN_SRR0
andis. r11, r10, 0x8000 /* Address >= 0x80000000 */
@@ -579,16 +539,17 @@ FixupDAR:/* Entry point for dcbx workaround. */
mtspr SPRN_MD_TWC, r11 /* Load pte table base address */
mfspr r11, SPRN_MD_TWC /* ....and get the pte address */
lwz r11, 0(r11) /* Get the pte */
+#ifdef CONFIG_8xx_CPU6
+ lwz r3, 8(r0) /* restore r3 from memory */
+#endif
/* concat physical page address(r11) and page offset(r10) */
rlwimi r11, r10, 0, 20, 31
lwz r11,0(r11)
/* Check if it really is a dcbx instruction. */
/* dcbt and dcbtst does not generate DTLB Misses/Errors,
* no need to include them here */
- srwi r10, r11, 26 /* check if major OP code is 31 */
- cmpwi cr0, r10, 31
- bne- 141f
- rlwinm r10, r11, 0, 21, 30
+ xoris r10, r11, 0x7c00 /* check if major OP code is 31 */
+ rlwinm r10, r10, 0, 21, 5
cmpwi cr0, r10, 2028 /* Is dcbz? */
beq+ 142f
cmpwi cr0, r10, 940 /* Is dcbi? */
@@ -599,16 +560,13 @@ FixupDAR:/* Entry point for dcbx workaround. */
beq+ 142f
cmpwi cr0, r10, 1964 /* Is icbi? */
beq+ 142f
-141: mfspr r10, SPRN_DAR /* r10 must hold DAR at exit */
+141: mfspr r10,SPRN_SPRG_SCRATCH2
b DARFixed /* Nope, go back to normal TLB processing */
144: mfspr r10, SPRN_DSISR
rlwinm r10, r10,0,7,5 /* Clear store bit for buggy dcbst insn */
mtspr SPRN_DSISR, r10
142: /* continue, it was a dcbx, dcbi instruction. */
-#ifdef CONFIG_8xx_CPU6
- lwz r3, 8(r0) /* restore r3 from memory */
-#endif
#ifndef NO_SELF_MODIFYING_CODE
andis. r10,r11,0x1f /* test if reg RA is r0 */
li r10,modified_instr@l
@@ -619,14 +577,15 @@ FixupDAR:/* Entry point for dcbx workaround. */
stw r11,0(r10) /* store add/and instruction */
dcbf 0,r10 /* flush new instr. to memory. */
icbi 0,r10 /* invalidate instr. cache line */
- lwz r11, 4(r0) /* restore r11 from memory */
- mfspr r10, SPRN_M_TW /* restore r10 from M_TW */
+ mfspr r11, SPRN_SPRG_SCRATCH1 /* restore r11 */
+ mfspr r10, SPRN_SPRG_SCRATCH0 /* restore r10 */
isync /* Wait until new instr is loaded from memory */
modified_instr:
.space 4 /* this is where the add instr. is stored */
bne+ 143f
subf r10,r0,r10 /* r10=r10-r0, only if reg RA is r0 */
143: mtdar r10 /* store faulting EA in DAR */
+ mfspr r10,SPRN_SPRG_SCRATCH2
b DARFixed /* Go back to normal TLB handling */
#else
mfctr r10
@@ -680,13 +639,16 @@ modified_instr:
mfdar r11
mtctr r11 /* restore ctr reg from DAR */
mtdar r10 /* save fault EA to DAR */
+ mfspr r10,SPRN_SPRG_SCRATCH2
b DARFixed /* Go back to normal TLB handling */
/* special handling for r10,r11 since these are modified already */
-153: lwz r11, 4(r0) /* load r11 from memory */
- b 155f
-154: mfspr r11, SPRN_M_TW /* load r10 from M_TW */
-155: add r10, r10, r11 /* add it */
+153: mfspr r11, SPRN_SPRG_SCRATCH1 /* load r11 from SPRN_SPRG_SCRATCH1 */
+ add r10, r10, r11 /* add it */
+ mfctr r11 /* restore r11 */
+ b 151b
+154: mfspr r11, SPRN_SPRG_SCRATCH0 /* load r10 from SPRN_SPRG_SCRATCH0 */
+ add r10, r10, r11 /* add it */
mfctr r11 /* restore r11 */
b 151b
#endif
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index 0bb5918faaaf..1f7d84e2e8b2 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -293,7 +293,7 @@ out:
/*
* Handle single-step exceptions following a DABR hit.
*/
-int __kprobes single_step_dabr_instruction(struct die_args *args)
+static int __kprobes single_step_dabr_instruction(struct die_args *args)
{
struct pt_regs *regs = args->regs;
struct perf_event *bp = NULL;
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index 1114d13ac19f..ac86c53e2542 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -55,7 +55,7 @@ static struct device ibmebus_bus_device = { /* fake "parent" device */
struct bus_type ibmebus_bus_type;
/* These devices will automatically be added to the bus during init */
-static struct of_device_id __initdata ibmebus_matches[] = {
+static const struct of_device_id ibmebus_matches[] __initconst = {
{ .compatible = "IBM,lhca" },
{ .compatible = "IBM,lhea" },
{},
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index be05841396cf..c0754bbf8118 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -73,7 +73,7 @@ _GLOBAL(power7_powersave_common)
/* Check if something happened while soft-disabled */
lbz r0,PACAIRQHAPPENED(r13)
- cmpwi cr0,r0,0
+ andi. r0,r0,~PACA_IRQ_HARD_DIS@l
beq 1f
cmpwi cr0,r4,0
beq 1f
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 4c5891de162e..8eb857f216c1 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -444,13 +444,13 @@ void migrate_irqs(void)
cpumask_and(mask, data->affinity, map);
if (cpumask_any(mask) >= nr_cpu_ids) {
- printk("Breaking affinity for irq %i\n", irq);
+ pr_warn("Breaking affinity for irq %i\n", irq);
cpumask_copy(mask, map);
}
if (chip->irq_set_affinity)
chip->irq_set_affinity(data, mask, true);
else if (desc->action && !(warned++))
- printk("Cannot set affinity for irq %i\n", irq);
+ pr_err("Cannot set affinity for irq %i\n", irq);
}
free_cpumask_var(mask);
@@ -470,7 +470,7 @@ static inline void check_stack_overflow(void)
/* check for stack overflow: is there less than 2KB free? */
if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
- printk("do_IRQ: stack overflow: %ld\n",
+ pr_err("do_IRQ: stack overflow: %ld\n",
sp - sizeof(struct thread_info));
dump_stack();
}
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index 936258881c98..7b750c4ed5c7 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -35,7 +35,7 @@ static struct legacy_serial_info {
phys_addr_t taddr;
} legacy_serial_infos[MAX_LEGACY_SERIAL_PORTS];
-static struct of_device_id legacy_serial_parents[] __initdata = {
+static const struct of_device_id legacy_serial_parents[] __initconst = {
{.type = "soc",},
{.type = "tsi-bridge",},
{.type = "opb", },
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index 6cff040bf456..c94d2e018d84 100644
--- a/arch/powerpc/kernel/module_32.c
+++ b/arch/powerpc/kernel/module_32.c
@@ -15,6 +15,9 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/elf.h>
@@ -28,12 +31,6 @@
#include <linux/sort.h>
#include <asm/setup.h>
-#if 0
-#define DEBUGP printk
-#else
-#define DEBUGP(fmt , ...)
-#endif
-
/* Count how many different relocations (different symbol, different
addend) */
static unsigned int count_relocs(const Elf32_Rela *rela, unsigned int num)
@@ -121,8 +118,8 @@ static unsigned long get_plt_size(const Elf32_Ehdr *hdr,
continue;
if (sechdrs[i].sh_type == SHT_RELA) {
- DEBUGP("Found relocations in section %u\n", i);
- DEBUGP("Ptr: %p. Number: %u\n",
+ pr_debug("Found relocations in section %u\n", i);
+ pr_debug("Ptr: %p. Number: %u\n",
(void *)hdr + sechdrs[i].sh_offset,
sechdrs[i].sh_size / sizeof(Elf32_Rela));
@@ -161,7 +158,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
me->arch.core_plt_section = i;
}
if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
- printk("Module doesn't contain .plt or .init.plt sections.\n");
+ pr_err("Module doesn't contain .plt or .init.plt sections.\n");
return -ENOEXEC;
}
@@ -189,7 +186,7 @@ static uint32_t do_plt_call(void *location,
{
struct ppc_plt_entry *entry;
- DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
+ pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
/* Init, or core PLT? */
if (location >= mod->module_core
&& location < mod->module_core + mod->core_size)
@@ -208,7 +205,7 @@ static uint32_t do_plt_call(void *location,
entry->jump[2] = 0x7d8903a6; /* mtctr r12 */
entry->jump[3] = 0x4e800420; /* bctr */
- DEBUGP("Initialized plt for 0x%x at %p\n", val, entry);
+ pr_debug("Initialized plt for 0x%x at %p\n", val, entry);
return (uint32_t)entry;
}
@@ -224,7 +221,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
uint32_t *location;
uint32_t value;
- DEBUGP("Applying ADD relocate section %u to %u\n", relsec,
+ pr_debug("Applying ADD relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
/* This is where to make the change */
@@ -268,17 +265,17 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
sechdrs, module);
/* Only replace bits 2 through 26 */
- DEBUGP("REL24 value = %08X. location = %08X\n",
+ pr_debug("REL24 value = %08X. location = %08X\n",
value, (uint32_t)location);
- DEBUGP("Location before: %08X.\n",
+ pr_debug("Location before: %08X.\n",
*(uint32_t *)location);
*(uint32_t *)location
= (*(uint32_t *)location & ~0x03fffffc)
| ((value - (uint32_t)location)
& 0x03fffffc);
- DEBUGP("Location after: %08X.\n",
+ pr_debug("Location after: %08X.\n",
*(uint32_t *)location);
- DEBUGP("ie. jump to %08X+%08X = %08X\n",
+ pr_debug("ie. jump to %08X+%08X = %08X\n",
*(uint32_t *)location & 0x03fffffc,
(uint32_t)location,
(*(uint32_t *)location & 0x03fffffc)
@@ -291,7 +288,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
break;
default:
- printk("%s: unknown ADD relocation: %u\n",
+ pr_err("%s: unknown ADD relocation: %u\n",
module->name,
ELF32_R_TYPE(rela[i].r_info));
return -ENOEXEC;
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index d807ee626af9..68384514506b 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -15,6 +15,9 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
@@ -36,11 +39,6 @@
Using a magic allocator which places modules within 32MB solves
this, and makes other things simpler. Anton?
--RR. */
-#if 0
-#define DEBUGP printk
-#else
-#define DEBUGP(fmt , ...)
-#endif
#if defined(_CALL_ELF) && _CALL_ELF == 2
#define R2_STACK_OFFSET 24
@@ -279,8 +277,8 @@ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
/* Every relocated section... */
for (i = 1; i < hdr->e_shnum; i++) {
if (sechdrs[i].sh_type == SHT_RELA) {
- DEBUGP("Found relocations in section %u\n", i);
- DEBUGP("Ptr: %p. Number: %lu\n",
+ pr_debug("Found relocations in section %u\n", i);
+ pr_debug("Ptr: %p. Number: %Lu\n",
(void *)sechdrs[i].sh_addr,
sechdrs[i].sh_size / sizeof(Elf64_Rela));
@@ -304,7 +302,7 @@ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
relocs++;
#endif
- DEBUGP("Looks like a total of %lu stubs, max\n", relocs);
+ pr_debug("Looks like a total of %lu stubs, max\n", relocs);
return relocs * sizeof(struct ppc64_stub_entry);
}
@@ -390,7 +388,7 @@ int module_frob_arch_sections(Elf64_Ehdr *hdr,
}
if (!me->arch.stubs_section) {
- printk("%s: doesn't contain .stubs.\n", me->name);
+ pr_err("%s: doesn't contain .stubs.\n", me->name);
return -ENOEXEC;
}
@@ -434,11 +432,11 @@ static inline int create_stub(Elf64_Shdr *sechdrs,
/* Stub uses address relative to r2. */
reladdr = (unsigned long)entry - my_r2(sechdrs, me);
if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
- printk("%s: Address %p of stub out of range of %p.\n",
+ pr_err("%s: Address %p of stub out of range of %p.\n",
me->name, (void *)reladdr, (void *)my_r2);
return 0;
}
- DEBUGP("Stub %p get data from reladdr %li\n", entry, reladdr);
+ pr_debug("Stub %p get data from reladdr %li\n", entry, reladdr);
entry->jump[0] |= PPC_HA(reladdr);
entry->jump[1] |= PPC_LO(reladdr);
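
create_stub() above splits the r2-relative offset into a high-adjusted and a low 16-bit half before patching them into the stub's addis/addi pair. Because the low half is sign-extended by the instruction that consumes it, the high half has to be taken from the offset plus 0x8000. A quick userspace check of that round trip (the PPC_HA/PPC_LO definitions below mirror the helpers this code relies on and are stated here as an assumption; the offset value is arbitrary):

#include <stdio.h>

#define PPC_LO(v)	((v) & 0xffff)
#define PPC_HI(v)	(((v) >> 16) & 0xffff)
#define PPC_HA(v)	PPC_HI((v) + 0x8000)

int main(void)
{
	long reladdr = 0x1234abcd;		/* arbitrary r2-relative offset */
	unsigned int hi = PPC_HA(reladdr);
	unsigned int lo = PPC_LO(reladdr);

	/* addis materialises hi << 16; the following addi sign-extends lo,
	 * which is why the high half was pre-rounded by 0x8000. */
	long rebuilt = ((long)hi << 16) + (short)lo;

	printf("ha=0x%04x lo=0x%04x rebuilt=0x%lx\n", hi, lo, rebuilt);
	return 0;
}
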
@@ -477,7 +475,7 @@ static unsigned long stub_for_addr(Elf64_Shdr *sechdrs,
static int restore_r2(u32 *instruction, struct module *me)
{
if (*instruction != PPC_INST_NOP) {
- printk("%s: Expect noop after relocate, got %08x\n",
+ pr_err("%s: Expect noop after relocate, got %08x\n",
me->name, *instruction);
return 0;
}
@@ -498,7 +496,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
unsigned long *location;
unsigned long value;
- DEBUGP("Applying ADD relocate section %u to %u\n", relsec,
+ pr_debug("Applying ADD relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
/* First time we're called, we can fix up .TOC. */
@@ -519,7 +517,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
+ ELF64_R_SYM(rela[i].r_info);
- DEBUGP("RELOC at %p: %li-type as %s (%lu) + %li\n",
+ pr_debug("RELOC at %p: %li-type as %s (0x%lx) + %li\n",
location, (long)ELF64_R_TYPE(rela[i].r_info),
strtab + sym->st_name, (unsigned long)sym->st_value,
(long)rela[i].r_addend);
@@ -546,7 +544,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
/* Subtract TOC pointer */
value -= my_r2(sechdrs, me);
if (value + 0x8000 > 0xffff) {
- printk("%s: bad TOC16 relocation (%lu)\n",
+ pr_err("%s: bad TOC16 relocation (0x%lx)\n",
me->name, value);
return -ENOEXEC;
}
@@ -567,7 +565,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
/* Subtract TOC pointer */
value -= my_r2(sechdrs, me);
if ((value & 3) != 0 || value + 0x8000 > 0xffff) {
- printk("%s: bad TOC16_DS relocation (%lu)\n",
+ pr_err("%s: bad TOC16_DS relocation (0x%lx)\n",
me->name, value);
return -ENOEXEC;
}
@@ -580,7 +578,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
/* Subtract TOC pointer */
value -= my_r2(sechdrs, me);
if ((value & 3) != 0) {
- printk("%s: bad TOC16_LO_DS relocation (%lu)\n",
+ pr_err("%s: bad TOC16_LO_DS relocation (0x%lx)\n",
me->name, value);
return -ENOEXEC;
}
@@ -613,7 +611,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
/* Convert value to relative */
value -= (unsigned long)location;
if (value + 0x2000000 > 0x3ffffff || (value & 3) != 0){
- printk("%s: REL24 %li out of range!\n",
+ pr_err("%s: REL24 %li out of range!\n",
me->name, (long int)value);
return -ENOEXEC;
}
@@ -655,7 +653,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
break;
default:
- printk("%s: Unknown ADD relocation: %lu\n",
+ pr_err("%s: Unknown ADD relocation: %lu\n",
me->name,
(unsigned long)ELF64_R_TYPE(rela[i].r_info));
return -ENOEXEC;
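
The switch from the file-local DEBUGP macro to pr_debug(), together with the pr_fmt() definition added at the top of the file, gives every message an automatic module-name prefix and lets debug output be enabled at run time through dynamic debug instead of editing an #if 0 block. A minimal userspace sketch of how pr_fmt() prefixing composes (the fprintf-based macros are simplified stand-ins for the kernel's printk wrappers, not the real definitions):

#include <stdio.h>

#define pr_fmt(fmt)		"module_64: " fmt	/* the kernel uses KBUILD_MODNAME */
#define pr_err(fmt, ...)	fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)
#define pr_debug(fmt, ...)	fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_err("%s: doesn't contain .stubs.\n", "example_mod");
	pr_debug("Looks like a total of %lu stubs, max\n", 4UL);
	return 0;
}
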
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index 28b898e68185..34f7c9b7cd96 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -567,7 +567,7 @@ static int __init nvram_init(void)
return rc;
}
-void __exit nvram_cleanup(void)
+static void __exit nvram_cleanup(void)
{
misc_deregister( &nvram_dev );
}
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
index a7b743076720..f87bc1b4bdda 100644
--- a/arch/powerpc/kernel/of_platform.c
+++ b/arch/powerpc/kernel/of_platform.c
@@ -97,7 +97,7 @@ static int of_pci_phb_probe(struct platform_device *dev)
return 0;
}
-static struct of_device_id of_pci_phb_ids[] = {
+static const struct of_device_id of_pci_phb_ids[] = {
{ .type = "pci", },
{ .type = "pcix", },
{ .type = "pcie", },
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index b2814e23e1ed..bd70a51d5747 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -1140,7 +1140,7 @@ static int reparent_resources(struct resource *parent,
* as well.
*/
-void pcibios_allocate_bus_resources(struct pci_bus *bus)
+static void pcibios_allocate_bus_resources(struct pci_bus *bus)
{
struct pci_bus *b;
int i;
@@ -1561,7 +1561,6 @@ EARLY_PCI_OP(write, byte, u8)
EARLY_PCI_OP(write, word, u16)
EARLY_PCI_OP(write, dword, u32)
-extern int pci_bus_find_capability (struct pci_bus *bus, unsigned int devfn, int cap);
int early_find_capability(struct pci_controller *hose, int bus, int devfn,
int cap)
{
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index 44562aa97f16..e6245e9c7d8d 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -38,7 +38,7 @@ static u32 get_int_prop(struct device_node *np, const char *name, u32 def)
* @addr0: value of 1st cell of a device tree PCI address.
* @bridge: Set this flag if the address is from a bridge 'ranges' property
*/
-unsigned int pci_parse_of_flags(u32 addr0, int bridge)
+static unsigned int pci_parse_of_flags(u32 addr0, int bridge)
{
unsigned int flags = 0;
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 48d17d6fca5b..c4dfff6c2719 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -1,207 +1,41 @@
-#include <linux/export.h>
-#include <linux/threads.h>
-#include <linux/smp.h>
-#include <linux/sched.h>
-#include <linux/elfcore.h>
-#include <linux/string.h>
-#include <linux/interrupt.h>
-#include <linux/screen_info.h>
-#include <linux/vt_kern.h>
-#include <linux/nvram.h>
-#include <linux/irq.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/bitops.h>
+#include <linux/ftrace.h>
+#include <linux/mm.h>
-#include <asm/page.h>
#include <asm/processor.h>
-#include <asm/cacheflush.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <linux/atomic.h>
-#include <asm/checksum.h>
-#include <asm/pgtable.h>
-#include <asm/tlbflush.h>
-#include <linux/adb.h>
-#include <linux/cuda.h>
-#include <linux/pmu.h>
-#include <asm/prom.h>
-#include <asm/pci-bridge.h>
-#include <asm/irq.h>
-#include <asm/pmac_feature.h>
-#include <asm/dma.h>
-#include <asm/machdep.h>
-#include <asm/hw_irq.h>
-#include <asm/nvram.h>
-#include <asm/mmu_context.h>
-#include <asm/backlight.h>
-#include <asm/time.h>
-#include <asm/cputable.h>
-#include <asm/btext.h>
-#include <asm/div64.h>
-#include <asm/signal.h>
-#include <asm/dcr.h>
-#include <asm/ftrace.h>
#include <asm/switch_to.h>
+#include <asm/cacheflush.h>
#include <asm/epapr_hcalls.h>
-#ifdef CONFIG_PPC32
-extern void transfer_to_handler(void);
-extern void do_IRQ(struct pt_regs *regs);
-extern void machine_check_exception(struct pt_regs *regs);
-extern void alignment_exception(struct pt_regs *regs);
-extern void program_check_exception(struct pt_regs *regs);
-extern void single_step_exception(struct pt_regs *regs);
-extern int sys_sigreturn(struct pt_regs *regs);
+EXPORT_SYMBOL(flush_dcache_range);
+EXPORT_SYMBOL(flush_icache_range);
-EXPORT_SYMBOL(clear_pages);
-EXPORT_SYMBOL(ISA_DMA_THRESHOLD);
-EXPORT_SYMBOL(DMA_MODE_READ);
-EXPORT_SYMBOL(DMA_MODE_WRITE);
+EXPORT_SYMBOL(empty_zero_page);
-EXPORT_SYMBOL(transfer_to_handler);
-EXPORT_SYMBOL(do_IRQ);
-EXPORT_SYMBOL(machine_check_exception);
-EXPORT_SYMBOL(alignment_exception);
-EXPORT_SYMBOL(program_check_exception);
-EXPORT_SYMBOL(single_step_exception);
-EXPORT_SYMBOL(sys_sigreturn);
-#endif
+long long __bswapdi2(long long);
+EXPORT_SYMBOL(__bswapdi2);
#ifdef CONFIG_FUNCTION_TRACER
EXPORT_SYMBOL(_mcount);
#endif
-EXPORT_SYMBOL(strcpy);
-EXPORT_SYMBOL(strncpy);
-EXPORT_SYMBOL(strcat);
-EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strcmp);
-EXPORT_SYMBOL(strncmp);
-
-#ifndef CONFIG_GENERIC_CSUM
-EXPORT_SYMBOL(csum_partial);
-EXPORT_SYMBOL(csum_partial_copy_generic);
-EXPORT_SYMBOL(ip_fast_csum);
-EXPORT_SYMBOL(csum_tcpudp_magic);
-#endif
-
-EXPORT_SYMBOL(__copy_tofrom_user);
-EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(copy_page);
-
-#if defined(CONFIG_PCI) && defined(CONFIG_PPC32)
-EXPORT_SYMBOL(isa_io_base);
-EXPORT_SYMBOL(isa_mem_base);
-EXPORT_SYMBOL(pci_dram_offset);
-#endif /* CONFIG_PCI */
-
-EXPORT_SYMBOL(start_thread);
-
#ifdef CONFIG_PPC_FPU
EXPORT_SYMBOL(giveup_fpu);
EXPORT_SYMBOL(load_fp_state);
EXPORT_SYMBOL(store_fp_state);
#endif
+
#ifdef CONFIG_ALTIVEC
EXPORT_SYMBOL(giveup_altivec);
EXPORT_SYMBOL(load_vr_state);
EXPORT_SYMBOL(store_vr_state);
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_VSX
-EXPORT_SYMBOL(giveup_vsx);
-EXPORT_SYMBOL_GPL(__giveup_vsx);
-#endif /* CONFIG_VSX */
-#ifdef CONFIG_SPE
-EXPORT_SYMBOL(giveup_spe);
-#endif /* CONFIG_SPE */
-
-#ifndef CONFIG_PPC64
-EXPORT_SYMBOL(flush_instruction_cache);
#endif
-EXPORT_SYMBOL(flush_dcache_range);
-EXPORT_SYMBOL(flush_icache_range);
-#ifdef CONFIG_SMP
-#ifdef CONFIG_PPC32
-EXPORT_SYMBOL(smp_hw_index);
-#endif
-#endif
-
-#ifdef CONFIG_ADB
-EXPORT_SYMBOL(adb_request);
-EXPORT_SYMBOL(adb_register);
-EXPORT_SYMBOL(adb_unregister);
-EXPORT_SYMBOL(adb_poll);
-EXPORT_SYMBOL(adb_try_handler_change);
-#endif /* CONFIG_ADB */
-#ifdef CONFIG_ADB_CUDA
-EXPORT_SYMBOL(cuda_request);
-EXPORT_SYMBOL(cuda_poll);
-#endif /* CONFIG_ADB_CUDA */
-EXPORT_SYMBOL(to_tm);
-
-#ifdef CONFIG_PPC32
-long long __ashrdi3(long long, int);
-long long __ashldi3(long long, int);
-long long __lshrdi3(long long, int);
-EXPORT_SYMBOL(__ashrdi3);
-EXPORT_SYMBOL(__ashldi3);
-EXPORT_SYMBOL(__lshrdi3);
-int __ucmpdi2(unsigned long long, unsigned long long);
-EXPORT_SYMBOL(__ucmpdi2);
-int __cmpdi2(long long, long long);
-EXPORT_SYMBOL(__cmpdi2);
-#endif
-long long __bswapdi2(long long);
-EXPORT_SYMBOL(__bswapdi2);
-EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(memcmp);
-EXPORT_SYMBOL(memchr);
-
-#if defined(CONFIG_FB_VGA16_MODULE)
-EXPORT_SYMBOL(screen_info);
-#endif
-
-#ifdef CONFIG_PPC32
-EXPORT_SYMBOL(timer_interrupt);
-EXPORT_SYMBOL(tb_ticks_per_jiffy);
-EXPORT_SYMBOL(cacheable_memcpy);
-EXPORT_SYMBOL(cacheable_memzero);
-#endif
-
-#ifdef CONFIG_PPC32
-EXPORT_SYMBOL(switch_mmu_context);
-#endif
-
-#ifdef CONFIG_PPC_STD_MMU_32
-extern long mol_trampoline;
-EXPORT_SYMBOL(mol_trampoline); /* For MOL */
-EXPORT_SYMBOL(flush_hash_pages); /* For MOL */
-#ifdef CONFIG_SMP
-extern int mmu_hash_lock;
-EXPORT_SYMBOL(mmu_hash_lock); /* For MOL */
-#endif /* CONFIG_SMP */
-extern long *intercept_table;
-EXPORT_SYMBOL(intercept_table);
-#endif /* CONFIG_PPC_STD_MMU_32 */
-#ifdef CONFIG_PPC_DCR_NATIVE
-EXPORT_SYMBOL(__mtdcr);
-EXPORT_SYMBOL(__mfdcr);
-#endif
-EXPORT_SYMBOL(empty_zero_page);
-
-#ifdef CONFIG_PPC64
-EXPORT_SYMBOL(__arch_hweight8);
-EXPORT_SYMBOL(__arch_hweight16);
-EXPORT_SYMBOL(__arch_hweight32);
-EXPORT_SYMBOL(__arch_hweight64);
+#ifdef CONFIG_VSX
+EXPORT_SYMBOL_GPL(__giveup_vsx);
#endif
-#ifdef CONFIG_PPC_BOOK3S_64
-EXPORT_SYMBOL_GPL(mmu_psize_defs);
+#ifdef CONFIG_SPE
+EXPORT_SYMBOL(giveup_spe);
#endif
#ifdef CONFIG_EPAPR_PARAVIRT
diff --git a/arch/powerpc/kernel/ppc_ksyms_32.c b/arch/powerpc/kernel/ppc_ksyms_32.c
new file mode 100644
index 000000000000..30ddd8a24eee
--- /dev/null
+++ b/arch/powerpc/kernel/ppc_ksyms_32.c
@@ -0,0 +1,61 @@
+#include <linux/export.h>
+#include <linux/smp.h>
+
+#include <asm/page.h>
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <asm/hw_irq.h>
+#include <asm/time.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/dcr.h>
+
+EXPORT_SYMBOL(clear_pages);
+EXPORT_SYMBOL(ISA_DMA_THRESHOLD);
+EXPORT_SYMBOL(DMA_MODE_READ);
+EXPORT_SYMBOL(DMA_MODE_WRITE);
+
+#if defined(CONFIG_PCI)
+EXPORT_SYMBOL(isa_io_base);
+EXPORT_SYMBOL(isa_mem_base);
+EXPORT_SYMBOL(pci_dram_offset);
+#endif
+
+#ifdef CONFIG_SMP
+EXPORT_SYMBOL(smp_hw_index);
+#endif
+
+long long __ashrdi3(long long, int);
+long long __ashldi3(long long, int);
+long long __lshrdi3(long long, int);
+int __ucmpdi2(unsigned long long, unsigned long long);
+int __cmpdi2(long long, long long);
+EXPORT_SYMBOL(__ashrdi3);
+EXPORT_SYMBOL(__ashldi3);
+EXPORT_SYMBOL(__lshrdi3);
+EXPORT_SYMBOL(__ucmpdi2);
+EXPORT_SYMBOL(__cmpdi2);
+
+EXPORT_SYMBOL(timer_interrupt);
+EXPORT_SYMBOL(tb_ticks_per_jiffy);
+
+EXPORT_SYMBOL(switch_mmu_context);
+
+#ifdef CONFIG_PPC_STD_MMU_32
+extern long mol_trampoline;
+EXPORT_SYMBOL(mol_trampoline); /* For MOL */
+EXPORT_SYMBOL(flush_hash_pages); /* For MOL */
+#ifdef CONFIG_SMP
+extern int mmu_hash_lock;
+EXPORT_SYMBOL(mmu_hash_lock); /* For MOL */
+#endif /* CONFIG_SMP */
+extern long *intercept_table;
+EXPORT_SYMBOL(intercept_table);
+#endif /* CONFIG_PPC_STD_MMU_32 */
+
+#ifdef CONFIG_PPC_DCR_NATIVE
+EXPORT_SYMBOL(__mtdcr);
+EXPORT_SYMBOL(__mfdcr);
+#endif
+
+EXPORT_SYMBOL(flush_instruction_cache);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index bf44ae962ab8..aa1df89c8b2a 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -228,6 +228,7 @@ void giveup_vsx(struct task_struct *tsk)
giveup_altivec_maybe_transactional(tsk);
__giveup_vsx(tsk);
}
+EXPORT_SYMBOL(giveup_vsx);
void flush_vsx_to_thread(struct task_struct *tsk)
{
@@ -1316,6 +1317,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
current->thread.tm_tfiar = 0;
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
}
+EXPORT_SYMBOL(start_thread);
#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
| PR_FP_EXC_RES | PR_FP_EXC_INV)
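
The two exports added above (and the split of ppc_ksyms.c into smaller per-area files elsewhere in this series) follow the convention of placing EXPORT_SYMBOL() directly below the function it exports rather than collecting exports in one catch-all file, so the export is visible next to the definition it belongs to. A minimal sketch of that placement (the file and function names are hypothetical; only the layout is the point):

/* example.c - hypothetical file showing the export-next-to-definition style */
#include <linux/export.h>

int example_compute(int x)
{
	return x * 2;
}
EXPORT_SYMBOL(example_compute);		/* export sits right below the definition */
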
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 4e139f8a69ef..099f27e6d1b0 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -386,8 +386,9 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
return 0;
}
-int __init early_init_dt_scan_chosen_ppc(unsigned long node, const char *uname,
- int depth, void *data)
+static int __init early_init_dt_scan_chosen_ppc(unsigned long node,
+ const char *uname,
+ int depth, void *data)
{
const unsigned long *lprop; /* All these set by kernel, so no need to convert endian */
@@ -641,6 +642,10 @@ void __init early_init_devtree(void *params)
DBG(" -> early_init_devtree(%p)\n", params);
+ /* Too early to BUG_ON(), do it by hand */
+ if (!early_init_dt_verify(params))
+ panic("BUG: Failed verifying flat device tree, bad version?");
+
/* Setup flat device-tree pointer */
initial_boot_params = params;
@@ -663,14 +668,12 @@ void __init early_init_devtree(void *params)
* device-tree, including the platform type, initrd location and
* size, TCE reserve, and more ...
*/
- of_scan_flat_dt(early_init_dt_scan_chosen_ppc, cmd_line);
+ of_scan_flat_dt(early_init_dt_scan_chosen_ppc, boot_command_line);
/* Scan memory nodes and rebuild MEMBLOCKs */
of_scan_flat_dt(early_init_dt_scan_root, NULL);
of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL);
- /* Save command line for /proc/cmdline and then parse parameters */
- strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
parse_early_param();
/* make sure we've parsed cmdline for mem= before this */
diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh
index fe8e54b9ef7d..12640f7e726b 100644
--- a/arch/powerpc/kernel/prom_init_check.sh
+++ b/arch/powerpc/kernel/prom_init_check.sh
@@ -50,24 +50,14 @@ do
done
# ignore register save/restore functions
- if [ "${UNDEF:0:9}" = "_restgpr_" ]; then
+ case $UNDEF in
+ _restgpr_*|_restgpr0_*|_rest32gpr_*)
OK=1
- fi
- if [ "${UNDEF:0:10}" = "_restgpr0_" ]; then
- OK=1
- fi
- if [ "${UNDEF:0:11}" = "_rest32gpr_" ]; then
- OK=1
- fi
- if [ "${UNDEF:0:9}" = "_savegpr_" ]; then
+ ;;
+ _savegpr_*|_savegpr0_*|_save32gpr_*)
OK=1
- fi
- if [ "${UNDEF:0:10}" = "_savegpr0_" ]; then
- OK=1
- fi
- if [ "${UNDEF:0:11}" = "_save32gpr_" ]; then
- OK=1
- fi
+ ;;
+ esac
if [ $OK -eq 0 ]; then
ERROR=1
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 2e3d2bf536c5..cdb404ea3468 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -932,7 +932,7 @@ void ptrace_triggered(struct perf_event *bp,
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
-int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
+static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
unsigned long data)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
index e736387fee6a..5a2c049c1c61 100644
--- a/arch/powerpc/kernel/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -286,7 +286,7 @@ static void prrn_work_fn(struct work_struct *work)
static DECLARE_WORK(prrn_work, prrn_work_fn);
-void prrn_schedule_update(u32 scope)
+static void prrn_schedule_update(u32 scope)
{
flush_work(&prrn_work);
prrn_update_scope = scope;
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 1b0e26013a62..1362cd62b3fa 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -81,8 +81,6 @@ EXPORT_SYMBOL_GPL(boot_cpuid);
unsigned long klimit = (unsigned long) _end;
-char cmd_line[COMMAND_LINE_SIZE];
-
/*
* This still seems to be needed... -- paulus
*/
@@ -94,6 +92,9 @@ struct screen_info screen_info = {
.orig_video_isVGA = 1,
.orig_video_points = 16
};
+#if defined(CONFIG_FB_VGA16_MODULE)
+EXPORT_SYMBOL(screen_info);
+#endif
/* Variables required to store legacy IO irq routing */
int of_i8042_kbd_irq;
@@ -382,7 +383,7 @@ void __init check_for_initrd(void)
initrd_start = initrd_end = 0;
if (initrd_start)
- printk("Found initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end);
+ pr_info("Found initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end);
DBG(" <- check_for_initrd()\n");
#endif /* CONFIG_BLK_DEV_INITRD */
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index ea4fda60e57b..07831ed0d9ef 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -268,7 +268,7 @@ static void __init exc_lvl_early_init(void)
/* Warning, IO base is not yet inited */
void __init setup_arch(char **cmdline_p)
{
- *cmdline_p = cmd_line;
+ *cmdline_p = boot_command_line;
/* so udelay does something sensible, assume <= 1000 bogomips */
loops_per_jiffy = 500000000 / HZ;
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 75d62d63fe68..cd07d79ad21c 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -525,21 +525,31 @@ void __init setup_system(void)
printk("Starting Linux PPC64 %s\n", init_utsname()->version);
printk("-----------------------------------------------------\n");
- printk("ppc64_pft_size = 0x%llx\n", ppc64_pft_size);
- printk("physicalMemorySize = 0x%llx\n", memblock_phys_mem_size());
+ printk("ppc64_pft_size = 0x%llx\n", ppc64_pft_size);
+ printk("phys_mem_size = 0x%llx\n", memblock_phys_mem_size());
+
if (ppc64_caches.dline_size != 0x80)
- printk("ppc64_caches.dcache_line_size = 0x%x\n",
- ppc64_caches.dline_size);
+ printk("dcache_line_size = 0x%x\n", ppc64_caches.dline_size);
if (ppc64_caches.iline_size != 0x80)
- printk("ppc64_caches.icache_line_size = 0x%x\n",
- ppc64_caches.iline_size);
+ printk("icache_line_size = 0x%x\n", ppc64_caches.iline_size);
+
+ printk("cpu_features = 0x%016lx\n", cur_cpu_spec->cpu_features);
+ printk(" possible = 0x%016lx\n", CPU_FTRS_POSSIBLE);
+ printk(" always = 0x%016lx\n", CPU_FTRS_ALWAYS);
+ printk("cpu_user_features = 0x%08x 0x%08x\n", cur_cpu_spec->cpu_user_features,
+ cur_cpu_spec->cpu_user_features2);
+ printk("mmu_features = 0x%08x\n", cur_cpu_spec->mmu_features);
+ printk("firmware_features = 0x%016lx\n", powerpc_firmware_features);
+
#ifdef CONFIG_PPC_STD_MMU_64
if (htab_address)
- printk("htab_address = 0x%p\n", htab_address);
- printk("htab_hash_mask = 0x%lx\n", htab_hash_mask);
-#endif /* CONFIG_PPC_STD_MMU_64 */
+ printk("htab_address = 0x%p\n", htab_address);
+
+ printk("htab_hash_mask = 0x%lx\n", htab_hash_mask);
+#endif
+
if (PHYSICAL_START > 0)
- printk("physical_start = 0x%llx\n",
+ printk("physical_start = 0x%llx\n",
(unsigned long long)PHYSICAL_START);
printk("-----------------------------------------------------\n");
@@ -657,7 +667,7 @@ void __init setup_arch(char **cmdline_p)
{
ppc64_boot_msg(0x12, "Setup Arch");
- *cmdline_p = cmd_line;
+ *cmdline_p = boot_command_line;
/*
* Set cache line size based on type of cpu as a default.
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index a0738af4aba6..71e186d5f331 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -52,6 +52,7 @@
#endif
#include <asm/vdso.h>
#include <asm/debug.h>
+#include <asm/kexec.h>
#ifdef DEBUG
#include <asm/udbg.h>
@@ -379,8 +380,11 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
/*
* numa_node_id() works after this.
*/
- set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
- set_cpu_numa_mem(cpu, local_memory_node(numa_cpu_lookup_table[cpu]));
+ if (cpu_present(cpu)) {
+ set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
+ set_cpu_numa_mem(cpu,
+ local_memory_node(numa_cpu_lookup_table[cpu]));
+ }
}
cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
@@ -728,6 +732,9 @@ void start_secondary(void *unused)
}
traverse_core_siblings(cpu, true);
+ set_numa_node(numa_cpu_lookup_table[cpu]);
+ set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
+
smp_wmb();
notify_cpu_starting(cpu);
set_cpu_online(cpu, true);
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 368ab374d33c..7505599c2593 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -479,7 +479,7 @@ void arch_irq_work_raise(void)
#endif /* CONFIG_IRQ_WORK */
-void __timer_interrupt(void)
+static void __timer_interrupt(void)
{
struct pt_regs *regs = get_irq_regs();
u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
@@ -643,7 +643,7 @@ static int __init get_freq(char *name, int cells, unsigned long *val)
return found;
}
-void start_cpu_decrementer(void)
+static void start_cpu_decrementer(void)
{
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
/* Clear any pending timer interrupts */
@@ -1024,6 +1024,7 @@ void to_tm(int tim, struct rtc_time * tm)
*/
GregorianDay(tm);
}
+EXPORT_SYMBOL(to_tm);
/*
* Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 59fa2de9546d..9f342f134ae4 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -10,7 +10,7 @@ CFLAGS_REMOVE_code-patching.o = -pg
CFLAGS_REMOVE_feature-fixups.o = -pg
obj-y := string.o alloc.o \
- crtsavres.o
+ crtsavres.o ppc_ksyms.o
obj-$(CONFIG_PPC32) += div64.o copy_32.o
obj-$(CONFIG_HAS_IOMEM) += devres.o
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 7a8a7487cee8..7ce3870d7ddd 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -164,7 +164,7 @@ static long calc_offset(struct fixup_entry *entry, unsigned int *p)
return (unsigned long)p - (unsigned long)entry;
}
-void test_basic_patching(void)
+static void test_basic_patching(void)
{
extern unsigned int ftr_fixup_test1;
extern unsigned int end_ftr_fixup_test1;
diff --git a/arch/powerpc/lib/ppc_ksyms.c b/arch/powerpc/lib/ppc_ksyms.c
new file mode 100644
index 000000000000..f993959647b5
--- /dev/null
+++ b/arch/powerpc/lib/ppc_ksyms.c
@@ -0,0 +1,39 @@
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/bitops.h>
+#include <net/checksum.h>
+
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(memcmp);
+EXPORT_SYMBOL(memchr);
+#ifdef CONFIG_PPC32
+EXPORT_SYMBOL(cacheable_memcpy);
+EXPORT_SYMBOL(cacheable_memzero);
+#endif
+
+EXPORT_SYMBOL(strcpy);
+EXPORT_SYMBOL(strncpy);
+EXPORT_SYMBOL(strcat);
+EXPORT_SYMBOL(strlen);
+EXPORT_SYMBOL(strcmp);
+EXPORT_SYMBOL(strncmp);
+
+#ifndef CONFIG_GENERIC_CSUM
+EXPORT_SYMBOL(csum_partial);
+EXPORT_SYMBOL(csum_partial_copy_generic);
+EXPORT_SYMBOL(ip_fast_csum);
+EXPORT_SYMBOL(csum_tcpudp_magic);
+#endif
+
+EXPORT_SYMBOL(__copy_tofrom_user);
+EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(copy_page);
+
+#ifdef CONFIG_PPC64
+EXPORT_SYMBOL(__arch_hweight8);
+EXPORT_SYMBOL(__arch_hweight16);
+EXPORT_SYMBOL(__arch_hweight32);
+EXPORT_SYMBOL(__arch_hweight64);
+#endif
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 5c09f365c842..54651fc2d412 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -98,13 +98,8 @@ static unsigned long __kprobes dform_ea(unsigned int instr, struct pt_regs *regs
ra = (instr >> 16) & 0x1f;
ea = (signed short) instr; /* sign-extend */
- if (ra) {
+ if (ra)
ea += regs->gpr[ra];
- if (instr & 0x04000000) { /* update forms */
- if ((instr>>26) != 47) /* stmw is not an update form */
- regs->gpr[ra] = ea;
- }
- }
return truncate_if_32bit(regs->msr, ea);
}
@@ -120,11 +115,8 @@ static unsigned long __kprobes dsform_ea(unsigned int instr, struct pt_regs *reg
ra = (instr >> 16) & 0x1f;
ea = (signed short) (instr & ~3); /* sign-extend */
- if (ra) {
+ if (ra)
ea += regs->gpr[ra];
- if ((instr & 3) == 1) /* update forms */
- regs->gpr[ra] = ea;
- }
return truncate_if_32bit(regs->msr, ea);
}
@@ -133,8 +125,8 @@ static unsigned long __kprobes dsform_ea(unsigned int instr, struct pt_regs *reg
/*
* Calculate effective address for an X-form instruction
*/
-static unsigned long __kprobes xform_ea(unsigned int instr, struct pt_regs *regs,
- int do_update)
+static unsigned long __kprobes xform_ea(unsigned int instr,
+ struct pt_regs *regs)
{
int ra, rb;
unsigned long ea;
@@ -142,11 +134,8 @@ static unsigned long __kprobes xform_ea(unsigned int instr, struct pt_regs *regs
ra = (instr >> 16) & 0x1f;
rb = (instr >> 11) & 0x1f;
ea = regs->gpr[rb];
- if (ra) {
+ if (ra)
ea += regs->gpr[ra];
- if (do_update) /* update forms */
- regs->gpr[ra] = ea;
- }
return truncate_if_32bit(regs->msr, ea);
}
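
After this change the *_ea() helpers compute only the effective address; the rA write-back for update-form instructions is recorded through op->update_reg and applied later by emulate_step() once the access is known to have succeeded. For reference, a D-form effective address is simply the sign-extended 16-bit displacement plus GPR[rA], or the displacement alone when rA is 0. A standalone check (register contents and the encoded instruction are made up):

#include <stdio.h>

/* D-form EA: sign-extended low 16 bits of the instruction + (rA ? GPR[rA] : 0). */
static unsigned long dform_ea(unsigned int instr, const unsigned long *gpr)
{
	int ra = (instr >> 16) & 0x1f;
	long ea = (signed short)instr;		/* sign-extend the displacement */

	if (ra)
		ea += gpr[ra];
	return ea;
}

int main(void)
{
	unsigned long gpr[32] = { [1] = 0x2000 };

	/* 0x8061fff8 encodes lwz r3,-8(r1): opcode 32, rD=3, rA=1, d=-8 */
	printf("ea = 0x%lx\n", dform_ea(0x8061fff8, gpr));	/* prints 0x1ff8 */
	return 0;
}
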
@@ -611,6 +600,23 @@ static void __kprobes do_cmp_unsigned(struct pt_regs *regs, unsigned long v1,
regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}
+static int __kprobes trap_compare(long v1, long v2)
+{
+ int ret = 0;
+
+ if (v1 < v2)
+ ret |= 0x10;
+ else if (v1 > v2)
+ ret |= 0x08;
+ else
+ ret |= 0x04;
+ if ((unsigned long)v1 < (unsigned long)v2)
+ ret |= 0x02;
+ else if ((unsigned long)v1 > (unsigned long)v2)
+ ret |= 0x01;
+ return ret;
+}
+
/*
* Elements of 32-bit rotate and mask instructions.
*/
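
The new trap_compare() helper maps a comparison onto the PowerPC TO-field bit positions: 0x10 for signed less-than, 0x08 for signed greater-than, 0x04 for equal, and 0x02/0x01 for the unsigned orderings. The tw/td and twi/tdi cases added further down then trap whenever the TO mask taken from the instruction ANDs non-zero with this result. A small userspace sketch of that check (the TO value and operands are chosen only for illustration):

#include <stdio.h>

/* Mirror of trap_compare(): build a condition bitmask in TO-field order. */
static int trap_compare(long v1, long v2)
{
	int ret = 0;

	if (v1 < v2)
		ret |= 0x10;			/* signed less than */
	else if (v1 > v2)
		ret |= 0x08;			/* signed greater than */
	else
		ret |= 0x04;			/* equal */
	if ((unsigned long)v1 < (unsigned long)v2)
		ret |= 0x02;			/* unsigned less than */
	else if ((unsigned long)v1 > (unsigned long)v2)
		ret |= 0x01;			/* unsigned greater than */
	return ret;
}

int main(void)
{
	int to = 0x18;				/* TO = lt | gt (signed): trap if ra != rb */

	printf("trap: %s\n", (to & trap_compare(-1, 5)) ? "yes" : "no");	/* yes */
	printf("trap: %s\n", (to & trap_compare(7, 7)) ? "yes" : "no");		/* no */
	return 0;
}
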
@@ -627,26 +633,27 @@ static void __kprobes do_cmp_unsigned(struct pt_regs *regs, unsigned long v1,
#define ROTATE(x, n) ((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
/*
- * Emulate instructions that cause a transfer of control,
- * loads and stores, and a few other instructions.
- * Returns 1 if the step was emulated, 0 if not,
- * or -1 if the instruction is one that should not be stepped,
- * such as an rfid, or a mtmsrd that would clear MSR_RI.
+ * Decode an instruction, and execute it if that can be done just by
+ * modifying *regs (i.e. integer arithmetic and logical instructions,
+ * branches, and barrier instructions).
+ * Returns 1 if the instruction has been executed, or 0 if not.
+ * Sets *op to indicate what the instruction does.
*/
-int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
+int __kprobes analyse_instr(struct instruction_op *op, struct pt_regs *regs,
+ unsigned int instr)
{
unsigned int opcode, ra, rb, rd, spr, u;
unsigned long int imm;
unsigned long int val, val2;
- unsigned long int ea;
- unsigned int cr, mb, me, sh;
- int err;
- unsigned long old_ra, val3;
+ unsigned int mb, me, sh;
long ival;
+ op->type = COMPUTE;
+
opcode = instr >> 26;
switch (opcode) {
case 16: /* bc */
+ op->type = BRANCH;
imm = (signed short)(instr & 0xfffc);
if ((instr & 2) == 0)
imm += regs->nip;
@@ -659,26 +666,14 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
return 1;
#ifdef CONFIG_PPC64
case 17: /* sc */
- /*
- * N.B. this uses knowledge about how the syscall
- * entry code works. If that is changed, this will
- * need to be changed also.
- */
- if (regs->gpr[0] == 0x1ebe &&
- cpu_has_feature(CPU_FTR_REAL_LE)) {
- regs->msr ^= MSR_LE;
- goto instr_done;
- }
- regs->gpr[9] = regs->gpr[13];
- regs->gpr[10] = MSR_KERNEL;
- regs->gpr[11] = regs->nip + 4;
- regs->gpr[12] = regs->msr & MSR_MASK;
- regs->gpr[13] = (unsigned long) get_paca();
- regs->nip = (unsigned long) &system_call_common;
- regs->msr = MSR_KERNEL;
- return 1;
+ if ((instr & 0xfe2) == 2)
+ op->type = SYSCALL;
+ else
+ op->type = UNKNOWN;
+ return 0;
#endif
case 18: /* b */
+ op->type = BRANCH;
imm = instr & 0x03fffffc;
if (imm & 0x02000000)
imm -= 0x04000000;
@@ -691,8 +686,16 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
return 1;
case 19:
switch ((instr >> 1) & 0x3ff) {
+ case 0: /* mcrf */
+ rd = (instr >> 21) & 0x1c;
+ ra = (instr >> 16) & 0x1c;
+ val = (regs->ccr >> ra) & 0xf;
+ regs->ccr = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
+ goto instr_done;
+
case 16: /* bclr */
case 528: /* bcctr */
+ op->type = BRANCH;
imm = (instr & 0x400)? regs->ctr: regs->link;
regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
imm = truncate_if_32bit(regs->msr, imm);
@@ -703,9 +706,13 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
return 1;
case 18: /* rfid, scary */
- return -1;
+ if (regs->msr & MSR_PR)
+ goto priv;
+ op->type = RFI;
+ return 0;
case 150: /* isync */
+ op->type = BARRIER;
isync();
goto instr_done;
@@ -731,6 +738,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
case 31:
switch ((instr >> 1) & 0x3ff) {
case 598: /* sync */
+ op->type = BARRIER;
#ifdef __powerpc64__
switch ((instr >> 21) & 3) {
case 1: /* lwsync */
@@ -745,6 +753,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
goto instr_done;
case 854: /* eieio */
+ op->type = BARRIER;
eieio();
goto instr_done;
}
@@ -760,6 +769,17 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
rb = (instr >> 11) & 0x1f;
switch (opcode) {
+#ifdef __powerpc64__
+ case 2: /* tdi */
+ if (rd & trap_compare(regs->gpr[ra], (short) instr))
+ goto trap;
+ goto instr_done;
+#endif
+ case 3: /* twi */
+ if (rd & trap_compare((int)regs->gpr[ra], (short) instr))
+ goto trap;
+ goto instr_done;
+
case 7: /* mulli */
regs->gpr[rd] = regs->gpr[ra] * (short) instr;
goto instr_done;
@@ -908,35 +928,44 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
case 31:
switch ((instr >> 1) & 0x3ff) {
+ case 4: /* tw */
+ if (rd == 0x1f ||
+ (rd & trap_compare((int)regs->gpr[ra],
+ (int)regs->gpr[rb])))
+ goto trap;
+ goto instr_done;
+#ifdef __powerpc64__
+ case 68: /* td */
+ if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb]))
+ goto trap;
+ goto instr_done;
+#endif
case 83: /* mfmsr */
if (regs->msr & MSR_PR)
- break;
- regs->gpr[rd] = regs->msr & MSR_MASK;
- goto instr_done;
+ goto priv;
+ op->type = MFMSR;
+ op->reg = rd;
+ return 0;
case 146: /* mtmsr */
if (regs->msr & MSR_PR)
- break;
- imm = regs->gpr[rd];
- if ((imm & MSR_RI) == 0)
- /* can't step mtmsr that would clear MSR_RI */
- return -1;
- regs->msr = imm;
- goto instr_done;
+ goto priv;
+ op->type = MTMSR;
+ op->reg = rd;
+ op->val = 0xffffffff & ~(MSR_ME | MSR_LE);
+ return 0;
#ifdef CONFIG_PPC64
case 178: /* mtmsrd */
- /* only MSR_EE and MSR_RI get changed if bit 15 set */
- /* mtmsrd doesn't change MSR_HV and MSR_ME */
if (regs->msr & MSR_PR)
- break;
- imm = (instr & 0x10000)? 0x8002: 0xefffffffffffefffUL;
- imm = (regs->msr & MSR_MASK & ~imm)
- | (regs->gpr[rd] & imm);
- if ((imm & MSR_RI) == 0)
- /* can't step mtmsrd that would clear MSR_RI */
- return -1;
- regs->msr = imm;
- goto instr_done;
+ goto priv;
+ op->type = MTMSR;
+ op->reg = rd;
+ /* only MSR_EE and MSR_RI get changed if bit 15 set */
+ /* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */
+ imm = (instr & 0x10000)? 0x8002: 0xefffffffffffeffeUL;
+ op->val = imm;
+ return 0;
#endif
+
case 19: /* mfcr */
regs->gpr[rd] = regs->ccr;
regs->gpr[rd] &= 0xffffffffUL;
@@ -954,33 +983,43 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
goto instr_done;
case 339: /* mfspr */
- spr = (instr >> 11) & 0x3ff;
+ spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
switch (spr) {
- case 0x20: /* mfxer */
+ case SPRN_XER: /* mfxer */
regs->gpr[rd] = regs->xer;
regs->gpr[rd] &= 0xffffffffUL;
goto instr_done;
- case 0x100: /* mflr */
+ case SPRN_LR: /* mflr */
regs->gpr[rd] = regs->link;
goto instr_done;
- case 0x120: /* mfctr */
+ case SPRN_CTR: /* mfctr */
regs->gpr[rd] = regs->ctr;
goto instr_done;
+ default:
+ op->type = MFSPR;
+ op->reg = rd;
+ op->spr = spr;
+ return 0;
}
break;
case 467: /* mtspr */
- spr = (instr >> 11) & 0x3ff;
+ spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
switch (spr) {
- case 0x20: /* mtxer */
+ case SPRN_XER: /* mtxer */
regs->xer = (regs->gpr[rd] & 0xffffffffUL);
goto instr_done;
- case 0x100: /* mtlr */
+ case SPRN_LR: /* mtlr */
regs->link = regs->gpr[rd];
goto instr_done;
- case 0x120: /* mtctr */
+ case SPRN_CTR: /* mtctr */
regs->ctr = regs->gpr[rd];
goto instr_done;
+ default:
+ op->type = MTSPR;
+ op->val = regs->gpr[rd];
+ op->spr = spr;
+ return 0;
}
break;
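
The mfspr/mtspr change above matters because the 10-bit SPR number is stored split and swapped in the instruction image: bits 16-20 carry the low five bits and bits 11-15 the high five bits. The old (instr >> 11) & 0x3ff therefore produced the halves in the wrong order and only worked because the case labels were swapped to match; the rewritten decode yields the architectural SPR number, which the new default cases pass on in op->spr. A standalone check of the corrected decode (0x7c0802a6 is the standard mflr r0 encoding; SPRN_LR is assumed to be 8 as in the kernel headers):

#include <stdio.h>

#define SPRN_LR	0x008				/* link register SPR number */

int main(void)
{
	unsigned int instr = 0x7c0802a6;	/* mflr r0 */
	unsigned int spr;

	/* SPR field: bits 16-20 = low 5 bits, bits 11-15 = high 5 bits */
	spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);

	printf("decoded SPR = %u (%s)\n", spr,
	       spr == SPRN_LR ? "SPRN_LR" : "unexpected");
	return 0;
}
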
@@ -1257,294 +1296,242 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
* Cache instructions
*/
case 54: /* dcbst */
- ea = xform_ea(instr, regs, 0);
- if (!address_ok(regs, ea, 8))
- return 0;
- err = 0;
- __cacheop_user_asmx(ea, err, "dcbst");
- if (err)
- return 0;
- goto instr_done;
+ op->type = MKOP(CACHEOP, DCBST, 0);
+ op->ea = xform_ea(instr, regs);
+ return 0;
case 86: /* dcbf */
- ea = xform_ea(instr, regs, 0);
- if (!address_ok(regs, ea, 8))
- return 0;
- err = 0;
- __cacheop_user_asmx(ea, err, "dcbf");
- if (err)
- return 0;
- goto instr_done;
+ op->type = MKOP(CACHEOP, DCBF, 0);
+ op->ea = xform_ea(instr, regs);
+ return 0;
case 246: /* dcbtst */
- if (rd == 0) {
- ea = xform_ea(instr, regs, 0);
- prefetchw((void *) ea);
- }
- goto instr_done;
+ op->type = MKOP(CACHEOP, DCBTST, 0);
+ op->ea = xform_ea(instr, regs);
+ op->reg = rd;
+ return 0;
case 278: /* dcbt */
- if (rd == 0) {
- ea = xform_ea(instr, regs, 0);
- prefetch((void *) ea);
- }
- goto instr_done;
+ op->type = MKOP(CACHEOP, DCBT, 0);
+ op->ea = xform_ea(instr, regs);
+ op->reg = rd;
+ return 0;
+ case 982: /* icbi */
+ op->type = MKOP(CACHEOP, ICBI, 0);
+ op->ea = xform_ea(instr, regs);
+ return 0;
}
break;
}
/*
- * Following cases are for loads and stores, so bail out
- * if we're in little-endian mode.
+ * Loads and stores.
*/
- if (regs->msr & MSR_LE)
- return 0;
-
- /*
- * Save register RA in case it's an update form load or store
- * and the access faults.
- */
- old_ra = regs->gpr[ra];
+ op->type = UNKNOWN;
+ op->update_reg = ra;
+ op->reg = rd;
+ op->val = regs->gpr[rd];
+ u = (instr >> 20) & UPDATE;
switch (opcode) {
case 31:
- u = instr & 0x40;
+ u = instr & UPDATE;
+ op->ea = xform_ea(instr, regs);
switch ((instr >> 1) & 0x3ff) {
case 20: /* lwarx */
- ea = xform_ea(instr, regs, 0);
- if (ea & 3)
- break; /* can't handle misaligned */
- err = -EFAULT;
- if (!address_ok(regs, ea, 4))
- goto ldst_done;
- err = 0;
- __get_user_asmx(val, ea, err, "lwarx");
- if (!err)
- regs->gpr[rd] = val;
- goto ldst_done;
+ op->type = MKOP(LARX, 0, 4);
+ break;
case 150: /* stwcx. */
- ea = xform_ea(instr, regs, 0);
- if (ea & 3)
- break; /* can't handle misaligned */
- err = -EFAULT;
- if (!address_ok(regs, ea, 4))
- goto ldst_done;
- err = 0;
- __put_user_asmx(regs->gpr[rd], ea, err, "stwcx.", cr);
- if (!err)
- regs->ccr = (regs->ccr & 0x0fffffff) |
- (cr & 0xe0000000) |
- ((regs->xer >> 3) & 0x10000000);
- goto ldst_done;
+ op->type = MKOP(STCX, 0, 4);
+ break;
#ifdef __powerpc64__
case 84: /* ldarx */
- ea = xform_ea(instr, regs, 0);
- if (ea & 7)
- break; /* can't handle misaligned */
- err = -EFAULT;
- if (!address_ok(regs, ea, 8))
- goto ldst_done;
- err = 0;
- __get_user_asmx(val, ea, err, "ldarx");
- if (!err)
- regs->gpr[rd] = val;
- goto ldst_done;
+ op->type = MKOP(LARX, 0, 8);
+ break;
case 214: /* stdcx. */
- ea = xform_ea(instr, regs, 0);
- if (ea & 7)
- break; /* can't handle misaligned */
- err = -EFAULT;
- if (!address_ok(regs, ea, 8))
- goto ldst_done;
- err = 0;
- __put_user_asmx(regs->gpr[rd], ea, err, "stdcx.", cr);
- if (!err)
- regs->ccr = (regs->ccr & 0x0fffffff) |
- (cr & 0xe0000000) |
- ((regs->xer >> 3) & 0x10000000);
- goto ldst_done;
+ op->type = MKOP(STCX, 0, 8);
+ break;
case 21: /* ldx */
case 53: /* ldux */
- err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
- 8, regs);
- goto ldst_done;
+ op->type = MKOP(LOAD, u, 8);
+ break;
#endif
case 23: /* lwzx */
case 55: /* lwzux */
- err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
- 4, regs);
- goto ldst_done;
+ op->type = MKOP(LOAD, u, 4);
+ break;
case 87: /* lbzx */
case 119: /* lbzux */
- err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
- 1, regs);
- goto ldst_done;
+ op->type = MKOP(LOAD, u, 1);
+ break;
#ifdef CONFIG_ALTIVEC
case 103: /* lvx */
case 359: /* lvxl */
if (!(regs->msr & MSR_VEC))
- break;
- ea = xform_ea(instr, regs, 0);
- err = do_vec_load(rd, do_lvx, ea, regs);
- goto ldst_done;
+ goto vecunavail;
+ op->type = MKOP(LOAD_VMX, 0, 16);
+ break;
case 231: /* stvx */
case 487: /* stvxl */
if (!(regs->msr & MSR_VEC))
- break;
- ea = xform_ea(instr, regs, 0);
- err = do_vec_store(rd, do_stvx, ea, regs);
- goto ldst_done;
+ goto vecunavail;
+ op->type = MKOP(STORE_VMX, 0, 16);
+ break;
#endif /* CONFIG_ALTIVEC */
#ifdef __powerpc64__
case 149: /* stdx */
case 181: /* stdux */
- val = regs->gpr[rd];
- err = write_mem(val, xform_ea(instr, regs, u), 8, regs);
- goto ldst_done;
+ op->type = MKOP(STORE, u, 8);
+ break;
#endif
case 151: /* stwx */
case 183: /* stwux */
- val = regs->gpr[rd];
- err = write_mem(val, xform_ea(instr, regs, u), 4, regs);
- goto ldst_done;
+ op->type = MKOP(STORE, u, 4);
+ break;
case 215: /* stbx */
case 247: /* stbux */
- val = regs->gpr[rd];
- err = write_mem(val, xform_ea(instr, regs, u), 1, regs);
- goto ldst_done;
+ op->type = MKOP(STORE, u, 1);
+ break;
case 279: /* lhzx */
case 311: /* lhzux */
- err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
- 2, regs);
- goto ldst_done;
+ op->type = MKOP(LOAD, u, 2);
+ break;
#ifdef __powerpc64__
case 341: /* lwax */
case 373: /* lwaux */
- err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
- 4, regs);
- if (!err)
- regs->gpr[rd] = (signed int) regs->gpr[rd];
- goto ldst_done;
+ op->type = MKOP(LOAD, SIGNEXT | u, 4);
+ break;
#endif
case 343: /* lhax */
case 375: /* lhaux */
- err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
- 2, regs);
- if (!err)
- regs->gpr[rd] = (signed short) regs->gpr[rd];
- goto ldst_done;
+ op->type = MKOP(LOAD, SIGNEXT | u, 2);
+ break;
case 407: /* sthx */
case 439: /* sthux */
- val = regs->gpr[rd];
- err = write_mem(val, xform_ea(instr, regs, u), 2, regs);
- goto ldst_done;
+ op->type = MKOP(STORE, u, 2);
+ break;
#ifdef __powerpc64__
case 532: /* ldbrx */
- err = read_mem(&val, xform_ea(instr, regs, 0), 8, regs);
- if (!err)
- regs->gpr[rd] = byterev_8(val);
- goto ldst_done;
+ op->type = MKOP(LOAD, BYTEREV, 8);
+ break;
#endif
+ case 533: /* lswx */
+ op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f);
+ break;
case 534: /* lwbrx */
- err = read_mem(&val, xform_ea(instr, regs, 0), 4, regs);
- if (!err)
- regs->gpr[rd] = byterev_4(val);
- goto ldst_done;
+ op->type = MKOP(LOAD, BYTEREV, 4);
+ break;
+
+ case 597: /* lswi */
+ if (rb == 0)
+ rb = 32; /* # bytes to load */
+ op->type = MKOP(LOAD_MULTI, 0, rb);
+ op->ea = 0;
+ if (ra)
+ op->ea = truncate_if_32bit(regs->msr,
+ regs->gpr[ra]);
+ break;
#ifdef CONFIG_PPC_FPU
case 535: /* lfsx */
case 567: /* lfsux */
if (!(regs->msr & MSR_FP))
- break;
- ea = xform_ea(instr, regs, u);
- err = do_fp_load(rd, do_lfs, ea, 4, regs);
- goto ldst_done;
+ goto fpunavail;
+ op->type = MKOP(LOAD_FP, u, 4);
+ break;
case 599: /* lfdx */
case 631: /* lfdux */
if (!(regs->msr & MSR_FP))
- break;
- ea = xform_ea(instr, regs, u);
- err = do_fp_load(rd, do_lfd, ea, 8, regs);
- goto ldst_done;
+ goto fpunavail;
+ op->type = MKOP(LOAD_FP, u, 8);
+ break;
case 663: /* stfsx */
case 695: /* stfsux */
if (!(regs->msr & MSR_FP))
- break;
- ea = xform_ea(instr, regs, u);
- err = do_fp_store(rd, do_stfs, ea, 4, regs);
- goto ldst_done;
+ goto fpunavail;
+ op->type = MKOP(STORE_FP, u, 4);
+ break;
case 727: /* stfdx */
case 759: /* stfdux */
if (!(regs->msr & MSR_FP))
- break;
- ea = xform_ea(instr, regs, u);
- err = do_fp_store(rd, do_stfd, ea, 8, regs);
- goto ldst_done;
+ goto fpunavail;
+ op->type = MKOP(STORE_FP, u, 8);
+ break;
#endif
#ifdef __powerpc64__
case 660: /* stdbrx */
- val = byterev_8(regs->gpr[rd]);
- err = write_mem(val, xform_ea(instr, regs, 0), 8, regs);
- goto ldst_done;
+ op->type = MKOP(STORE, BYTEREV, 8);
+ op->val = byterev_8(regs->gpr[rd]);
+ break;
#endif
+ case 661: /* stswx */
+ op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f);
+ break;
+
case 662: /* stwbrx */
- val = byterev_4(regs->gpr[rd]);
- err = write_mem(val, xform_ea(instr, regs, 0), 4, regs);
- goto ldst_done;
+ op->type = MKOP(STORE, BYTEREV, 4);
+ op->val = byterev_4(regs->gpr[rd]);
+ break;
+
+ case 725: /* stswi */
+ if (rb == 0)
+ rb = 32; /* # bytes to store */
+ op->type = MKOP(STORE_MULTI, 0, rb);
+ op->ea = 0;
+ if (ra)
+ op->ea = truncate_if_32bit(regs->msr,
+ regs->gpr[ra]);
+ break;
case 790: /* lhbrx */
- err = read_mem(&val, xform_ea(instr, regs, 0), 2, regs);
- if (!err)
- regs->gpr[rd] = byterev_2(val);
- goto ldst_done;
+ op->type = MKOP(LOAD, BYTEREV, 2);
+ break;
case 918: /* sthbrx */
- val = byterev_2(regs->gpr[rd]);
- err = write_mem(val, xform_ea(instr, regs, 0), 2, regs);
- goto ldst_done;
+ op->type = MKOP(STORE, BYTEREV, 2);
+ op->val = byterev_2(regs->gpr[rd]);
+ break;
#ifdef CONFIG_VSX
case 844: /* lxvd2x */
case 876: /* lxvd2ux */
if (!(regs->msr & MSR_VSX))
- break;
- rd |= (instr & 1) << 5;
- ea = xform_ea(instr, regs, u);
- err = do_vsx_load(rd, do_lxvd2x, ea, regs);
- goto ldst_done;
+ goto vsxunavail;
+ op->reg = rd | ((instr & 1) << 5);
+ op->type = MKOP(LOAD_VSX, u, 16);
+ break;
case 972: /* stxvd2x */
case 1004: /* stxvd2ux */
if (!(regs->msr & MSR_VSX))
- break;
- rd |= (instr & 1) << 5;
- ea = xform_ea(instr, regs, u);
- err = do_vsx_store(rd, do_stxvd2x, ea, regs);
- goto ldst_done;
+ goto vsxunavail;
+ op->reg = rd | ((instr & 1) << 5);
+ op->type = MKOP(STORE_VSX, u, 16);
+ break;
#endif /* CONFIG_VSX */
}
@@ -1552,178 +1539,123 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
case 32: /* lwz */
case 33: /* lwzu */
- err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 4, regs);
- goto ldst_done;
+ op->type = MKOP(LOAD, u, 4);
+ op->ea = dform_ea(instr, regs);
+ break;
case 34: /* lbz */
case 35: /* lbzu */
- err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 1, regs);
- goto ldst_done;
+ op->type = MKOP(LOAD, u, 1);
+ op->ea = dform_ea(instr, regs);
+ break;
case 36: /* stw */
- val = regs->gpr[rd];
- err = write_mem(val, dform_ea(instr, regs), 4, regs);
- goto ldst_done;
-
case 37: /* stwu */
- val = regs->gpr[rd];
- val3 = dform_ea(instr, regs);
- /*
- * For PPC32 we always use stwu to change stack point with r1. So
- * this emulated store may corrupt the exception frame, now we
- * have to provide the exception frame trampoline, which is pushed
- * below the kprobed function stack. So we only update gpr[1] but
- * don't emulate the real store operation. We will do real store
- * operation safely in exception return code by checking this flag.
- */
- if ((ra == 1) && !(regs->msr & MSR_PR) \
- && (val3 >= (regs->gpr[1] - STACK_INT_FRAME_SIZE))) {
-#ifdef CONFIG_PPC32
- /*
- * Check if we will touch kernel sack overflow
- */
- if (val3 - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) {
- printk(KERN_CRIT "Can't kprobe this since Kernel stack overflow.\n");
- err = -EINVAL;
- break;
- }
-#endif /* CONFIG_PPC32 */
- /*
- * Check if we already set since that means we'll
- * lose the previous value.
- */
- WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
- set_thread_flag(TIF_EMULATE_STACK_STORE);
- err = 0;
- } else
- err = write_mem(val, val3, 4, regs);
- goto ldst_done;
+ op->type = MKOP(STORE, u, 4);
+ op->ea = dform_ea(instr, regs);
+ break;
case 38: /* stb */
case 39: /* stbu */
- val = regs->gpr[rd];
- err = write_mem(val, dform_ea(instr, regs), 1, regs);
- goto ldst_done;
+ op->type = MKOP(STORE, u, 1);
+ op->ea = dform_ea(instr, regs);
+ break;
case 40: /* lhz */
case 41: /* lhzu */
- err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 2, regs);
- goto ldst_done;
+ op->type = MKOP(LOAD, u, 2);
+ op->ea = dform_ea(instr, regs);
+ break;
case 42: /* lha */
case 43: /* lhau */
- err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 2, regs);
- if (!err)
- regs->gpr[rd] = (signed short) regs->gpr[rd];
- goto ldst_done;
+ op->type = MKOP(LOAD, SIGNEXT | u, 2);
+ op->ea = dform_ea(instr, regs);
+ break;
case 44: /* sth */
case 45: /* sthu */
- val = regs->gpr[rd];
- err = write_mem(val, dform_ea(instr, regs), 2, regs);
- goto ldst_done;
+ op->type = MKOP(STORE, u, 2);
+ op->ea = dform_ea(instr, regs);
+ break;
case 46: /* lmw */
- ra = (instr >> 16) & 0x1f;
if (ra >= rd)
break; /* invalid form, ra in range to load */
- ea = dform_ea(instr, regs);
- do {
- err = read_mem(&regs->gpr[rd], ea, 4, regs);
- if (err)
- return 0;
- ea += 4;
- } while (++rd < 32);
- goto instr_done;
+ op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd));
+ op->ea = dform_ea(instr, regs);
+ break;
case 47: /* stmw */
- ea = dform_ea(instr, regs);
- do {
- err = write_mem(regs->gpr[rd], ea, 4, regs);
- if (err)
- return 0;
- ea += 4;
- } while (++rd < 32);
- goto instr_done;
+ op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd));
+ op->ea = dform_ea(instr, regs);
+ break;
#ifdef CONFIG_PPC_FPU
case 48: /* lfs */
case 49: /* lfsu */
if (!(regs->msr & MSR_FP))
- break;
- ea = dform_ea(instr, regs);
- err = do_fp_load(rd, do_lfs, ea, 4, regs);
- goto ldst_done;
+ goto fpunavail;
+ op->type = MKOP(LOAD_FP, u, 4);
+ op->ea = dform_ea(instr, regs);
+ break;
case 50: /* lfd */
case 51: /* lfdu */
if (!(regs->msr & MSR_FP))
- break;
- ea = dform_ea(instr, regs);
- err = do_fp_load(rd, do_lfd, ea, 8, regs);
- goto ldst_done;
+ goto fpunavail;
+ op->type = MKOP(LOAD_FP, u, 8);
+ op->ea = dform_ea(instr, regs);
+ break;
case 52: /* stfs */
case 53: /* stfsu */
if (!(regs->msr & MSR_FP))
- break;
- ea = dform_ea(instr, regs);
- err = do_fp_store(rd, do_stfs, ea, 4, regs);
- goto ldst_done;
+ goto fpunavail;
+ op->type = MKOP(STORE_FP, u, 4);
+ op->ea = dform_ea(instr, regs);
+ break;
case 54: /* stfd */
case 55: /* stfdu */
if (!(regs->msr & MSR_FP))
- break;
- ea = dform_ea(instr, regs);
- err = do_fp_store(rd, do_stfd, ea, 8, regs);
- goto ldst_done;
+ goto fpunavail;
+ op->type = MKOP(STORE_FP, u, 8);
+ op->ea = dform_ea(instr, regs);
+ break;
#endif
#ifdef __powerpc64__
case 58: /* ld[u], lwa */
+ op->ea = dsform_ea(instr, regs);
switch (instr & 3) {
case 0: /* ld */
- err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs),
- 8, regs);
- goto ldst_done;
+ op->type = MKOP(LOAD, 0, 8);
+ break;
case 1: /* ldu */
- err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs),
- 8, regs);
- goto ldst_done;
+ op->type = MKOP(LOAD, UPDATE, 8);
+ break;
case 2: /* lwa */
- err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs),
- 4, regs);
- if (!err)
- regs->gpr[rd] = (signed int) regs->gpr[rd];
- goto ldst_done;
+ op->type = MKOP(LOAD, SIGNEXT, 4);
+ break;
}
break;
case 62: /* std[u] */
- val = regs->gpr[rd];
+ op->ea = dsform_ea(instr, regs);
switch (instr & 3) {
case 0: /* std */
- err = write_mem(val, dsform_ea(instr, regs), 8, regs);
- goto ldst_done;
+ op->type = MKOP(STORE, 0, 8);
+ break;
case 1: /* stdu */
- err = write_mem(val, dsform_ea(instr, regs), 8, regs);
- goto ldst_done;
+ op->type = MKOP(STORE, UPDATE, 8);
+ break;
}
break;
#endif /* __powerpc64__ */
}
- err = -EINVAL;
-
- ldst_done:
- if (err) {
- regs->gpr[ra] = old_ra;
- return 0; /* invoke DSI if -EFAULT? */
- }
- instr_done:
- regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
- return 1;
+ return 0;
logical_done:
if (instr & 1)
@@ -1733,5 +1665,349 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
arith_done:
if (instr & 1)
set_cr0(regs, rd);
- goto instr_done;
+
+ instr_done:
+ regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
+ return 1;
+
+ priv:
+ op->type = INTERRUPT | 0x700;
+ op->val = SRR1_PROGPRIV;
+ return 0;
+
+ trap:
+ op->type = INTERRUPT | 0x700;
+ op->val = SRR1_PROGTRAP;
+ return 0;
+
+#ifdef CONFIG_PPC_FPU
+ fpunavail:
+ op->type = INTERRUPT | 0x800;
+ return 0;
+#endif
+
+#ifdef CONFIG_ALTIVEC
+ vecunavail:
+ op->type = INTERRUPT | 0xf20;
+ return 0;
+#endif
+
+#ifdef CONFIG_VSX
+ vsxunavail:
+ op->type = INTERRUPT | 0xf40;
+ return 0;
+#endif
+}
+EXPORT_SYMBOL_GPL(analyse_instr);
+
+/*
+ * For PPC32 we always use stwu with r1 to change the stack pointer.
+ * So this emulated store may corrupt the exception frame, now we
+ * have to provide the exception frame trampoline, which is pushed
+ * below the kprobed function stack. So we only update gpr[1] but
+ * don't emulate the real store operation. We will do real store
+ * operation safely in exception return code by checking this flag.
+ */
+static __kprobes int handle_stack_update(unsigned long ea, struct pt_regs *regs)
+{
+#ifdef CONFIG_PPC32
+ /*
+ * Check if we will touch kernel stack overflow
+ */
+ if (ea - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) {
+ printk(KERN_CRIT "Can't kprobe this since kernel stack would overflow.\n");
+ return -EINVAL;
+ }
+#endif /* CONFIG_PPC32 */
+ /*
+ * Check if we already set since that means we'll
+ * lose the previous value.
+ */
+ WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
+ set_thread_flag(TIF_EMULATE_STACK_STORE);
+ return 0;
+}
+
+static __kprobes void do_signext(unsigned long *valp, int size)
+{
+ switch (size) {
+ case 2:
+ *valp = (signed short) *valp;
+ break;
+ case 4:
+ *valp = (signed int) *valp;
+ break;
+ }
+}
+
+static __kprobes void do_byterev(unsigned long *valp, int size)
+{
+ switch (size) {
+ case 2:
+ *valp = byterev_2(*valp);
+ break;
+ case 4:
+ *valp = byterev_4(*valp);
+ break;
+#ifdef __powerpc64__
+ case 8:
+ *valp = byterev_8(*valp);
+ break;
+#endif
+ }
+}
+
+/*
+ * Emulate instructions that cause a transfer of control,
+ * loads and stores, and a few other instructions.
+ * Returns 1 if the step was emulated, 0 if not,
+ * or -1 if the instruction is one that should not be stepped,
+ * such as an rfid, or a mtmsrd that would clear MSR_RI.
+ */
+int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
+{
+ struct instruction_op op;
+ int r, err, size;
+ unsigned long val;
+ unsigned int cr;
+ int i, rd, nb;
+
+ r = analyse_instr(&op, regs, instr);
+ if (r != 0)
+ return r;
+
+ err = 0;
+ size = GETSIZE(op.type);
+ switch (op.type & INSTR_TYPE_MASK) {
+ case CACHEOP:
+ if (!address_ok(regs, op.ea, 8))
+ return 0;
+ switch (op.type & CACHEOP_MASK) {
+ case DCBST:
+ __cacheop_user_asmx(op.ea, err, "dcbst");
+ break;
+ case DCBF:
+ __cacheop_user_asmx(op.ea, err, "dcbf");
+ break;
+ case DCBTST:
+ if (op.reg == 0)
+ prefetchw((void *) op.ea);
+ break;
+ case DCBT:
+ if (op.reg == 0)
+ prefetch((void *) op.ea);
+ break;
+ case ICBI:
+ __cacheop_user_asmx(op.ea, err, "icbi");
+ break;
+ }
+ if (err)
+ return 0;
+ goto instr_done;
+
+ case LARX:
+ if (regs->msr & MSR_LE)
+ return 0;
+ if (op.ea & (size - 1))
+ break; /* can't handle misaligned */
+ err = -EFAULT;
+ if (!address_ok(regs, op.ea, size))
+ goto ldst_done;
+ err = 0;
+ switch (size) {
+ case 4:
+ __get_user_asmx(val, op.ea, err, "lwarx");
+ break;
+ case 8:
+ __get_user_asmx(val, op.ea, err, "ldarx");
+ break;
+ default:
+ return 0;
+ }
+ if (!err)
+ regs->gpr[op.reg] = val;
+ goto ldst_done;
+
+ case STCX:
+ if (regs->msr & MSR_LE)
+ return 0;
+ if (op.ea & (size - 1))
+ break; /* can't handle misaligned */
+ err = -EFAULT;
+ if (!address_ok(regs, op.ea, size))
+ goto ldst_done;
+ err = 0;
+ switch (size) {
+ case 4:
+ __put_user_asmx(op.val, op.ea, err, "stwcx.", cr);
+ break;
+ case 8:
+ __put_user_asmx(op.val, op.ea, err, "stdcx.", cr);
+ break;
+ default:
+ return 0;
+ }
+ if (!err)
+ regs->ccr = (regs->ccr & 0x0fffffff) |
+ (cr & 0xe0000000) |
+ ((regs->xer >> 3) & 0x10000000);
+ goto ldst_done;
+
+ case LOAD:
+ if (regs->msr & MSR_LE)
+ return 0;
+ err = read_mem(&regs->gpr[op.reg], op.ea, size, regs);
+ if (!err) {
+ if (op.type & SIGNEXT)
+ do_signext(&regs->gpr[op.reg], size);
+ if (op.type & BYTEREV)
+ do_byterev(&regs->gpr[op.reg], size);
+ }
+ goto ldst_done;
+
+ case LOAD_FP:
+ if (regs->msr & MSR_LE)
+ return 0;
+ if (size == 4)
+ err = do_fp_load(op.reg, do_lfs, op.ea, size, regs);
+ else
+ err = do_fp_load(op.reg, do_lfd, op.ea, size, regs);
+ goto ldst_done;
+
+#ifdef CONFIG_ALTIVEC
+ case LOAD_VMX:
+ if (regs->msr & MSR_LE)
+ return 0;
+ err = do_vec_load(op.reg, do_lvx, op.ea & ~0xfUL, regs);
+ goto ldst_done;
+#endif
+#ifdef CONFIG_VSX
+ case LOAD_VSX:
+ if (regs->msr & MSR_LE)
+ return 0;
+ err = do_vsx_load(op.reg, do_lxvd2x, op.ea, regs);
+ goto ldst_done;
+#endif
+ case LOAD_MULTI:
+ if (regs->msr & MSR_LE)
+ return 0;
+ rd = op.reg;
+ for (i = 0; i < size; i += 4) {
+ nb = size - i;
+ if (nb > 4)
+ nb = 4;
+ err = read_mem(&regs->gpr[rd], op.ea, nb, regs);
+ if (err)
+ return 0;
+ if (nb < 4) /* left-justify last bytes */
+ regs->gpr[rd] <<= 32 - 8 * nb;
+ op.ea += 4;
+ ++rd;
+ }
+ goto instr_done;
+
+ case STORE:
+ if (regs->msr & MSR_LE)
+ return 0;
+ if ((op.type & UPDATE) && size == sizeof(long) &&
+ op.reg == 1 && op.update_reg == 1 &&
+ !(regs->msr & MSR_PR) &&
+ op.ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
+ err = handle_stack_update(op.ea, regs);
+ goto ldst_done;
+ }
+ err = write_mem(op.val, op.ea, size, regs);
+ goto ldst_done;
+
+ case STORE_FP:
+ if (regs->msr & MSR_LE)
+ return 0;
+ if (size == 4)
+ err = do_fp_store(op.reg, do_stfs, op.ea, size, regs);
+ else
+ err = do_fp_store(op.reg, do_stfd, op.ea, size, regs);
+ goto ldst_done;
+
+#ifdef CONFIG_ALTIVEC
+ case STORE_VMX:
+ if (regs->msr & MSR_LE)
+ return 0;
+ err = do_vec_store(op.reg, do_stvx, op.ea & ~0xfUL, regs);
+ goto ldst_done;
+#endif
+#ifdef CONFIG_VSX
+ case STORE_VSX:
+ if (regs->msr & MSR_LE)
+ return 0;
+ err = do_vsx_store(op.reg, do_stxvd2x, op.ea, regs);
+ goto ldst_done;
+#endif
+ case STORE_MULTI:
+ if (regs->msr & MSR_LE)
+ return 0;
+ rd = op.reg;
+ for (i = 0; i < size; i += 4) {
+ val = regs->gpr[rd];
+ nb = size - i;
+ if (nb > 4)
+ nb = 4;
+ else
+ val >>= 32 - 8 * nb;
+ err = write_mem(val, op.ea, nb, regs);
+ if (err)
+ return 0;
+ op.ea += 4;
+ ++rd;
+ }
+ goto instr_done;
+
+ case MFMSR:
+ regs->gpr[op.reg] = regs->msr & MSR_MASK;
+ goto instr_done;
+
+ case MTMSR:
+ val = regs->gpr[op.reg];
+ if ((val & MSR_RI) == 0)
+ /* can't step mtmsr[d] that would clear MSR_RI */
+ return -1;
+ /* here op.val is the mask of bits to change */
+ regs->msr = (regs->msr & ~op.val) | (val & op.val);
+ goto instr_done;
+
+#ifdef CONFIG_PPC64
+ case SYSCALL: /* sc */
+ /*
+ * N.B. this uses knowledge about how the syscall
+ * entry code works. If that is changed, this will
+ * need to be changed also.
+ */
+ if (regs->gpr[0] == 0x1ebe &&
+ cpu_has_feature(CPU_FTR_REAL_LE)) {
+ regs->msr ^= MSR_LE;
+ goto instr_done;
+ }
+ regs->gpr[9] = regs->gpr[13];
+ regs->gpr[10] = MSR_KERNEL;
+ regs->gpr[11] = regs->nip + 4;
+ regs->gpr[12] = regs->msr & MSR_MASK;
+ regs->gpr[13] = (unsigned long) get_paca();
+ regs->nip = (unsigned long) &system_call_common;
+ regs->msr = MSR_KERNEL;
+ return 1;
+
+ case RFI:
+ return -1;
+#endif
+ }
+ return 0;
+
+ ldst_done:
+ if (err)
+ return 0;
+ if (op.type & UPDATE)
+ regs->gpr[op.update_reg] = op.ea;
+
+ instr_done:
+ regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
+ return 1;
}
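
Taken together, the sstep.c rework splits single-stepping into two phases: analyse_instr() decodes an instruction into a struct instruction_op descriptor (type, target register, effective address, flags such as UPDATE, SIGNEXT or BYTEREV) without touching machine state, and emulate_step() then executes only the descriptors it knows how to handle, applying the rA update and the PC advance in one place. A much-simplified userspace sketch of that decode/execute structure (the struct layout, the single opcode handled and the register file are illustrative stand-ins, not the kernel's definitions):

#include <stdio.h>

enum op_type { COMPUTE, LOAD, STORE, BRANCH, UNKNOWN };

struct instruction_op {
	enum op_type type;
	int reg;				/* target register */
	long ea;				/* effective address for loads/stores */
};

/* Phase 1: decode only -- never touches memory or machine state. */
static void analyse(struct instruction_op *op, unsigned int instr, const long *gpr)
{
	op->type = UNKNOWN;
	switch (instr >> 26) {			/* primary opcode, as on PowerPC */
	case 32:				/* lwz rD,d(rA) */
		op->type = LOAD;
		op->reg = (instr >> 21) & 0x1f;
		op->ea = gpr[(instr >> 16) & 0x1f] + (short)(instr & 0xffff);
		break;
	}
}

/* Phase 2: act on the descriptor (here it only reports what it would do). */
static int execute(const struct instruction_op *op)
{
	switch (op->type) {
	case LOAD:
		printf("load 4 bytes into r%d from ea=0x%lx\n", op->reg, op->ea);
		return 1;
	default:
		return 0;			/* not emulated */
	}
}

int main(void)
{
	long gpr[32] = { [1] = 0x1000 };	/* fake register file, r1 = 0x1000 */
	struct instruction_op op;

	analyse(&op, 0x80610008, gpr);		/* lwz r3,8(r1) */
	return !execute(&op);
}
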
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index d0130fff20e5..325e861616a1 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -34,3 +34,4 @@ obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += hugepage-hash64.o
obj-$(CONFIG_PPC_SUBPAGE_PROT) += subpage-prot.o
obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o
obj-$(CONFIG_HIGHMEM) += highmem.o
+obj-$(CONFIG_PPC_COPRO_BASE) += copro_fault.o
diff --git a/arch/powerpc/platforms/cell/spu_fault.c b/arch/powerpc/mm/copro_fault.c
index 641e7273d75a..0f9939e693df 100644
--- a/arch/powerpc/platforms/cell/spu_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -1,5 +1,5 @@
/*
- * SPU mm fault handler
+ * CoProcessor (SPU/AFU) mm fault handler
*
* (C) Copyright IBM Deutschland Entwicklung GmbH 2007
*
@@ -23,16 +23,17 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/export.h>
-
+#include <asm/reg.h>
+#include <asm/copro.h>
#include <asm/spu.h>
-#include <asm/spu_csa.h>
+#include <misc/cxl.h>
/*
* This ought to be kept in sync with the powerpc specific do_page_fault
* function. Currently, there are a few corner cases that we haven't had
* to handle fortunately.
*/
-int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
+int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
unsigned long dsisr, unsigned *flt)
{
struct vm_area_struct *vma;
@@ -58,12 +59,12 @@ int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
goto out_unlock;
}
- is_write = dsisr & MFC_DSISR_ACCESS_PUT;
+ is_write = dsisr & DSISR_ISSTORE;
if (is_write) {
if (!(vma->vm_flags & VM_WRITE))
goto out_unlock;
} else {
- if (dsisr & MFC_DSISR_ACCESS_DENIED)
+ if (dsisr & DSISR_PROTFAULT)
goto out_unlock;
if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto out_unlock;
@@ -91,4 +92,58 @@ out_unlock:
up_read(&mm->mmap_sem);
return ret;
}
-EXPORT_SYMBOL_GPL(spu_handle_mm_fault);
+EXPORT_SYMBOL_GPL(copro_handle_mm_fault);
+
+int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
+{
+ u64 vsid;
+ int psize, ssize;
+
+ slb->esid = (ea & ESID_MASK) | SLB_ESID_V;
+
+ switch (REGION_ID(ea)) {
+ case USER_REGION_ID:
+ pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea);
+ psize = get_slice_psize(mm, ea);
+ ssize = user_segment_size(ea);
+ vsid = get_vsid(mm->context.id, ea, ssize);
+ break;
+ case VMALLOC_REGION_ID:
+ pr_devel("%s: 0x%llx -- VMALLOC_REGION_ID\n", __func__, ea);
+ if (ea < VMALLOC_END)
+ psize = mmu_vmalloc_psize;
+ else
+ psize = mmu_io_psize;
+ ssize = mmu_kernel_ssize;
+ vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
+ break;
+ case KERNEL_REGION_ID:
+ pr_devel("%s: 0x%llx -- KERNEL_REGION_ID\n", __func__, ea);
+ psize = mmu_linear_psize;
+ ssize = mmu_kernel_ssize;
+ vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
+ break;
+ default:
+ pr_debug("%s: invalid region access at %016llx\n", __func__, ea);
+ return 1;
+ }
+
+ vsid = (vsid << slb_vsid_shift(ssize)) | SLB_VSID_USER;
+
+ vsid |= mmu_psize_defs[psize].sllp |
+ ((ssize == MMU_SEGSIZE_1T) ? SLB_VSID_B_1T : 0);
+
+ slb->vsid = vsid;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(copro_calculate_slb);
+
+void copro_flush_all_slbs(struct mm_struct *mm)
+{
+#ifdef CONFIG_SPU_BASE
+ spu_flush_all_slbs(mm);
+#endif
+ cxl_slbia(mm);
+}
+EXPORT_SYMBOL_GPL(copro_flush_all_slbs);
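
copro_calculate_slb() keys everything off the top four bits of the effective address: user addresses take the per-mm VSID and slice page size, vmalloc/IO and the kernel linear map take kernel VSIDs, and anything else is rejected. A small standalone sketch of that region dispatch follows; REGION_ID and the region codes match the usual 64-bit hash layout but are spelled out here purely for illustration.

/* Sketch of the region dispatch in copro_calculate_slb(). Constants are
 * written out for illustration; not a drop-in for the kernel code. */
#include <stdio.h>
#include <stdint.h>

#define REGION_ID(ea)		((uint64_t)(ea) >> 60)
#define USER_REGION_ID		0x0
#define KERNEL_REGION_ID	0xc
#define VMALLOC_REGION_ID	0xd

static const char *region_name(uint64_t ea)
{
	switch (REGION_ID(ea)) {
	case USER_REGION_ID:	return "user (per-mm VSID, slice psize)";
	case KERNEL_REGION_ID:	return "kernel linear (kernel VSID, linear psize)";
	case VMALLOC_REGION_ID:	return "vmalloc/io (kernel VSID, vmalloc/io psize)";
	default:		return "invalid -- error returned to the caller";
	}
}

int main(void)
{
	uint64_t samples[] = { 0x0000123456789000ULL,
			       0xc000000001234000ULL,
			       0xd000080000400000ULL,
			       0xf000000000000000ULL };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%016llx -> %s\n",
		       (unsigned long long)samples[i], region_name(samples[i]));
	return 0;
}
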
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 51ab9e7e6c39..08d659a9fcdb 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -30,9 +30,9 @@
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
-#include <linux/magic.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
+#include <linux/hugetlb.h>
#include <asm/firmware.h>
#include <asm/page.h>
@@ -114,22 +114,37 @@ static int store_updates_sp(struct pt_regs *regs)
#define MM_FAULT_CONTINUE -1
#define MM_FAULT_ERR(sig) (sig)
-static int do_sigbus(struct pt_regs *regs, unsigned long address)
+static int do_sigbus(struct pt_regs *regs, unsigned long address,
+ unsigned int fault)
{
siginfo_t info;
+ unsigned int lsb = 0;
up_read(&current->mm->mmap_sem);
- if (user_mode(regs)) {
- current->thread.trap_nr = BUS_ADRERR;
- info.si_signo = SIGBUS;
- info.si_errno = 0;
- info.si_code = BUS_ADRERR;
- info.si_addr = (void __user *)address;
- force_sig_info(SIGBUS, &info, current);
- return MM_FAULT_RETURN;
+ if (!user_mode(regs))
+ return MM_FAULT_ERR(SIGBUS);
+
+ current->thread.trap_nr = BUS_ADRERR;
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = BUS_ADRERR;
+ info.si_addr = (void __user *)address;
+#ifdef CONFIG_MEMORY_FAILURE
+ if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
+ pr_err("MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
+ current->comm, current->pid, address);
+ info.si_code = BUS_MCEERR_AR;
}
- return MM_FAULT_ERR(SIGBUS);
+
+ if (fault & VM_FAULT_HWPOISON_LARGE)
+ lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
+ if (fault & VM_FAULT_HWPOISON)
+ lsb = PAGE_SHIFT;
+#endif
+ info.si_addr_lsb = lsb;
+ force_sig_info(SIGBUS, &info, current);
+ return MM_FAULT_RETURN;
}
static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault)
@@ -170,11 +185,8 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault)
return MM_FAULT_RETURN;
}
- /* Bus error. x86 handles HWPOISON here, we'll add this if/when
- * we support the feature in HW
- */
- if (fault & VM_FAULT_SIGBUS)
- return do_sigbus(regs, addr);
+ if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE))
+ return do_sigbus(regs, addr, fault);
/* We don't understand the fault code, this is fatal */
BUG();
@@ -508,7 +520,6 @@ bail:
void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
const struct exception_table_entry *entry;
- unsigned long *stackend;
/* Are we prepared to handle this fault? */
if ((entry = search_exception_tables(regs->nip)) != NULL) {
@@ -537,8 +548,7 @@ void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n",
regs->nip);
- stackend = end_of_stack(current);
- if (current != &init_task && *stackend != STACK_END_MAGIC)
+ if (task_stack_end_corrupted(current))
printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");
die("Kernel access of bad area", regs, sig);
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index afc0a8295f84..ae4962a06476 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -29,6 +29,8 @@
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
+#include <misc/cxl.h>
+
#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
@@ -149,9 +151,11 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
static inline void tlbie(unsigned long vpn, int psize, int apsize,
int ssize, int local)
{
- unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL);
+ unsigned int use_local;
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
+ use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && !cxl_ctx_in_use();
+
if (use_local)
use_local = mmu_psize_defs[psize].tlbiel;
if (lock_tlbie && !use_local)
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index daee7f4e5a14..d5339a3b9945 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -51,7 +51,7 @@
#include <asm/cacheflush.h>
#include <asm/cputable.h>
#include <asm/sections.h>
-#include <asm/spu.h>
+#include <asm/copro.h>
#include <asm/udbg.h>
#include <asm/code-patching.h>
#include <asm/fadump.h>
@@ -92,12 +92,14 @@ extern unsigned long dart_tablebase;
static unsigned long _SDR1;
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
+EXPORT_SYMBOL_GPL(mmu_psize_defs);
struct hash_pte *htab_address;
unsigned long htab_size_bytes;
unsigned long htab_hash_mask;
EXPORT_SYMBOL_GPL(htab_hash_mask);
int mmu_linear_psize = MMU_PAGE_4K;
+EXPORT_SYMBOL_GPL(mmu_linear_psize);
int mmu_virtual_psize = MMU_PAGE_4K;
int mmu_vmalloc_psize = MMU_PAGE_4K;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
@@ -105,6 +107,7 @@ int mmu_vmemmap_psize = MMU_PAGE_4K;
#endif
int mmu_io_psize = MMU_PAGE_4K;
int mmu_kernel_ssize = MMU_SEGSIZE_256M;
+EXPORT_SYMBOL_GPL(mmu_kernel_ssize);
int mmu_highuser_ssize = MMU_SEGSIZE_256M;
u16 mmu_slb_size = 64;
EXPORT_SYMBOL_GPL(mmu_slb_size);
@@ -333,70 +336,69 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
return 0;
prop = of_get_flat_dt_prop(node, "ibm,segment-page-sizes", &size);
- if (prop != NULL) {
- pr_info("Page sizes from device-tree:\n");
- size /= 4;
- cur_cpu_spec->mmu_features &= ~(MMU_FTR_16M_PAGE);
- while(size > 0) {
- unsigned int base_shift = be32_to_cpu(prop[0]);
- unsigned int slbenc = be32_to_cpu(prop[1]);
- unsigned int lpnum = be32_to_cpu(prop[2]);
- struct mmu_psize_def *def;
- int idx, base_idx;
-
- size -= 3; prop += 3;
- base_idx = get_idx_from_shift(base_shift);
- if (base_idx < 0) {
- /*
- * skip the pte encoding also
- */
- prop += lpnum * 2; size -= lpnum * 2;
+ if (!prop)
+ return 0;
+
+ pr_info("Page sizes from device-tree:\n");
+ size /= 4;
+ cur_cpu_spec->mmu_features &= ~(MMU_FTR_16M_PAGE);
+ while(size > 0) {
+ unsigned int base_shift = be32_to_cpu(prop[0]);
+ unsigned int slbenc = be32_to_cpu(prop[1]);
+ unsigned int lpnum = be32_to_cpu(prop[2]);
+ struct mmu_psize_def *def;
+ int idx, base_idx;
+
+ size -= 3; prop += 3;
+ base_idx = get_idx_from_shift(base_shift);
+ if (base_idx < 0) {
+ /* skip the pte encoding also */
+ prop += lpnum * 2; size -= lpnum * 2;
+ continue;
+ }
+ def = &mmu_psize_defs[base_idx];
+ if (base_idx == MMU_PAGE_16M)
+ cur_cpu_spec->mmu_features |= MMU_FTR_16M_PAGE;
+
+ def->shift = base_shift;
+ if (base_shift <= 23)
+ def->avpnm = 0;
+ else
+ def->avpnm = (1 << (base_shift - 23)) - 1;
+ def->sllp = slbenc;
+ /*
+ * We don't know for sure what's up with tlbiel, so
+ * for now we only set it for 4K and 64K pages
+ */
+ if (base_idx == MMU_PAGE_4K || base_idx == MMU_PAGE_64K)
+ def->tlbiel = 1;
+ else
+ def->tlbiel = 0;
+
+ while (size > 0 && lpnum) {
+ unsigned int shift = be32_to_cpu(prop[0]);
+ int penc = be32_to_cpu(prop[1]);
+
+ prop += 2; size -= 2;
+ lpnum--;
+
+ idx = get_idx_from_shift(shift);
+ if (idx < 0)
continue;
- }
- def = &mmu_psize_defs[base_idx];
- if (base_idx == MMU_PAGE_16M)
- cur_cpu_spec->mmu_features |= MMU_FTR_16M_PAGE;
-
- def->shift = base_shift;
- if (base_shift <= 23)
- def->avpnm = 0;
- else
- def->avpnm = (1 << (base_shift - 23)) - 1;
- def->sllp = slbenc;
- /*
- * We don't know for sure what's up with tlbiel, so
- * for now we only set it for 4K and 64K pages
- */
- if (base_idx == MMU_PAGE_4K || base_idx == MMU_PAGE_64K)
- def->tlbiel = 1;
- else
- def->tlbiel = 0;
-
- while (size > 0 && lpnum) {
- unsigned int shift = be32_to_cpu(prop[0]);
- int penc = be32_to_cpu(prop[1]);
-
- prop += 2; size -= 2;
- lpnum--;
-
- idx = get_idx_from_shift(shift);
- if (idx < 0)
- continue;
-
- if (penc == -1)
- pr_err("Invalid penc for base_shift=%d "
- "shift=%d\n", base_shift, shift);
-
- def->penc[idx] = penc;
- pr_info("base_shift=%d: shift=%d, sllp=0x%04lx,"
- " avpnm=0x%08lx, tlbiel=%d, penc=%d\n",
- base_shift, shift, def->sllp,
- def->avpnm, def->tlbiel, def->penc[idx]);
- }
+
+ if (penc == -1)
+ pr_err("Invalid penc for base_shift=%d "
+ "shift=%d\n", base_shift, shift);
+
+ def->penc[idx] = penc;
+ pr_info("base_shift=%d: shift=%d, sllp=0x%04lx,"
+ " avpnm=0x%08lx, tlbiel=%d, penc=%d\n",
+ base_shift, shift, def->sllp,
+ def->avpnm, def->tlbiel, def->penc[idx]);
}
- return 1;
}
- return 0;
+
+ return 1;
}
#ifdef CONFIG_HUGETLB_PAGE
@@ -867,7 +869,7 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
}
#ifdef CONFIG_PPC_MM_SLICES
-unsigned int get_paca_psize(unsigned long addr)
+static unsigned int get_paca_psize(unsigned long addr)
{
u64 lpsizes;
unsigned char *hpsizes;
@@ -901,10 +903,8 @@ void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
if (get_slice_psize(mm, addr) == MMU_PAGE_4K)
return;
slice_set_range_psize(mm, addr, 1, MMU_PAGE_4K);
-#ifdef CONFIG_SPU_BASE
- spu_flush_all_slbs(mm);
-#endif
- if (get_paca_psize(addr) != MMU_PAGE_4K) {
+ copro_flush_all_slbs(mm);
+ if ((get_paca_psize(addr) != MMU_PAGE_4K) && (current->mm == mm)) {
get_paca()->context = mm->context;
slb_flush_and_rebolt();
}
@@ -989,12 +989,11 @@ static void check_paca_psize(unsigned long ea, struct mm_struct *mm,
* -1 - critical hash insertion error
* -2 - access not permitted by subpage protection mechanism
*/
-int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
+int hash_page_mm(struct mm_struct *mm, unsigned long ea, unsigned long access, unsigned long trap)
{
enum ctx_state prev_state = exception_enter();
pgd_t *pgdir;
unsigned long vsid;
- struct mm_struct *mm;
pte_t *ptep;
unsigned hugeshift;
const struct cpumask *tmp;
@@ -1008,7 +1007,6 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
switch (REGION_ID(ea)) {
case USER_REGION_ID:
user_region = 1;
- mm = current->mm;
if (! mm) {
DBG_LOW(" user region with no mm !\n");
rc = 1;
@@ -1019,7 +1017,6 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
vsid = get_vsid(mm->context.id, ea, ssize);
break;
case VMALLOC_REGION_ID:
- mm = &init_mm;
vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
if (ea < VMALLOC_END)
psize = mmu_vmalloc_psize;
@@ -1104,7 +1101,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
WARN_ON(1);
}
#endif
- check_paca_psize(ea, mm, psize, user_region);
+ if (current->mm == mm)
+ check_paca_psize(ea, mm, psize, user_region);
goto bail;
}
@@ -1141,13 +1139,12 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
"to 4kB pages because of "
"non-cacheable mapping\n");
psize = mmu_vmalloc_psize = MMU_PAGE_4K;
-#ifdef CONFIG_SPU_BASE
- spu_flush_all_slbs(mm);
-#endif
+ copro_flush_all_slbs(mm);
}
}
- check_paca_psize(ea, mm, psize, user_region);
+ if (current->mm == mm)
+ check_paca_psize(ea, mm, psize, user_region);
#endif /* CONFIG_PPC_64K_PAGES */
#ifdef CONFIG_PPC_HAS_HASH_64K
@@ -1182,6 +1179,17 @@ bail:
exception_exit(prev_state);
return rc;
}
+EXPORT_SYMBOL_GPL(hash_page_mm);
+
+int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
+{
+ struct mm_struct *mm = current->mm;
+
+ if (REGION_ID(ea) == VMALLOC_REGION_ID)
+ mm = &init_mm;
+
+ return hash_page_mm(mm, ea, access, trap);
+}
EXPORT_SYMBOL_GPL(hash_page);
void hash_preload(struct mm_struct *mm, unsigned long ea,
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index cff59f1bec23..cad68ff8eca5 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -106,11 +106,11 @@ unsigned long __max_low_memory = MAX_LOW_MEM;
void MMU_setup(void)
{
/* Check for nobats option (used in mapin_ram). */
- if (strstr(cmd_line, "nobats")) {
+ if (strstr(boot_command_line, "nobats")) {
__map_without_bats = 1;
}
- if (strstr(cmd_line, "noltlbs")) {
+ if (strstr(boot_command_line, "noltlbs")) {
__map_without_ltlbs = 1;
}
#ifdef CONFIG_DEBUG_PAGEALLOC
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 253b4b971c8a..3481556a1880 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -233,9 +233,6 @@ static void __meminit vmemmap_create_mapping(unsigned long start,
}
#ifdef CONFIG_MEMORY_HOTPLUG
-extern int htab_remove_mapping(unsigned long vstart, unsigned long vend,
- int psize, int ssize);
-
static void vmemmap_remove_mapping(unsigned long start,
unsigned long page_size)
{
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index e0f7a189c48e..8ebaac75c940 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -260,6 +260,60 @@ static int __init mark_nonram_nosave(void)
}
return 0;
}
+#else /* CONFIG_NEED_MULTIPLE_NODES */
+static int __init mark_nonram_nosave(void)
+{
+ return 0;
+}
+#endif
+
+static bool zone_limits_final;
+
+static unsigned long max_zone_pfns[MAX_NR_ZONES] = {
+ [0 ... MAX_NR_ZONES - 1] = ~0UL
+};
+
+/*
+ * Restrict the specified zone and all more restrictive zones
+ * to be below the specified pfn. May not be called after
+ * paging_init().
+ */
+void __init limit_zone_pfn(enum zone_type zone, unsigned long pfn_limit)
+{
+ int i;
+
+ if (WARN_ON(zone_limits_final))
+ return;
+
+ for (i = zone; i >= 0; i--) {
+ if (max_zone_pfns[i] > pfn_limit)
+ max_zone_pfns[i] = pfn_limit;
+ }
+}
+
+/*
+ * Find the least restrictive zone that is entirely below the
+ * specified pfn limit. Returns < 0 if no suitable zone is found.
+ *
+ * pfn_limit must be u64 because it can exceed 32 bits even on 32-bit
+ * systems -- the DMA limit can be higher than any possible real pfn.
+ */
+int dma_pfn_limit_to_zone(u64 pfn_limit)
+{
+ enum zone_type top_zone = ZONE_NORMAL;
+ int i;
+
+#ifdef CONFIG_HIGHMEM
+ top_zone = ZONE_HIGHMEM;
+#endif
+
+ for (i = top_zone; i >= 0; i--) {
+ if (max_zone_pfns[i] <= pfn_limit)
+ return i;
+ }
+
+ return -EPERM;
+}
/*
* paging_init() sets up the page tables - in fact we've already done this.
@@ -268,7 +322,7 @@ void __init paging_init(void)
{
unsigned long long total_ram = memblock_phys_mem_size();
phys_addr_t top_of_ram = memblock_end_of_DRAM();
- unsigned long max_zone_pfns[MAX_NR_ZONES];
+ enum zone_type top_zone;
#ifdef CONFIG_PPC32
unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
@@ -290,18 +344,20 @@ void __init paging_init(void)
(unsigned long long)top_of_ram, total_ram);
printk(KERN_DEBUG "Memory hole size: %ldMB\n",
(long int)((top_of_ram - total_ram) >> 20));
- memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+
#ifdef CONFIG_HIGHMEM
- max_zone_pfns[ZONE_DMA] = lowmem_end_addr >> PAGE_SHIFT;
- max_zone_pfns[ZONE_HIGHMEM] = top_of_ram >> PAGE_SHIFT;
+ top_zone = ZONE_HIGHMEM;
+ limit_zone_pfn(ZONE_NORMAL, lowmem_end_addr >> PAGE_SHIFT);
#else
- max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
+ top_zone = ZONE_NORMAL;
#endif
+
+ limit_zone_pfn(top_zone, top_of_ram >> PAGE_SHIFT);
+ zone_limits_final = true;
free_area_init_nodes(max_zone_pfns);
mark_nonram_nosave();
}
-#endif /* ! CONFIG_NEED_MULTIPLE_NODES */
static void __init register_page_bootmem_info(void)
{
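
The two helpers added to mem.c above replace the per-platform max_zone_pfns bookkeeping: limit_zone_pfn() clamps a zone and every narrower zone beneath it, and dma_pfn_limit_to_zone() later maps a device's DMA reach back to the widest usable zone. A compilable userspace model of the pair, using illustrative zone indices and pfn values; the 2 GiB clamp mirrors what corenet_generic.c and qemu_e500.c do further down.

/* Sketch of limit_zone_pfn() / dma_pfn_limit_to_zone(); zone indices and
 * pfn values are illustrative (GNU C range initializer as in the hunk). */
#include <stdio.h>
#include <stdint.h>

enum zone_type { ZONE_DMA, ZONE_DMA32, ZONE_NORMAL, MAX_NR_ZONES };

static uint64_t max_zone_pfns[MAX_NR_ZONES] = {
	[0 ... MAX_NR_ZONES - 1] = ~0ULL
};

static void limit_zone_pfn(enum zone_type zone, uint64_t pfn_limit)
{
	for (int i = zone; i >= 0; i--)
		if (max_zone_pfns[i] > pfn_limit)
			max_zone_pfns[i] = pfn_limit;
}

static int dma_pfn_limit_to_zone(uint64_t pfn_limit)
{
	for (int i = ZONE_NORMAL; i >= 0; i--)
		if (max_zone_pfns[i] <= pfn_limit)
			return i;
	return -1;
}

int main(void)
{
	/* 4K pages: 2 GiB -> pfn 0x80000, 16 GiB of RAM -> pfn 0x400000 */
	limit_zone_pfn(ZONE_DMA32, 0x80000);	/* as corenet_generic.c does */
	limit_zone_pfn(ZONE_NORMAL, 0x400000);	/* top of RAM */

	printf("31-bit capable device -> zone %d\n", dma_pfn_limit_to_zone(0x80000));
	printf("64-bit capable device -> zone %d\n", dma_pfn_limit_to_zone(~0ULL >> 12));
	return 0;
}
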
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index d7737a542fd7..649666d5d1c2 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -538,7 +538,7 @@ static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
*/
static int numa_setup_cpu(unsigned long lcpu)
{
- int nid;
+ int nid = -1;
struct device_node *cpu;
/*
@@ -555,19 +555,21 @@ static int numa_setup_cpu(unsigned long lcpu)
if (!cpu) {
WARN_ON(1);
- nid = 0;
- goto out;
+ if (cpu_present(lcpu))
+ goto out_present;
+ else
+ goto out;
}
nid = of_node_to_nid_single(cpu);
+out_present:
if (nid < 0 || !node_online(nid))
nid = first_online_node;
-out:
- map_cpu_to_node(lcpu, nid);
+ map_cpu_to_node(lcpu, nid);
of_node_put(cpu);
-
+out:
return nid;
}
@@ -1127,20 +1129,11 @@ void __init do_init_bootmem(void)
* even before we online them, so that we can use cpu_to_{node,mem}
* early in boot, cf. smp_prepare_cpus().
*/
- for_each_possible_cpu(cpu) {
- cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
- (void *)(unsigned long)cpu);
+ for_each_present_cpu(cpu) {
+ numa_setup_cpu((unsigned long)cpu);
}
}
-void __init paging_init(void)
-{
- unsigned long max_zone_pfns[MAX_NR_ZONES];
- memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
- max_zone_pfns[ZONE_DMA] = memblock_end_of_DRAM() >> PAGE_SHIFT;
- free_area_init_nodes(max_zone_pfns);
-}
-
static int __init early_numa(char *p)
{
if (!p)
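
numa_setup_cpu() above now distinguishes a present CPU with a missing device node from a truly absent one, and falls back to first_online_node rather than hard-wiring node 0 when firmware gives no usable node. A toy model of that fallback; the node bitmap and pick_node() helper are invented for the example.

/* Sketch of the node-selection fallback added to numa_setup_cpu(). */
#include <stdio.h>

#define MAX_NODES 4

static int node_online_map[MAX_NODES] = { 0, 0, 1, 1 };	/* nodes 2,3 online */

static int first_online_node(void)
{
	for (int n = 0; n < MAX_NODES; n++)
		if (node_online_map[n])
			return n;
	return -1;
}

static int pick_node(int nid_from_fw)	/* -1 means "no device node" */
{
	if (nid_from_fw < 0 || nid_from_fw >= MAX_NODES ||
	    !node_online_map[nid_from_fw])
		return first_online_node();
	return nid_from_fw;
}

int main(void)
{
	printf("fw says node 3 -> %d\n", pick_node(3));	/* kept: 3 */
	printf("fw says node 0 -> %d\n", pick_node(0));	/* offline: falls back to 2 */
	printf("no device node -> %d\n", pick_node(-1));	/* missing: falls back to 2 */
	return 0;
}
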
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index c695943a513c..c90e602677c9 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -48,7 +48,7 @@ static inline int pte_looks_normal(pte_t pte)
(_PAGE_PRESENT | _PAGE_USER);
}
-struct page * maybe_pte_to_page(pte_t pte)
+static struct page *maybe_pte_to_page(pte_t pte)
{
unsigned long pfn = pte_pfn(pte);
struct page *page;
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 0399a6702958..6e450ca66526 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -46,9 +46,6 @@ static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | slot;
}
-#define slb_vsid_shift(ssize) \
- ((ssize) == MMU_SEGSIZE_256M? SLB_VSID_SHIFT: SLB_VSID_SHIFT_1T)
-
static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
unsigned long flags)
{
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index b0c75cc15efc..8d7bda94d196 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -30,9 +30,11 @@
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/export.h>
+#include <linux/hugetlb.h>
#include <asm/mman.h>
#include <asm/mmu.h>
-#include <asm/spu.h>
+#include <asm/copro.h>
+#include <asm/hugetlb.h>
/* some sanity checks */
#if (PGTABLE_RANGE >> 43) > SLICE_MASK_SIZE
@@ -232,9 +234,7 @@ static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psiz
spin_unlock_irqrestore(&slice_convert_lock, flags);
-#ifdef CONFIG_SPU_BASE
- spu_flush_all_slbs(mm);
-#endif
+ copro_flush_all_slbs(mm);
}
/*
@@ -671,9 +671,7 @@ void slice_set_psize(struct mm_struct *mm, unsigned long address,
spin_unlock_irqrestore(&slice_convert_lock, flags);
-#ifdef CONFIG_SPU_BASE
- spu_flush_all_slbs(mm);
-#endif
+ copro_flush_all_slbs(mm);
}
void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
diff --git a/arch/powerpc/oprofile/backtrace.c b/arch/powerpc/oprofile/backtrace.c
index f75301f2c85f..6adf55fa5d88 100644
--- a/arch/powerpc/oprofile/backtrace.c
+++ b/arch/powerpc/oprofile/backtrace.c
@@ -12,6 +12,7 @@
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/compat.h>
+#include <asm/oprofile_impl.h>
#define STACK_SP(STACK) *(STACK)
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index b7cd00b0171e..a6995d4e93d4 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -59,9 +59,9 @@ struct cpu_hw_events {
struct perf_branch_entry bhrb_entries[BHRB_MAX_ENTRIES];
};
-DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
+static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
-struct power_pmu *ppmu;
+static struct power_pmu *ppmu;
/*
* Normally, to ignore kernel events we set the FCS (freeze counters
@@ -124,7 +124,7 @@ static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
-void power_pmu_flush_branch_stack(void) {}
+static void power_pmu_flush_branch_stack(void) {}
static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
static void pmao_restore_workaround(bool ebb) { }
#endif /* CONFIG_PPC32 */
@@ -375,7 +375,7 @@ static void power_pmu_bhrb_disable(struct perf_event *event)
/* Called from ctxsw to prevent one process's branch entries to
* mingle with the other process's entries during context switch.
*/
-void power_pmu_flush_branch_stack(void)
+static void power_pmu_flush_branch_stack(void)
{
if (ppmu->bhrb_nr)
power_pmu_bhrb_reset();
@@ -408,7 +408,7 @@ static __u64 power_pmu_bhrb_to(u64 addr)
}
/* Processing BHRB entries */
-void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
+static void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
{
u64 val;
u64 addr;
@@ -1573,7 +1573,7 @@ static void power_pmu_stop(struct perf_event *event, int ef_flags)
* Set the flag to make pmu::enable() not perform the
* schedulability test, it will be performed at commit time
*/
-void power_pmu_start_txn(struct pmu *pmu)
+static void power_pmu_start_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
@@ -1587,7 +1587,7 @@ void power_pmu_start_txn(struct pmu *pmu)
* Clear the flag and pmu::enable() will perform the
* schedulability test.
*/
-void power_pmu_cancel_txn(struct pmu *pmu)
+static void power_pmu_cancel_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
@@ -1600,7 +1600,7 @@ void power_pmu_cancel_txn(struct pmu *pmu)
* Perform the group schedulability test as a whole
* Return 0 if success
*/
-int power_pmu_commit_txn(struct pmu *pmu)
+static int power_pmu_commit_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw;
long i, n;
@@ -1888,7 +1888,7 @@ ssize_t power_events_sysfs_show(struct device *dev,
return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
}
-struct pmu power_pmu = {
+static struct pmu power_pmu = {
.pmu_enable = power_pmu_enable,
.pmu_disable = power_pmu_disable,
.event_init = power_pmu_event_init,
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index 70d4f748b54b..6c8710dd90c9 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -75,86 +75,6 @@ static struct attribute_group format_group = {
static struct kmem_cache *hv_page_cache;
-/*
- * read_offset_data - copy data from one buffer to another while treating the
- * source buffer as a small view on the total avaliable
- * source data.
- *
- * @dest: buffer to copy into
- * @dest_len: length of @dest in bytes
- * @requested_offset: the offset within the source data we want. Must be > 0
- * @src: buffer to copy data from
- * @src_len: length of @src in bytes
- * @source_offset: the offset in the sorce data that (src,src_len) refers to.
- * Must be > 0
- *
- * returns the number of bytes copied.
- *
- * The following ascii art shows the various buffer possitioning we need to
- * handle, assigns some arbitrary varibles to points on the buffer, and then
- * shows how we fiddle with those values to get things we care about (copy
- * start in src and copy len)
- *
- * s = @src buffer
- * d = @dest buffer
- * '.' areas in d are written to.
- *
- * u
- * x w v z
- * d |.........|
- * s |----------------------|
- *
- * u
- * x w z v
- * d |........------|
- * s |------------------|
- *
- * x w u,z,v
- * d |........|
- * s |------------------|
- *
- * x,w u,v,z
- * d |..................|
- * s |------------------|
- *
- * x u
- * w v z
- * d |........|
- * s |------------------|
- *
- * x z w v
- * d |------|
- * s |------|
- *
- * x = source_offset
- * w = requested_offset
- * z = source_offset + src_len
- * v = requested_offset + dest_len
- *
- * w_offset_in_s = w - x = requested_offset - source_offset
- * z_offset_in_s = z - x = src_len
- * v_offset_in_s = v - x = request_offset + dest_len - src_len
- */
-static ssize_t read_offset_data(void *dest, size_t dest_len,
- loff_t requested_offset, void *src,
- size_t src_len, loff_t source_offset)
-{
- size_t w_offset_in_s = requested_offset - source_offset;
- size_t z_offset_in_s = src_len;
- size_t v_offset_in_s = requested_offset + dest_len - src_len;
- size_t u_offset_in_s = min(z_offset_in_s, v_offset_in_s);
- size_t copy_len = u_offset_in_s - w_offset_in_s;
-
- if (requested_offset < 0 || source_offset < 0)
- return -EINVAL;
-
- if (z_offset_in_s <= w_offset_in_s)
- return 0;
-
- memcpy(dest, src + w_offset_in_s, copy_len);
- return copy_len;
-}
-
static unsigned long h_get_24x7_catalog_page_(unsigned long phys_4096,
unsigned long version,
unsigned long index)
@@ -183,8 +103,10 @@ static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
{
unsigned long hret;
ssize_t ret = 0;
- size_t catalog_len = 0, catalog_page_len = 0, page_count = 0;
+ size_t catalog_len = 0, catalog_page_len = 0;
loff_t page_offset = 0;
+ loff_t offset_in_page;
+ size_t copy_len;
uint64_t catalog_version_num = 0;
void *page = kmem_cache_alloc(hv_page_cache, GFP_USER);
struct hv_24x7_catalog_page_0 *page_0 = page;
@@ -202,7 +124,7 @@ static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
catalog_len = catalog_page_len * 4096;
page_offset = offset / 4096;
- page_count = count / 4096;
+ offset_in_page = offset % 4096;
if (page_offset >= catalog_page_len)
goto e_free;
@@ -216,8 +138,13 @@ static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
}
}
- ret = read_offset_data(buf, count, offset,
- page, 4096, page_offset * 4096);
+ copy_len = 4096 - offset_in_page;
+ if (copy_len > count)
+ copy_len = count;
+
+ memcpy(buf, page+offset_in_page, copy_len);
+ ret = copy_len;
+
e_free:
if (hret)
pr_err("h_get_24x7_catalog_page(ver=%lld, page=%lld) failed:"
@@ -225,9 +152,9 @@ e_free:
catalog_version_num, page_offset, hret);
kmem_cache_free(hv_page_cache, page);
- pr_devel("catalog_read: offset=%lld(%lld) count=%zu(%zu) catalog_len=%zu(%zu) => %zd\n",
- offset, page_offset, count, page_count, catalog_len,
- catalog_page_len, ret);
+ pr_devel("catalog_read: offset=%lld(%lld) count=%zu "
+ "catalog_len=%zu(%zu) => %zd\n", offset, page_offset,
+ count, catalog_len, catalog_page_len, ret);
return ret;
}
@@ -294,7 +221,7 @@ static unsigned long single_24x7_request(u8 domain, u32 offset, u16 ix,
u16 lpar, u64 *res,
bool success_expected)
{
- unsigned long ret;
+ unsigned long ret = -ENOMEM;
/*
* request_buffer and result_buffer are not required to be 4k aligned,
@@ -304,7 +231,27 @@ static unsigned long single_24x7_request(u8 domain, u32 offset, u16 ix,
struct reqb {
struct hv_24x7_request_buffer buf;
struct hv_24x7_request req;
- } __packed __aligned(4096) request_buffer = {
+ } __packed *request_buffer;
+
+ struct {
+ struct hv_24x7_data_result_buffer buf;
+ struct hv_24x7_result res;
+ struct hv_24x7_result_element elem;
+ __be64 result;
+ } __packed *result_buffer;
+
+ BUILD_BUG_ON(sizeof(*request_buffer) > 4096);
+ BUILD_BUG_ON(sizeof(*result_buffer) > 4096);
+
+ request_buffer = kmem_cache_zalloc(hv_page_cache, GFP_USER);
+ if (!request_buffer)
+ goto out;
+
+ result_buffer = kmem_cache_zalloc(hv_page_cache, GFP_USER);
+ if (!result_buffer)
+ goto out_free_request_buffer;
+
+ *request_buffer = (struct reqb) {
.buf = {
.interface_version = HV_24X7_IF_VERSION_CURRENT,
.num_requests = 1,
@@ -320,28 +267,27 @@ static unsigned long single_24x7_request(u8 domain, u32 offset, u16 ix,
}
};
- struct resb {
- struct hv_24x7_data_result_buffer buf;
- struct hv_24x7_result res;
- struct hv_24x7_result_element elem;
- __be64 result;
- } __packed __aligned(4096) result_buffer = {};
-
ret = plpar_hcall_norets(H_GET_24X7_DATA,
- virt_to_phys(&request_buffer), sizeof(request_buffer),
- virt_to_phys(&result_buffer), sizeof(result_buffer));
+ virt_to_phys(request_buffer), sizeof(*request_buffer),
+ virt_to_phys(result_buffer), sizeof(*result_buffer));
if (ret) {
if (success_expected)
- pr_err_ratelimited("hcall failed: %d %#x %#x %d => 0x%lx (%ld) detail=0x%x failing ix=%x\n",
- domain, offset, ix, lpar,
- ret, ret,
- result_buffer.buf.detailed_rc,
- result_buffer.buf.failing_request_ix);
- return ret;
+ pr_err_ratelimited("hcall failed: %d %#x %#x %d => "
+ "0x%lx (%ld) detail=0x%x failing ix=%x\n",
+ domain, offset, ix, lpar, ret, ret,
+ result_buffer->buf.detailed_rc,
+ result_buffer->buf.failing_request_ix);
+ goto out_free_result_buffer;
}
- *res = be64_to_cpu(result_buffer.result);
+ *res = be64_to_cpu(result_buffer->result);
+
+out_free_result_buffer:
+ kfree(result_buffer);
+out_free_request_buffer:
+ kfree(request_buffer);
+out:
return ret;
}
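
The catalog_read() rewrite above drops the read_offset_data() helper: since h_get_24x7_catalog_page() hands back exactly one 4 KiB page, the copy reduces to "remainder of this page, clamped to count". A standalone sketch of that bounded copy, with hypothetical names (catalog_copy, CATALOG_PAGE_SZ) standing in for the real routine.

/* Sketch of the simplified per-page copy in catalog_read(); model only. */
#include <stdio.h>
#include <string.h>
#include <stddef.h>

#define CATALOG_PAGE_SZ 4096

static size_t catalog_copy(char *dst, size_t count, long long offset,
			   const char *page /* the page covering offset/4096 */)
{
	size_t offset_in_page = (size_t)(offset % CATALOG_PAGE_SZ);
	size_t copy_len = CATALOG_PAGE_SZ - offset_in_page;

	if (copy_len > count)
		copy_len = count;
	memcpy(dst, page + offset_in_page, copy_len);
	return copy_len;
}

int main(void)
{
	static char page[CATALOG_PAGE_SZ];
	char buf[256];
	size_t n;

	memset(page, 'x', sizeof(page));
	/* a 200-byte read starting 100 bytes before the end of the page
	 * only gets the 100 bytes that exist in this page */
	n = catalog_copy(buf, 200, CATALOG_PAGE_SZ - 100, page);
	printf("copied %zu bytes\n", n);	/* copied 100 bytes */
	return 0;
}
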
diff --git a/arch/powerpc/platforms/40x/ep405.c b/arch/powerpc/platforms/40x/ep405.c
index b0389bbe4f94..ddc12a1926ef 100644
--- a/arch/powerpc/platforms/40x/ep405.c
+++ b/arch/powerpc/platforms/40x/ep405.c
@@ -49,7 +49,7 @@ static void __iomem *bcsr_regs;
/* there's more, can't be bothered typing them tho */
-static __initdata struct of_device_id ep405_of_bus[] = {
+static const struct of_device_id ep405_of_bus[] __initconst = {
{ .compatible = "ibm,plb3", },
{ .compatible = "ibm,opb", },
{ .compatible = "ibm,ebc", },
diff --git a/arch/powerpc/platforms/40x/ppc40x_simple.c b/arch/powerpc/platforms/40x/ppc40x_simple.c
index 8f3920e5a046..b0c46375dd95 100644
--- a/arch/powerpc/platforms/40x/ppc40x_simple.c
+++ b/arch/powerpc/platforms/40x/ppc40x_simple.c
@@ -24,7 +24,7 @@
#include <linux/init.h>
#include <linux/of_platform.h>
-static __initdata struct of_device_id ppc40x_of_bus[] = {
+static const struct of_device_id ppc40x_of_bus[] __initconst = {
{ .compatible = "ibm,plb3", },
{ .compatible = "ibm,plb4", },
{ .compatible = "ibm,opb", },
diff --git a/arch/powerpc/platforms/40x/virtex.c b/arch/powerpc/platforms/40x/virtex.c
index d0fc6866b00c..9aa7ae2f4164 100644
--- a/arch/powerpc/platforms/40x/virtex.c
+++ b/arch/powerpc/platforms/40x/virtex.c
@@ -17,7 +17,7 @@
#include <asm/xilinx_pci.h>
#include <asm/ppc4xx.h>
-static struct of_device_id xilinx_of_bus_ids[] __initdata = {
+static const struct of_device_id xilinx_of_bus_ids[] __initconst = {
{ .compatible = "xlnx,plb-v46-1.00.a", },
{ .compatible = "xlnx,plb-v34-1.01.a", },
{ .compatible = "xlnx,plb-v34-1.02.a", },
diff --git a/arch/powerpc/platforms/40x/walnut.c b/arch/powerpc/platforms/40x/walnut.c
index 8b691df72f74..f7ac2d0fcb44 100644
--- a/arch/powerpc/platforms/40x/walnut.c
+++ b/arch/powerpc/platforms/40x/walnut.c
@@ -28,7 +28,7 @@
#include <asm/pci-bridge.h>
#include <asm/ppc4xx.h>
-static __initdata struct of_device_id walnut_of_bus[] = {
+static const struct of_device_id walnut_of_bus[] __initconst = {
{ .compatible = "ibm,plb3", },
{ .compatible = "ibm,opb", },
{ .compatible = "ibm,ebc", },
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index 4d88f6a19058..82f2da28cd27 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -215,9 +215,9 @@ config AKEBONO
select NET_VENDOR_IBM
select IBM_EMAC_EMAC4
select IBM_EMAC_RGMII_WOL
- select USB
- select USB_OHCI_HCD_PLATFORM
- select USB_EHCI_HCD_PLATFORM
+ select USB if USB_SUPPORT
+ select USB_OHCI_HCD_PLATFORM if USB_OHCI_HCD
+ select USB_EHCI_HCD_PLATFORM if USB_EHCI_HCD
select MMC_SDHCI
select MMC_SDHCI_PLTFM
select MMC_SDHCI_OF_476GTR
diff --git a/arch/powerpc/platforms/44x/canyonlands.c b/arch/powerpc/platforms/44x/canyonlands.c
index e300dd4c89bf..22ca5430c9cb 100644
--- a/arch/powerpc/platforms/44x/canyonlands.c
+++ b/arch/powerpc/platforms/44x/canyonlands.c
@@ -33,7 +33,7 @@
#define BCSR_USB_EN 0x11
-static __initdata struct of_device_id ppc460ex_of_bus[] = {
+static const struct of_device_id ppc460ex_of_bus[] __initconst = {
{ .compatible = "ibm,plb4", },
{ .compatible = "ibm,opb", },
{ .compatible = "ibm,ebc", },
diff --git a/arch/powerpc/platforms/44x/ebony.c b/arch/powerpc/platforms/44x/ebony.c
index 6a4232bbdf88..ae893226392d 100644
--- a/arch/powerpc/platforms/44x/ebony.c
+++ b/arch/powerpc/platforms/44x/ebony.c
@@ -28,7 +28,7 @@
#include <asm/pci-bridge.h>
#include <asm/ppc4xx.h>
-static __initdata struct of_device_id ebony_of_bus[] = {
+static const struct of_device_id ebony_of_bus[] __initconst = {
{ .compatible = "ibm,plb4", },
{ .compatible = "ibm,opb", },
{ .compatible = "ibm,ebc", },
diff --git a/arch/powerpc/platforms/44x/iss4xx.c b/arch/powerpc/platforms/44x/iss4xx.c
index 4241bc825800..c7c6758b3cfe 100644
--- a/arch/powerpc/platforms/44x/iss4xx.c
+++ b/arch/powerpc/platforms/44x/iss4xx.c
@@ -32,7 +32,7 @@
#include <asm/mpic.h>
#include <asm/mmu.h>
-static __initdata struct of_device_id iss4xx_of_bus[] = {
+static const struct of_device_id iss4xx_of_bus[] __initconst = {
{ .compatible = "ibm,plb4", },
{ .compatible = "ibm,plb6", },
{ .compatible = "ibm,opb", },
diff --git a/arch/powerpc/platforms/44x/ppc44x_simple.c b/arch/powerpc/platforms/44x/ppc44x_simple.c
index 3ffb915446e3..573c3d2689c6 100644
--- a/arch/powerpc/platforms/44x/ppc44x_simple.c
+++ b/arch/powerpc/platforms/44x/ppc44x_simple.c
@@ -24,7 +24,7 @@
#include <linux/init.h>
#include <linux/of_platform.h>
-static __initdata struct of_device_id ppc44x_of_bus[] = {
+static const struct of_device_id ppc44x_of_bus[] __initconst = {
{ .compatible = "ibm,plb4", },
{ .compatible = "ibm,opb", },
{ .compatible = "ibm,ebc", },
diff --git a/arch/powerpc/platforms/44x/ppc476.c b/arch/powerpc/platforms/44x/ppc476.c
index 33986c1a05da..58db9d083969 100644
--- a/arch/powerpc/platforms/44x/ppc476.c
+++ b/arch/powerpc/platforms/44x/ppc476.c
@@ -38,7 +38,7 @@
#include <linux/pci.h>
#include <linux/i2c.h>
-static struct of_device_id ppc47x_of_bus[] __initdata = {
+static const struct of_device_id ppc47x_of_bus[] __initconst = {
{ .compatible = "ibm,plb4", },
{ .compatible = "ibm,plb6", },
{ .compatible = "ibm,opb", },
diff --git a/arch/powerpc/platforms/44x/sam440ep.c b/arch/powerpc/platforms/44x/sam440ep.c
index 9e09b835758b..3ee4a03c1496 100644
--- a/arch/powerpc/platforms/44x/sam440ep.c
+++ b/arch/powerpc/platforms/44x/sam440ep.c
@@ -29,7 +29,7 @@
#include <asm/ppc4xx.h>
#include <linux/i2c.h>
-static __initdata struct of_device_id sam440ep_of_bus[] = {
+static const struct of_device_id sam440ep_of_bus[] __initconst = {
{ .compatible = "ibm,plb4", },
{ .compatible = "ibm,opb", },
{ .compatible = "ibm,ebc", },
diff --git a/arch/powerpc/platforms/44x/virtex.c b/arch/powerpc/platforms/44x/virtex.c
index cf96ccaa760c..ad272c17c640 100644
--- a/arch/powerpc/platforms/44x/virtex.c
+++ b/arch/powerpc/platforms/44x/virtex.c
@@ -21,7 +21,7 @@
#include <asm/ppc4xx.h>
#include "44x.h"
-static struct of_device_id xilinx_of_bus_ids[] __initdata = {
+static const struct of_device_id xilinx_of_bus_ids[] __initconst = {
{ .compatible = "simple-bus", },
{ .compatible = "xlnx,plb-v46-1.00.a", },
{ .compatible = "xlnx,plb-v46-1.02.a", },
diff --git a/arch/powerpc/platforms/44x/warp.c b/arch/powerpc/platforms/44x/warp.c
index 3a104284b338..501333cf42cf 100644
--- a/arch/powerpc/platforms/44x/warp.c
+++ b/arch/powerpc/platforms/44x/warp.c
@@ -28,7 +28,7 @@
#include <asm/dma.h>
-static __initdata struct of_device_id warp_of_bus[] = {
+static const struct of_device_id warp_of_bus[] __initconst = {
{ .compatible = "ibm,plb4", },
{ .compatible = "ibm,opb", },
{ .compatible = "ibm,ebc", },
diff --git a/arch/powerpc/platforms/512x/mpc512x_shared.c b/arch/powerpc/platforms/512x/mpc512x_shared.c
index adb95f03d4d4..e996e007bc44 100644
--- a/arch/powerpc/platforms/512x/mpc512x_shared.c
+++ b/arch/powerpc/platforms/512x/mpc512x_shared.c
@@ -337,7 +337,7 @@ void __init mpc512x_init_IRQ(void)
/*
* Nodes to do bus probe on, soc and localbus
*/
-static struct of_device_id __initdata of_bus_ids[] = {
+static const struct of_device_id of_bus_ids[] __initconst = {
{ .compatible = "fsl,mpc5121-immr", },
{ .compatible = "fsl,mpc5121-localbus", },
{ .compatible = "fsl,mpc5121-mbx", },
diff --git a/arch/powerpc/platforms/52xx/lite5200.c b/arch/powerpc/platforms/52xx/lite5200.c
index 1843bc932011..7492de3cf6d0 100644
--- a/arch/powerpc/platforms/52xx/lite5200.c
+++ b/arch/powerpc/platforms/52xx/lite5200.c
@@ -34,13 +34,13 @@
*/
/* mpc5200 device tree match tables */
-static struct of_device_id mpc5200_cdm_ids[] __initdata = {
+static const struct of_device_id mpc5200_cdm_ids[] __initconst = {
{ .compatible = "fsl,mpc5200-cdm", },
{ .compatible = "mpc5200-cdm", },
{}
};
-static struct of_device_id mpc5200_gpio_ids[] __initdata = {
+static const struct of_device_id mpc5200_gpio_ids[] __initconst = {
{ .compatible = "fsl,mpc5200-gpio", },
{ .compatible = "mpc5200-gpio", },
{}
diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c
index 070d315dd6cd..32cae33c4266 100644
--- a/arch/powerpc/platforms/52xx/media5200.c
+++ b/arch/powerpc/platforms/52xx/media5200.c
@@ -30,7 +30,7 @@
#include <asm/machdep.h>
#include <asm/mpc52xx.h>
-static struct of_device_id mpc5200_gpio_ids[] __initdata = {
+static const struct of_device_id mpc5200_gpio_ids[] __initconst = {
{ .compatible = "fsl,mpc5200-gpio", },
{ .compatible = "mpc5200-gpio", },
{}
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_common.c b/arch/powerpc/platforms/52xx/mpc52xx_common.c
index d7e94f49532a..26993826a797 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_common.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_common.c
@@ -23,12 +23,12 @@
#include <asm/mpc52xx.h>
/* MPC5200 device tree match tables */
-static struct of_device_id mpc52xx_xlb_ids[] __initdata = {
+static const struct of_device_id mpc52xx_xlb_ids[] __initconst = {
{ .compatible = "fsl,mpc5200-xlb", },
{ .compatible = "mpc5200-xlb", },
{}
};
-static struct of_device_id mpc52xx_bus_ids[] __initdata = {
+static const struct of_device_id mpc52xx_bus_ids[] __initconst = {
{ .compatible = "fsl,mpc5200-immr", },
{ .compatible = "fsl,mpc5200b-immr", },
{ .compatible = "simple-bus", },
@@ -108,21 +108,21 @@ void __init mpc52xx_declare_of_platform_devices(void)
/*
* match tables used by mpc52xx_map_common_devices()
*/
-static struct of_device_id mpc52xx_gpt_ids[] __initdata = {
+static const struct of_device_id mpc52xx_gpt_ids[] __initconst = {
{ .compatible = "fsl,mpc5200-gpt", },
{ .compatible = "mpc5200-gpt", }, /* old */
{}
};
-static struct of_device_id mpc52xx_cdm_ids[] __initdata = {
+static const struct of_device_id mpc52xx_cdm_ids[] __initconst = {
{ .compatible = "fsl,mpc5200-cdm", },
{ .compatible = "mpc5200-cdm", }, /* old */
{}
};
-static const struct of_device_id mpc52xx_gpio_simple[] = {
+static const struct of_device_id mpc52xx_gpio_simple[] __initconst = {
{ .compatible = "fsl,mpc5200-gpio", },
{}
};
-static const struct of_device_id mpc52xx_gpio_wkup[] = {
+static const struct of_device_id mpc52xx_gpio_wkup[] __initconst = {
{ .compatible = "fsl,mpc5200-gpio-wkup", },
{}
};
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
index 37f7a89c10f2..f8f0081759fb 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
@@ -564,7 +564,7 @@ static int mpc52xx_lpbfifo_remove(struct platform_device *op)
return 0;
}
-static struct of_device_id mpc52xx_lpbfifo_match[] = {
+static const struct of_device_id mpc52xx_lpbfifo_match[] = {
{ .compatible = "fsl,mpc5200-lpbfifo", },
{},
};
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
index 2898b737deb7..2944bc84b9d6 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
@@ -119,12 +119,12 @@
/* MPC5200 device tree match tables */
-static struct of_device_id mpc52xx_pic_ids[] __initdata = {
+static const struct of_device_id mpc52xx_pic_ids[] __initconst = {
{ .compatible = "fsl,mpc5200-pic", },
{ .compatible = "mpc5200-pic", },
{}
};
-static struct of_device_id mpc52xx_sdma_ids[] __initdata = {
+static const struct of_device_id mpc52xx_sdma_ids[] __initconst = {
{ .compatible = "fsl,mpc5200-bestcomm", },
{ .compatible = "mpc5200-bestcomm", },
{}
diff --git a/arch/powerpc/platforms/82xx/ep8248e.c b/arch/powerpc/platforms/82xx/ep8248e.c
index 79799b29ffe2..3d0c3a01143d 100644
--- a/arch/powerpc/platforms/82xx/ep8248e.c
+++ b/arch/powerpc/platforms/82xx/ep8248e.c
@@ -298,7 +298,7 @@ static void __init ep8248e_setup_arch(void)
ppc_md.progress("ep8248e_setup_arch(), finish", 0);
}
-static __initdata struct of_device_id of_bus_ids[] = {
+static const struct of_device_id of_bus_ids[] __initconst = {
{ .compatible = "simple-bus", },
{ .compatible = "fsl,ep8248e-bcsr", },
{},
diff --git a/arch/powerpc/platforms/82xx/km82xx.c b/arch/powerpc/platforms/82xx/km82xx.c
index 058cc1895c88..387b446f4161 100644
--- a/arch/powerpc/platforms/82xx/km82xx.c
+++ b/arch/powerpc/platforms/82xx/km82xx.c
@@ -180,7 +180,7 @@ static void __init km82xx_setup_arch(void)
ppc_md.progress("km82xx_setup_arch(), finish", 0);
}
-static __initdata struct of_device_id of_bus_ids[] = {
+static const struct of_device_id of_bus_ids[] __initconst = {
{ .compatible = "simple-bus", },
{},
};
diff --git a/arch/powerpc/platforms/82xx/mpc8272_ads.c b/arch/powerpc/platforms/82xx/mpc8272_ads.c
index 6a14cf50f4a2..d24deacf07d0 100644
--- a/arch/powerpc/platforms/82xx/mpc8272_ads.c
+++ b/arch/powerpc/platforms/82xx/mpc8272_ads.c
@@ -181,7 +181,7 @@ static void __init mpc8272_ads_setup_arch(void)
ppc_md.progress("mpc8272_ads_setup_arch(), finish", 0);
}
-static struct of_device_id __initdata of_bus_ids[] = {
+static const struct of_device_id of_bus_ids[] __initconst = {
{ .name = "soc", },
{ .name = "cpm", },
{ .name = "localbus", },
diff --git a/arch/powerpc/platforms/82xx/pq2fads.c b/arch/powerpc/platforms/82xx/pq2fads.c
index e5f82ec8df17..3a5164ad10ad 100644
--- a/arch/powerpc/platforms/82xx/pq2fads.c
+++ b/arch/powerpc/platforms/82xx/pq2fads.c
@@ -168,7 +168,7 @@ static int __init pq2fads_probe(void)
return of_flat_dt_is_compatible(root, "fsl,pq2fads");
}
-static struct of_device_id __initdata of_bus_ids[] = {
+static const struct of_device_id of_bus_ids[] __initconst = {
{ .name = "soc", },
{ .name = "cpm", },
{ .name = "localbus", },
diff --git a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
index 73997027b085..463fa91ee5b6 100644
--- a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
+++ b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
@@ -214,7 +214,7 @@ static const struct i2c_device_id mcu_ids[] = {
};
MODULE_DEVICE_TABLE(i2c, mcu_ids);
-static struct of_device_id mcu_of_match_table[] = {
+static const struct of_device_id mcu_of_match_table[] = {
{ .compatible = "fsl,mcu-mpc8349emitx", },
{ },
};
diff --git a/arch/powerpc/platforms/83xx/misc.c b/arch/powerpc/platforms/83xx/misc.c
index 125336f750c6..ef9d01a049c1 100644
--- a/arch/powerpc/platforms/83xx/misc.c
+++ b/arch/powerpc/platforms/83xx/misc.c
@@ -114,7 +114,7 @@ void __init mpc83xx_ipic_and_qe_init_IRQ(void)
}
#endif /* CONFIG_QUICC_ENGINE */
-static struct of_device_id __initdata of_bus_ids[] = {
+static const struct of_device_id of_bus_ids[] __initconst = {
{ .type = "soc", },
{ .compatible = "soc", },
{ .compatible = "simple-bus" },
diff --git a/arch/powerpc/platforms/83xx/mpc834x_itx.c b/arch/powerpc/platforms/83xx/mpc834x_itx.c
index a494fa57bdf9..80aea8c4b5a3 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_itx.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_itx.c
@@ -38,7 +38,7 @@
#include "mpc83xx.h"
-static struct of_device_id __initdata mpc834x_itx_ids[] = {
+static const struct of_device_id mpc834x_itx_ids[] __initconst = {
{ .compatible = "fsl,pq2pro-localbus", },
{},
};
diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
index 4b4c081df94d..eeb80e25214d 100644
--- a/arch/powerpc/platforms/83xx/suspend.c
+++ b/arch/powerpc/platforms/83xx/suspend.c
@@ -321,7 +321,7 @@ static const struct platform_suspend_ops mpc83xx_suspend_ops = {
.end = mpc83xx_suspend_end,
};
-static struct of_device_id pmc_match[];
+static const struct of_device_id pmc_match[];
static int pmc_probe(struct platform_device *ofdev)
{
const struct of_device_id *match;
@@ -420,7 +420,7 @@ static struct pmc_type pmc_types[] = {
}
};
-static struct of_device_id pmc_match[] = {
+static const struct of_device_id pmc_match[] = {
{
.compatible = "fsl,mpc8313-pmc",
.data = &pmc_types[0],
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index 0c1e6903597e..f22635a71d01 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -276,7 +276,7 @@ config CORENET_GENERIC
For 64bit kernel, the following boards are supported:
T208x QDS/RDB, T4240 QDS/RDB and B4 QDS
The following boards are supported for both 32bit and 64bit kernel:
- P5020 DS, P5040 DS and T104xQDS
+ P5020 DS, P5040 DS and T104xQDS/RDB
endif # FSL_SOC_BOOKE
diff --git a/arch/powerpc/platforms/85xx/common.c b/arch/powerpc/platforms/85xx/common.c
index b564b5e23f7c..4a9ad871a168 100644
--- a/arch/powerpc/platforms/85xx/common.c
+++ b/arch/powerpc/platforms/85xx/common.c
@@ -14,7 +14,7 @@
#include "mpc85xx.h"
-static struct of_device_id __initdata mpc85xx_common_ids[] = {
+static const struct of_device_id mpc85xx_common_ids[] __initconst = {
{ .type = "soc", },
{ .compatible = "soc", },
{ .compatible = "simple-bus", },
diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c
index d22dd85e50bf..e56b89a792ed 100644
--- a/arch/powerpc/platforms/85xx/corenet_generic.c
+++ b/arch/powerpc/platforms/85xx/corenet_generic.c
@@ -20,6 +20,7 @@
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
+#include <asm/pgtable.h>
#include <asm/ppc-pci.h>
#include <mm/mmu_decl.h>
#include <asm/prom.h>
@@ -67,6 +68,16 @@ void __init corenet_gen_setup_arch(void)
swiotlb_detect_4g();
+#if defined(CONFIG_FSL_PCI) && defined(CONFIG_ZONE_DMA32)
+ /*
+ * Inbound windows don't cover the full lower 4 GiB
+ * due to conflicts with PCICSRBAR and outbound windows,
+ * so limit the DMA32 zone to 2 GiB, to allow consistent
+ * allocations to succeed.
+ */
+ limit_zone_pfn(ZONE_DMA32, 1UL << (31 - PAGE_SHIFT));
+#endif
+
pr_info("%s board\n", ppc_md.name);
mpc85xx_qe_init();
@@ -129,6 +140,9 @@ static const char * const boards[] __initconst = {
"fsl,B4220QDS",
"fsl,T1040QDS",
"fsl,T1042QDS",
+ "fsl,T1040RDB",
+ "fsl,T1042RDB",
+ "fsl,T1042RDB_PI",
"keymile,kmcoge4",
NULL
};
diff --git a/arch/powerpc/platforms/85xx/ppa8548.c b/arch/powerpc/platforms/85xx/ppa8548.c
index 3daff7c63569..12019f17f297 100644
--- a/arch/powerpc/platforms/85xx/ppa8548.c
+++ b/arch/powerpc/platforms/85xx/ppa8548.c
@@ -59,7 +59,7 @@ static void ppa8548_show_cpuinfo(struct seq_file *m)
seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
}
-static struct of_device_id __initdata of_bus_ids[] = {
+static const struct of_device_id of_bus_ids[] __initconst = {
{ .name = "soc", },
{ .type = "soc", },
{ .compatible = "simple-bus", },
diff --git a/arch/powerpc/platforms/85xx/qemu_e500.c b/arch/powerpc/platforms/85xx/qemu_e500.c
index 7f2673293549..8ad2fe6f200a 100644
--- a/arch/powerpc/platforms/85xx/qemu_e500.c
+++ b/arch/powerpc/platforms/85xx/qemu_e500.c
@@ -18,6 +18,7 @@
#include <linux/kernel.h>
#include <linux/of_fdt.h>
#include <asm/machdep.h>
+#include <asm/pgtable.h>
#include <asm/time.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
@@ -44,6 +45,15 @@ static void __init qemu_e500_setup_arch(void)
fsl_pci_assign_primary();
swiotlb_detect_4g();
+#if defined(CONFIG_FSL_PCI) && defined(CONFIG_ZONE_DMA32)
+ /*
+ * Inbound windows don't cover the full lower 4 GiB
+ * due to conflicts with PCICSRBAR and outbound windows,
+ * so limit the DMA32 zone to 2 GiB, to allow consistent
+ * allocations to succeed.
+ */
+ limit_zone_pfn(ZONE_DMA32, 1UL << (31 - PAGE_SHIFT));
+#endif
mpc85xx_smp_init();
}
diff --git a/arch/powerpc/platforms/85xx/sgy_cts1000.c b/arch/powerpc/platforms/85xx/sgy_cts1000.c
index bb75add67084..8162b0412117 100644
--- a/arch/powerpc/platforms/85xx/sgy_cts1000.c
+++ b/arch/powerpc/platforms/85xx/sgy_cts1000.c
@@ -24,7 +24,7 @@
static struct device_node *halt_node;
-static struct of_device_id child_match[] = {
+static const struct of_device_id child_match[] = {
{
.compatible = "sgy,gpio-halt",
},
@@ -147,7 +147,7 @@ static int gpio_halt_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id gpio_halt_match[] = {
+static const struct of_device_id gpio_halt_match[] = {
/* We match on the gpio bus itself and scan the children since they
* wont be matched against us. We know the bus wont match until it
* has been registered too. */
diff --git a/arch/powerpc/platforms/86xx/gef_ppc9a.c b/arch/powerpc/platforms/86xx/gef_ppc9a.c
index c23f3443880a..bf17933b20f3 100644
--- a/arch/powerpc/platforms/86xx/gef_ppc9a.c
+++ b/arch/powerpc/platforms/86xx/gef_ppc9a.c
@@ -213,7 +213,7 @@ static long __init mpc86xx_time_init(void)
return 0;
}
-static __initdata struct of_device_id of_bus_ids[] = {
+static const struct of_device_id of_bus_ids[] __initconst = {
{ .compatible = "simple-bus", },
{ .compatible = "gianfar", },
{ .compatible = "fsl,mpc8641-pcie", },
diff --git a/arch/powerpc/platforms/86xx/gef_sbc310.c b/arch/powerpc/platforms/86xx/gef_sbc310.c
index 8a6ac20686ea..8facf5873866 100644
--- a/arch/powerpc/platforms/86xx/gef_sbc310.c
+++ b/arch/powerpc/platforms/86xx/gef_sbc310.c
@@ -200,7 +200,7 @@ static long __init mpc86xx_time_init(void)
return 0;
}
-static __initdata struct of_device_id of_bus_ids[] = {
+static const struct of_device_id of_bus_ids[] __initconst = {
{ .compatible = "simple-bus", },
{ .compatible = "gianfar", },
{ .compatible = "fsl,mpc8641-pcie", },
diff --git a/arch/powerpc/platforms/86xx/gef_sbc610.c b/arch/powerpc/platforms/86xx/gef_sbc610.c
index 06c72636f299..8c9058df5642 100644
--- a/arch/powerpc/platforms/86xx/gef_sbc610.c
+++ b/arch/powerpc/platforms/86xx/gef_sbc610.c
@@ -190,7 +190,7 @@ static long __init mpc86xx_time_init(void)
return 0;
}
-static __initdata struct of_device_id of_bus_ids[] = {
+static const struct of_device_id of_bus_ids[] __initconst = {
{ .compatible = "simple-bus", },
{ .compatible = "gianfar", },
{ .compatible = "fsl,mpc8641-pcie", },
diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
index d479d68fbb2b..55413a547ea8 100644
--- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
+++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
@@ -85,7 +85,7 @@ static void __init mpc8610_suspend_init(void)
static inline void mpc8610_suspend_init(void) { }
#endif /* CONFIG_SUSPEND */
-static struct of_device_id __initdata mpc8610_ids[] = {
+static const struct of_device_id mpc8610_ids[] __initconst = {
{ .compatible = "fsl,mpc8610-immr", },
{ .compatible = "fsl,mpc8610-guts", },
{ .compatible = "simple-bus", },
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
index e8bf3fae5606..07ccb1b0cc7d 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
@@ -127,7 +127,7 @@ mpc86xx_time_init(void)
return 0;
}
-static __initdata struct of_device_id of_bus_ids[] = {
+static const struct of_device_id of_bus_ids[] __initconst = {
{ .compatible = "simple-bus", },
{ .compatible = "fsl,srio", },
{ .compatible = "gianfar", },
diff --git a/arch/powerpc/platforms/86xx/sbc8641d.c b/arch/powerpc/platforms/86xx/sbc8641d.c
index b47a8fd0f3d3..6810b71d54a7 100644
--- a/arch/powerpc/platforms/86xx/sbc8641d.c
+++ b/arch/powerpc/platforms/86xx/sbc8641d.c
@@ -92,7 +92,7 @@ mpc86xx_time_init(void)
return 0;
}
-static __initdata struct of_device_id of_bus_ids[] = {
+static const struct of_device_id of_bus_ids[] __initconst = {
{ .compatible = "simple-bus", },
{ .compatible = "gianfar", },
{ .compatible = "fsl,mpc8641-pcie", },
diff --git a/arch/powerpc/platforms/8xx/adder875.c b/arch/powerpc/platforms/8xx/adder875.c
index 82363e98f50e..61cae4c1edb8 100644
--- a/arch/powerpc/platforms/8xx/adder875.c
+++ b/arch/powerpc/platforms/8xx/adder875.c
@@ -92,7 +92,7 @@ static int __init adder875_probe(void)
return of_flat_dt_is_compatible(root, "analogue-and-micro,adder875");
}
-static __initdata struct of_device_id of_bus_ids[] = {
+static const struct of_device_id of_bus_ids[] __initconst = {
{ .compatible = "simple-bus", },
{},
};
diff --git a/arch/powerpc/platforms/8xx/ep88xc.c b/arch/powerpc/platforms/8xx/ep88xc.c
index e62166681d08..2bedeb7d5f8f 100644
--- a/arch/powerpc/platforms/8xx/ep88xc.c
+++ b/arch/powerpc/platforms/8xx/ep88xc.c
@@ -147,7 +147,7 @@ static int __init ep88xc_probe(void)
return of_flat_dt_is_compatible(root, "fsl,ep88xc");
}
-static struct of_device_id __initdata of_bus_ids[] = {
+static const struct of_device_id of_bus_ids[] __initconst = {
{ .name = "soc", },
{ .name = "cpm", },
{ .name = "localbus", },
diff --git a/arch/powerpc/platforms/8xx/mpc86xads_setup.c b/arch/powerpc/platforms/8xx/mpc86xads_setup.c
index 63084640c5c5..78180c5e73ff 100644
--- a/arch/powerpc/platforms/8xx/mpc86xads_setup.c
+++ b/arch/powerpc/platforms/8xx/mpc86xads_setup.c
@@ -122,7 +122,7 @@ static int __init mpc86xads_probe(void)
return of_flat_dt_is_compatible(root, "fsl,mpc866ads");
}
-static struct of_device_id __initdata of_bus_ids[] = {
+static const struct of_device_id of_bus_ids[] __initconst = {
{ .name = "soc", },
{ .name = "cpm", },
{ .name = "localbus", },
diff --git a/arch/powerpc/platforms/8xx/mpc885ads_setup.c b/arch/powerpc/platforms/8xx/mpc885ads_setup.c
index 5921dcb498fd..4d62bf9dc789 100644
--- a/arch/powerpc/platforms/8xx/mpc885ads_setup.c
+++ b/arch/powerpc/platforms/8xx/mpc885ads_setup.c
@@ -197,7 +197,7 @@ static int __init mpc885ads_probe(void)
return of_flat_dt_is_compatible(root, "fsl,mpc885ads");
}
-static struct of_device_id __initdata of_bus_ids[] = {
+static const struct of_device_id of_bus_ids[] __initconst = {
{ .name = "soc", },
{ .name = "cpm", },
{ .name = "localbus", },
diff --git a/arch/powerpc/platforms/8xx/tqm8xx_setup.c b/arch/powerpc/platforms/8xx/tqm8xx_setup.c
index dda607807def..bee47a2b23e6 100644
--- a/arch/powerpc/platforms/8xx/tqm8xx_setup.c
+++ b/arch/powerpc/platforms/8xx/tqm8xx_setup.c
@@ -124,7 +124,7 @@ static int __init tqm8xx_probe(void)
return of_flat_dt_is_compatible(node, "tqc,tqm8xx");
}
-static struct of_device_id __initdata of_bus_ids[] = {
+static const struct of_device_id of_bus_ids[] __initconst = {
{ .name = "soc", },
{ .name = "cpm", },
{ .name = "localbus", },
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 7d9ee3d8c618..76483e3acd60 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -116,6 +116,12 @@ config POWER6_CPU
config POWER7_CPU
bool "POWER7"
depends on PPC_BOOK3S_64
+ select ARCH_HAS_FAST_MULTIPLIER
+
+config POWER8_CPU
+ bool "POWER8"
+ depends on PPC_BOOK3S_64
+ select ARCH_HAS_FAST_MULTIPLIER
config E5500_CPU
bool "Freescale e5500"
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index 9978f594cac0..870b6dbd4d18 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -86,6 +86,7 @@ config SPU_FS_64K_LS
config SPU_BASE
bool
default n
+ select PPC_COPRO_BASE
config CBE_RAS
bool "RAS features for bare metal Cell BE"
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
index fe053e7c73ee..2d16884f67b9 100644
--- a/arch/powerpc/platforms/cell/Makefile
+++ b/arch/powerpc/platforms/cell/Makefile
@@ -20,7 +20,7 @@ spu-manage-$(CONFIG_PPC_CELL_COMMON) += spu_manage.o
obj-$(CONFIG_SPU_BASE) += spu_callbacks.o spu_base.o \
spu_notify.o \
- spu_syscalls.o spu_fault.o \
+ spu_syscalls.o \
$(spu-priv1-y) \
$(spu-manage-y) \
spufs/
diff --git a/arch/powerpc/platforms/cell/celleb_pci.c b/arch/powerpc/platforms/cell/celleb_pci.c
index 173568140a32..2b98a36ef8fb 100644
--- a/arch/powerpc/platforms/cell/celleb_pci.c
+++ b/arch/powerpc/platforms/cell/celleb_pci.c
@@ -454,7 +454,7 @@ static struct celleb_phb_spec celleb_fake_pci_spec __initdata = {
.setup = celleb_setup_fake_pci,
};
-static struct of_device_id celleb_phb_match[] __initdata = {
+static const struct of_device_id celleb_phb_match[] __initconst = {
{
.name = "pci-pseudo",
.data = &celleb_fake_pci_spec,
diff --git a/arch/powerpc/platforms/cell/celleb_setup.c b/arch/powerpc/platforms/cell/celleb_setup.c
index 1d5a4d8ddad9..34e8ce2976aa 100644
--- a/arch/powerpc/platforms/cell/celleb_setup.c
+++ b/arch/powerpc/platforms/cell/celleb_setup.c
@@ -102,7 +102,7 @@ static void __init celleb_setup_arch_common(void)
#endif
}
-static struct of_device_id celleb_bus_ids[] __initdata = {
+static const struct of_device_id celleb_bus_ids[] __initconst = {
{ .type = "scc", },
{ .type = "ioif", }, /* old style */
{},
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 2930d1e81a05..ffcbd242e669 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -76,10 +76,6 @@ static LIST_HEAD(spu_full_list);
static DEFINE_SPINLOCK(spu_full_list_lock);
static DEFINE_MUTEX(spu_full_list_mutex);
-struct spu_slb {
- u64 esid, vsid;
-};
-
void spu_invalidate_slbs(struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
@@ -149,7 +145,7 @@ static void spu_restart_dma(struct spu *spu)
}
}
-static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb)
+static inline void spu_load_slb(struct spu *spu, int slbe, struct copro_slb *slb)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
@@ -167,45 +163,12 @@ static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb)
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
- struct mm_struct *mm = spu->mm;
- struct spu_slb slb;
- int psize;
-
- pr_debug("%s\n", __func__);
-
- slb.esid = (ea & ESID_MASK) | SLB_ESID_V;
+ struct copro_slb slb;
+ int ret;
- switch(REGION_ID(ea)) {
- case USER_REGION_ID:
-#ifdef CONFIG_PPC_MM_SLICES
- psize = get_slice_psize(mm, ea);
-#else
- psize = mm->context.user_psize;
-#endif
- slb.vsid = (get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M)
- << SLB_VSID_SHIFT) | SLB_VSID_USER;
- break;
- case VMALLOC_REGION_ID:
- if (ea < VMALLOC_END)
- psize = mmu_vmalloc_psize;
- else
- psize = mmu_io_psize;
- slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
- << SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
- break;
- case KERNEL_REGION_ID:
- psize = mmu_linear_psize;
- slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
- << SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
- break;
- default:
- /* Future: support kernel segments so that drivers
- * can use SPUs.
- */
- pr_debug("invalid region access at %016lx\n", ea);
- return 1;
- }
- slb.vsid |= mmu_psize_defs[psize].sllp;
+ ret = copro_calculate_slb(spu->mm, ea, &slb);
+ if (ret)
+ return ret;
spu_load_slb(spu, spu->slb_replace, &slb);
@@ -253,7 +216,7 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
return 0;
}
-static void __spu_kernel_slb(void *addr, struct spu_slb *slb)
+static void __spu_kernel_slb(void *addr, struct copro_slb *slb)
{
unsigned long ea = (unsigned long)addr;
u64 llp;
@@ -272,7 +235,7 @@ static void __spu_kernel_slb(void *addr, struct spu_slb *slb)
* Given an array of @nr_slbs SLB entries, @slbs, return non-zero if the
* address @new_addr is present.
*/
-static inline int __slb_present(struct spu_slb *slbs, int nr_slbs,
+static inline int __slb_present(struct copro_slb *slbs, int nr_slbs,
void *new_addr)
{
unsigned long ea = (unsigned long)new_addr;
@@ -297,7 +260,7 @@ static inline int __slb_present(struct spu_slb *slbs, int nr_slbs,
void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
void *code, int code_size)
{
- struct spu_slb slbs[4];
+ struct copro_slb slbs[4];
int i, nr_slbs = 0;
/* start and end addresses of both mappings */
void *addrs[] = {
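
[editor note] The SLB maths removed from __spu_trap_data_seg() above is not deleted, it moves into the shared copro_calculate_slb() helper so cxl can reuse it. A rough reconstruction of that helper's shape, pieced together from the removed lines (the real implementation lives in arch/powerpc/mm/copro_fault.c and keeps the CONFIG_PPC_MM_SLICES alternative that is elided here):

static int copro_calculate_slb_sketch(struct mm_struct *mm, u64 ea,
				      struct copro_slb *slb)
{
	int psize;

	switch (REGION_ID(ea)) {
	case USER_REGION_ID:
		psize = get_slice_psize(mm, ea);
		slb->vsid = (get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M)
			     << SLB_VSID_SHIFT) | SLB_VSID_USER;
		break;
	case VMALLOC_REGION_ID:
		psize = (ea < VMALLOC_END) ? mmu_vmalloc_psize : mmu_io_psize;
		slb->vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
			     << SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
		break;
	case KERNEL_REGION_ID:
		psize = mmu_linear_psize;
		slb->vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
			     << SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
		break;
	default:
		pr_debug("%s: invalid region access at %016llx\n", __func__, ea);
		return 1;
	}

	slb->vsid |= mmu_psize_defs[psize].sllp;
	slb->esid = (ea & ESID_MASK) | SLB_ESID_V;
	return 0;
}
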
diff --git a/arch/powerpc/platforms/cell/spufs/fault.c b/arch/powerpc/platforms/cell/spufs/fault.c
index 8cb6260cc80f..e45894a08118 100644
--- a/arch/powerpc/platforms/cell/spufs/fault.c
+++ b/arch/powerpc/platforms/cell/spufs/fault.c
@@ -138,7 +138,7 @@ int spufs_handle_class1(struct spu_context *ctx)
if (ctx->state == SPU_STATE_RUNNABLE)
ctx->spu->stats.hash_flt++;
- /* we must not hold the lock when entering spu_handle_mm_fault */
+ /* we must not hold the lock when entering copro_handle_mm_fault */
spu_release(ctx);
access = (_PAGE_PRESENT | _PAGE_USER);
@@ -149,7 +149,7 @@ int spufs_handle_class1(struct spu_context *ctx)
/* hashing failed, so try the actual fault handler */
if (ret)
- ret = spu_handle_mm_fault(current->mm, ea, dsisr, &flt);
+ ret = copro_handle_mm_fault(current->mm, ea, dsisr, &flt);
/*
* This is nasty: we need the state_mutex for all the bookkeeping even
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index 7044fd36197b..5b77b1919fd2 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -258,7 +258,7 @@ static void chrp_init_early(void)
struct device_node *node;
const char *property;
- if (strstr(cmd_line, "console="))
+ if (strstr(boot_command_line, "console="))
return;
/* find the boot console from /chosen/stdout */
if (!of_chosen)
diff --git a/arch/powerpc/platforms/embedded6xx/gamecube.c b/arch/powerpc/platforms/embedded6xx/gamecube.c
index a138e14bad2e..bd4ba5d7d568 100644
--- a/arch/powerpc/platforms/embedded6xx/gamecube.c
+++ b/arch/powerpc/platforms/embedded6xx/gamecube.c
@@ -90,7 +90,7 @@ define_machine(gamecube) {
};
-static struct of_device_id gamecube_of_bus[] = {
+static const struct of_device_id gamecube_of_bus[] = {
{ .compatible = "nintendo,flipper", },
{ },
};
diff --git a/arch/powerpc/platforms/embedded6xx/linkstation.c b/arch/powerpc/platforms/embedded6xx/linkstation.c
index 455e7c087422..168e1d80b2e5 100644
--- a/arch/powerpc/platforms/embedded6xx/linkstation.c
+++ b/arch/powerpc/platforms/embedded6xx/linkstation.c
@@ -21,7 +21,7 @@
#include "mpc10x.h"
-static __initdata struct of_device_id of_bus_ids[] = {
+static const struct of_device_id of_bus_ids[] __initconst = {
{ .type = "soc", },
{ .compatible = "simple-bus", },
{},
diff --git a/arch/powerpc/platforms/embedded6xx/mvme5100.c b/arch/powerpc/platforms/embedded6xx/mvme5100.c
index 25e3bfb64efb..1613303177e6 100644
--- a/arch/powerpc/platforms/embedded6xx/mvme5100.c
+++ b/arch/powerpc/platforms/embedded6xx/mvme5100.c
@@ -149,7 +149,7 @@ static int __init mvme5100_add_bridge(struct device_node *dev)
return 0;
}
-static struct of_device_id mvme5100_of_bus_ids[] __initdata = {
+static const struct of_device_id mvme5100_of_bus_ids[] __initconst = {
{ .compatible = "hawk-bridge", },
{},
};
diff --git a/arch/powerpc/platforms/embedded6xx/storcenter.c b/arch/powerpc/platforms/embedded6xx/storcenter.c
index c458b60d14c4..d572833ebd00 100644
--- a/arch/powerpc/platforms/embedded6xx/storcenter.c
+++ b/arch/powerpc/platforms/embedded6xx/storcenter.c
@@ -24,7 +24,7 @@
#include "mpc10x.h"
-static __initdata struct of_device_id storcenter_of_bus[] = {
+static const struct of_device_id storcenter_of_bus[] __initconst = {
{ .name = "soc", },
{},
};
diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c
index 6d8dadf19f0b..388e29bab8f6 100644
--- a/arch/powerpc/platforms/embedded6xx/wii.c
+++ b/arch/powerpc/platforms/embedded6xx/wii.c
@@ -235,7 +235,7 @@ define_machine(wii) {
.machine_shutdown = wii_shutdown,
};
-static struct of_device_id wii_of_bus[] = {
+static const struct of_device_id wii_of_bus[] = {
{ .compatible = "nintendo,hollywood", },
{ },
};
diff --git a/arch/powerpc/platforms/pasemi/gpio_mdio.c b/arch/powerpc/platforms/pasemi/gpio_mdio.c
index 15adee544638..ada33358950d 100644
--- a/arch/powerpc/platforms/pasemi/gpio_mdio.c
+++ b/arch/powerpc/platforms/pasemi/gpio_mdio.c
@@ -290,7 +290,7 @@ static int gpio_mdio_remove(struct platform_device *dev)
return 0;
}
-static struct of_device_id gpio_mdio_match[] =
+static const struct of_device_id gpio_mdio_match[] =
{
{
.compatible = "gpio-mdio",
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index 8c54de6d8ec4..d71b2c7e8403 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -393,7 +393,7 @@ static inline void pasemi_pcmcia_init(void)
#endif
-static struct of_device_id pasemi_bus_ids[] = {
+static const struct of_device_id pasemi_bus_ids[] = {
/* Unfortunately needed for legacy firmwares */
{ .type = "localbus", },
{ .type = "sdc", },
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 141f8899a633..b127a29ac526 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -336,7 +336,7 @@ static void __init pmac_setup_arch(void)
#endif
#ifdef CONFIG_ADB
- if (strstr(cmd_line, "adb_sync")) {
+ if (strstr(boot_command_line, "adb_sync")) {
extern int __adb_probe_sync;
__adb_probe_sync = 1;
}
@@ -460,7 +460,7 @@ pmac_halt(void)
static void __init pmac_init_early(void)
{
/* Enable early btext debug if requested */
- if (strstr(cmd_line, "btextdbg")) {
+ if (strstr(boot_command_line, "btextdbg")) {
udbg_adb_init_early();
register_early_udbg_console();
}
@@ -469,8 +469,8 @@ static void __init pmac_init_early(void)
pmac_feature_init();
/* Initialize debug stuff */
- udbg_scc_init(!!strstr(cmd_line, "sccdbg"));
- udbg_adb_init(!!strstr(cmd_line, "btextdbg"));
+ udbg_scc_init(!!strstr(boot_command_line, "sccdbg"));
+ udbg_adb_init(!!strstr(boot_command_line, "btextdbg"));
#ifdef CONFIG_PPC64
iommu_init_early_dart();
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c
index c945bed4dc9e..426814a2ede3 100644
--- a/arch/powerpc/platforms/powernv/eeh-ioda.c
+++ b/arch/powerpc/platforms/powernv/eeh-ioda.c
@@ -66,6 +66,54 @@ static struct notifier_block ioda_eeh_nb = {
};
#ifdef CONFIG_DEBUG_FS
+static ssize_t ioda_eeh_ei_write(struct file *filp,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct pci_controller *hose = filp->private_data;
+ struct pnv_phb *phb = hose->private_data;
+ struct eeh_dev *edev;
+ struct eeh_pe *pe;
+ int pe_no, type, func;
+ unsigned long addr, mask;
+ char buf[50];
+ int ret;
+
+ if (!phb->eeh_ops || !phb->eeh_ops->err_inject)
+ return -ENXIO;
+
+ ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count);
+ if (!ret)
+ return -EFAULT;
+
+ /* Retrieve parameters */
+ ret = sscanf(buf, "%x:%x:%x:%lx:%lx",
+ &pe_no, &type, &func, &addr, &mask);
+ if (ret != 5)
+ return -EINVAL;
+
+ /* Retrieve PE */
+ edev = kzalloc(sizeof(*edev), GFP_KERNEL);
+ if (!edev)
+ return -ENOMEM;
+ edev->phb = hose;
+ edev->pe_config_addr = pe_no;
+ pe = eeh_pe_get(edev);
+ kfree(edev);
+ if (!pe)
+ return -ENODEV;
+
+ /* Do error injection */
+ ret = phb->eeh_ops->err_inject(pe, type, func, addr, mask);
+ return ret < 0 ? ret : count;
+}
+
+static const struct file_operations ioda_eeh_ei_fops = {
+ .open = simple_open,
+ .llseek = no_llseek,
+ .write = ioda_eeh_ei_write,
+};
+
static int ioda_eeh_dbgfs_set(void *data, int offset, u64 val)
{
struct pci_controller *hose = data;
@@ -152,6 +200,10 @@ static int ioda_eeh_post_init(struct pci_controller *hose)
if (!phb->has_dbgfs && phb->dbgfs) {
phb->has_dbgfs = 1;
+ debugfs_create_file("err_injct", 0200,
+ phb->dbgfs, hose,
+ &ioda_eeh_ei_fops);
+
debugfs_create_file("err_injct_outbound", 0600,
phb->dbgfs, hose,
&ioda_eeh_outb_dbgfs_ops);
@@ -189,6 +241,7 @@ static int ioda_eeh_set_option(struct eeh_pe *pe, int option)
{
struct pci_controller *hose = pe->phb;
struct pnv_phb *phb = hose->private_data;
+ bool freeze_pe = false;
int enable, ret = 0;
s64 rc;
@@ -212,6 +265,10 @@ static int ioda_eeh_set_option(struct eeh_pe *pe, int option)
case EEH_OPT_THAW_DMA:
enable = OPAL_EEH_ACTION_CLEAR_FREEZE_DMA;
break;
+ case EEH_OPT_FREEZE_PE:
+ freeze_pe = true;
+ enable = OPAL_EEH_ACTION_SET_FREEZE_ALL;
+ break;
default:
pr_warn("%s: Invalid option %d\n",
__func__, option);
@@ -219,17 +276,35 @@ static int ioda_eeh_set_option(struct eeh_pe *pe, int option)
}
/* If PHB supports compound PE, to handle it */
- if (phb->unfreeze_pe) {
- ret = phb->unfreeze_pe(phb, pe->addr, enable);
+ if (freeze_pe) {
+ if (phb->freeze_pe) {
+ phb->freeze_pe(phb, pe->addr);
+ } else {
+ rc = opal_pci_eeh_freeze_set(phb->opal_id,
+ pe->addr,
+ enable);
+ if (rc != OPAL_SUCCESS) {
+ pr_warn("%s: Failure %lld freezing "
+ "PHB#%x-PE#%x\n",
+ __func__, rc,
+ phb->hose->global_number, pe->addr);
+ ret = -EIO;
+ }
+ }
} else {
- rc = opal_pci_eeh_freeze_clear(phb->opal_id,
- pe->addr,
- enable);
- if (rc != OPAL_SUCCESS) {
- pr_warn("%s: Failure %lld enable %d for PHB#%x-PE#%x\n",
- __func__, rc, option, phb->hose->global_number,
- pe->addr);
- ret = -EIO;
+ if (phb->unfreeze_pe) {
+ ret = phb->unfreeze_pe(phb, pe->addr, enable);
+ } else {
+ rc = opal_pci_eeh_freeze_clear(phb->opal_id,
+ pe->addr,
+ enable);
+ if (rc != OPAL_SUCCESS) {
+ pr_warn("%s: Failure %lld enable %d "
+ "for PHB#%x-PE#%x\n",
+ __func__, rc, option,
+ phb->hose->global_number, pe->addr);
+ ret = -EIO;
+ }
}
}
@@ -439,11 +514,11 @@ int ioda_eeh_phb_reset(struct pci_controller *hose, int option)
if (option == EEH_RESET_FUNDAMENTAL ||
option == EEH_RESET_HOT)
rc = opal_pci_reset(phb->opal_id,
- OPAL_PHB_COMPLETE,
+ OPAL_RESET_PHB_COMPLETE,
OPAL_ASSERT_RESET);
else if (option == EEH_RESET_DEACTIVATE)
rc = opal_pci_reset(phb->opal_id,
- OPAL_PHB_COMPLETE,
+ OPAL_RESET_PHB_COMPLETE,
OPAL_DEASSERT_RESET);
if (rc < 0)
goto out;
@@ -483,15 +558,15 @@ static int ioda_eeh_root_reset(struct pci_controller *hose, int option)
*/
if (option == EEH_RESET_FUNDAMENTAL)
rc = opal_pci_reset(phb->opal_id,
- OPAL_PCI_FUNDAMENTAL_RESET,
+ OPAL_RESET_PCI_FUNDAMENTAL,
OPAL_ASSERT_RESET);
else if (option == EEH_RESET_HOT)
rc = opal_pci_reset(phb->opal_id,
- OPAL_PCI_HOT_RESET,
+ OPAL_RESET_PCI_HOT,
OPAL_ASSERT_RESET);
else if (option == EEH_RESET_DEACTIVATE)
rc = opal_pci_reset(phb->opal_id,
- OPAL_PCI_HOT_RESET,
+ OPAL_RESET_PCI_HOT,
OPAL_DEASSERT_RESET);
if (rc < 0)
goto out;
@@ -607,6 +682,31 @@ static int ioda_eeh_reset(struct eeh_pe *pe, int option)
if (pe->type & EEH_PE_PHB) {
ret = ioda_eeh_phb_reset(hose, option);
} else {
+ struct pnv_phb *phb;
+ s64 rc;
+
+ /*
+ * The frozen PE might be caused by PAPR error injection
+ * registers, which are expected to be cleared after hitting
+ * frozen PE as stated in the hardware spec. Unfortunately,
+ * that's not true on P7IOC. So we have to clear it manually
+ * to avoid recursive EEH errors during recovery.
+ */
+ phb = hose->private_data;
+ if (phb->model == PNV_PHB_MODEL_P7IOC &&
+ (option == EEH_RESET_HOT ||
+ option == EEH_RESET_FUNDAMENTAL)) {
+ rc = opal_pci_reset(phb->opal_id,
+ OPAL_RESET_PHB_ERROR,
+ OPAL_ASSERT_RESET);
+ if (rc != OPAL_SUCCESS) {
+ pr_warn("%s: Failure %lld clearing "
+ "error injection registers\n",
+ __func__, rc);
+ return -EIO;
+ }
+ }
+
bus = eeh_pe_bus_get(pe);
if (pci_is_root_bus(bus) ||
pci_is_root_bus(bus->parent))
@@ -628,8 +728,8 @@ static int ioda_eeh_reset(struct eeh_pe *pe, int option)
* Retrieve error log, which contains log from device driver
* and firmware.
*/
-int ioda_eeh_get_log(struct eeh_pe *pe, int severity,
- char *drv_log, unsigned long len)
+static int ioda_eeh_get_log(struct eeh_pe *pe, int severity,
+ char *drv_log, unsigned long len)
{
pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
@@ -650,6 +750,49 @@ static int ioda_eeh_configure_bridge(struct eeh_pe *pe)
return 0;
}
+static int ioda_eeh_err_inject(struct eeh_pe *pe, int type, int func,
+ unsigned long addr, unsigned long mask)
+{
+ struct pci_controller *hose = pe->phb;
+ struct pnv_phb *phb = hose->private_data;
+ s64 ret;
+
+ /* Sanity check on error type */
+ if (type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR &&
+ type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR64) {
+ pr_warn("%s: Invalid error type %d\n",
+ __func__, type);
+ return -ERANGE;
+ }
+
+ if (func < OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_ADDR ||
+ func > OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_TARGET) {
+ pr_warn("%s: Invalid error function %d\n",
+ __func__, func);
+ return -ERANGE;
+ }
+
+ /* Firmware supports error injection ? */
+ if (!opal_check_token(OPAL_PCI_ERR_INJECT)) {
+ pr_warn("%s: Firmware doesn't support error injection\n",
+ __func__);
+ return -ENXIO;
+ }
+
+ /* Do error injection */
+ ret = opal_pci_err_inject(phb->opal_id, pe->addr,
+ type, func, addr, mask);
+ if (ret != OPAL_SUCCESS) {
+ pr_warn("%s: Failure %lld injecting error "
+ "%d-%d to PHB#%x-PE#%x\n",
+ __func__, ret, type, func,
+ hose->global_number, pe->addr);
+ return -EIO;
+ }
+
+ return 0;
+}
+
static void ioda_eeh_hub_diag_common(struct OpalIoP7IOCErrorData *data)
{
/* GEM */
@@ -743,14 +886,12 @@ static int ioda_eeh_get_pe(struct pci_controller *hose,
* the master PE because slave PE is invisible
* to EEH core.
*/
- if (phb->get_pe_state) {
- pnv_pe = &phb->ioda.pe_array[pe_no];
- if (pnv_pe->flags & PNV_IODA_PE_SLAVE) {
- pnv_pe = pnv_pe->master;
- WARN_ON(!pnv_pe ||
- !(pnv_pe->flags & PNV_IODA_PE_MASTER));
- pe_no = pnv_pe->pe_number;
- }
+ pnv_pe = &phb->ioda.pe_array[pe_no];
+ if (pnv_pe->flags & PNV_IODA_PE_SLAVE) {
+ pnv_pe = pnv_pe->master;
+ WARN_ON(!pnv_pe ||
+ !(pnv_pe->flags & PNV_IODA_PE_MASTER));
+ pe_no = pnv_pe->pe_number;
}
/* Find the PE according to PE# */
@@ -761,15 +902,37 @@ static int ioda_eeh_get_pe(struct pci_controller *hose,
if (!dev_pe)
return -EEXIST;
- /*
- * At this point, we're sure the compound PE should
- * be put into frozen state.
- */
+ /* Freeze the (compound) PE */
*pe = dev_pe;
- if (phb->freeze_pe &&
- !(dev_pe->state & EEH_PE_ISOLATED))
+ if (!(dev_pe->state & EEH_PE_ISOLATED))
phb->freeze_pe(phb, pe_no);
+ /*
+ * At this point, we're sure the (compound) PE should
+ * have been frozen. However, we still need poke until
+ * hitting the frozen PE on top level.
+ */
+ dev_pe = dev_pe->parent;
+ while (dev_pe && !(dev_pe->type & EEH_PE_PHB)) {
+ int ret;
+ int active_flags = (EEH_STATE_MMIO_ACTIVE |
+ EEH_STATE_DMA_ACTIVE);
+
+ ret = eeh_ops->get_state(dev_pe, NULL);
+ if (ret <= 0 || (ret & active_flags) == active_flags) {
+ dev_pe = dev_pe->parent;
+ continue;
+ }
+
+ /* Frozen parent PE */
+ *pe = dev_pe;
+ if (!(dev_pe->state & EEH_PE_ISOLATED))
+ phb->freeze_pe(phb, dev_pe->addr);
+
+ /* Next one */
+ dev_pe = dev_pe->parent;
+ }
+
return 0;
}
@@ -971,5 +1134,6 @@ struct pnv_eeh_ops ioda_eeh_ops = {
.reset = ioda_eeh_reset,
.get_log = ioda_eeh_get_log,
.configure_bridge = ioda_eeh_configure_bridge,
+ .err_inject = ioda_eeh_err_inject,
.next_error = ioda_eeh_next_error
};
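
[editor note] The new err_injct debugfs file created above expects a single line of five hex fields matching the sscanf format "%x:%x:%x:%lx:%lx", i.e. pe_no:type:func:addr:mask. A hypothetical userspace helper that drives it; the exact path depends on the per-PHB debugfs directory (phb->dbgfs, typically something like /sys/kernel/debug/powerpc/PCI<nnnn>/err_injct) and the type/func values must come from the OPAL_ERR_INJECT_* definitions:

#include <stdio.h>

static int inject_pci_error(const char *dbgfs_file, unsigned int pe_no,
			    unsigned int type, unsigned int func,
			    unsigned long addr, unsigned long mask)
{
	FILE *f = fopen(dbgfs_file, "w");

	if (!f)
		return -1;
	/* Must match the "%x:%x:%x:%lx:%lx" parsing in ioda_eeh_ei_write(). */
	fprintf(f, "%x:%x:%x:%lx:%lx", pe_no, type, func, addr, mask);
	return fclose(f);
}
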
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index fd7a16f855ed..3e89cbf55885 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -359,6 +359,31 @@ static int powernv_eeh_configure_bridge(struct eeh_pe *pe)
}
/**
+ * powernv_pe_err_inject - Inject specified error to the indicated PE
+ * @pe: the indicated PE
+ * @type: error type
+ * @func: specific error type
+ * @addr: address
+ * @mask: address mask
+ *
+ * The routine is called to inject specified error, which is
+ * determined by @type and @func, to the indicated PE for
+ * testing purpose.
+ */
+static int powernv_eeh_err_inject(struct eeh_pe *pe, int type, int func,
+ unsigned long addr, unsigned long mask)
+{
+ struct pci_controller *hose = pe->phb;
+ struct pnv_phb *phb = hose->private_data;
+ int ret = -EEXIST;
+
+ if (phb->eeh_ops && phb->eeh_ops->err_inject)
+ ret = phb->eeh_ops->err_inject(pe, type, func, addr, mask);
+
+ return ret;
+}
+
+/**
* powernv_eeh_next_error - Retrieve next EEH error to handle
* @pe: Affected PE
*
@@ -414,6 +439,7 @@ static struct eeh_ops powernv_eeh_ops = {
.wait_state = powernv_eeh_wait_state,
.get_log = powernv_eeh_get_log,
.configure_bridge = powernv_eeh_configure_bridge,
+ .err_inject = powernv_eeh_err_inject,
.read_config = pnv_pci_cfg_read,
.write_config = pnv_pci_cfg_write,
.next_error = powernv_eeh_next_error,
diff --git a/arch/powerpc/platforms/powernv/opal-dump.c b/arch/powerpc/platforms/powernv/opal-dump.c
index 85bb8fff7947..23260f7dfa7a 100644
--- a/arch/powerpc/platforms/powernv/opal-dump.c
+++ b/arch/powerpc/platforms/powernv/opal-dump.c
@@ -112,7 +112,7 @@ static ssize_t init_dump_show(struct dump_obj *dump_obj,
struct dump_attribute *attr,
char *buf)
{
- return sprintf(buf, "1 - initiate dump\n");
+ return sprintf(buf, "1 - initiate Service Processor(FSP) dump\n");
}
static int64_t dump_fips_init(uint8_t type)
@@ -121,7 +121,7 @@ static int64_t dump_fips_init(uint8_t type)
rc = opal_dump_init(type);
if (rc)
- pr_warn("%s: Failed to initiate FipS dump (%d)\n",
+ pr_warn("%s: Failed to initiate FSP dump (%d)\n",
__func__, rc);
return rc;
}
@@ -131,8 +131,12 @@ static ssize_t init_dump_store(struct dump_obj *dump_obj,
const char *buf,
size_t count)
{
- dump_fips_init(DUMP_TYPE_FSP);
- pr_info("%s: Initiated FSP dump\n", __func__);
+ int rc;
+
+ rc = dump_fips_init(DUMP_TYPE_FSP);
+ if (rc == OPAL_SUCCESS)
+ pr_info("%s: Initiated FSP dump\n", __func__);
+
return count;
}
@@ -297,7 +301,7 @@ static ssize_t dump_attr_read(struct file *filep, struct kobject *kobj,
* and rely on userspace to ask us to try
* again.
*/
- pr_info("%s: Platform dump partially read.ID = 0x%x\n",
+ pr_info("%s: Platform dump partially read. ID = 0x%x\n",
__func__, dump->id);
return -EIO;
}
@@ -423,6 +427,10 @@ void __init opal_platform_dump_init(void)
{
int rc;
+ /* ELOG not supported by firmware */
+ /* Dump not supported by firmware */
+ if (!opal_check_token(OPAL_DUMP_READ))
+ return;
+
dump_kset = kset_create_and_add("dump", NULL, opal_kobj);
if (!dump_kset) {
pr_warn("%s: Failed to create dump kset\n", __func__);
diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c
index bbdb3ffaab98..518fe95dbf24 100644
--- a/arch/powerpc/platforms/powernv/opal-elog.c
+++ b/arch/powerpc/platforms/powernv/opal-elog.c
@@ -295,6 +295,10 @@ int __init opal_elog_init(void)
{
int rc = 0;
+ /* ELOG not supported by firmware */
+ if (!opal_check_token(OPAL_ELOG_READ))
+ return -1;
+
elog_kset = kset_create_and_add("elog", NULL, opal_kobj);
if (!elog_kset) {
pr_warn("%s: failed to create elog kset\n", __func__);
diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c
index ad4b31df779a..dd2c285ad170 100644
--- a/arch/powerpc/platforms/powernv/opal-lpc.c
+++ b/arch/powerpc/platforms/powernv/opal-lpc.c
@@ -191,6 +191,7 @@ static ssize_t lpc_debug_read(struct file *filp, char __user *ubuf,
{
struct lpc_debugfs_entry *lpc = filp->private_data;
u32 data, pos, len, todo;
+ __be32 bedata;
int rc;
if (!access_ok(VERIFY_WRITE, ubuf, count))
@@ -213,9 +214,10 @@ static ssize_t lpc_debug_read(struct file *filp, char __user *ubuf,
len = 2;
}
rc = opal_lpc_read(opal_lpc_chip_id, lpc->lpc_type, pos,
- &data, len);
+ &bedata, len);
if (rc)
return -ENXIO;
+ data = be32_to_cpu(bedata);
switch(len) {
case 4:
rc = __put_user((u32)data, (u32 __user *)ubuf);
diff --git a/arch/powerpc/platforms/powernv/opal-nvram.c b/arch/powerpc/platforms/powernv/opal-nvram.c
index acd9f7e96678..f9896fd5d04a 100644
--- a/arch/powerpc/platforms/powernv/opal-nvram.c
+++ b/arch/powerpc/platforms/powernv/opal-nvram.c
@@ -78,7 +78,7 @@ void __init opal_nvram_init(void)
}
nvram_size = be32_to_cpup(nbytes_p);
- printk(KERN_INFO "OPAL nvram setup, %u bytes\n", nvram_size);
+ pr_info("OPAL nvram setup, %u bytes\n", nvram_size);
of_node_put(np);
ppc_md.nvram_read = opal_nvram_read;
diff --git a/arch/powerpc/platforms/powernv/opal-rtc.c b/arch/powerpc/platforms/powernv/opal-rtc.c
index b1885db8fdf3..499707ddaa9c 100644
--- a/arch/powerpc/platforms/powernv/opal-rtc.c
+++ b/arch/powerpc/platforms/powernv/opal-rtc.c
@@ -42,6 +42,9 @@ unsigned long __init opal_get_boot_time(void)
__be64 __h_m_s_ms;
long rc = OPAL_BUSY;
+ if (!opal_check_token(OPAL_RTC_READ))
+ goto out;
+
while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
if (rc == OPAL_BUSY_EVENT)
@@ -49,16 +52,18 @@ unsigned long __init opal_get_boot_time(void)
else
mdelay(10);
}
- if (rc != OPAL_SUCCESS) {
- ppc_md.get_rtc_time = NULL;
- ppc_md.set_rtc_time = NULL;
- return 0;
- }
+ if (rc != OPAL_SUCCESS)
+ goto out;
+
y_m_d = be32_to_cpu(__y_m_d);
h_m_s_ms = be64_to_cpu(__h_m_s_ms);
opal_to_tm(y_m_d, h_m_s_ms, &tm);
return mktime(tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
tm.tm_hour, tm.tm_min, tm.tm_sec);
+out:
+ ppc_md.get_rtc_time = NULL;
+ ppc_md.set_rtc_time = NULL;
+ return 0;
}
void opal_get_rtc_time(struct rtc_time *tm)
diff --git a/arch/powerpc/platforms/powernv/opal-tracepoints.c b/arch/powerpc/platforms/powernv/opal-tracepoints.c
index d8a000a9988b..ae14c40b4b1c 100644
--- a/arch/powerpc/platforms/powernv/opal-tracepoints.c
+++ b/arch/powerpc/platforms/powernv/opal-tracepoints.c
@@ -2,7 +2,7 @@
#include <linux/jump_label.h>
#include <asm/trace.h>
-#ifdef CONFIG_JUMP_LABEL
+#ifdef HAVE_JUMP_LABEL
struct static_key opal_tracepoint_key = STATIC_KEY_INIT;
void opal_tracepoint_regfunc(void)
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index 2e6ce1b8dc8f..e9e2450c1fdd 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -184,6 +184,7 @@ OPAL_CALL(opal_register_exception_handler, OPAL_REGISTER_OPAL_EXCEPTION_HANDLER)
OPAL_CALL(opal_pci_eeh_freeze_status, OPAL_PCI_EEH_FREEZE_STATUS);
OPAL_CALL(opal_pci_eeh_freeze_clear, OPAL_PCI_EEH_FREEZE_CLEAR);
OPAL_CALL(opal_pci_eeh_freeze_set, OPAL_PCI_EEH_FREEZE_SET);
+OPAL_CALL(opal_pci_err_inject, OPAL_PCI_ERR_INJECT);
OPAL_CALL(opal_pci_shpc, OPAL_PCI_SHPC);
OPAL_CALL(opal_pci_phb_mmio_enable, OPAL_PCI_PHB_MMIO_ENABLE);
OPAL_CALL(opal_pci_set_phb_mem_window, OPAL_PCI_SET_PHB_MEM_WINDOW);
@@ -232,6 +233,7 @@ OPAL_CALL(opal_validate_flash, OPAL_FLASH_VALIDATE);
OPAL_CALL(opal_manage_flash, OPAL_FLASH_MANAGE);
OPAL_CALL(opal_update_flash, OPAL_FLASH_UPDATE);
OPAL_CALL(opal_resync_timebase, OPAL_RESYNC_TIMEBASE);
+OPAL_CALL(opal_check_token, OPAL_CHECK_TOKEN);
OPAL_CALL(opal_dump_init, OPAL_DUMP_INIT);
OPAL_CALL(opal_dump_info, OPAL_DUMP_INFO);
OPAL_CALL(opal_dump_info2, OPAL_DUMP_INFO2);
@@ -247,3 +249,4 @@ OPAL_CALL(opal_set_param, OPAL_SET_PARAM);
OPAL_CALL(opal_handle_hmi, OPAL_HANDLE_HMI);
OPAL_CALL(opal_register_dump_region, OPAL_REGISTER_DUMP_REGION);
OPAL_CALL(opal_unregister_dump_region, OPAL_UNREGISTER_DUMP_REGION);
+OPAL_CALL(opal_pci_set_phb_cxl_mode, OPAL_PCI_SET_PHB_CXL_MODE);
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 4b005ae5dc4b..b642b0562f5a 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -105,12 +105,12 @@ int __init early_init_dt_scan_opal(unsigned long node,
if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) {
powerpc_firmware_features |= FW_FEATURE_OPALv2;
powerpc_firmware_features |= FW_FEATURE_OPALv3;
- printk("OPAL V3 detected !\n");
+ pr_info("OPAL V3 detected !\n");
} else if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) {
powerpc_firmware_features |= FW_FEATURE_OPALv2;
- printk("OPAL V2 detected !\n");
+ pr_info("OPAL V2 detected !\n");
} else {
- printk("OPAL V1 detected !\n");
+ pr_info("OPAL V1 detected !\n");
}
/* Reinit all cores with the right endian */
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index df241b11d4f7..468a0f23c7f2 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -37,41 +37,43 @@
#include <asm/xics.h>
#include <asm/debug.h>
#include <asm/firmware.h>
+#include <asm/pnv-pci.h>
+
+#include <misc/cxl.h>
#include "powernv.h"
#include "pci.h"
-#define define_pe_printk_level(func, kern_level) \
-static int func(const struct pnv_ioda_pe *pe, const char *fmt, ...) \
-{ \
- struct va_format vaf; \
- va_list args; \
- char pfix[32]; \
- int r; \
- \
- va_start(args, fmt); \
- \
- vaf.fmt = fmt; \
- vaf.va = &args; \
- \
- if (pe->pdev) \
- strlcpy(pfix, dev_name(&pe->pdev->dev), \
- sizeof(pfix)); \
- else \
- sprintf(pfix, "%04x:%02x ", \
- pci_domain_nr(pe->pbus), \
- pe->pbus->number); \
- r = printk(kern_level "pci %s: [PE# %.3d] %pV", \
- pfix, pe->pe_number, &vaf); \
- \
- va_end(args); \
- \
- return r; \
-} \
-
-define_pe_printk_level(pe_err, KERN_ERR);
-define_pe_printk_level(pe_warn, KERN_WARNING);
-define_pe_printk_level(pe_info, KERN_INFO);
+static void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
+ const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+ char pfix[32];
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ if (pe->pdev)
+ strlcpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix));
+ else
+ sprintf(pfix, "%04x:%02x ",
+ pci_domain_nr(pe->pbus), pe->pbus->number);
+
+ printk("%spci %s: [PE# %.3d] %pV",
+ level, pfix, pe->pe_number, &vaf);
+
+ va_end(args);
+}
+
+#define pe_err(pe, fmt, ...) \
+ pe_level_printk(pe, KERN_ERR, fmt, ##__VA_ARGS__)
+#define pe_warn(pe, fmt, ...) \
+ pe_level_printk(pe, KERN_WARNING, fmt, ##__VA_ARGS__)
+#define pe_info(pe, fmt, ...) \
+ pe_level_printk(pe, KERN_INFO, fmt, ##__VA_ARGS__)
/*
* stdcix is only supposed to be used in hypervisor real mode as per
@@ -385,7 +387,7 @@ static void pnv_ioda_freeze_pe(struct pnv_phb *phb, int pe_no)
}
}
-int pnv_ioda_unfreeze_pe(struct pnv_phb *phb, int pe_no, int opt)
+static int pnv_ioda_unfreeze_pe(struct pnv_phb *phb, int pe_no, int opt)
{
struct pnv_ioda_pe *pe, *slave;
s64 rc;
@@ -890,6 +892,28 @@ static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb,
return 0;
}
+static u64 pnv_pci_ioda_dma_get_required_mask(struct pnv_phb *phb,
+ struct pci_dev *pdev)
+{
+ struct pci_dn *pdn = pci_get_pdn(pdev);
+ struct pnv_ioda_pe *pe;
+ u64 end, mask;
+
+ if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
+ return 0;
+
+ pe = &phb->ioda.pe_array[pdn->pe_number];
+ if (!pe->tce_bypass_enabled)
+ return __dma_get_required_mask(&pdev->dev);
+
+
+ end = pe->tce_bypass_base + memblock_end_of_DRAM();
+ mask = 1ULL << (fls64(end) - 1);
+ mask += mask - 1;
+
+ return mask;
+}
+
static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe,
struct pci_bus *bus,
bool add_to_iommu_group)
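
[editor note] The mask arithmetic in pnv_pci_ioda_dma_get_required_mask() above rounds the highest TCE-bypass address up to a power of two and returns an all-ones mask that covers it. A small worked sketch of just that step, assuming end != 0 (e.g. tce_bypass_base = 1ULL << 59 with 4 GB of RAM gives end = 0x0800000100000000, fls64(end) = 60, so mask becomes 0x0fffffffffffffff):

#include <stdint.h>

static uint64_t required_mask_sketch(uint64_t end)
{
	/* 1ULL << (fls64(end) - 1), i.e. the highest set bit of end */
	uint64_t mask = 1ULL << (63 - __builtin_clzll(end));

	mask += mask - 1;	/* all bits up to and including that bit */
	return mask;
}
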
@@ -1306,14 +1330,186 @@ static void pnv_ioda2_msi_eoi(struct irq_data *d)
icp_native_eoi(d);
}
+
+static void set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq)
+{
+ struct irq_data *idata;
+ struct irq_chip *ichip;
+
+ if (phb->type != PNV_PHB_IODA2)
+ return;
+
+ if (!phb->ioda.irq_chip_init) {
+ /*
+ * First time we setup an MSI IRQ, we need to setup the
+ * corresponding IRQ chip to route correctly.
+ */
+ idata = irq_get_irq_data(virq);
+ ichip = irq_data_get_irq_chip(idata);
+ phb->ioda.irq_chip_init = 1;
+ phb->ioda.irq_chip = *ichip;
+ phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi;
+ }
+ irq_set_chip(virq, &phb->ioda.irq_chip);
+}
+
+#ifdef CONFIG_CXL_BASE
+
+struct device_node *pnv_pci_to_phb_node(struct pci_dev *dev)
+{
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+
+ return hose->dn;
+}
+EXPORT_SYMBOL(pnv_pci_to_phb_node);
+
+int pnv_phb_to_cxl(struct pci_dev *dev)
+{
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ struct pnv_phb *phb = hose->private_data;
+ struct pnv_ioda_pe *pe;
+ int rc;
+
+ pe = pnv_ioda_get_pe(dev);
+ if (!pe)
+ return -ENODEV;
+
+ pe_info(pe, "Switching PHB to CXL\n");
+
+ rc = opal_pci_set_phb_cxl_mode(phb->opal_id, 1, pe->pe_number);
+ if (rc)
+ dev_err(&dev->dev, "opal_pci_set_phb_cxl_mode failed: %i\n", rc);
+
+ return rc;
+}
+EXPORT_SYMBOL(pnv_phb_to_cxl);
+
+/* Find PHB for cxl dev and allocate MSI hwirqs?
+ * Returns the absolute hardware IRQ number
+ */
+int pnv_cxl_alloc_hwirqs(struct pci_dev *dev, int num)
+{
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ struct pnv_phb *phb = hose->private_data;
+ int hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, num);
+
+ if (hwirq < 0) {
+ dev_warn(&dev->dev, "Failed to find a free MSI\n");
+ return -ENOSPC;
+ }
+
+ return phb->msi_base + hwirq;
+}
+EXPORT_SYMBOL(pnv_cxl_alloc_hwirqs);
+
+void pnv_cxl_release_hwirqs(struct pci_dev *dev, int hwirq, int num)
+{
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ struct pnv_phb *phb = hose->private_data;
+
+ msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, num);
+}
+EXPORT_SYMBOL(pnv_cxl_release_hwirqs);
+
+void pnv_cxl_release_hwirq_ranges(struct cxl_irq_ranges *irqs,
+ struct pci_dev *dev)
+{
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ struct pnv_phb *phb = hose->private_data;
+ int i, hwirq;
+
+ for (i = 1; i < CXL_IRQ_RANGES; i++) {
+ if (!irqs->range[i])
+ continue;
+ pr_devel("cxl release irq range 0x%x: offset: 0x%lx limit: %ld\n",
+ i, irqs->offset[i],
+ irqs->range[i]);
+ hwirq = irqs->offset[i] - phb->msi_base;
+ msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq,
+ irqs->range[i]);
+ }
+}
+EXPORT_SYMBOL(pnv_cxl_release_hwirq_ranges);
+
+int pnv_cxl_alloc_hwirq_ranges(struct cxl_irq_ranges *irqs,
+ struct pci_dev *dev, int num)
+{
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ struct pnv_phb *phb = hose->private_data;
+ int i, hwirq, try;
+
+ memset(irqs, 0, sizeof(struct cxl_irq_ranges));
+
+ /* 0 is reserved for the multiplexed PSL DSI interrupt */
+ for (i = 1; i < CXL_IRQ_RANGES && num; i++) {
+ try = num;
+ while (try) {
+ hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, try);
+ if (hwirq >= 0)
+ break;
+ try /= 2;
+ }
+ if (!try)
+ goto fail;
+
+ irqs->offset[i] = phb->msi_base + hwirq;
+ irqs->range[i] = try;
+ pr_devel("cxl alloc irq range 0x%x: offset: 0x%lx limit: %li\n",
+ i, irqs->offset[i], irqs->range[i]);
+ num -= try;
+ }
+ if (num)
+ goto fail;
+
+ return 0;
+fail:
+ pnv_cxl_release_hwirq_ranges(irqs, dev);
+ return -ENOSPC;
+}
+EXPORT_SYMBOL(pnv_cxl_alloc_hwirq_ranges);
+
+int pnv_cxl_get_irq_count(struct pci_dev *dev)
+{
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ struct pnv_phb *phb = hose->private_data;
+
+ return phb->msi_bmp.irq_count;
+}
+EXPORT_SYMBOL(pnv_cxl_get_irq_count);
+
+int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq,
+ unsigned int virq)
+{
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ struct pnv_phb *phb = hose->private_data;
+ unsigned int xive_num = hwirq - phb->msi_base;
+ struct pnv_ioda_pe *pe;
+ int rc;
+
+ if (!(pe = pnv_ioda_get_pe(dev)))
+ return -ENODEV;
+
+ /* Assign XIVE to PE */
+ rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
+ if (rc) {
+ pe_warn(pe, "%s: OPAL error %d setting msi_base 0x%x "
+ "hwirq 0x%x XIVE 0x%x PE\n",
+ pci_name(dev), rc, phb->msi_base, hwirq, xive_num);
+ return -EIO;
+ }
+ set_msi_irq_chip(phb, virq);
+
+ return 0;
+}
+EXPORT_SYMBOL(pnv_cxl_ioda_msi_setup);
+#endif
+
static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
unsigned int hwirq, unsigned int virq,
unsigned int is_64, struct msi_msg *msg)
{
struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
struct pci_dn *pdn = pci_get_pdn(dev);
- struct irq_data *idata;
- struct irq_chip *ichip;
unsigned int xive_num = hwirq - phb->msi_base;
__be32 data;
int rc;
@@ -1365,22 +1561,7 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
}
msg->data = be32_to_cpu(data);
- /*
- * Change the IRQ chip for the MSI interrupts on PHB3.
- * The corresponding IRQ chip should be populated for
- * the first time.
- */
- if (phb->type == PNV_PHB_IODA2) {
- if (!phb->ioda.irq_chip_init) {
- idata = irq_get_irq_data(virq);
- ichip = irq_data_get_irq_chip(idata);
- phb->ioda.irq_chip_init = 1;
- phb->ioda.irq_chip = *ichip;
- phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi;
- }
-
- irq_set_chip(virq, &phb->ioda.irq_chip);
- }
+ set_msi_irq_chip(phb, virq);
pr_devel("%s: %s-bit MSI on hwirq %x (xive #%d),"
" address=%x_%08x data=%x PE# %d\n",
@@ -1627,12 +1808,12 @@ static u32 pnv_ioda_bdfn_to_pe(struct pnv_phb *phb, struct pci_bus *bus,
static void pnv_pci_ioda_shutdown(struct pnv_phb *phb)
{
- opal_pci_reset(phb->opal_id, OPAL_PCI_IODA_TABLE_RESET,
+ opal_pci_reset(phb->opal_id, OPAL_RESET_PCI_IODA_TABLE,
OPAL_ASSERT_RESET);
}
-void __init pnv_pci_init_ioda_phb(struct device_node *np,
- u64 hub_id, int ioda_type)
+static void __init pnv_pci_init_ioda_phb(struct device_node *np,
+ u64 hub_id, int ioda_type)
{
struct pci_controller *hose;
struct pnv_phb *phb;
@@ -1782,6 +1963,7 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np,
/* Setup TCEs */
phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
phb->dma_set_mask = pnv_pci_ioda_dma_set_mask;
+ phb->dma_get_required_mask = pnv_pci_ioda_dma_get_required_mask;
/* Setup shutdown function for kexec */
phb->shutdown = pnv_pci_ioda_shutdown;
@@ -1803,7 +1985,7 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np,
pci_add_flags(PCI_REASSIGN_ALL_RSRC);
/* Reset IODA tables to a clean state */
- rc = opal_pci_reset(phb_id, OPAL_PCI_IODA_TABLE_RESET, OPAL_ASSERT_RESET);
+ rc = opal_pci_reset(phb_id, OPAL_RESET_PCI_IODA_TABLE, OPAL_ASSERT_RESET);
if (rc)
pr_warning(" OPAL Error %ld performing IODA table reset !\n", rc);
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index b45c49249a5d..b3ca77ddf36d 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -753,6 +753,17 @@ int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
return __dma_set_mask(&pdev->dev, dma_mask);
}
+u64 pnv_pci_dma_get_required_mask(struct pci_dev *pdev)
+{
+ struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+ struct pnv_phb *phb = hose->private_data;
+
+ if (phb && phb->dma_get_required_mask)
+ return phb->dma_get_required_mask(phb, pdev);
+
+ return __dma_get_required_mask(&pdev->dev);
+}
+
void pnv_pci_shutdown(void)
{
struct pci_controller *hose;
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 48494d4b6058..34d29eb2a4de 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -85,6 +85,8 @@ struct pnv_eeh_ops {
int (*get_log)(struct eeh_pe *pe, int severity,
char *drv_log, unsigned long len);
int (*configure_bridge)(struct eeh_pe *pe);
+ int (*err_inject)(struct eeh_pe *pe, int type, int func,
+ unsigned long addr, unsigned long mask);
int (*next_error)(struct eeh_pe **pe);
};
#endif /* CONFIG_EEH */
@@ -122,6 +124,8 @@ struct pnv_phb {
void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev);
int (*dma_set_mask)(struct pnv_phb *phb, struct pci_dev *pdev,
u64 dma_mask);
+ u64 (*dma_get_required_mask)(struct pnv_phb *phb,
+ struct pci_dev *pdev);
void (*fixup_phb)(struct pci_controller *hose);
u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn);
void (*shutdown)(struct pnv_phb *phb);
diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h
index 75501bfede7f..6c8e2d188cd0 100644
--- a/arch/powerpc/platforms/powernv/powernv.h
+++ b/arch/powerpc/platforms/powernv/powernv.h
@@ -13,6 +13,7 @@ struct pci_dev;
extern void pnv_pci_init(void);
extern void pnv_pci_shutdown(void);
extern int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask);
+extern u64 pnv_pci_dma_get_required_mask(struct pci_dev *pdev);
#else
static inline void pnv_pci_init(void) { }
static inline void pnv_pci_shutdown(void) { }
@@ -21,6 +22,11 @@ static inline int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
{
return -ENODEV;
}
+
+static inline u64 pnv_pci_dma_get_required_mask(struct pci_dev *pdev)
+{
+ return 0;
+}
#endif
extern void pnv_lpc_init(void);
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 5a0e2dc6de5f..3f9546d8a51f 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -173,6 +173,14 @@ static int pnv_dma_set_mask(struct device *dev, u64 dma_mask)
return __dma_set_mask(dev, dma_mask);
}
+static u64 pnv_dma_get_required_mask(struct device *dev)
+{
+ if (dev_is_pci(dev))
+ return pnv_pci_dma_get_required_mask(to_pci_dev(dev));
+
+ return __dma_get_required_mask(dev);
+}
+
static void pnv_shutdown(void)
{
/* Let the PCI code clear up IODA tables */
@@ -307,7 +315,7 @@ static int __init pnv_probe(void)
* Returns the cpu frequency for 'cpu' in Hz. This is used by
* /proc/cpuinfo
*/
-unsigned long pnv_get_proc_freq(unsigned int cpu)
+static unsigned long pnv_get_proc_freq(unsigned int cpu)
{
unsigned long ret_freq;
@@ -335,6 +343,7 @@ define_machine(powernv) {
.power_save = power7_idle,
.calibrate_decr = generic_calibrate_decr,
.dma_set_mask = pnv_dma_set_mask,
+ .dma_get_required_mask = pnv_dma_get_required_mask,
#ifdef CONFIG_KEXEC
.kexec_cpu_down = pnv_kexec_cpu_down,
#endif
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index 5fcfcf44e3a9..4753958cd509 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -54,7 +54,7 @@ static void pnv_smp_setup_cpu(int cpu)
#endif
}
-int pnv_smp_kick_cpu(int nr)
+static int pnv_smp_kick_cpu(int nr)
{
unsigned int pcpu = get_hard_smp_processor_id(nr);
unsigned long start_here =
@@ -168,9 +168,9 @@ static void pnv_smp_cpu_kill_self(void)
power7_nap(1);
ppc64_runlatch_on();
- /* Reenable IRQs briefly to clear the IPI that woke us */
- local_irq_enable();
- local_irq_disable();
+ /* Clear the IPI that woke us up */
+ icp_native_flush_interrupt();
+ local_paca->irq_happened &= PACA_IRQ_HARD_DIS;
mb();
if (cpu_core_split_required())
diff --git a/arch/powerpc/platforms/powernv/subcore.c b/arch/powerpc/platforms/powernv/subcore.c
index 894ecb3eb596..c87f96b79d1a 100644
--- a/arch/powerpc/platforms/powernv/subcore.c
+++ b/arch/powerpc/platforms/powernv/subcore.c
@@ -24,6 +24,7 @@
#include <asm/smp.h>
#include "subcore.h"
+#include "powernv.h"
/*
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c
index 2d8bf15879fd..fc44ad0475f8 100644
--- a/arch/powerpc/platforms/pseries/cmm.c
+++ b/arch/powerpc/platforms/pseries/cmm.c
@@ -555,7 +555,6 @@ static int cmm_mem_going_offline(void *arg)
pa_last = pa_last->next;
free_page((unsigned long)cmm_page_list);
cmm_page_list = pa_last;
- continue;
}
}
pa_curr = pa_curr->next;
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index a2450b8a50a5..fdf01b660d59 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -17,6 +17,7 @@
#include <linux/slab.h>
#include <linux/of.h>
#include "offline_states.h"
+#include "pseries.h"
#include <asm/prom.h>
#include <asm/machdep.h>
@@ -363,7 +364,8 @@ static int dlpar_online_cpu(struct device_node *dn)
int rc = 0;
unsigned int cpu;
int len, nthreads, i;
- const u32 *intserv;
+ const __be32 *intserv;
+ u32 thread;
intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
if (!intserv)
@@ -373,8 +375,9 @@ static int dlpar_online_cpu(struct device_node *dn)
cpu_maps_update_begin();
for (i = 0; i < nthreads; i++) {
+ thread = be32_to_cpu(intserv[i]);
for_each_present_cpu(cpu) {
- if (get_hard_smp_processor_id(cpu) != intserv[i])
+ if (get_hard_smp_processor_id(cpu) != thread)
continue;
BUG_ON(get_cpu_current_state(cpu)
!= CPU_STATE_OFFLINE);
@@ -388,7 +391,7 @@ static int dlpar_online_cpu(struct device_node *dn)
}
if (cpu == num_possible_cpus())
printk(KERN_WARNING "Could not find cpu to online "
- "with physical id 0x%x\n", intserv[i]);
+ "with physical id 0x%x\n", thread);
}
cpu_maps_update_done();
@@ -442,7 +445,8 @@ static int dlpar_offline_cpu(struct device_node *dn)
int rc = 0;
unsigned int cpu;
int len, nthreads, i;
- const u32 *intserv;
+ const __be32 *intserv;
+ u32 thread;
intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
if (!intserv)
@@ -452,8 +456,9 @@ static int dlpar_offline_cpu(struct device_node *dn)
cpu_maps_update_begin();
for (i = 0; i < nthreads; i++) {
+ thread = be32_to_cpu(intserv[i]);
for_each_present_cpu(cpu) {
- if (get_hard_smp_processor_id(cpu) != intserv[i])
+ if (get_hard_smp_processor_id(cpu) != thread)
continue;
if (get_cpu_current_state(cpu) == CPU_STATE_OFFLINE)
@@ -475,14 +480,14 @@ static int dlpar_offline_cpu(struct device_node *dn)
* Upgrade it's state to CPU_STATE_OFFLINE.
*/
set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
- BUG_ON(plpar_hcall_norets(H_PROD, intserv[i])
+ BUG_ON(plpar_hcall_norets(H_PROD, thread)
!= H_SUCCESS);
__cpu_die(cpu);
break;
}
if (cpu == num_possible_cpus())
printk(KERN_WARNING "Could not find cpu to offline "
- "with physical id 0x%x\n", intserv[i]);
+ "with physical id 0x%x\n", thread);
}
cpu_maps_update_done();
@@ -494,15 +499,15 @@ out:
static ssize_t dlpar_cpu_release(const char *buf, size_t count)
{
struct device_node *dn;
- const u32 *drc_index;
+ u32 drc_index;
int rc;
dn = of_find_node_by_path(buf);
if (!dn)
return -EINVAL;
- drc_index = of_get_property(dn, "ibm,my-drc-index", NULL);
- if (!drc_index) {
+ rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index);
+ if (rc) {
of_node_put(dn);
return -EINVAL;
}
@@ -513,7 +518,7 @@ static ssize_t dlpar_cpu_release(const char *buf, size_t count)
return -EINVAL;
}
- rc = dlpar_release_drc(*drc_index);
+ rc = dlpar_release_drc(drc_index);
if (rc) {
of_node_put(dn);
return rc;
@@ -521,7 +526,7 @@ static ssize_t dlpar_cpu_release(const char *buf, size_t count)
rc = dlpar_detach_node(dn);
if (rc) {
- dlpar_acquire_drc(*drc_index);
+ dlpar_acquire_drc(drc_index);
return rc;
}
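
[editor note] The dlpar.c hunks above all correct the same endianness pattern: of_get_property() hands back raw big-endian cells that need be32_to_cpu() per element, whereas of_property_read_u32() returns a CPU-endian value directly. A condensed sketch of the two access styles the patch converges on (function name illustrative):

#include <linux/of.h>
#include <linux/printk.h>

static void dt_read_example(struct device_node *dn)
{
	const __be32 *intserv;
	u32 drc_index, thread;
	int len, i, nthreads;

	intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
	if (intserv) {
		nthreads = len / sizeof(u32);
		for (i = 0; i < nthreads; i++) {
			thread = be32_to_cpu(intserv[i]);	/* explicit swap */
			pr_debug("thread id 0x%x\n", thread);
		}
	}

	if (!of_property_read_u32(dn, "ibm,my-drc-index", &drc_index))
		pr_debug("drc-index 0x%x\n", drc_index);	/* already CPU-endian */
}
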
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index b08053819d99..a6c7e19f5eb3 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -88,29 +88,14 @@ static int pseries_eeh_init(void)
* and its variant since the old firmware probably support address
* of domain/bus/slot/function for EEH RTAS operations.
*/
- if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE) {
- pr_warn("%s: RTAS service <ibm,set-eeh-option> invalid\n",
- __func__);
- return -EINVAL;
- } else if (ibm_set_slot_reset == RTAS_UNKNOWN_SERVICE) {
- pr_warn("%s: RTAS service <ibm,set-slot-reset> invalid\n",
- __func__);
- return -EINVAL;
- } else if (ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE &&
- ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE) {
- pr_warn("%s: RTAS service <ibm,read-slot-reset-state2> and "
- "<ibm,read-slot-reset-state> invalid\n",
- __func__);
- return -EINVAL;
- } else if (ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE) {
- pr_warn("%s: RTAS service <ibm,slot-error-detail> invalid\n",
- __func__);
- return -EINVAL;
- } else if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE &&
- ibm_configure_bridge == RTAS_UNKNOWN_SERVICE) {
- pr_warn("%s: RTAS service <ibm,configure-pe> and "
- "<ibm,configure-bridge> invalid\n",
- __func__);
+ if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE ||
+ ibm_set_slot_reset == RTAS_UNKNOWN_SERVICE ||
+ (ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE &&
+ ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE) ||
+ ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE ||
+ (ibm_configure_pe == RTAS_UNKNOWN_SERVICE &&
+ ibm_configure_bridge == RTAS_UNKNOWN_SERVICE)) {
+ pr_info("EEH functionality not supported\n");
return -EINVAL;
}
@@ -118,11 +103,11 @@ static int pseries_eeh_init(void)
spin_lock_init(&slot_errbuf_lock);
eeh_error_buf_size = rtas_token("rtas-error-log-max");
if (eeh_error_buf_size == RTAS_UNKNOWN_SERVICE) {
- pr_warn("%s: unknown EEH error log size\n",
+ pr_info("%s: unknown EEH error log size\n",
__func__);
eeh_error_buf_size = 1024;
} else if (eeh_error_buf_size > RTAS_ERROR_LOG_MAX) {
- pr_warn("%s: EEH error log size %d exceeds the maximal %d\n",
+ pr_info("%s: EEH error log size %d exceeds the maximal %d\n",
__func__, eeh_error_buf_size, RTAS_ERROR_LOG_MAX);
eeh_error_buf_size = RTAS_ERROR_LOG_MAX;
}
@@ -349,7 +334,9 @@ static int pseries_eeh_set_option(struct eeh_pe *pe, int option)
if (pe->addr)
config_addr = pe->addr;
break;
-
+ case EEH_OPT_FREEZE_PE:
+ /* Not support */
+ return 0;
default:
pr_err("%s: Invalid option %d\n",
__func__, option);
@@ -729,6 +716,7 @@ static struct eeh_ops pseries_eeh_ops = {
.wait_state = pseries_eeh_wait_state,
.get_log = pseries_eeh_get_log,
.configure_bridge = pseries_eeh_configure_bridge,
+ .err_inject = NULL,
.read_config = pseries_eeh_read_config,
.write_config = pseries_eeh_write_config,
.next_error = NULL,
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index 20d62975856f..b174fa751d26 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -90,7 +90,7 @@ static void rtas_stop_self(void)
{
static struct rtas_args args = {
.nargs = 0,
- .nret = 1,
+ .nret = cpu_to_be32(1),
.rets = &args.args[0],
};
@@ -312,7 +312,8 @@ static void pseries_remove_processor(struct device_node *np)
{
unsigned int cpu;
int len, nthreads, i;
- const u32 *intserv;
+ const __be32 *intserv;
+ u32 thread;
intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
if (!intserv)
@@ -322,8 +323,9 @@ static void pseries_remove_processor(struct device_node *np)
cpu_maps_update_begin();
for (i = 0; i < nthreads; i++) {
+ thread = be32_to_cpu(intserv[i]);
for_each_present_cpu(cpu) {
- if (get_hard_smp_processor_id(cpu) != intserv[i])
+ if (get_hard_smp_processor_id(cpu) != thread)
continue;
BUG_ON(cpu_online(cpu));
set_cpu_present(cpu, false);
@@ -332,7 +334,7 @@ static void pseries_remove_processor(struct device_node *np)
}
if (cpu >= nr_cpu_ids)
printk(KERN_WARNING "Could not find cpu to remove "
- "with physical id 0x%x\n", intserv[i]);
+ "with physical id 0x%x\n", thread);
}
cpu_maps_update_done();
}
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 34064f50945e..3c4c0dcd90d3 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -20,6 +20,7 @@
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/sparsemem.h>
+#include "pseries.h"
unsigned long pseries_memory_block_size(void)
{
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 4642d6a4d356..de1ec54a2a57 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -329,16 +329,16 @@ struct direct_window {
/* Dynamic DMA Window support */
struct ddw_query_response {
- __be32 windows_available;
- __be32 largest_available_block;
- __be32 page_size;
- __be32 migration_capable;
+ u32 windows_available;
+ u32 largest_available_block;
+ u32 page_size;
+ u32 migration_capable;
};
struct ddw_create_response {
- __be32 liobn;
- __be32 addr_hi;
- __be32 addr_lo;
+ u32 liobn;
+ u32 addr_hi;
+ u32 addr_lo;
};
static LIST_HEAD(direct_window_list);
@@ -725,16 +725,18 @@ static void remove_ddw(struct device_node *np, bool remove_prop)
{
struct dynamic_dma_window_prop *dwp;
struct property *win64;
- const u32 *ddw_avail;
+ u32 ddw_avail[3];
u64 liobn;
- int len, ret = 0;
+ int ret = 0;
+
+ ret = of_property_read_u32_array(np, "ibm,ddw-applicable",
+ &ddw_avail[0], 3);
- ddw_avail = of_get_property(np, "ibm,ddw-applicable", &len);
win64 = of_find_property(np, DIRECT64_PROPNAME, NULL);
if (!win64)
return;
- if (!ddw_avail || len < 3 * sizeof(u32) || win64->length < sizeof(*dwp))
+ if (ret || win64->length < sizeof(*dwp))
goto delprop;
dwp = win64->value;
@@ -872,8 +874,9 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
do {
/* extra outputs are LIOBN and dma-addr (hi, lo) */
- ret = rtas_call(ddw_avail[1], 5, 4, (u32 *)create, cfg_addr,
- BUID_HI(buid), BUID_LO(buid), page_shift, window_shift);
+ ret = rtas_call(ddw_avail[1], 5, 4, (u32 *)create,
+ cfg_addr, BUID_HI(buid), BUID_LO(buid),
+ page_shift, window_shift);
} while (rtas_busy_delay(ret));
dev_info(&dev->dev,
"ibm,create-pe-dma-window(%x) %x %x %x %x %x returned %d "
@@ -910,7 +913,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
int page_shift;
u64 dma_addr, max_addr;
struct device_node *dn;
- const u32 *uninitialized_var(ddw_avail);
+ u32 ddw_avail[3];
struct direct_window *window;
struct property *win64;
struct dynamic_dma_window_prop *ddwprop;
@@ -942,8 +945,9 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
* for the given node in that order.
* the property is actually in the parent, not the PE
*/
- ddw_avail = of_get_property(pdn, "ibm,ddw-applicable", &len);
- if (!ddw_avail || len < 3 * sizeof(u32))
+ ret = of_property_read_u32_array(pdn, "ibm,ddw-applicable",
+ &ddw_avail[0], 3);
+ if (ret)
goto out_failed;
/*
@@ -966,11 +970,11 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
dev_dbg(&dev->dev, "no free dynamic windows");
goto out_failed;
}
- if (be32_to_cpu(query.page_size) & 4) {
+ if (query.page_size & 4) {
page_shift = 24; /* 16MB */
- } else if (be32_to_cpu(query.page_size) & 2) {
+ } else if (query.page_size & 2) {
page_shift = 16; /* 64kB */
- } else if (be32_to_cpu(query.page_size) & 1) {
+ } else if (query.page_size & 1) {
page_shift = 12; /* 4kB */
} else {
dev_dbg(&dev->dev, "no supported direct page size in mask %x",
@@ -980,7 +984,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
/* verify the window * number of ptes will map the partition */
/* check largest block * page size > max memory hotplug addr */
max_addr = memory_hotplug_max();
- if (be32_to_cpu(query.largest_available_block) < (max_addr >> page_shift)) {
+ if (query.largest_available_block < (max_addr >> page_shift)) {
dev_dbg(&dev->dev, "can't map partiton max 0x%llx with %u "
"%llu-sized pages\n", max_addr, query.largest_available_block,
1ULL << page_shift);
@@ -1006,8 +1010,9 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
if (ret != 0)
goto out_free_prop;
- ddwprop->liobn = create.liobn;
- ddwprop->dma_base = cpu_to_be64(of_read_number(&create.addr_hi, 2));
+ ddwprop->liobn = cpu_to_be32(create.liobn);
+ ddwprop->dma_base = cpu_to_be64(((u64)create.addr_hi << 32) |
+ create.addr_lo);
ddwprop->tce_shift = cpu_to_be32(page_shift);
ddwprop->window_shift = cpu_to_be32(len);
@@ -1039,7 +1044,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
list_add(&window->list, &direct_window_list);
spin_unlock(&direct_window_list_lock);
- dma_addr = of_read_number(&create.addr_hi, 2);
+ dma_addr = be64_to_cpu(ddwprop->dma_base);
goto out_unlock;
out_free_window:
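
[editor note] The DDW changes above rely on rtas_call() returning its outputs in CPU endianness, so ddw_create_response holds plain u32 fields and the 64-bit window base is assembled directly from addr_hi/addr_lo; only the value written back into the DIRECT64_PROPNAME device-tree property is converted with cpu_to_be64(). A minimal sketch of that assembly step:

#include <linux/types.h>

static u64 ddw_dma_base_sketch(u32 addr_hi, u32 addr_lo)
{
	return ((u64)addr_hi << 32) | addr_lo;
}
/* e.g. addr_hi = 0x08000000, addr_lo = 0 gives a DMA base of
 * 0x0800000000000000, stored big-endian in the DT property. */
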
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 34e64237fff9..8c509d5397c6 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -59,8 +59,6 @@ EXPORT_SYMBOL(plpar_hcall);
EXPORT_SYMBOL(plpar_hcall9);
EXPORT_SYMBOL(plpar_hcall_norets);
-extern void pSeries_find_serial_port(void);
-
void vpa_init(int cpu)
{
int hwcpu = get_hard_smp_processor_id(cpu);
@@ -642,7 +640,7 @@ EXPORT_SYMBOL(arch_free_page);
#endif
#ifdef CONFIG_TRACEPOINTS
-#ifdef CONFIG_JUMP_LABEL
+#ifdef HAVE_JUMP_LABEL
struct static_key hcall_tracepoint_key = STATIC_KEY_INIT;
void hcall_tracepoint_regfunc(void)
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 0cc240b7f694..11a3b617ef5d 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -276,8 +276,10 @@ static ssize_t pSeries_nvram_get_size(void)
* sequence #: The unique sequence # for each event. (until it wraps)
* error log: The error log from event_scan
*/
-int nvram_write_os_partition(struct nvram_os_partition *part, char * buff,
- int length, unsigned int err_type, unsigned int error_log_cnt)
+static int nvram_write_os_partition(struct nvram_os_partition *part,
+ char *buff, int length,
+ unsigned int err_type,
+ unsigned int error_log_cnt)
{
int rc;
loff_t tmp_index;
@@ -330,9 +332,9 @@ int nvram_write_error_log(char * buff, int length,
*
* Reads nvram partition for at most 'length'
*/
-int nvram_read_partition(struct nvram_os_partition *part, char *buff,
- int length, unsigned int *err_type,
- unsigned int *error_log_cnt)
+static int nvram_read_partition(struct nvram_os_partition *part, char *buff,
+ int length, unsigned int *err_type,
+ unsigned int *error_log_cnt)
{
int rc;
loff_t tmp_index;
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
index c413ec158ff5..67e48594040c 100644
--- a/arch/powerpc/platforms/pseries/pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -29,6 +29,7 @@
#include <asm/pci-bridge.h>
#include <asm/prom.h>
#include <asm/ppc-pci.h>
+#include "pseries.h"
#if 0
void pcibios_name_device(struct pci_dev *dev)
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index dff05b9eb946..5a4d0fc03b03 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -126,7 +126,7 @@ struct epow_errorlog {
#define EPOW_MAIN_ENCLOSURE 5
#define EPOW_POWER_OFF 7
-void rtas_parse_epow_errlog(struct rtas_error_log *log)
+static void rtas_parse_epow_errlog(struct rtas_error_log *log)
{
struct pseries_errorlog *pseries_log;
struct epow_errorlog *epow_log;
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index e724d3186e73..125c589eeef5 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -561,7 +561,7 @@ void pSeries_coalesce_init(void)
* fw_cmo_feature_init - FW_FEATURE_CMO is not stored in ibm,hypertas-functions,
* handle that here. (Stolen from parse_system_parameter_string)
*/
-void pSeries_cmo_feature_init(void)
+static void pSeries_cmo_feature_init(void)
{
char *ptr, *key, *value, *end;
int call_status;
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index 47b6b9f81d43..ad56edc39919 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -314,7 +314,7 @@ axon_ram_remove(struct platform_device *device)
return 0;
}
-static struct of_device_id axon_ram_device_id[] = {
+static const struct of_device_id axon_ram_device_id[] = {
{
.type = "dma-memory"
},
diff --git a/arch/powerpc/sysdev/dcr.c b/arch/powerpc/sysdev/dcr.c
index e9056e438575..2d8a101b6b9e 100644
--- a/arch/powerpc/sysdev/dcr.c
+++ b/arch/powerpc/sysdev/dcr.c
@@ -230,5 +230,6 @@ EXPORT_SYMBOL_GPL(dcr_unmap_mmio);
#ifdef CONFIG_PPC_DCR_NATIVE
DEFINE_SPINLOCK(dcr_ind_lock);
+EXPORT_SYMBOL_GPL(dcr_ind_lock);
#endif /* defined(CONFIG_PPC_DCR_NATIVE) */
diff --git a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c
index afc2dbf37011..90545ad1626e 100644
--- a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c
+++ b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c
@@ -171,7 +171,7 @@ static int mpc85xx_l2ctlr_of_remove(struct platform_device *dev)
return 0;
}
-static struct of_device_id mpc85xx_l2ctlr_of_match[] = {
+static const struct of_device_id mpc85xx_l2ctlr_of_match[] = {
{
.compatible = "fsl,p2020-l2-cache-controller",
},
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index b32e79dbef4f..de40b48b460e 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -18,6 +18,8 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/of_platform.h>
+#include <linux/interrupt.h>
+#include <linux/seq_file.h>
#include <sysdev/fsl_soc.h>
#include <asm/prom.h>
#include <asm/hw_irq.h>
@@ -50,6 +52,7 @@ struct fsl_msi_feature {
struct fsl_msi_cascade_data {
struct fsl_msi *msi_data;
int index;
+ int virq;
};
static inline u32 fsl_msi_read(u32 __iomem *base, unsigned int reg)
@@ -65,11 +68,24 @@ static void fsl_msi_end_irq(struct irq_data *d)
{
}
+static void fsl_msi_print_chip(struct irq_data *irqd, struct seq_file *p)
+{
+ struct fsl_msi *msi_data = irqd->domain->host_data;
+ irq_hw_number_t hwirq = irqd_to_hwirq(irqd);
+ int cascade_virq, srs;
+
+ srs = (hwirq >> msi_data->srs_shift) & MSI_SRS_MASK;
+ cascade_virq = msi_data->cascade_array[srs]->virq;
+
+ seq_printf(p, " fsl-msi-%d", cascade_virq);
+}
+
+
static struct irq_chip fsl_msi_chip = {
.irq_mask = mask_msi_irq,
.irq_unmask = unmask_msi_irq,
.irq_ack = fsl_msi_end_irq,
- .name = "FSL-MSI",
+ .irq_print_chip = fsl_msi_print_chip,
};
static int fsl_msi_host_map(struct irq_domain *h, unsigned int virq,
@@ -175,7 +191,8 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
np = of_parse_phandle(hose->dn, "fsl,msi", 0);
if (np) {
if (of_device_is_compatible(np, "fsl,mpic-msi") ||
- of_device_is_compatible(np, "fsl,vmpic-msi"))
+ of_device_is_compatible(np, "fsl,vmpic-msi") ||
+ of_device_is_compatible(np, "fsl,vmpic-msi-v4.3"))
phandle = np->phandle;
else {
dev_err(&pdev->dev,
@@ -234,40 +251,24 @@ out_free:
return rc;
}
-static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
+static irqreturn_t fsl_msi_cascade(int irq, void *data)
{
- struct irq_chip *chip = irq_desc_get_chip(desc);
- struct irq_data *idata = irq_desc_get_irq_data(desc);
unsigned int cascade_irq;
struct fsl_msi *msi_data;
int msir_index = -1;
u32 msir_value = 0;
u32 intr_index;
u32 have_shift = 0;
- struct fsl_msi_cascade_data *cascade_data;
+ struct fsl_msi_cascade_data *cascade_data = data;
+ irqreturn_t ret = IRQ_NONE;
- cascade_data = irq_get_handler_data(irq);
msi_data = cascade_data->msi_data;
- raw_spin_lock(&desc->lock);
- if ((msi_data->feature & FSL_PIC_IP_MASK) == FSL_PIC_IP_IPIC) {
- if (chip->irq_mask_ack)
- chip->irq_mask_ack(idata);
- else {
- chip->irq_mask(idata);
- chip->irq_ack(idata);
- }
- }
-
- if (unlikely(irqd_irq_inprogress(idata)))
- goto unlock;
-
msir_index = cascade_data->index;
if (msir_index >= NR_MSI_REG_MAX)
cascade_irq = NO_IRQ;
- irqd_set_chained_irq_inprogress(idata);
switch (msi_data->feature & FSL_PIC_IP_MASK) {
case FSL_PIC_IP_MPIC:
msir_value = fsl_msi_read(msi_data->msi_regs,
@@ -296,40 +297,32 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
cascade_irq = irq_linear_revmap(msi_data->irqhost,
msi_hwirq(msi_data, msir_index,
intr_index + have_shift));
- if (cascade_irq != NO_IRQ)
+ if (cascade_irq != NO_IRQ) {
generic_handle_irq(cascade_irq);
+ ret = IRQ_HANDLED;
+ }
have_shift += intr_index + 1;
msir_value = msir_value >> (intr_index + 1);
}
- irqd_clr_chained_irq_inprogress(idata);
- switch (msi_data->feature & FSL_PIC_IP_MASK) {
- case FSL_PIC_IP_MPIC:
- case FSL_PIC_IP_VMPIC:
- chip->irq_eoi(idata);
- break;
- case FSL_PIC_IP_IPIC:
- if (!irqd_irq_disabled(idata) && chip->irq_unmask)
- chip->irq_unmask(idata);
- break;
- }
-unlock:
- raw_spin_unlock(&desc->lock);
+ return ret;
}
static int fsl_of_msi_remove(struct platform_device *ofdev)
{
struct fsl_msi *msi = platform_get_drvdata(ofdev);
int virq, i;
- struct fsl_msi_cascade_data *cascade_data;
if (msi->list.prev != NULL)
list_del(&msi->list);
for (i = 0; i < NR_MSI_REG_MAX; i++) {
- virq = msi->msi_virqs[i];
- if (virq != NO_IRQ) {
- cascade_data = irq_get_handler_data(virq);
- kfree(cascade_data);
+ if (msi->cascade_array[i]) {
+ virq = msi->cascade_array[i]->virq;
+
+ BUG_ON(virq == NO_IRQ);
+
+ free_irq(virq, msi->cascade_array[i]);
+ kfree(msi->cascade_array[i]);
irq_dispose_mapping(virq);
}
}
@@ -348,7 +341,7 @@ static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev,
int offset, int irq_index)
{
struct fsl_msi_cascade_data *cascade_data = NULL;
- int virt_msir, i;
+ int virt_msir, i, ret;
virt_msir = irq_of_parse_and_map(dev->dev.of_node, irq_index);
if (virt_msir == NO_IRQ) {
@@ -363,11 +356,18 @@ static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev,
return -ENOMEM;
}
irq_set_lockdep_class(virt_msir, &fsl_msi_irq_class);
- msi->msi_virqs[irq_index] = virt_msir;
cascade_data->index = offset;
cascade_data->msi_data = msi;
- irq_set_handler_data(virt_msir, cascade_data);
- irq_set_chained_handler(virt_msir, fsl_msi_cascade);
+ cascade_data->virq = virt_msir;
+ msi->cascade_array[irq_index] = cascade_data;
+
+ ret = request_irq(virt_msir, fsl_msi_cascade, 0,
+ "fsl-msi-cascade", cascade_data);
+ if (ret) {
+ dev_err(&dev->dev, "failed to request_irq(%d), ret = %d\n",
+ virt_msir, ret);
+ return ret;
+ }
/* Release the hwirqs corresponding to this MSI register */
for (i = 0; i < IRQS_PER_MSI_REG; i++)
@@ -461,7 +461,8 @@ static int fsl_of_msi_probe(struct platform_device *dev)
p = of_get_property(dev->dev.of_node, "msi-available-ranges", &len);
- if (of_device_is_compatible(dev->dev.of_node, "fsl,mpic-msi-v4.3")) {
+ if (of_device_is_compatible(dev->dev.of_node, "fsl,mpic-msi-v4.3") ||
+ of_device_is_compatible(dev->dev.of_node, "fsl,vmpic-msi-v4.3")) {
msi->srs_shift = MSIIR1_SRS_SHIFT;
msi->ibs_shift = MSIIR1_IBS_SHIFT;
if (p)
@@ -566,6 +567,10 @@ static const struct of_device_id fsl_of_msi_ids[] = {
.compatible = "fsl,vmpic-msi",
.data = &vmpic_msi_feature,
},
+ {
+ .compatible = "fsl,vmpic-msi-v4.3",
+ .data = &vmpic_msi_feature,
+ },
#endif
{}
};
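
A condensed sketch of the conversion pattern used in fsl_msi.c above: the chained flow handler, which had to take the irq_desc lock and mask/ack/eoi its parent by hand, becomes an ordinary handler registered with request_irq(), and the parent irq number is kept in the per-register cascade data so teardown can free_irq() and dispose of the mapping. All names here are illustrative, not the driver's.

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/slab.h>

struct cascade_ctx {            /* one per MSI status register */
        int index;
        int virq;               /* Linux irq number of the cascade input */
};

static irqreturn_t cascade_handler(int irq, void *data)
{
        struct cascade_ctx *ctx = data;
        irqreturn_t ret = IRQ_NONE;

        /* decode the status register selected by ctx->index and call
         * generic_handle_irq() for each pending child interrupt;
         * set ret = IRQ_HANDLED when anything was dispatched */
        return ret;
}

static int cascade_setup(struct cascade_ctx *ctx)
{
        /* genirq now masks/acks/eois the parent around the handler */
        return request_irq(ctx->virq, cascade_handler, 0,
                           "msi-cascade", ctx);
}

static void cascade_teardown(struct cascade_ctx *ctx)
{
        free_irq(ctx->virq, ctx);
        irq_dispose_mapping(ctx->virq);
        kfree(ctx);
}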
diff --git a/arch/powerpc/sysdev/fsl_msi.h b/arch/powerpc/sysdev/fsl_msi.h
index df9aa9fe0933..420cfcbdac01 100644
--- a/arch/powerpc/sysdev/fsl_msi.h
+++ b/arch/powerpc/sysdev/fsl_msi.h
@@ -27,6 +27,8 @@
#define FSL_PIC_IP_IPIC 0x00000002
#define FSL_PIC_IP_VMPIC 0x00000003
+struct fsl_msi_cascade_data;
+
struct fsl_msi {
struct irq_domain *irqhost;
@@ -37,7 +39,7 @@ struct fsl_msi {
u32 srs_shift; /* Shift of the shared interrupt register select */
void __iomem *msi_regs;
u32 feature;
- int msi_virqs[NR_MSI_REG_MAX];
+ struct fsl_msi_cascade_data *cascade_array[NR_MSI_REG_MAX];
struct msi_bitmap bitmap;
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index c5077673bd94..65d2ed4549e6 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -522,7 +522,8 @@ int fsl_add_bridge(struct platform_device *pdev, int is_primary)
} else {
/* For PCI read PROG to identify controller mode */
early_read_config_byte(hose, 0, 0, PCI_CLASS_PROG, &progif);
- if ((progif & 1) == 1)
+ if ((progif & 1) &&
+ !of_property_read_bool(dev, "fsl,pci-agent-force-enum"))
goto no_bridge;
}
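
The new fsl_add_bridge() check above relies on of_property_read_bool(), which simply reports whether the named property exists on the node; a minimal sketch of the pattern (function name illustrative):

#include <linux/of.h>

/* Illustrative: should a controller in PCI agent/endpoint mode still
 * enumerate the bus? True iff the device tree node has the property. */
static bool agent_force_enum(struct device_node *np)
{
        return of_property_read_bool(np, "fsl,pci-agent-force-enum");
}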
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index be33c9768ea1..89cec0ed6a58 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -960,7 +960,7 @@ void mpic_set_vector(unsigned int virq, unsigned int vector)
mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vecpri);
}
-void mpic_set_destination(unsigned int virq, unsigned int cpuid)
+static void mpic_set_destination(unsigned int virq, unsigned int cpuid)
{
struct mpic *mpic = mpic_from_irq(virq);
unsigned int src = virq_to_hw(virq);
diff --git a/arch/powerpc/sysdev/msi_bitmap.c b/arch/powerpc/sysdev/msi_bitmap.c
index 2ff630267e9e..0c75214b6f92 100644
--- a/arch/powerpc/sysdev/msi_bitmap.c
+++ b/arch/powerpc/sysdev/msi_bitmap.c
@@ -20,32 +20,37 @@ int msi_bitmap_alloc_hwirqs(struct msi_bitmap *bmp, int num)
int offset, order = get_count_order(num);
spin_lock_irqsave(&bmp->lock, flags);
- /*
- * This is fast, but stricter than we need. We might want to add
- * a fallback routine which does a linear search with no alignment.
- */
- offset = bitmap_find_free_region(bmp->bitmap, bmp->irq_count, order);
+
+ offset = bitmap_find_next_zero_area(bmp->bitmap, bmp->irq_count, 0,
+ num, (1 << order) - 1);
+ if (offset > bmp->irq_count)
+ goto err;
+
+ bitmap_set(bmp->bitmap, offset, num);
spin_unlock_irqrestore(&bmp->lock, flags);
- pr_debug("msi_bitmap: allocated 0x%x (2^%d) at offset 0x%x\n",
- num, order, offset);
+ pr_debug("msi_bitmap: allocated 0x%x at offset 0x%x\n", num, offset);
return offset;
+err:
+ spin_unlock_irqrestore(&bmp->lock, flags);
+ return -ENOMEM;
}
+EXPORT_SYMBOL(msi_bitmap_alloc_hwirqs);
void msi_bitmap_free_hwirqs(struct msi_bitmap *bmp, unsigned int offset,
unsigned int num)
{
unsigned long flags;
- int order = get_count_order(num);
- pr_debug("msi_bitmap: freeing 0x%x (2^%d) at offset 0x%x\n",
- num, order, offset);
+ pr_debug("msi_bitmap: freeing 0x%x at offset 0x%x\n",
+ num, offset);
spin_lock_irqsave(&bmp->lock, flags);
- bitmap_release_region(bmp->bitmap, offset, order);
+ bitmap_clear(bmp->bitmap, offset, num);
spin_unlock_irqrestore(&bmp->lock, flags);
}
+EXPORT_SYMBOL(msi_bitmap_free_hwirqs);
void msi_bitmap_reserve_hwirq(struct msi_bitmap *bmp, unsigned int hwirq)
{
@@ -143,7 +148,7 @@ void msi_bitmap_free(struct msi_bitmap *bmp)
#define check(x) \
if (!(x)) printk("msi_bitmap: test failed at line %d\n", __LINE__);
-void __init test_basics(void)
+static void __init test_basics(void)
{
struct msi_bitmap bmp;
int i, size = 512;
@@ -180,6 +185,15 @@ void __init test_basics(void)
msi_bitmap_free_hwirqs(&bmp, size / 2, 1);
check(msi_bitmap_alloc_hwirqs(&bmp, 1) == size / 2);
+ /* Check we get a naturally aligned offset */
+ check(msi_bitmap_alloc_hwirqs(&bmp, 2) % 2 == 0);
+ check(msi_bitmap_alloc_hwirqs(&bmp, 4) % 4 == 0);
+ check(msi_bitmap_alloc_hwirqs(&bmp, 8) % 8 == 0);
+ check(msi_bitmap_alloc_hwirqs(&bmp, 9) % 16 == 0);
+ check(msi_bitmap_alloc_hwirqs(&bmp, 3) % 4 == 0);
+ check(msi_bitmap_alloc_hwirqs(&bmp, 7) % 8 == 0);
+ check(msi_bitmap_alloc_hwirqs(&bmp, 121) % 128 == 0);
+
msi_bitmap_free(&bmp);
/* Clients may check bitmap == NULL for "not-allocated" */
@@ -188,7 +202,7 @@ void __init test_basics(void)
kfree(bmp.bitmap);
}
-void __init test_of_node(void)
+static void __init test_of_node(void)
{
u32 prop_data[] = { 10, 10, 25, 3, 40, 1, 100, 100, 200, 20 };
const char *expected_str = "0-9,20-24,28-39,41-99,220-255";
@@ -236,7 +250,7 @@ void __init test_of_node(void)
kfree(bmp.bitmap);
}
-int __init msi_bitmap_selftest(void)
+static int __init msi_bitmap_selftest(void)
{
printk(KERN_DEBUG "Running MSI bitmap self-tests ...\n");
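
A minimal sketch of the allocation policy the msi_bitmap.c hunks above implement: the search is aligned to the next power of two at or above the request size (hence the 9 -> 16 and 121 -> 128 alignment checks added to the self-test), but only the requested number of bits is set and later cleared, unlike the old bitmap_find_free_region()/bitmap_release_region() pair which always consumed a full 2^order region. The helper name and the >= bound check below are illustrative.

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>

/* Illustrative: allocate 'num' naturally aligned bits from a map of
 * 'size' bits; returns the offset or -ENOMEM. */
static int alloc_aligned_bits(unsigned long *map, unsigned int size,
                              unsigned int num)
{
        unsigned long align_mask = (1UL << get_count_order(num)) - 1;
        unsigned long offset;

        offset = bitmap_find_next_zero_area(map, size, 0, num, align_mask);
        if (offset >= size)
                return -ENOMEM;
        bitmap_set(map, offset, num);   /* only 'num' bits, not 2^order */
        return offset;
}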
diff --git a/arch/powerpc/sysdev/mv64x60_dev.c b/arch/powerpc/sysdev/mv64x60_dev.c
index c2dba7db71ad..026bbc3b2c47 100644
--- a/arch/powerpc/sysdev/mv64x60_dev.c
+++ b/arch/powerpc/sysdev/mv64x60_dev.c
@@ -23,7 +23,7 @@
/* These functions provide the necessary setup for the mv64x60 drivers. */
-static struct of_device_id __initdata of_mv64x60_devices[] = {
+static const struct of_device_id of_mv64x60_devices[] __initconst = {
{ .compatible = "marvell,mv64306-devctrl", },
{}
};
diff --git a/arch/powerpc/sysdev/pmi.c b/arch/powerpc/sysdev/pmi.c
index 5aaf86c03893..13e67d93a7c1 100644
--- a/arch/powerpc/sysdev/pmi.c
+++ b/arch/powerpc/sysdev/pmi.c
@@ -101,7 +101,7 @@ out:
}
-static struct of_device_id pmi_match[] = {
+static const struct of_device_id pmi_match[] = {
{ .type = "ibm,pmi", .name = "ibm,pmi" },
{ .type = "ibm,pmi" },
{},
diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c
index de8d9483bbe8..2fc4cf1b7557 100644
--- a/arch/powerpc/sysdev/xics/icp-native.c
+++ b/arch/powerpc/sysdev/xics/icp-native.c
@@ -155,6 +155,31 @@ static void icp_native_cause_ipi(int cpu, unsigned long data)
icp_native_set_qirr(cpu, IPI_PRIORITY);
}
+/*
+ * Called when an interrupt is received on an off-line CPU to
+ * clear the interrupt, so that the CPU can go back to nap mode.
+ */
+void icp_native_flush_interrupt(void)
+{
+ unsigned int xirr = icp_native_get_xirr();
+ unsigned int vec = xirr & 0x00ffffff;
+
+ if (vec == XICS_IRQ_SPURIOUS)
+ return;
+ if (vec == XICS_IPI) {
+ /* Clear pending IPI */
+ int cpu = smp_processor_id();
+ kvmppc_set_host_ipi(cpu, 0);
+ icp_native_set_qirr(cpu, 0xff);
+ } else {
+ pr_err("XICS: hw interrupt 0x%x to offline cpu, disabling\n",
+ vec);
+ xics_mask_unknown_vec(vec);
+ }
+ /* EOI the interrupt */
+ icp_native_set_xirr(xirr);
+}
+
void xics_wake_cpu(int cpu)
{
icp_native_set_qirr(cpu, IPI_PRIORITY);
diff --git a/arch/powerpc/sysdev/xilinx_intc.c b/arch/powerpc/sysdev/xilinx_intc.c
index 83f943a8e0db..56f0524e47a6 100644
--- a/arch/powerpc/sysdev/xilinx_intc.c
+++ b/arch/powerpc/sysdev/xilinx_intc.c
@@ -265,7 +265,7 @@ static void __init xilinx_i8259_setup_cascade(void)
static inline void xilinx_i8259_setup_cascade(void) { return; }
#endif /* defined(CONFIG_PPC_I8259) */
-static struct of_device_id xilinx_intc_match[] __initconst = {
+static const struct of_device_id xilinx_intc_match[] __initconst = {
{ .compatible = "xlnx,opb-intc-1.00.c", },
{ .compatible = "xlnx,xps-intc-1.00.a", },
{}
diff --git a/arch/powerpc/sysdev/xilinx_pci.c b/arch/powerpc/sysdev/xilinx_pci.c
index 1453b0eed220..fea5667699ed 100644
--- a/arch/powerpc/sysdev/xilinx_pci.c
+++ b/arch/powerpc/sysdev/xilinx_pci.c
@@ -27,7 +27,7 @@
#define PCI_HOST_ENABLE_CMD PCI_COMMAND_SERR | PCI_COMMAND_PARITY | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY
-static struct of_device_id xilinx_pci_match[] = {
+static const struct of_device_id xilinx_pci_match[] = {
{ .compatible = "xlnx,plbv46-pci-1.03.a", },
{}
};
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 05c78bb5f570..f2cf1f90295b 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -58,6 +58,9 @@ config NO_IOPORT_MAP
config PCI_QUIRKS
def_bool n
+config ARCH_SUPPORTS_UPROBES
+ def_bool 64BIT
+
config S390
def_bool y
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
@@ -97,6 +100,7 @@ config S390
select ARCH_WANT_IPC_PARSE_VERSION
select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS2
+ select DYNAMIC_FTRACE if FUNCTION_TRACER
select GENERIC_CLOCKEVENTS
select GENERIC_CPU_DEVICES if !SMP
select GENERIC_FIND_FIRST_BIT
@@ -113,10 +117,11 @@ config S390
select HAVE_CMPXCHG_LOCAL
select HAVE_C_RECORDMCOUNT
select HAVE_DEBUG_KMEMLEAK
- select HAVE_DYNAMIC_FTRACE
+ select HAVE_DYNAMIC_FTRACE if 64BIT
+ select HAVE_DYNAMIC_FTRACE_WITH_REGS if 64BIT
select HAVE_FTRACE_MCOUNT_RECORD
- select HAVE_FUNCTION_GRAPH_TRACER
- select HAVE_FUNCTION_TRACER
+ select HAVE_FUNCTION_GRAPH_TRACER if 64BIT
+ select HAVE_FUNCTION_TRACER if 64BIT
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_GZIP
@@ -496,8 +501,8 @@ config QDIO
menuconfig PCI
bool "PCI support"
- default n
depends on 64BIT
+ select HAVE_DMA_ATTRS
select PCI_MSI
help
Enable PCI support.
@@ -544,9 +549,6 @@ config HAS_DMA
config NEED_SG_DMA_LENGTH
def_bool PCI
-config HAVE_DMA_ATTRS
- def_bool PCI
-
config NEED_DMA_MAP_STATE
def_bool PCI
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 874e6d6e9c5f..878e67973151 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -35,13 +35,16 @@ endif
export LD_BFD
-cflags-$(CONFIG_MARCH_G5) += -march=g5
-cflags-$(CONFIG_MARCH_Z900) += -march=z900
-cflags-$(CONFIG_MARCH_Z990) += -march=z990
-cflags-$(CONFIG_MARCH_Z9_109) += -march=z9-109
-cflags-$(CONFIG_MARCH_Z10) += -march=z10
-cflags-$(CONFIG_MARCH_Z196) += -march=z196
-cflags-$(CONFIG_MARCH_ZEC12) += -march=zEC12
+mflags-$(CONFIG_MARCH_G5) := -march=g5
+mflags-$(CONFIG_MARCH_Z900) := -march=z900
+mflags-$(CONFIG_MARCH_Z990) := -march=z990
+mflags-$(CONFIG_MARCH_Z9_109) := -march=z9-109
+mflags-$(CONFIG_MARCH_Z10) := -march=z10
+mflags-$(CONFIG_MARCH_Z196) := -march=z196
+mflags-$(CONFIG_MARCH_ZEC12) := -march=zEC12
+
+aflags-y += $(mflags-y)
+cflags-y += $(mflags-y)
cflags-$(CONFIG_MARCH_G5_TUNE) += -mtune=g5
cflags-$(CONFIG_MARCH_Z900_TUNE) += -mtune=z900
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index 19ff956b752b..b5dce6544d76 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -15,11 +15,13 @@
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
/* Fast-BCR without checkpoint synchronization */
-#define mb() do { asm volatile("bcr 14,0" : : : "memory"); } while (0)
+#define __ASM_BARRIER "bcr 14,0\n"
#else
-#define mb() do { asm volatile("bcr 15,0" : : : "memory"); } while (0)
+#define __ASM_BARRIER "bcr 15,0\n"
#endif
+#define mb() do { asm volatile(__ASM_BARRIER : : : "memory"); } while (0)
+
#define rmb() mb()
#define wmb() mb()
#define read_barrier_depends() do { } while(0)
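
Splitting the serialization instruction out into __ASM_BARRIER lets other s390 headers embed it directly in their own asm; the spinlock/rwlock unlock paths later in this patch (see the arch_spin_unlock() hunk in spinlock.h) use exactly this barrier-then-store sequence. A standalone sketch of the pattern, mirroring that hunk:

/* Illustrative release-store built on __ASM_BARRIER (s390 only). */
static inline void release_store_u32(unsigned int *word, unsigned int val)
{
        asm volatile(
                __ASM_BARRIER
                "st %1,%0\n"
                : "+Q" (*word)
                : "d" (val)
                : "cc", "memory");
}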
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index f65bd3634519..f8c196984853 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -8,8 +8,6 @@
#define _S390_CPUTIME_H
#include <linux/types.h>
-#include <linux/percpu.h>
-#include <linux/spinlock.h>
#include <asm/div64.h>
@@ -18,6 +16,8 @@
typedef unsigned long long __nocast cputime_t;
typedef unsigned long long __nocast cputime64_t;
+#define cmpxchg_cputime(ptr, old, new) cmpxchg64(ptr, old, new)
+
static inline unsigned long __div(unsigned long long n, unsigned long base)
{
#ifndef CONFIG_64BIT
@@ -165,28 +165,8 @@ static inline clock_t cputime64_to_clock_t(cputime64_t cputime)
return clock;
}
-struct s390_idle_data {
- int nohz_delay;
- unsigned int sequence;
- unsigned long long idle_count;
- unsigned long long idle_time;
- unsigned long long clock_idle_enter;
- unsigned long long clock_idle_exit;
- unsigned long long timer_idle_enter;
- unsigned long long timer_idle_exit;
-};
-
-DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
-
-cputime64_t s390_get_idle_time(int cpu);
-
-#define arch_idle_time(cpu) s390_get_idle_time(cpu)
-
-static inline int s390_nohz_delay(int cpu)
-{
- return __get_cpu_var(s390_idle).nohz_delay != 0;
-}
+cputime64_t arch_cpu_idle_time(int cpu);
-#define arch_needs_cpu(cpu) s390_nohz_delay(cpu)
+#define arch_idle_time(cpu) arch_cpu_idle_time(cpu)
#endif /* _S390_CPUTIME_H */
diff --git a/arch/s390/include/asm/dis.h b/arch/s390/include/asm/dis.h
index 04a83f5773cd..60323c21938b 100644
--- a/arch/s390/include/asm/dis.h
+++ b/arch/s390/include/asm/dis.h
@@ -13,12 +13,13 @@
#define OPERAND_FPR 0x2 /* Operand printed as %fx */
#define OPERAND_AR 0x4 /* Operand printed as %ax */
#define OPERAND_CR 0x8 /* Operand printed as %cx */
-#define OPERAND_DISP 0x10 /* Operand printed as displacement */
-#define OPERAND_BASE 0x20 /* Operand printed as base register */
-#define OPERAND_INDEX 0x40 /* Operand printed as index register */
-#define OPERAND_PCREL 0x80 /* Operand printed as pc-relative symbol */
-#define OPERAND_SIGNED 0x100 /* Operand printed as signed value */
-#define OPERAND_LENGTH 0x200 /* Operand printed as length (+1) */
+#define OPERAND_VR 0x10 /* Operand printed as %vx */
+#define OPERAND_DISP 0x20 /* Operand printed as displacement */
+#define OPERAND_BASE 0x40 /* Operand printed as base register */
+#define OPERAND_INDEX 0x80 /* Operand printed as index register */
+#define OPERAND_PCREL 0x100 /* Operand printed as pc-relative symbol */
+#define OPERAND_SIGNED 0x200 /* Operand printed as signed value */
+#define OPERAND_LENGTH 0x400 /* Operand printed as length (+1) */
struct s390_operand {
diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h
index 3fbc67d9e197..709955ddaa4d 100644
--- a/arch/s390/include/asm/dma-mapping.h
+++ b/arch/s390/include/asm/dma-mapping.h
@@ -56,24 +56,35 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
return dma_addr == DMA_ERROR_CODE;
}
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
+#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags,
+ struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
- void *ret;
+ void *cpu_addr;
+
+ BUG_ON(!ops);
- ret = ops->alloc(dev, size, dma_handle, flag, NULL);
- debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
- return ret;
+ cpu_addr = ops->alloc(dev, size, dma_handle, flags, attrs);
+ debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
+
+ return cpu_addr;
}
-static inline void dma_free_coherent(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle)
+#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
{
- struct dma_map_ops *dma_ops = get_dma_ops(dev);
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!ops);
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
- dma_ops->free(dev, size, cpu_addr, dma_handle, NULL);
+ ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
#endif /* _ASM_S390_DMA_MAPPING_H */
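
Callers are unaffected by the dma-mapping.h rework above: dma_alloc_coherent()/dma_free_coherent() now expand to the *_attrs variants with a NULL struct dma_attrs. A minimal usage sketch (device pointer and size are illustrative):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Illustrative: grab and release a small coherent DMA buffer. */
static void *get_dma_buf(struct device *dev, size_t size, dma_addr_t *handle)
{
        return dma_alloc_coherent(dev, size, handle, GFP_KERNEL);
}

static void put_dma_buf(struct device *dev, size_t size, void *cpu_addr,
                        dma_addr_t handle)
{
        dma_free_coherent(dev, size, cpu_addr, handle);
}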
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 78f4f8711d58..f6e43d39e3d8 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -102,6 +102,7 @@
#define HWCAP_S390_ETF3EH 256
#define HWCAP_S390_HIGH_GPRS 512
#define HWCAP_S390_TE 1024
+#define HWCAP_S390_VXRS 2048
/*
* These are used to set parameters in the core dumps.
@@ -225,6 +226,6 @@ int arch_setup_additional_pages(struct linux_binprm *, int);
extern unsigned long arch_randomize_brk(struct mm_struct *mm);
#define arch_randomize_brk arch_randomize_brk
-void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
+void *fill_cpu_elf_notes(void *ptr, struct save_area *sa, __vector128 *vxrs);
#endif
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index bf246dae1367..3aef8afec336 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -4,6 +4,7 @@
#ifndef __ASSEMBLY__
extern void _mcount(void);
+extern char ftrace_graph_caller_end;
struct dyn_arch_ftrace { };
@@ -17,10 +18,8 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
#endif /* __ASSEMBLY__ */
-#ifdef CONFIG_64BIT
-#define MCOUNT_INSN_SIZE 12
-#else
-#define MCOUNT_INSN_SIZE 22
-#endif
+#define MCOUNT_INSN_SIZE 18
+
+#define ARCH_SUPPORTS_FTRACE_OPS 1
#endif /* _ASM_S390_FTRACE_H */
diff --git a/arch/s390/include/asm/idle.h b/arch/s390/include/asm/idle.h
new file mode 100644
index 000000000000..6af037f574b8
--- /dev/null
+++ b/arch/s390/include/asm/idle.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright IBM Corp. 2014
+ *
+ * Author: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef _S390_IDLE_H
+#define _S390_IDLE_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+
+struct s390_idle_data {
+ unsigned int sequence;
+ unsigned long long idle_count;
+ unsigned long long idle_time;
+ unsigned long long clock_idle_enter;
+ unsigned long long clock_idle_exit;
+ unsigned long long timer_idle_enter;
+ unsigned long long timer_idle_exit;
+};
+
+extern struct device_attribute dev_attr_idle_count;
+extern struct device_attribute dev_attr_idle_time_us;
+
+#endif /* _S390_IDLE_H */
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index c81661e756a0..ece606c2ee86 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -89,12 +89,12 @@ extern u32 ipl_flags;
extern u32 dump_prefix_page;
struct dump_save_areas {
- struct save_area **areas;
+ struct save_area_ext **areas;
int count;
};
extern struct dump_save_areas dump_save_areas;
-struct save_area *dump_save_area_create(int cpu);
+struct save_area_ext *dump_save_area_create(int cpu);
extern void do_reipl(void);
extern void do_halt(void);
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index c4dd400a2791..e787cc1bff8f 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -51,6 +51,7 @@ enum interruption_class {
IRQEXT_CMS,
IRQEXT_CMC,
IRQEXT_CMR,
+ IRQEXT_FTP,
IRQIO_CIO,
IRQIO_QAI,
IRQIO_DAS,
diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h
index 4176dfe0fba1..98629173ce3b 100644
--- a/arch/s390/include/asm/kprobes.h
+++ b/arch/s390/include/asm/kprobes.h
@@ -84,6 +84,10 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data);
+int probe_is_prohibited_opcode(u16 *insn);
+int probe_get_fixup_type(u16 *insn);
+int probe_is_insn_relative_long(u16 *insn);
+
#define flush_insn_slot(p) do { } while (0)
#endif /* _ASM_S390_KPROBES_H */
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 4349197ab9df..6cc51fe84410 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <asm/ptrace.h>
#include <asm/cpu.h>
+#include <asm/types.h>
#ifdef CONFIG_32BIT
@@ -31,6 +32,11 @@ struct save_area {
u32 ctrl_regs[16];
} __packed;
+struct save_area_ext {
+ struct save_area sa;
+ __vector128 vx_regs[32];
+};
+
struct _lowcore {
psw_t restart_psw; /* 0x0000 */
psw_t restart_old_psw; /* 0x0008 */
@@ -183,6 +189,11 @@ struct save_area {
u64 ctrl_regs[16];
} __packed;
+struct save_area_ext {
+ struct save_area sa;
+ __vector128 vx_regs[32];
+};
+
struct _lowcore {
__u8 pad_0x0000[0x0014-0x0000]; /* 0x0000 */
__u32 ipl_parmblock_ptr; /* 0x0014 */
@@ -310,7 +321,10 @@ struct _lowcore {
/* Extended facility list */
__u64 stfle_fac_list[32]; /* 0x0f00 */
- __u8 pad_0x1000[0x11b8-0x1000]; /* 0x1000 */
+ __u8 pad_0x1000[0x11b0-0x1000]; /* 0x1000 */
+
+ /* Pointer to vector register save area */
+ __u64 vector_save_area_addr; /* 0x11b0 */
/* 64 bit extparam used for pfault/diag 250: defined by architecture */
__u64 ext_params2; /* 0x11B8 */
@@ -334,9 +348,10 @@ struct _lowcore {
/* Transaction abort diagnostic block */
__u8 pgm_tdb[256]; /* 0x1800 */
+ __u8 pad_0x1900[0x1c00-0x1900]; /* 0x1900 */
- /* align to the top of the prefix area */
- __u8 pad_0x1900[0x2000-0x1900]; /* 0x1900 */
+ /* Software defined save area for vector registers */
+ __u8 vector_save_area[1024]; /* 0x1c00 */
} __packed;
#endif /* CONFIG_32BIT */
diff --git a/arch/s390/include/asm/nmi.h b/arch/s390/include/asm/nmi.h
index 35f8ec185616..3027a5a72b74 100644
--- a/arch/s390/include/asm/nmi.h
+++ b/arch/s390/include/asm/nmi.h
@@ -38,7 +38,7 @@ struct mci {
__u32 pm : 1; /* 22 psw program mask and cc validity */
__u32 ia : 1; /* 23 psw instruction address validity */
__u32 fa : 1; /* 24 failing storage address validity */
- __u32 : 1; /* 25 */
+ __u32 vr : 1; /* 25 vector register validity */
__u32 ec : 1; /* 26 external damage code validity */
__u32 fp : 1; /* 27 floating point register validity */
__u32 gr : 1; /* 28 general register validity */
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index b7054356cc98..57c882761dea 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -217,7 +217,6 @@ extern unsigned long MODULES_END;
*/
/* Hardware bits in the page table entry */
-#define _PAGE_CO 0x100 /* HW Change-bit override */
#define _PAGE_PROTECT 0x200 /* HW read-only bit */
#define _PAGE_INVALID 0x400 /* HW invalid bit */
#define _PAGE_LARGE 0x800 /* Bit to mark a large pte */
@@ -234,8 +233,8 @@ extern unsigned long MODULES_END;
#define __HAVE_ARCH_PTE_SPECIAL
/* Set of bits not changed in pte_modify */
-#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_CO | \
- _PAGE_DIRTY | _PAGE_YOUNG)
+#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
+ _PAGE_YOUNG)
/*
* handle_pte_fault uses pte_present, pte_none and pte_file to find out the
@@ -354,7 +353,6 @@ extern unsigned long MODULES_END;
#define _REGION3_ENTRY_LARGE 0x400 /* RTTE-format control, large page */
#define _REGION3_ENTRY_RO 0x200 /* page protection bit */
-#define _REGION3_ENTRY_CO 0x100 /* change-recording override */
/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS 0xfffffffffffffe33UL
@@ -371,7 +369,6 @@ extern unsigned long MODULES_END;
#define _SEGMENT_ENTRY_YOUNG 0x1000 /* SW segment young bit */
#define _SEGMENT_ENTRY_SPLIT 0x0800 /* THP splitting bit */
#define _SEGMENT_ENTRY_LARGE 0x0400 /* STE-format control, large page */
-#define _SEGMENT_ENTRY_CO 0x0100 /* change-recording override */
#define _SEGMENT_ENTRY_READ 0x0002 /* SW segment read bit */
#define _SEGMENT_ENTRY_WRITE 0x0001 /* SW segment write bit */
@@ -873,8 +870,6 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pgste = pgste_set_pte(ptep, pgste, entry);
pgste_set_unlock(ptep, pgste);
} else {
- if (!(pte_val(entry) & _PAGE_INVALID) && MACHINE_HAS_EDAT1)
- pte_val(entry) |= _PAGE_CO;
*ptep = entry;
}
}
@@ -1044,6 +1039,22 @@ static inline void __ptep_ipte_local(unsigned long address, pte_t *ptep)
: "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
}
+static inline void __ptep_ipte_range(unsigned long address, int nr, pte_t *ptep)
+{
+ unsigned long pto = (unsigned long) ptep;
+
+#ifndef CONFIG_64BIT
+ /* pto in ESA mode must point to the start of the segment table */
+ pto &= 0x7ffffc00;
+#endif
+ /* Invalidate a range of ptes + global TLB flush of the ptes */
+ do {
+ asm volatile(
+ " .insn rrf,0xb2210000,%2,%0,%1,0"
+ : "+a" (address), "+a" (nr) : "a" (pto) : "memory");
+ } while (nr != 255);
+}
+
static inline void ptep_flush_direct(struct mm_struct *mm,
unsigned long address, pte_t *ptep)
{
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index e568fc8a7250..d559bdb03d18 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -13,9 +13,11 @@
#define CIF_MCCK_PENDING 0 /* machine check handling is pending */
#define CIF_ASCE 1 /* user asce needs fixup / uaccess */
+#define CIF_NOHZ_DELAY 2 /* delay HZ disable for a tick */
#define _CIF_MCCK_PENDING (1<<CIF_MCCK_PENDING)
#define _CIF_ASCE (1<<CIF_ASCE)
+#define _CIF_NOHZ_DELAY (1<<CIF_NOHZ_DELAY)
#ifndef __ASSEMBLY__
@@ -43,6 +45,8 @@ static inline int test_cpu_flag(int flag)
return !!(S390_lowcore.cpu_flags & (1U << flag));
}
+#define arch_needs_cpu() test_cpu_flag(CIF_NOHZ_DELAY)
+
/*
* Default implementation of macro that returns current
* instruction pointer ("program counter").
@@ -113,6 +117,7 @@ struct thread_struct {
int ri_signum;
#ifdef CONFIG_64BIT
unsigned char trap_tdb[256]; /* Transaction abort diagnose block */
+ __vector128 *vxrs; /* Vector register save area */
#endif
};
@@ -285,7 +290,12 @@ static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc)
return (psw.addr - ilc) & mask;
#endif
}
-
+
+/*
+ * Function to stop a processor until the next interrupt occurs
+ */
+void enabled_wait(void);
+
/*
* Function to drop a processor into disabled wait state
*/
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 55d69dd7473c..be317feff7ac 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -161,6 +161,12 @@ static inline long regs_return_value(struct pt_regs *regs)
return regs->gprs[2];
}
+static inline void instruction_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ regs->psw.addr = val | PSW_ADDR_AMODE;
+}
+
int regs_query_register_offset(const char *name);
const char *regs_query_register_name(unsigned int offset);
unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset);
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 089a49814c50..7736fdd72595 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -55,8 +55,8 @@ extern void detect_memory_memblock(void);
#define MACHINE_FLAG_LPP (1UL << 13)
#define MACHINE_FLAG_TOPOLOGY (1UL << 14)
#define MACHINE_FLAG_TE (1UL << 15)
-#define MACHINE_FLAG_RRBM (1UL << 16)
#define MACHINE_FLAG_TLB_LC (1UL << 17)
+#define MACHINE_FLAG_VX (1UL << 18)
#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
@@ -78,8 +78,8 @@ extern void detect_memory_memblock(void);
#define MACHINE_HAS_LPP (0)
#define MACHINE_HAS_TOPOLOGY (0)
#define MACHINE_HAS_TE (0)
-#define MACHINE_HAS_RRBM (0)
#define MACHINE_HAS_TLB_LC (0)
+#define MACHINE_HAS_VX (0)
#else /* CONFIG_64BIT */
#define MACHINE_HAS_IEEE (1)
#define MACHINE_HAS_CSP (1)
@@ -91,8 +91,8 @@ extern void detect_memory_memblock(void);
#define MACHINE_HAS_LPP (S390_lowcore.machine_flags & MACHINE_FLAG_LPP)
#define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
#define MACHINE_HAS_TE (S390_lowcore.machine_flags & MACHINE_FLAG_TE)
-#define MACHINE_HAS_RRBM (S390_lowcore.machine_flags & MACHINE_FLAG_RRBM)
#define MACHINE_HAS_TLB_LC (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC)
+#define MACHINE_HAS_VX (S390_lowcore.machine_flags & MACHINE_FLAG_VX)
#endif /* CONFIG_64BIT */
/*
diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h
index bf9c823d4020..49576115dbb7 100644
--- a/arch/s390/include/asm/sigp.h
+++ b/arch/s390/include/asm/sigp.h
@@ -15,6 +15,7 @@
#define SIGP_SET_ARCHITECTURE 18
#define SIGP_COND_EMERGENCY_SIGNAL 19
#define SIGP_SENSE_RUNNING 21
+#define SIGP_STORE_ADDITIONAL_STATUS 23
/* SIGP condition codes */
#define SIGP_CC_ORDER_CODE_ACCEPTED 0
@@ -33,9 +34,10 @@
#ifndef __ASSEMBLY__
-static inline int __pcpu_sigp(u16 addr, u8 order, u32 parm, u32 *status)
+static inline int __pcpu_sigp(u16 addr, u8 order, unsigned long parm,
+ u32 *status)
{
- register unsigned int reg1 asm ("1") = parm;
+ register unsigned long reg1 asm ("1") = parm;
int cc;
asm volatile(
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index 4f1307962a95..762d4f88af5a 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -29,7 +29,6 @@ extern int smp_find_processor_id(u16 address);
extern int smp_store_status(int cpu);
extern int smp_vcpu_scheduled(int cpu);
extern void smp_yield_cpu(int cpu);
-extern void smp_yield(void);
extern void smp_cpu_set_polarization(int cpu, int val);
extern int smp_cpu_get_polarization(int cpu);
extern void smp_fill_possible_mask(void);
@@ -50,7 +49,6 @@ static inline int smp_find_processor_id(u16 address) { return 0; }
static inline int smp_store_status(int cpu) { return 0; }
static inline int smp_vcpu_scheduled(int cpu) { return 1; }
static inline void smp_yield_cpu(int cpu) { }
-static inline void smp_yield(void) { }
static inline void smp_fill_possible_mask(void) { }
#endif /* CONFIG_SMP */
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 96879f7ad6da..d6bdf906caa5 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -37,11 +37,17 @@ _raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
* (the type definitions are in asm/spinlock_types.h)
*/
+void arch_lock_relax(unsigned int cpu);
+
void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
-void arch_spin_relax(arch_spinlock_t *);
void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
+static inline void arch_spin_relax(arch_spinlock_t *lock)
+{
+ arch_lock_relax(lock->lock);
+}
+
static inline u32 arch_spin_lockval(int cpu)
{
return ~cpu;
@@ -64,11 +70,6 @@ static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
_raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL));
}
-static inline int arch_spin_tryrelease_once(arch_spinlock_t *lp)
-{
- return _raw_compare_and_swap(&lp->lock, SPINLOCK_LOCKVAL, 0);
-}
-
static inline void arch_spin_lock(arch_spinlock_t *lp)
{
if (!arch_spin_trylock_once(lp))
@@ -91,7 +92,13 @@ static inline int arch_spin_trylock(arch_spinlock_t *lp)
static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
- arch_spin_tryrelease_once(lp);
+ typecheck(unsigned int, lp->lock);
+ asm volatile(
+ __ASM_BARRIER
+ "st %1,%0\n"
+ : "+Q" (lp->lock)
+ : "d" (0)
+ : "cc", "memory");
}
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
@@ -123,13 +130,12 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
*/
#define arch_write_can_lock(x) ((x)->lock == 0)
-extern void _raw_read_lock_wait(arch_rwlock_t *lp);
-extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
-extern void _raw_write_lock_wait(arch_rwlock_t *lp);
-extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
static inline int arch_read_trylock_once(arch_rwlock_t *rw)
{
unsigned int old = ACCESS_ONCE(rw->lock);
@@ -144,16 +150,82 @@ static inline int arch_write_trylock_once(arch_rwlock_t *rw)
_raw_compare_and_swap(&rw->lock, 0, 0x80000000));
}
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define __RAW_OP_OR "lao"
+#define __RAW_OP_AND "lan"
+#define __RAW_OP_ADD "laa"
+
+#define __RAW_LOCK(ptr, op_val, op_string) \
+({ \
+ unsigned int old_val; \
+ \
+ typecheck(unsigned int *, ptr); \
+ asm volatile( \
+ op_string " %0,%2,%1\n" \
+ "bcr 14,0\n" \
+ : "=d" (old_val), "+Q" (*ptr) \
+ : "d" (op_val) \
+ : "cc", "memory"); \
+ old_val; \
+})
+
+#define __RAW_UNLOCK(ptr, op_val, op_string) \
+({ \
+ unsigned int old_val; \
+ \
+ typecheck(unsigned int *, ptr); \
+ asm volatile( \
+ "bcr 14,0\n" \
+ op_string " %0,%2,%1\n" \
+ : "=d" (old_val), "+Q" (*ptr) \
+ : "d" (op_val) \
+ : "cc", "memory"); \
+ old_val; \
+})
+
+extern void _raw_read_lock_wait(arch_rwlock_t *lp);
+extern void _raw_write_lock_wait(arch_rwlock_t *lp, unsigned int prev);
+
static inline void arch_read_lock(arch_rwlock_t *rw)
{
- if (!arch_read_trylock_once(rw))
+ unsigned int old;
+
+ old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
+ if ((int) old < 0)
_raw_read_lock_wait(rw);
}
-static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+ __RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+ unsigned int old;
+
+ old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
+ if (old != 0)
+ _raw_write_lock_wait(rw, old);
+ rw->owner = SPINLOCK_LOCKVAL;
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+ rw->owner = 0;
+ __RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
+}
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+extern void _raw_read_lock_wait(arch_rwlock_t *lp);
+extern void _raw_write_lock_wait(arch_rwlock_t *lp);
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
if (!arch_read_trylock_once(rw))
- _raw_read_lock_wait_flags(rw, flags);
+ _raw_read_lock_wait(rw);
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
@@ -169,19 +241,24 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
{
if (!arch_write_trylock_once(rw))
_raw_write_lock_wait(rw);
-}
-
-static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
-{
- if (!arch_write_trylock_once(rw))
- _raw_write_lock_wait_flags(rw, flags);
+ rw->owner = SPINLOCK_LOCKVAL;
}
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
- _raw_compare_and_swap(&rw->lock, 0x80000000, 0);
+ typecheck(unsigned int, rw->lock);
+
+ rw->owner = 0;
+ asm volatile(
+ __ASM_BARRIER
+ "st %1,%0\n"
+ : "+Q" (rw->lock)
+ : "d" (0)
+ : "cc", "memory");
}
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
if (!arch_read_trylock_once(rw))
@@ -191,12 +268,20 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
- if (!arch_write_trylock_once(rw))
- return _raw_write_trylock_retry(rw);
+ if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
+ return 0;
+ rw->owner = SPINLOCK_LOCKVAL;
return 1;
}
-#define arch_read_relax(lock) cpu_relax()
-#define arch_write_relax(lock) cpu_relax()
+static inline void arch_read_relax(arch_rwlock_t *rw)
+{
+ arch_lock_relax(rw->owner);
+}
+
+static inline void arch_write_relax(arch_rwlock_t *rw)
+{
+ arch_lock_relax(rw->owner);
+}
#endif /* __ASM_SPINLOCK_H */
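
A rough, portable model of the rwlock fast paths the z196 branch above implements with the interlocked-access instructions (laa/lao/lan): a reader atomically adds 1 and only takes the slow path if the writer bit (bit 31) was already set; a writer atomically ORs in 0x80000000 and takes the slow path if the old value was non-zero. The GCC __atomic builtins here stand in for the s390 instructions and are purely illustrative.

#include <stdint.h>

/* Lock word model: bit 31 = writer, bits 0..30 = reader count. */

static void model_read_lock(uint32_t *lock, void (*slowpath)(uint32_t *))
{
        uint32_t old = __atomic_fetch_add(lock, 1, __ATOMIC_SEQ_CST);

        if ((int32_t)old < 0)           /* a writer holds the lock */
                slowpath(lock);
}

static void model_read_unlock(uint32_t *lock)
{
        __atomic_fetch_sub(lock, 1, __ATOMIC_SEQ_CST);
}

static void model_write_lock(uint32_t *lock,
                             void (*slowpath)(uint32_t *, uint32_t))
{
        uint32_t old = __atomic_fetch_or(lock, 0x80000000u, __ATOMIC_SEQ_CST);

        if (old != 0)                   /* readers or another writer present */
                slowpath(lock, old);
}

static void model_write_unlock(uint32_t *lock)
{
        __atomic_fetch_and(lock, 0x7fffffffu, __ATOMIC_SEQ_CST);
}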
diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h
index b2cd6ff7c2c5..d84b6939237c 100644
--- a/arch/s390/include/asm/spinlock_types.h
+++ b/arch/s390/include/asm/spinlock_types.h
@@ -13,6 +13,7 @@ typedef struct {
typedef struct {
unsigned int lock;
+ unsigned int owner;
} arch_rwlock_t;
#define __ARCH_RW_LOCK_UNLOCKED { 0 }
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index 18ea9e3f8142..2542a7e4c8b4 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -103,6 +103,61 @@ static inline void restore_fp_regs(freg_t *fprs)
asm volatile("ld 15,%0" : : "Q" (fprs[15]));
}
+static inline void save_vx_regs(__vector128 *vxrs)
+{
+ typedef struct { __vector128 _[__NUM_VXRS]; } addrtype;
+
+ asm volatile(
+ " la 1,%0\n"
+ " .word 0xe70f,0x1000,0x003e\n" /* vstm 0,15,0(1) */
+ " .word 0xe70f,0x1100,0x0c3e\n" /* vstm 16,31,256(1) */
+ : "=Q" (*(addrtype *) vxrs) : : "1");
+}
+
+static inline void save_vx_regs_safe(__vector128 *vxrs)
+{
+ unsigned long cr0, flags;
+
+ flags = arch_local_irq_save();
+ __ctl_store(cr0, 0, 0);
+ __ctl_set_bit(0, 17);
+ __ctl_set_bit(0, 18);
+ save_vx_regs(vxrs);
+ __ctl_load(cr0, 0, 0);
+ arch_local_irq_restore(flags);
+}
+
+static inline void restore_vx_regs(__vector128 *vxrs)
+{
+ typedef struct { __vector128 _[__NUM_VXRS]; } addrtype;
+
+ asm volatile(
+ " la 1,%0\n"
+ " .word 0xe70f,0x1000,0x0036\n" /* vlm 0,15,0(1) */
+ " .word 0xe70f,0x1100,0x0c36\n" /* vlm 16,31,256(1) */
+ : : "Q" (*(addrtype *) vxrs) : "1");
+}
+
+static inline void save_fp_vx_regs(struct task_struct *task)
+{
+#ifdef CONFIG_64BIT
+ if (task->thread.vxrs)
+ save_vx_regs(task->thread.vxrs);
+ else
+#endif
+ save_fp_regs(task->thread.fp_regs.fprs);
+}
+
+static inline void restore_fp_vx_regs(struct task_struct *task)
+{
+#ifdef CONFIG_64BIT
+ if (task->thread.vxrs)
+ restore_vx_regs(task->thread.vxrs);
+ else
+#endif
+ restore_fp_regs(task->thread.fp_regs.fprs);
+}
+
static inline void save_access_regs(unsigned int *acrs)
{
typedef struct { int _[NUM_ACRS]; } acrstype;
@@ -120,16 +175,16 @@ static inline void restore_access_regs(unsigned int *acrs)
#define switch_to(prev,next,last) do { \
if (prev->mm) { \
save_fp_ctl(&prev->thread.fp_regs.fpc); \
- save_fp_regs(prev->thread.fp_regs.fprs); \
+ save_fp_vx_regs(prev); \
save_access_regs(&prev->thread.acrs[0]); \
save_ri_cb(prev->thread.ri_cb); \
} \
if (next->mm) { \
+ update_cr_regs(next); \
restore_fp_ctl(&next->thread.fp_regs.fpc); \
- restore_fp_regs(next->thread.fp_regs.fprs); \
+ restore_fp_vx_regs(next); \
restore_access_regs(&next->thread.acrs[0]); \
restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
- update_cr_regs(next); \
} \
prev = __switch_to(prev,next); \
} while (0)
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index b833e9c0bfbf..4d62fd5b56e5 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -84,11 +84,13 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */
#define TIF_SECCOMP 5 /* secure computing */
#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
+#define TIF_UPROBE 7 /* breakpointed or single-stepping */
#define TIF_31BIT 16 /* 32bit process */
#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal() */
#define TIF_SINGLE_STEP 19 /* This task is single stepped */
#define TIF_BLOCK_STEP 20 /* This task is block stepped */
+#define TIF_UPROBE_SINGLESTEP 21 /* This task is uprobe single stepped */
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
@@ -97,6 +99,7 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1<<TIF_SECCOMP)
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
+#define _TIF_UPROBE (1<<TIF_UPROBE)
#define _TIF_31BIT (1<<TIF_31BIT)
#define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP)
diff --git a/arch/s390/include/asm/uprobes.h b/arch/s390/include/asm/uprobes.h
new file mode 100644
index 000000000000..1411dff7fea7
--- /dev/null
+++ b/arch/s390/include/asm/uprobes.h
@@ -0,0 +1,42 @@
+/*
+ * User-space Probes (UProbes) for s390
+ *
+ * Copyright IBM Corp. 2014
+ * Author(s): Jan Willeke,
+ */
+
+#ifndef _ASM_UPROBES_H
+#define _ASM_UPROBES_H
+
+#include <linux/notifier.h>
+
+typedef u16 uprobe_opcode_t;
+
+#define UPROBE_XOL_SLOT_BYTES 256 /* cache aligned */
+
+#define UPROBE_SWBP_INSN 0x0002
+#define UPROBE_SWBP_INSN_SIZE 2
+
+struct arch_uprobe {
+ union{
+ uprobe_opcode_t insn[3];
+ uprobe_opcode_t ixol[3];
+ };
+ unsigned int saved_per : 1;
+ unsigned int saved_int_code;
+};
+
+struct arch_uprobe_task {
+};
+
+int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm,
+ unsigned long addr);
+int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs);
+int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
+bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
+int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
+ void *data);
+void arch_uprobe_abort_xol(struct arch_uprobe *ap, struct pt_regs *regs);
+unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
+ struct pt_regs *regs);
+#endif /* _ASM_UPROBES_H */
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index bc9746a7d47c..a62526d09201 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -22,13 +22,17 @@ struct vdso_data {
__u64 xtime_tod_stamp; /* TOD clock for xtime 0x08 */
__u64 xtime_clock_sec; /* Kernel time 0x10 */
__u64 xtime_clock_nsec; /* 0x18 */
- __u64 wtom_clock_sec; /* Wall to monotonic clock 0x20 */
- __u64 wtom_clock_nsec; /* 0x28 */
- __u32 tz_minuteswest; /* Minutes west of Greenwich 0x30 */
- __u32 tz_dsttime; /* Type of dst correction 0x34 */
- __u32 ectg_available; /* ECTG instruction present 0x38 */
- __u32 tk_mult; /* Mult. used for xtime_nsec 0x3c */
- __u32 tk_shift; /* Shift used for xtime_nsec 0x40 */
+ __u64 xtime_coarse_sec; /* Coarse kernel time 0x20 */
+ __u64 xtime_coarse_nsec; /* 0x28 */
+ __u64 wtom_clock_sec; /* Wall to monotonic clock 0x30 */
+ __u64 wtom_clock_nsec; /* 0x38 */
+ __u64 wtom_coarse_sec; /* Coarse wall to monotonic 0x40 */
+ __u64 wtom_coarse_nsec; /* 0x48 */
+ __u32 tz_minuteswest; /* Minutes west of Greenwich 0x50 */
+ __u32 tz_dsttime; /* Type of dst correction 0x54 */
+ __u32 ectg_available; /* ECTG instruction present 0x58 */
+ __u32 tk_mult; /* Mult. used for xtime_nsec 0x5c */
+ __u32 tk_shift; /* Shift used for xtime_nsec 0x60 */
};
struct vdso_per_cpu_data {
diff --git a/arch/s390/include/asm/vtimer.h b/arch/s390/include/asm/vtimer.h
index bfe25d513ad2..10a179af62d8 100644
--- a/arch/s390/include/asm/vtimer.h
+++ b/arch/s390/include/asm/vtimer.h
@@ -28,6 +28,4 @@ extern int del_virt_timer(struct vtimer_list *timer);
extern void init_cpu_vtimer(void);
extern void vtime_init(void);
-extern void vtime_stop_cpu(void);
-
#endif /* _ASM_S390_TIMER_H */
diff --git a/arch/s390/include/uapi/asm/sigcontext.h b/arch/s390/include/uapi/asm/sigcontext.h
index b30de9c01bbe..5f0b8d7ddb0b 100644
--- a/arch/s390/include/uapi/asm/sigcontext.h
+++ b/arch/s390/include/uapi/asm/sigcontext.h
@@ -7,10 +7,14 @@
#define _ASM_S390_SIGCONTEXT_H
#include <linux/compiler.h>
+#include <linux/types.h>
-#define __NUM_GPRS 16
-#define __NUM_FPRS 16
-#define __NUM_ACRS 16
+#define __NUM_GPRS 16
+#define __NUM_FPRS 16
+#define __NUM_ACRS 16
+#define __NUM_VXRS 32
+#define __NUM_VXRS_LOW 16
+#define __NUM_VXRS_HIGH 16
#ifndef __s390x__
@@ -59,6 +63,16 @@ typedef struct
_s390_fp_regs fpregs;
} _sigregs;
+typedef struct
+{
+#ifndef __s390x__
+ unsigned long gprs_high[__NUM_GPRS];
+#endif
+ unsigned long long vxrs_low[__NUM_VXRS_LOW];
+ __vector128 vxrs_high[__NUM_VXRS_HIGH];
+ unsigned char __reserved[128];
+} _sigregs_ext;
+
struct sigcontext
{
unsigned long oldmask[_SIGCONTEXT_NSIG_WORDS];
diff --git a/arch/s390/include/uapi/asm/types.h b/arch/s390/include/uapi/asm/types.h
index 038f2b9178a4..3c3951e3415b 100644
--- a/arch/s390/include/uapi/asm/types.h
+++ b/arch/s390/include/uapi/asm/types.h
@@ -17,6 +17,10 @@
typedef unsigned long addr_t;
typedef __signed__ long saddr_t;
+typedef struct {
+ __u32 u[4];
+} __vector128;
+
#endif /* __ASSEMBLY__ */
#endif /* _UAPI_S390_TYPES_H */
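
For orientation, the new __vector128 type is four 32-bit words, i.e. one 128-bit vector register; other hunks in this series reinterpret it as two 64-bit doublewords and store only the rightmost one in the vxrs_low arrays (see the compat_signal.c changes further down). A small illustrative accessor:

#include <linux/types.h>

/* Illustrative: the rightmost (low) 64 bits of a vector register. */
static inline __u64 vector128_low_u64(const __vector128 *v)
{
        return *((const __u64 *)v + 1);
}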
diff --git a/arch/s390/include/uapi/asm/ucontext.h b/arch/s390/include/uapi/asm/ucontext.h
index 3e077b2a4705..64a69aa5dde0 100644
--- a/arch/s390/include/uapi/asm/ucontext.h
+++ b/arch/s390/include/uapi/asm/ucontext.h
@@ -7,10 +7,15 @@
#ifndef _ASM_S390_UCONTEXT_H
#define _ASM_S390_UCONTEXT_H
-#define UC_EXTENDED 0x00000001
-
-#ifndef __s390x__
+#define UC_GPRS_HIGH 1 /* uc_mcontext_ext has valid high gprs */
+#define UC_VXRS 2 /* uc_mcontext_ext has valid vector regs */
+/*
+ * The struct ucontext_extended describes how the registers are stored
+ * on a rt signal frame. Please note that the structure is not fixed,
+ * if new CPU registers are added to the user state the size of the
+ * struct ucontext_extended will increase.
+ */
struct ucontext_extended {
unsigned long uc_flags;
struct ucontext *uc_link;
@@ -19,11 +24,9 @@ struct ucontext_extended {
sigset_t uc_sigmask;
/* Allow for uc_sigmask growth. Glibc uses a 1024-bit sigset_t. */
unsigned char __unused[128 - sizeof(sigset_t)];
- unsigned long uc_gprs_high[16];
+ _sigregs_ext uc_mcontext_ext;
};
-#endif
-
struct ucontext {
unsigned long uc_flags;
struct ucontext *uc_link;
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index a95c4ca99617..204c43a4c245 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -28,7 +28,7 @@ CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
-obj-y := traps.o time.o process.o base.o early.o setup.o vtime.o
+obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
obj-y += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o
obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
@@ -52,11 +52,9 @@ obj-$(CONFIG_COMPAT) += compat_wrapper.o $(compat-obj-y)
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_KPROBES) += kprobes.o
-obj-$(CONFIG_FUNCTION_TRACER) += $(if $(CONFIG_64BIT),mcount64.o,mcount.o)
-obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
-obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
-obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
+obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+obj-$(CONFIG_UPROBES) += uprobes.o
ifdef CONFIG_64BIT
obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o \
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index afe1715a4eb7..ef279a136801 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -9,7 +9,7 @@
#include <linux/kbuild.h>
#include <linux/kvm_host.h>
#include <linux/sched.h>
-#include <asm/cputime.h>
+#include <asm/idle.h>
#include <asm/vdso.h>
#include <asm/pgtable.h>
@@ -62,8 +62,12 @@ int main(void)
DEFINE(__VDSO_XTIME_STAMP, offsetof(struct vdso_data, xtime_tod_stamp));
DEFINE(__VDSO_XTIME_SEC, offsetof(struct vdso_data, xtime_clock_sec));
DEFINE(__VDSO_XTIME_NSEC, offsetof(struct vdso_data, xtime_clock_nsec));
+ DEFINE(__VDSO_XTIME_CRS_SEC, offsetof(struct vdso_data, xtime_coarse_sec));
+ DEFINE(__VDSO_XTIME_CRS_NSEC, offsetof(struct vdso_data, xtime_coarse_nsec));
DEFINE(__VDSO_WTOM_SEC, offsetof(struct vdso_data, wtom_clock_sec));
DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
+ DEFINE(__VDSO_WTOM_CRS_SEC, offsetof(struct vdso_data, wtom_coarse_sec));
+ DEFINE(__VDSO_WTOM_CRS_NSEC, offsetof(struct vdso_data, wtom_coarse_nsec));
DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest));
DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available));
DEFINE(__VDSO_TK_MULT, offsetof(struct vdso_data, tk_mult));
@@ -73,8 +77,11 @@ int main(void)
/* constants used by the vdso */
DEFINE(__CLOCK_REALTIME, CLOCK_REALTIME);
DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC);
+ DEFINE(__CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE);
+ DEFINE(__CLOCK_MONOTONIC_COARSE, CLOCK_MONOTONIC_COARSE);
DEFINE(__CLOCK_THREAD_CPUTIME_ID, CLOCK_THREAD_CPUTIME_ID);
DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
+ DEFINE(__CLOCK_COARSE_RES, LOW_RES_NSEC);
BLANK();
/* idle data offsets */
DEFINE(__CLOCK_IDLE_ENTER, offsetof(struct s390_idle_data, clock_idle_enter));
diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h
index 70d4b7c4beaa..a0a886c04977 100644
--- a/arch/s390/kernel/compat_linux.h
+++ b/arch/s390/kernel/compat_linux.h
@@ -50,6 +50,14 @@ typedef struct
_s390_fp_regs32 fpregs;
} _sigregs32;
+typedef struct
+{
+ __u32 gprs_high[__NUM_GPRS];
+ __u64 vxrs_low[__NUM_VXRS_LOW];
+ __vector128 vxrs_high[__NUM_VXRS_HIGH];
+ __u8 __reserved[128];
+} _sigregs_ext32;
+
#define _SIGCONTEXT_NSIG32 64
#define _SIGCONTEXT_NSIG_BPW32 32
#define __SIGNAL_FRAMESIZE32 96
@@ -72,6 +80,7 @@ struct ucontext32 {
compat_sigset_t uc_sigmask;
/* Allow for uc_sigmask growth. Glibc uses a 1024-bit sigset_t. */
unsigned char __unused[128 - sizeof(compat_sigset_t)];
+ _sigregs_ext32 uc_mcontext_ext;
};
struct stat64_emu31;
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 598b0b42668b..009f5eb11125 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -36,17 +36,16 @@ typedef struct
struct sigcontext32 sc;
_sigregs32 sregs;
int signo;
- __u32 gprs_high[NUM_GPRS];
- __u8 retcode[S390_SYSCALL_SIZE];
+ _sigregs_ext32 sregs_ext;
+ __u16 svc_insn; /* Offset of svc_insn is NOT fixed! */
} sigframe32;
typedef struct
{
__u8 callee_used_stack[__SIGNAL_FRAMESIZE32];
- __u8 retcode[S390_SYSCALL_SIZE];
+ __u16 svc_insn;
compat_siginfo_t info;
struct ucontext32 uc;
- __u32 gprs_high[NUM_GPRS];
} rt_sigframe32;
int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
@@ -151,6 +150,38 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
return err ? -EFAULT : 0;
}
+/* Store registers needed to create the signal frame */
+static void store_sigregs(void)
+{
+ int i;
+
+ save_access_regs(current->thread.acrs);
+ save_fp_ctl(&current->thread.fp_regs.fpc);
+ if (current->thread.vxrs) {
+ save_vx_regs(current->thread.vxrs);
+ for (i = 0; i < __NUM_FPRS; i++)
+ current->thread.fp_regs.fprs[i] =
+ *(freg_t *)(current->thread.vxrs + i);
+ } else
+ save_fp_regs(current->thread.fp_regs.fprs);
+}
+
+/* Load registers after signal return */
+static void load_sigregs(void)
+{
+ int i;
+
+ restore_access_regs(current->thread.acrs);
+ /* restore_fp_ctl is done in restore_sigregs */
+ if (current->thread.vxrs) {
+ for (i = 0; i < __NUM_FPRS; i++)
+ *(freg_t *)(current->thread.vxrs + i) =
+ current->thread.fp_regs.fprs[i];
+ restore_vx_regs(current->thread.vxrs);
+ } else
+ restore_fp_regs(current->thread.fp_regs.fprs);
+}
+
static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
{
_sigregs32 user_sregs;
@@ -163,11 +194,8 @@ static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
(__u32)(regs->psw.mask & PSW_MASK_BA);
for (i = 0; i < NUM_GPRS; i++)
user_sregs.regs.gprs[i] = (__u32) regs->gprs[i];
- save_access_regs(current->thread.acrs);
memcpy(&user_sregs.regs.acrs, current->thread.acrs,
sizeof(user_sregs.regs.acrs));
- save_fp_ctl(&current->thread.fp_regs.fpc);
- save_fp_regs(current->thread.fp_regs.fprs);
memcpy(&user_sregs.fpregs, &current->thread.fp_regs,
sizeof(user_sregs.fpregs));
if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs32)))
@@ -207,37 +235,67 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
regs->gprs[i] = (__u64) user_sregs.regs.gprs[i];
memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
sizeof(current->thread.acrs));
- restore_access_regs(current->thread.acrs);
memcpy(&current->thread.fp_regs, &user_sregs.fpregs,
sizeof(current->thread.fp_regs));
- restore_fp_regs(current->thread.fp_regs.fprs);
clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */
return 0;
}
-static int save_sigregs_gprs_high(struct pt_regs *regs, __u32 __user *uregs)
+static int save_sigregs_ext32(struct pt_regs *regs,
+ _sigregs_ext32 __user *sregs_ext)
{
__u32 gprs_high[NUM_GPRS];
+ __u64 vxrs[__NUM_VXRS_LOW];
int i;
+ /* Save high gprs to signal stack */
for (i = 0; i < NUM_GPRS; i++)
gprs_high[i] = regs->gprs[i] >> 32;
- if (__copy_to_user(uregs, &gprs_high, sizeof(gprs_high)))
+ if (__copy_to_user(&sregs_ext->gprs_high, &gprs_high,
+ sizeof(sregs_ext->gprs_high)))
return -EFAULT;
+
+ /* Save vector registers to signal stack */
+ if (current->thread.vxrs) {
+ for (i = 0; i < __NUM_VXRS_LOW; i++)
+ vxrs[i] = *((__u64 *)(current->thread.vxrs + i) + 1);
+ if (__copy_to_user(&sregs_ext->vxrs_low, vxrs,
+ sizeof(sregs_ext->vxrs_low)) ||
+ __copy_to_user(&sregs_ext->vxrs_high,
+ current->thread.vxrs + __NUM_VXRS_LOW,
+ sizeof(sregs_ext->vxrs_high)))
+ return -EFAULT;
+ }
return 0;
}
-static int restore_sigregs_gprs_high(struct pt_regs *regs, __u32 __user *uregs)
+static int restore_sigregs_ext32(struct pt_regs *regs,
+ _sigregs_ext32 __user *sregs_ext)
{
__u32 gprs_high[NUM_GPRS];
+ __u64 vxrs[__NUM_VXRS_LOW];
int i;
- if (__copy_from_user(&gprs_high, uregs, sizeof(gprs_high)))
+ /* Restore high gprs from signal stack */
+ if (__copy_from_user(&gprs_high, &sregs_ext->gprs_high,
+ sizeof(sregs_ext->gprs_high)))
return -EFAULT;
for (i = 0; i < NUM_GPRS; i++)
*(__u32 *)&regs->gprs[i] = gprs_high[i];
+
+ /* Restore vector registers from signal stack */
+ if (current->thread.vxrs) {
+ if (__copy_from_user(vxrs, &sregs_ext->vxrs_low,
+ sizeof(sregs_ext->vxrs_low)) ||
+ __copy_from_user(current->thread.vxrs + __NUM_VXRS_LOW,
+ &sregs_ext->vxrs_high,
+ sizeof(sregs_ext->vxrs_high)))
+ return -EFAULT;
+ for (i = 0; i < __NUM_VXRS_LOW; i++)
+ *((__u64 *)(current->thread.vxrs + i) + 1) = vxrs[i];
+ }
return 0;
}
@@ -252,8 +310,9 @@ COMPAT_SYSCALL_DEFINE0(sigreturn)
set_current_blocked(&set);
if (restore_sigregs32(regs, &frame->sregs))
goto badframe;
- if (restore_sigregs_gprs_high(regs, frame->gprs_high))
+ if (restore_sigregs_ext32(regs, &frame->sregs_ext))
goto badframe;
+ load_sigregs();
return regs->gprs[2];
badframe:
force_sig(SIGSEGV, current);
@@ -269,12 +328,13 @@ COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
goto badframe;
set_current_blocked(&set);
+ if (compat_restore_altstack(&frame->uc.uc_stack))
+ goto badframe;
if (restore_sigregs32(regs, &frame->uc.uc_mcontext))
goto badframe;
- if (restore_sigregs_gprs_high(regs, frame->gprs_high))
+ if (restore_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext))
goto badframe;
- if (compat_restore_altstack(&frame->uc.uc_stack))
- goto badframe;
+ load_sigregs();
return regs->gprs[2];
badframe:
force_sig(SIGSEGV, current);
@@ -324,37 +384,64 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs)
{
int sig = ksig->sig;
- sigframe32 __user *frame = get_sigframe(&ksig->ka, regs, sizeof(sigframe32));
-
+ sigframe32 __user *frame;
+ struct sigcontext32 sc;
+ unsigned long restorer;
+ size_t frame_size;
+
+ /*
+ * gprs_high are always present for 31-bit compat tasks.
+ * The space for vector registers is only allocated if
+ * the machine supports it.
+ */
+ frame_size = sizeof(*frame) - sizeof(frame->sregs_ext.__reserved);
+ if (!MACHINE_HAS_VX)
+ frame_size -= sizeof(frame->sregs_ext.vxrs_low) +
+ sizeof(frame->sregs_ext.vxrs_high);
+ frame = get_sigframe(&ksig->ka, regs, frame_size);
if (frame == (void __user *) -1UL)
return -EFAULT;
- if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE32))
+ /* Set up backchain. */
+ if (__put_user(regs->gprs[15], (unsigned int __user *) frame))
+ return -EFAULT;
+
+ /* Create struct sigcontext32 on the signal stack */
+ memcpy(&sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE32);
+ sc.sregs = (__u32)(unsigned long __force) &frame->sregs;
+ if (__copy_to_user(&frame->sc, &sc, sizeof(frame->sc)))
return -EFAULT;
+ /* Store registers needed to create the signal frame */
+ store_sigregs();
+
+ /* Create _sigregs32 on the signal stack */
if (save_sigregs32(regs, &frame->sregs))
return -EFAULT;
- if (save_sigregs_gprs_high(regs, frame->gprs_high))
+
+ /* Place signal number on stack to allow backtrace from handler. */
+ if (__put_user(regs->gprs[2], (int __force __user *) &frame->signo))
return -EFAULT;
- if (__put_user((unsigned long) &frame->sregs, &frame->sc.sregs))
+
+ /* Create _sigregs_ext32 on the signal stack */
+ if (save_sigregs_ext32(regs, &frame->sregs_ext))
return -EFAULT;
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
if (ksig->ka.sa.sa_flags & SA_RESTORER) {
- regs->gprs[14] = (__u64 __force) ksig->ka.sa.sa_restorer | PSW32_ADDR_AMODE;
+ restorer = (unsigned long __force)
+ ksig->ka.sa.sa_restorer | PSW32_ADDR_AMODE;
} else {
- regs->gprs[14] = (__u64 __force) frame->retcode | PSW32_ADDR_AMODE;
- if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn,
- (u16 __force __user *)(frame->retcode)))
+ /* Signal frames without vector registers are short! */
+ __u16 __user *svc = (void *) frame + frame_size - 2;
+ if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, svc))
return -EFAULT;
+ restorer = (unsigned long __force) svc | PSW32_ADDR_AMODE;
}
- /* Set up backchain. */
- if (__put_user(regs->gprs[15], (unsigned int __user *) frame))
- return -EFAULT;
-
/* Set up registers for signal handler */
+ regs->gprs[14] = restorer;
regs->gprs[15] = (__force __u64) frame;
/* Force 31 bit amode and default user address space control. */
regs->psw.mask = PSW_MASK_BA |
@@ -375,50 +462,69 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
regs->gprs[6] = task_thread_info(current)->last_break;
}
- /* Place signal number on stack to allow backtrace from handler. */
- if (__put_user(regs->gprs[2], (int __force __user *) &frame->signo))
- return -EFAULT;
return 0;
}
static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs)
{
- int err = 0;
- rt_sigframe32 __user *frame = get_sigframe(&ksig->ka, regs, sizeof(rt_sigframe32));
-
+ rt_sigframe32 __user *frame;
+ unsigned long restorer;
+ size_t frame_size;
+ u32 uc_flags;
+
+ frame_size = sizeof(*frame) -
+ sizeof(frame->uc.uc_mcontext_ext.__reserved);
+ /*
+ * gprs_high are always present for 31-bit compat tasks.
+ * The space for vector registers is only allocated if
+ * the machine supports it.
+ */
+ uc_flags = UC_GPRS_HIGH;
+ if (MACHINE_HAS_VX) {
+ if (current->thread.vxrs)
+ uc_flags |= UC_VXRS;
+ } else
+ frame_size -= sizeof(frame->uc.uc_mcontext_ext.vxrs_low) +
+ sizeof(frame->uc.uc_mcontext_ext.vxrs_high);
+ frame = get_sigframe(&ksig->ka, regs, frame_size);
if (frame == (void __user *) -1UL)
return -EFAULT;
- if (copy_siginfo_to_user32(&frame->info, &ksig->info))
- return -EFAULT;
-
- /* Create the ucontext. */
- err |= __put_user(UC_EXTENDED, &frame->uc.uc_flags);
- err |= __put_user(0, &frame->uc.uc_link);
- err |= __compat_save_altstack(&frame->uc.uc_stack, regs->gprs[15]);
- err |= save_sigregs32(regs, &frame->uc.uc_mcontext);
- err |= save_sigregs_gprs_high(regs, frame->gprs_high);
- err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
- if (err)
+ /* Set up backchain. */
+ if (__put_user(regs->gprs[15], (unsigned int __force __user *) frame))
return -EFAULT;
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
if (ksig->ka.sa.sa_flags & SA_RESTORER) {
- regs->gprs[14] = (__u64 __force) ksig->ka.sa.sa_restorer | PSW32_ADDR_AMODE;
+ restorer = (unsigned long __force)
+ ksig->ka.sa.sa_restorer | PSW32_ADDR_AMODE;
} else {
- regs->gprs[14] = (__u64 __force) frame->retcode | PSW32_ADDR_AMODE;
- if (__put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn,
- (u16 __force __user *)(frame->retcode)))
+ __u16 __user *svc = &frame->svc_insn;
+ if (__put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, svc))
return -EFAULT;
+ restorer = (unsigned long __force) svc | PSW32_ADDR_AMODE;
}
- /* Set up backchain. */
- if (__put_user(regs->gprs[15], (unsigned int __force __user *) frame))
+ /* Create siginfo on the signal stack */
+ if (copy_siginfo_to_user32(&frame->info, &ksig->info))
+ return -EFAULT;
+
+ /* Store registers needed to create the signal frame */
+ store_sigregs();
+
+ /* Create ucontext on the signal stack. */
+ if (__put_user(uc_flags, &frame->uc.uc_flags) ||
+ __put_user(0, &frame->uc.uc_link) ||
+ __compat_save_altstack(&frame->uc.uc_stack, regs->gprs[15]) ||
+ save_sigregs32(regs, &frame->uc.uc_mcontext) ||
+ __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)) ||
+ save_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext))
return -EFAULT;
/* Set up registers for signal handler */
+ regs->gprs[14] = restorer;
regs->gprs[15] = (__force __u64) frame;
/* Force 31 bit amode and default user address space control. */
regs->psw.mask = PSW_MASK_BA |
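
The split done by store_sigregs()/save_sigregs_ext32() above relies on the register file layout in which floating-point register n overlays the leftmost 64 bits of vector register n; only the right halves of V0-V15 (vxrs_low) and the full V16-V31 (vxrs_high) need extra room in the frame. A small stand-alone sketch of that split and recombination, with vec128 as a hypothetical stand-in for __vector128:

#include <stdint.h>

struct vec128 { uint64_t hi, lo; };  /* hypothetical stand-in for __vector128 */

/* Per register 0-15: the high doubleword is the classic FPR,
 * the low doubleword is what lands in sregs_ext->vxrs_low[]. */
static void split_vr(const struct vec128 *vr, uint64_t *fpr, uint64_t *vxr_low)
{
        *fpr = vr->hi;
        *vxr_low = vr->lo;
}

/* load_sigregs()/restore_sigregs_ext32() recombine them the same way. */
static void join_vr(struct vec128 *vr, uint64_t fpr, uint64_t vxr_low)
{
        vr->hi = fpr;
        vr->lo = vxr_low;
}

int main(void)
{
        struct vec128 v = { 0x4010000000000000ULL, 0xdeadbeefULL };
        uint64_t fpr, low;

        split_vr(&v, &fpr, &low);
        join_vr(&v, fpr, low);
        return 0;
}
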
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index a3b9150e6802..9f73c8059022 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -46,9 +46,9 @@ struct dump_save_areas dump_save_areas;
/*
* Allocate and add a save area for a CPU
*/
-struct save_area *dump_save_area_create(int cpu)
+struct save_area_ext *dump_save_area_create(int cpu)
{
- struct save_area **save_areas, *save_area;
+ struct save_area_ext **save_areas, *save_area;
save_area = kmalloc(sizeof(*save_area), GFP_KERNEL);
if (!save_area)
@@ -386,9 +386,45 @@ static void *nt_s390_prefix(void *ptr, struct save_area *sa)
}
/*
+ * Initialize vxrs high note (full 128-bit VX registers 16-31)
+ */
+static void *nt_s390_vx_high(void *ptr, __vector128 *vx_regs)
+{
+ return nt_init(ptr, NT_S390_VXRS_HIGH, &vx_regs[16],
+ 16 * sizeof(__vector128), KEXEC_CORE_NOTE_NAME);
+}
+
+/*
+ * Initialize vxrs low note (lower halves of VX registers 0-15)
+ */
+static void *nt_s390_vx_low(void *ptr, __vector128 *vx_regs)
+{
+ Elf64_Nhdr *note;
+ u64 len;
+ int i;
+
+ note = (Elf64_Nhdr *)ptr;
+ note->n_namesz = strlen(KEXEC_CORE_NOTE_NAME) + 1;
+ note->n_descsz = 16 * 8;
+ note->n_type = NT_S390_VXRS_LOW;
+ len = sizeof(Elf64_Nhdr);
+
+ memcpy(ptr + len, KEXEC_CORE_NOTE_NAME, note->n_namesz);
+ len = roundup(len + note->n_namesz, 4);
+
+ ptr += len;
+ /* Copy lower halves of SIMD registers 0-15 */
+ for (i = 0; i < 16; i++) {
+ memcpy(ptr, &vx_regs[i], 8);
+ ptr += 8;
+ }
+ return ptr;
+}
+
+/*
* Fill ELF notes for one CPU with save area registers
*/
-void *fill_cpu_elf_notes(void *ptr, struct save_area *sa)
+void *fill_cpu_elf_notes(void *ptr, struct save_area *sa, __vector128 *vx_regs)
{
ptr = nt_prstatus(ptr, sa);
ptr = nt_fpregset(ptr, sa);
@@ -397,6 +433,10 @@ void *fill_cpu_elf_notes(void *ptr, struct save_area *sa)
ptr = nt_s390_tod_preg(ptr, sa);
ptr = nt_s390_ctrs(ptr, sa);
ptr = nt_s390_prefix(ptr, sa);
+ if (MACHINE_HAS_VX && vx_regs) {
+ ptr = nt_s390_vx_low(ptr, vx_regs);
+ ptr = nt_s390_vx_high(ptr, vx_regs);
+ }
return ptr;
}
@@ -484,7 +524,7 @@ static int get_cpu_cnt(void)
int i, cpus = 0;
for (i = 0; i < dump_save_areas.count; i++) {
- if (dump_save_areas.areas[i]->pref_reg == 0)
+ if (dump_save_areas.areas[i]->sa.pref_reg == 0)
continue;
cpus++;
}
@@ -530,17 +570,17 @@ static void loads_init(Elf64_Phdr *phdr, u64 loads_offset)
*/
static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
{
- struct save_area *sa;
+ struct save_area_ext *sa_ext;
void *ptr_start = ptr;
int i;
ptr = nt_prpsinfo(ptr);
for (i = 0; i < dump_save_areas.count; i++) {
- sa = dump_save_areas.areas[i];
- if (sa->pref_reg == 0)
+ sa_ext = dump_save_areas.areas[i];
+ if (sa_ext->sa.pref_reg == 0)
continue;
- ptr = fill_cpu_elf_notes(ptr, sa);
+ ptr = fill_cpu_elf_notes(ptr, &sa_ext->sa, sa_ext->vx_regs);
}
ptr = nt_vmcoreinfo(ptr);
memset(phdr, 0, sizeof(*phdr));
@@ -581,7 +621,7 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
mem_chunk_cnt = get_mem_chunk_cnt();
- alloc_size = 0x1000 + get_cpu_cnt() * 0x300 +
+ alloc_size = 0x1000 + get_cpu_cnt() * 0x4a0 +
mem_chunk_cnt * sizeof(Elf64_Phdr);
hdr = kzalloc_panic(alloc_size);
/* Init elf header */
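
As a rough cross-check of the per-CPU allocation bump above (0x300 to 0x4a0 bytes), the hand-built NT_S390_VXRS_LOW note comes to 148 bytes if the note name is assumed to be "CORE": a 12-byte Elf64_Nhdr plus the 5-byte name, rounded up to 20, followed by 16 * 8 bytes of register halves; the NT_S390_VXRS_HIGH note adds another 256-byte payload via nt_init(). A sketch of the arithmetic (ROUNDUP stands in for the kernel's roundup()):

#include <stdio.h>
#include <string.h>

#define ROUNDUP(x, a)  (((x) + (a) - 1) / (a) * (a))

int main(void)
{
        size_t nhdr   = 12;                        /* sizeof(Elf64_Nhdr) */
        size_t namesz = strlen("CORE") + 1;        /* 5 */
        size_t head   = ROUNDUP(nhdr + namesz, 4); /* 20 */
        size_t desc   = 16 * 8;                    /* 16 low doublewords */

        printf("vxrs_low note: %zu bytes\n", head + desc);  /* 148 */
        return 0;
}
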
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 993efe6a887c..f3762937dd82 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -60,6 +60,11 @@ enum {
A_28, /* Access reg. starting at position 28 */
C_8, /* Control reg. starting at position 8 */
C_12, /* Control reg. starting at position 12 */
+ V_8, /* Vector reg. starting at position 8, extension bit at 36 */
+ V_12, /* Vector reg. starting at position 12, extension bit at 37 */
+ V_16, /* Vector reg. starting at position 16, extension bit at 38 */
+ V_32, /* Vector reg. starting at position 32, extension bit at 39 */
+ W_12, /* Vector reg. at bit 12, extension at bit 37, used as index */
B_16, /* Base register starting at position 16 */
B_32, /* Base register starting at position 32 */
X_12, /* Index register starting at position 12 */
@@ -82,6 +87,8 @@ enum {
U8_24, /* 8 bit unsigned value starting at 24 */
U8_32, /* 8 bit unsigned value starting at 32 */
I8_8, /* 8 bit signed value starting at 8 */
+ I8_16, /* 8 bit signed value starting at 16 */
+ I8_24, /* 8 bit signed value starting at 24 */
I8_32, /* 8 bit signed value starting at 32 */
J12_12, /* PC relative offset at 12 */
I16_16, /* 16 bit signed value starting at 16 */
@@ -96,6 +103,9 @@ enum {
U32_16, /* 32 bit unsigned value starting at 16 */
M_16, /* 4 bit optional mask starting at 16 */
M_20, /* 4 bit optional mask starting at 20 */
+ M_24, /* 4 bit optional mask starting at 24 */
+ M_28, /* 4 bit optional mask starting at 28 */
+ M_32, /* 4 bit optional mask starting at 32 */
RO_28, /* optional GPR starting at position 28 */
};
@@ -130,7 +140,7 @@ enum {
INSTR_RSY_RDRM,
INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, INSTR_RS_RRRD,
INSTR_RS_RURD,
- INSTR_RXE_FRRD, INSTR_RXE_RRRD,
+ INSTR_RXE_FRRD, INSTR_RXE_RRRD, INSTR_RXE_RRRDM,
INSTR_RXF_FRRDF,
INSTR_RXY_FRRD, INSTR_RXY_RRRD, INSTR_RXY_URRD,
INSTR_RX_FRRD, INSTR_RX_RRRD, INSTR_RX_URRD,
@@ -143,6 +153,17 @@ enum {
INSTR_SS_L0RDRD, INSTR_SS_LIRDRD, INSTR_SS_LLRDRD, INSTR_SS_RRRDRD,
INSTR_SS_RRRDRD2, INSTR_SS_RRRDRD3,
INSTR_S_00, INSTR_S_RD,
+ INSTR_VRI_V0IM, INSTR_VRI_V0I0, INSTR_VRI_V0IIM, INSTR_VRI_VVIM,
+ INSTR_VRI_VVV0IM, INSTR_VRI_VVV0I0, INSTR_VRI_VVIMM,
+ INSTR_VRR_VV00MMM, INSTR_VRR_VV000MM, INSTR_VRR_VV0000M,
+ INSTR_VRR_VV00000, INSTR_VRR_VVV0M0M, INSTR_VRR_VV00M0M,
+ INSTR_VRR_VVV000M, INSTR_VRR_VVV000V, INSTR_VRR_VVV0000,
+ INSTR_VRR_VVV0MMM, INSTR_VRR_VVV00MM, INSTR_VRR_VVVMM0V,
+ INSTR_VRR_VVVM0MV, INSTR_VRR_VVVM00V, INSTR_VRR_VRR0000,
+ INSTR_VRS_VVRDM, INSTR_VRS_VVRD0, INSTR_VRS_VRRDM, INSTR_VRS_VRRD0,
+ INSTR_VRS_RVRDM,
+ INSTR_VRV_VVRDM, INSTR_VRV_VWRDM,
+ INSTR_VRX_VRRDM, INSTR_VRX_VRRD0,
};
static const struct s390_operand operands[] =
@@ -168,6 +189,11 @@ static const struct s390_operand operands[] =
[A_28] = { 4, 28, OPERAND_AR },
[C_8] = { 4, 8, OPERAND_CR },
[C_12] = { 4, 12, OPERAND_CR },
+ [V_8] = { 4, 8, OPERAND_VR },
+ [V_12] = { 4, 12, OPERAND_VR },
+ [V_16] = { 4, 16, OPERAND_VR },
+ [V_32] = { 4, 32, OPERAND_VR },
+ [W_12] = { 4, 12, OPERAND_INDEX | OPERAND_VR },
[B_16] = { 4, 16, OPERAND_BASE | OPERAND_GPR },
[B_32] = { 4, 32, OPERAND_BASE | OPERAND_GPR },
[X_12] = { 4, 12, OPERAND_INDEX | OPERAND_GPR },
@@ -190,6 +216,11 @@ static const struct s390_operand operands[] =
[U8_24] = { 8, 24, 0 },
[U8_32] = { 8, 32, 0 },
[J12_12] = { 12, 12, OPERAND_PCREL },
+ [I8_8] = { 8, 8, OPERAND_SIGNED },
+ [I8_16] = { 8, 16, OPERAND_SIGNED },
+ [I8_24] = { 8, 24, OPERAND_SIGNED },
+ [I8_32] = { 8, 32, OPERAND_SIGNED },
+ [I16_32] = { 16, 32, OPERAND_SIGNED },
[I16_16] = { 16, 16, OPERAND_SIGNED },
[U16_16] = { 16, 16, 0 },
[U16_32] = { 16, 32, 0 },
@@ -202,6 +233,9 @@ static const struct s390_operand operands[] =
[U32_16] = { 32, 16, 0 },
[M_16] = { 4, 16, 0 },
[M_20] = { 4, 20, 0 },
+ [M_24] = { 4, 24, 0 },
+ [M_28] = { 4, 28, 0 },
+ [M_32] = { 4, 32, 0 },
[RO_28] = { 4, 28, OPERAND_GPR }
};
@@ -283,6 +317,7 @@ static const unsigned char formats[][7] = {
[INSTR_RS_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 },
[INSTR_RXE_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 },
[INSTR_RXE_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 },
+ [INSTR_RXE_RRRDM] = { 0xff, R_8,D_20,X_12,B_16,M_32,0 },
[INSTR_RXF_FRRDF] = { 0xff, F_32,F_8,D_20,X_12,B_16,0 },
[INSTR_RXY_FRRD] = { 0xff, F_8,D20_20,X_12,B_16,0,0 },
[INSTR_RXY_RRRD] = { 0xff, R_8,D20_20,X_12,B_16,0,0 },
@@ -307,6 +342,37 @@ static const unsigned char formats[][7] = {
[INSTR_SS_RRRDRD] = { 0xff, D_20,R_8,B_16,D_36,B_32,R_12 },
[INSTR_S_00] = { 0xff, 0,0,0,0,0,0 },
[INSTR_S_RD] = { 0xff, D_20,B_16,0,0,0,0 },
+ [INSTR_VRI_V0IM] = { 0xff, V_8,I16_16,M_32,0,0,0 },
+ [INSTR_VRI_V0I0] = { 0xff, V_8,I16_16,0,0,0,0 },
+ [INSTR_VRI_V0IIM] = { 0xff, V_8,I8_16,I8_24,M_32,0,0 },
+ [INSTR_VRI_VVIM] = { 0xff, V_8,I16_16,V_12,M_32,0,0 },
+ [INSTR_VRI_VVV0IM]= { 0xff, V_8,V_12,V_16,I8_24,M_32,0 },
+ [INSTR_VRI_VVV0I0]= { 0xff, V_8,V_12,V_16,I8_24,0,0 },
+ [INSTR_VRI_VVIMM] = { 0xff, V_8,V_12,I16_16,M_32,M_28,0 },
+ [INSTR_VRR_VV00MMM]={ 0xff, V_8,V_12,M_32,M_28,M_24,0 },
+ [INSTR_VRR_VV000MM]={ 0xff, V_8,V_12,M_32,M_28,0,0 },
+ [INSTR_VRR_VV0000M]={ 0xff, V_8,V_12,M_32,0,0,0 },
+ [INSTR_VRR_VV00000]={ 0xff, V_8,V_12,0,0,0,0 },
+ [INSTR_VRR_VVV0M0M]={ 0xff, V_8,V_12,V_16,M_32,M_24,0 },
+ [INSTR_VRR_VV00M0M]={ 0xff, V_8,V_12,M_32,M_24,0,0 },
+ [INSTR_VRR_VVV000M]={ 0xff, V_8,V_12,V_16,M_32,0,0 },
+ [INSTR_VRR_VVV000V]={ 0xff, V_8,V_12,V_16,V_32,0,0 },
+ [INSTR_VRR_VVV0000]={ 0xff, V_8,V_12,V_16,0,0,0 },
+ [INSTR_VRR_VVV0MMM]={ 0xff, V_8,V_12,V_16,M_32,M_28,M_24 },
+ [INSTR_VRR_VVV00MM]={ 0xff, V_8,V_12,V_16,M_32,M_28,0 },
+ [INSTR_VRR_VVVMM0V]={ 0xff, V_8,V_12,V_16,V_32,M_20,M_24 },
+ [INSTR_VRR_VVVM0MV]={ 0xff, V_8,V_12,V_16,V_32,M_28,M_20 },
+ [INSTR_VRR_VVVM00V]={ 0xff, V_8,V_12,V_16,V_32,M_20,0 },
+ [INSTR_VRR_VRR0000]={ 0xff, V_8,R_12,R_16,0,0,0 },
+ [INSTR_VRS_VVRDM] = { 0xff, V_8,V_12,D_20,B_16,M_32,0 },
+ [INSTR_VRS_VVRD0] = { 0xff, V_8,V_12,D_20,B_16,0,0 },
+ [INSTR_VRS_VRRDM] = { 0xff, V_8,R_12,D_20,B_16,M_32,0 },
+ [INSTR_VRS_VRRD0] = { 0xff, V_8,R_12,D_20,B_16,0,0 },
+ [INSTR_VRS_RVRDM] = { 0xff, R_8,V_12,D_20,B_16,M_32,0 },
+ [INSTR_VRV_VVRDM] = { 0xff, V_8,V_12,D_20,B_16,M_32,0 },
+ [INSTR_VRV_VWRDM] = { 0xff, V_8,D_20,W_12,B_16,M_32,0 },
+ [INSTR_VRX_VRRDM] = { 0xff, V_8,D_20,X_12,B_16,M_32,0 },
+ [INSTR_VRX_VRRD0] = { 0xff, V_8,D_20,X_12,B_16,0,0 },
};
enum {
@@ -381,6 +447,11 @@ enum {
LONG_INSN_MPCIFC,
LONG_INSN_STPCIFC,
LONG_INSN_PCISTB,
+ LONG_INSN_VPOPCT,
+ LONG_INSN_VERLLV,
+ LONG_INSN_VESRAV,
+ LONG_INSN_VESRLV,
+ LONG_INSN_VSBCBI
};
static char *long_insn_name[] = {
@@ -455,6 +526,11 @@ static char *long_insn_name[] = {
[LONG_INSN_MPCIFC] = "mpcifc",
[LONG_INSN_STPCIFC] = "stpcifc",
[LONG_INSN_PCISTB] = "pcistb",
+ [LONG_INSN_VPOPCT] = "vpopct",
+ [LONG_INSN_VERLLV] = "verllv",
+ [LONG_INSN_VESRAV] = "vesrav",
+ [LONG_INSN_VESRLV] = "vesrlv",
+ [LONG_INSN_VSBCBI] = "vsbcbi",
};
static struct s390_insn opcode[] = {
@@ -1369,6 +1445,150 @@ static struct s390_insn opcode_e5[] = {
{ "", 0, INSTR_INVALID }
};
+static struct s390_insn opcode_e7[] = {
+#ifdef CONFIG_64BIT
+ { "lcbb", 0x27, INSTR_RXE_RRRDM },
+ { "vgef", 0x13, INSTR_VRV_VVRDM },
+ { "vgeg", 0x12, INSTR_VRV_VVRDM },
+ { "vgbm", 0x44, INSTR_VRI_V0I0 },
+ { "vgm", 0x46, INSTR_VRI_V0IIM },
+ { "vl", 0x06, INSTR_VRX_VRRD0 },
+ { "vlr", 0x56, INSTR_VRR_VV00000 },
+ { "vlrp", 0x05, INSTR_VRX_VRRDM },
+ { "vleb", 0x00, INSTR_VRX_VRRDM },
+ { "vleh", 0x01, INSTR_VRX_VRRDM },
+ { "vlef", 0x03, INSTR_VRX_VRRDM },
+ { "vleg", 0x02, INSTR_VRX_VRRDM },
+ { "vleib", 0x40, INSTR_VRI_V0IM },
+ { "vleih", 0x41, INSTR_VRI_V0IM },
+ { "vleif", 0x43, INSTR_VRI_V0IM },
+ { "vleig", 0x42, INSTR_VRI_V0IM },
+ { "vlgv", 0x21, INSTR_VRS_RVRDM },
+ { "vllez", 0x04, INSTR_VRX_VRRDM },
+ { "vlm", 0x36, INSTR_VRS_VVRD0 },
+ { "vlbb", 0x07, INSTR_VRX_VRRDM },
+ { "vlvg", 0x22, INSTR_VRS_VRRDM },
+ { "vlvgp", 0x62, INSTR_VRR_VRR0000 },
+ { "vll", 0x37, INSTR_VRS_VRRD0 },
+ { "vmrh", 0x61, INSTR_VRR_VVV000M },
+ { "vmrl", 0x60, INSTR_VRR_VVV000M },
+ { "vpk", 0x94, INSTR_VRR_VVV000M },
+ { "vpks", 0x97, INSTR_VRR_VVV0M0M },
+ { "vpkls", 0x95, INSTR_VRR_VVV0M0M },
+ { "vperm", 0x8c, INSTR_VRR_VVV000V },
+ { "vpdi", 0x84, INSTR_VRR_VVV000M },
+ { "vrep", 0x4d, INSTR_VRI_VVIM },
+ { "vrepi", 0x45, INSTR_VRI_V0IM },
+ { "vscef", 0x1b, INSTR_VRV_VWRDM },
+ { "vsceg", 0x1a, INSTR_VRV_VWRDM },
+ { "vsel", 0x8d, INSTR_VRR_VVV000V },
+ { "vseg", 0x5f, INSTR_VRR_VV0000M },
+ { "vst", 0x0e, INSTR_VRX_VRRD0 },
+ { "vsteb", 0x08, INSTR_VRX_VRRDM },
+ { "vsteh", 0x09, INSTR_VRX_VRRDM },
+ { "vstef", 0x0b, INSTR_VRX_VRRDM },
+ { "vsteg", 0x0a, INSTR_VRX_VRRDM },
+ { "vstm", 0x3e, INSTR_VRS_VVRD0 },
+ { "vstl", 0x3f, INSTR_VRS_VRRD0 },
+ { "vuph", 0xd7, INSTR_VRR_VV0000M },
+ { "vuplh", 0xd5, INSTR_VRR_VV0000M },
+ { "vupl", 0xd6, INSTR_VRR_VV0000M },
+ { "vupll", 0xd4, INSTR_VRR_VV0000M },
+ { "va", 0xf3, INSTR_VRR_VVV000M },
+ { "vacc", 0xf1, INSTR_VRR_VVV000M },
+ { "vac", 0xbb, INSTR_VRR_VVVM00V },
+ { "vaccc", 0xb9, INSTR_VRR_VVVM00V },
+ { "vn", 0x68, INSTR_VRR_VVV0000 },
+ { "vnc", 0x69, INSTR_VRR_VVV0000 },
+ { "vavg", 0xf2, INSTR_VRR_VVV000M },
+ { "vavgl", 0xf0, INSTR_VRR_VVV000M },
+ { "vcksm", 0x66, INSTR_VRR_VVV0000 },
+ { "vec", 0xdb, INSTR_VRR_VV0000M },
+ { "vecl", 0xd9, INSTR_VRR_VV0000M },
+ { "vceq", 0xf8, INSTR_VRR_VVV0M0M },
+ { "vch", 0xfb, INSTR_VRR_VVV0M0M },
+ { "vchl", 0xf9, INSTR_VRR_VVV0M0M },
+ { "vclz", 0x53, INSTR_VRR_VV0000M },
+ { "vctz", 0x52, INSTR_VRR_VV0000M },
+ { "vx", 0x6d, INSTR_VRR_VVV0000 },
+ { "vgfm", 0xb4, INSTR_VRR_VVV000M },
+ { "vgfma", 0xbc, INSTR_VRR_VVVM00V },
+ { "vlc", 0xde, INSTR_VRR_VV0000M },
+ { "vlp", 0xdf, INSTR_VRR_VV0000M },
+ { "vmx", 0xff, INSTR_VRR_VVV000M },
+ { "vmxl", 0xfd, INSTR_VRR_VVV000M },
+ { "vmn", 0xfe, INSTR_VRR_VVV000M },
+ { "vmnl", 0xfc, INSTR_VRR_VVV000M },
+ { "vmal", 0xaa, INSTR_VRR_VVVM00V },
+ { "vmae", 0xae, INSTR_VRR_VVVM00V },
+ { "vmale", 0xac, INSTR_VRR_VVVM00V },
+ { "vmah", 0xab, INSTR_VRR_VVVM00V },
+ { "vmalh", 0xa9, INSTR_VRR_VVVM00V },
+ { "vmao", 0xaf, INSTR_VRR_VVVM00V },
+ { "vmalo", 0xad, INSTR_VRR_VVVM00V },
+ { "vmh", 0xa3, INSTR_VRR_VVV000M },
+ { "vmlh", 0xa1, INSTR_VRR_VVV000M },
+ { "vml", 0xa2, INSTR_VRR_VVV000M },
+ { "vme", 0xa6, INSTR_VRR_VVV000M },
+ { "vmle", 0xa4, INSTR_VRR_VVV000M },
+ { "vmo", 0xa7, INSTR_VRR_VVV000M },
+ { "vmlo", 0xa5, INSTR_VRR_VVV000M },
+ { "vno", 0x6b, INSTR_VRR_VVV0000 },
+ { "vo", 0x6a, INSTR_VRR_VVV0000 },
+ { { 0, LONG_INSN_VPOPCT }, 0x50, INSTR_VRR_VV0000M },
+ { { 0, LONG_INSN_VERLLV }, 0x73, INSTR_VRR_VVV000M },
+ { "verll", 0x33, INSTR_VRS_VVRDM },
+ { "verim", 0x72, INSTR_VRI_VVV0IM },
+ { "veslv", 0x70, INSTR_VRR_VVV000M },
+ { "vesl", 0x30, INSTR_VRS_VVRDM },
+ { { 0, LONG_INSN_VESRAV }, 0x7a, INSTR_VRR_VVV000M },
+ { "vesra", 0x3a, INSTR_VRS_VVRDM },
+ { { 0, LONG_INSN_VESRLV }, 0x78, INSTR_VRR_VVV000M },
+ { "vesrl", 0x38, INSTR_VRS_VVRDM },
+ { "vsl", 0x74, INSTR_VRR_VVV0000 },
+ { "vslb", 0x75, INSTR_VRR_VVV0000 },
+ { "vsldb", 0x77, INSTR_VRI_VVV0I0 },
+ { "vsra", 0x7e, INSTR_VRR_VVV0000 },
+ { "vsrab", 0x7f, INSTR_VRR_VVV0000 },
+ { "vsrl", 0x7c, INSTR_VRR_VVV0000 },
+ { "vsrlb", 0x7d, INSTR_VRR_VVV0000 },
+ { "vs", 0xf7, INSTR_VRR_VVV000M },
+ { "vscb", 0xf5, INSTR_VRR_VVV000M },
+ { "vsb", 0xbf, INSTR_VRR_VVVM00V },
+ { { 0, LONG_INSN_VSBCBI }, 0xbd, INSTR_VRR_VVVM00V },
+ { "vsumg", 0x65, INSTR_VRR_VVV000M },
+ { "vsumq", 0x67, INSTR_VRR_VVV000M },
+ { "vsum", 0x64, INSTR_VRR_VVV000M },
+ { "vtm", 0xd8, INSTR_VRR_VV00000 },
+ { "vfae", 0x82, INSTR_VRR_VVV0M0M },
+ { "vfee", 0x80, INSTR_VRR_VVV0M0M },
+ { "vfene", 0x81, INSTR_VRR_VVV0M0M },
+ { "vistr", 0x5c, INSTR_VRR_VV00M0M },
+ { "vstrc", 0x8a, INSTR_VRR_VVVMM0V },
+ { "vfa", 0xe3, INSTR_VRR_VVV00MM },
+ { "wfc", 0xcb, INSTR_VRR_VV000MM },
+ { "wfk", 0xca, INSTR_VRR_VV000MM },
+ { "vfce", 0xe8, INSTR_VRR_VVV0MMM },
+ { "vfch", 0xeb, INSTR_VRR_VVV0MMM },
+ { "vfche", 0xea, INSTR_VRR_VVV0MMM },
+ { "vcdg", 0xc3, INSTR_VRR_VV00MMM },
+ { "vcdlg", 0xc1, INSTR_VRR_VV00MMM },
+ { "vcgd", 0xc2, INSTR_VRR_VV00MMM },
+ { "vclgd", 0xc0, INSTR_VRR_VV00MMM },
+ { "vfd", 0xe5, INSTR_VRR_VVV00MM },
+ { "vfi", 0xc7, INSTR_VRR_VV00MMM },
+ { "vlde", 0xc4, INSTR_VRR_VV000MM },
+ { "vled", 0xc5, INSTR_VRR_VV00MMM },
+ { "vfm", 0xe7, INSTR_VRR_VVV00MM },
+ { "vfma", 0x8f, INSTR_VRR_VVVM0MV },
+ { "vfms", 0x8e, INSTR_VRR_VVVM0MV },
+ { "vfpso", 0xcc, INSTR_VRR_VV00MMM },
+ { "vfsq", 0xce, INSTR_VRR_VV000MM },
+ { "vfs", 0xe2, INSTR_VRR_VVV00MM },
+ { "vftci", 0x4a, INSTR_VRI_VVIMM },
+#endif
+};
+
static struct s390_insn opcode_eb[] = {
#ifdef CONFIG_64BIT
{ "lmg", 0x04, INSTR_RSY_RRRD },
@@ -1552,16 +1772,17 @@ static struct s390_insn opcode_ed[] = {
static unsigned int extract_operand(unsigned char *code,
const struct s390_operand *operand)
{
+ unsigned char *cp;
unsigned int val;
int bits;
/* Extract fragments of the operand byte for byte. */
- code += operand->shift / 8;
+ cp = code + operand->shift / 8;
bits = (operand->shift & 7) + operand->bits;
val = 0;
do {
val <<= 8;
- val |= (unsigned int) *code++;
+ val |= (unsigned int) *cp++;
bits -= 8;
} while (bits > 0);
val >>= -bits;
@@ -1571,6 +1792,18 @@ static unsigned int extract_operand(unsigned char *code,
if (operand->bits == 20 && operand->shift == 20)
val = (val & 0xff) << 12 | (val & 0xfff00) >> 8;
+ /* Check for register extensions bits for vector registers. */
+ if (operand->flags & OPERAND_VR) {
+ if (operand->shift == 8)
+ val |= (code[4] & 8) << 1;
+ else if (operand->shift == 12)
+ val |= (code[4] & 4) << 2;
+ else if (operand->shift == 16)
+ val |= (code[4] & 2) << 3;
+ else if (operand->shift == 32)
+ val |= (code[4] & 1) << 4;
+ }
+
/* Sign extend value if the operand is signed or pc relative. */
if ((operand->flags & (OPERAND_SIGNED | OPERAND_PCREL)) &&
(val & (1U << (operand->bits - 1))))
@@ -1639,6 +1872,10 @@ struct s390_insn *find_insn(unsigned char *code)
case 0xe5:
table = opcode_e5;
break;
+ case 0xe7:
+ table = opcode_e7;
+ opfrag = code[5];
+ break;
case 0xeb:
table = opcode_eb;
opfrag = code[5];
@@ -1734,6 +1971,8 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
ptr += sprintf(ptr, "%%a%i", value);
else if (operand->flags & OPERAND_CR)
ptr += sprintf(ptr, "%%c%i", value);
+ else if (operand->flags & OPERAND_VR)
+ ptr += sprintf(ptr, "%%v%i", value);
else if (operand->flags & OPERAND_PCREL)
ptr += sprintf(ptr, "%lx", (signed int) value
+ addr);
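
The extension-bit handling added to extract_operand() implements the vector-instruction RXB field: byte 4 of the instruction carries one bit per register position, widening each 4-bit register field to a 5-bit register number (0-31). A stand-alone sketch that mirrors the same shift/mask pairs:

#include <stdio.h>

static unsigned int vr_number(unsigned int field4, unsigned char rxb, int shift)
{
        unsigned int val = field4;          /* 4-bit field from the opcode */

        if (shift == 8)
                val |= (rxb & 8) << 1;      /* RXB bit 0 -> +16 */
        else if (shift == 12)
                val |= (rxb & 4) << 2;
        else if (shift == 16)
                val |= (rxb & 2) << 3;
        else if (shift == 32)
                val |= (rxb & 1) << 4;
        return val;
}

int main(void)
{
        /* Field value 3 at position 8 with RXB bit 0 set decodes as %v19. */
        printf("%%v%u\n", vr_number(3, 0x08, 8));
        return 0;
}
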
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 0dff972a169c..cef2879edff3 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -390,10 +390,10 @@ static __init void detect_machine_facilities(void)
S390_lowcore.machine_flags |= MACHINE_FLAG_LPP;
if (test_facility(50) && test_facility(73))
S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
- if (test_facility(66))
- S390_lowcore.machine_flags |= MACHINE_FLAG_RRBM;
if (test_facility(51))
S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
+ if (test_facility(129))
+ S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
#endif
}
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 1aad48398d06..0554b9771c9f 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -4,7 +4,7 @@
#include <linux/types.h>
#include <linux/signal.h>
#include <asm/ptrace.h>
-#include <asm/cputime.h>
+#include <asm/idle.h>
extern void *restart_stack;
extern unsigned long suspend_zero_pages;
@@ -21,6 +21,8 @@ void psw_idle(struct s390_idle_data *, unsigned long);
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs);
asmlinkage void do_syscall_trace_exit(struct pt_regs *regs);
+int alloc_vector_registers(struct task_struct *tsk);
+
void do_protection_exception(struct pt_regs *regs);
void do_dat_exception(struct pt_regs *regs);
@@ -43,8 +45,10 @@ void special_op_exception(struct pt_regs *regs);
void specification_exception(struct pt_regs *regs);
void transaction_exception(struct pt_regs *regs);
void translation_exception(struct pt_regs *regs);
+void vector_exception(struct pt_regs *regs);
void do_per_trap(struct pt_regs *regs);
+void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str);
void syscall_trace(struct pt_regs *regs, int entryexit);
void kernel_stack_overflow(struct pt_regs * regs);
void do_signal(struct pt_regs *regs);
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index f2e674c702e1..7b2e03afd017 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -42,7 +42,8 @@ STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE = 1 << STACK_SHIFT
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
-_TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED)
+_TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
+ _TIF_UPROBE)
_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
_TIF_SYSCALL_TRACEPOINT)
_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE)
@@ -265,6 +266,10 @@ sysc_work:
jo sysc_mcck_pending
tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
jo sysc_reschedule
+#ifdef CONFIG_UPROBES
+ tm __TI_flags+7(%r12),_TIF_UPROBE
+ jo sysc_uprobe_notify
+#endif
tm __PT_FLAGS+7(%r11),_PIF_PER_TRAP
jo sysc_singlestep
tm __TI_flags+7(%r12),_TIF_SIGPENDING
@@ -323,6 +328,16 @@ sysc_notify_resume:
jg do_notify_resume
#
+# _TIF_UPROBE is set, call uprobe_notify_resume
+#
+#ifdef CONFIG_UPROBES
+sysc_uprobe_notify:
+ lgr %r2,%r11 # pass pointer to pt_regs
+ larl %r14,sysc_return
+ jg uprobe_notify_resume
+#endif
+
+#
# _PIF_PER_TRAP is set, call do_per_trap
#
sysc_singlestep:
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 54d6493c4a56..51d14fe5eb9a 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -1,7 +1,7 @@
/*
* Dynamic function tracer architecture backend.
*
- * Copyright IBM Corp. 2009
+ * Copyright IBM Corp. 2009,2014
*
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
* Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -17,100 +17,76 @@
#include <asm/asm-offsets.h>
#include "entry.h"
-#ifdef CONFIG_DYNAMIC_FTRACE
-
+void mcount_replace_code(void);
void ftrace_disable_code(void);
void ftrace_enable_insn(void);
-#ifdef CONFIG_64BIT
/*
- * The 64-bit mcount code looks like this:
+ * The mcount code looks like this:
* stg %r14,8(%r15) # offset 0
- * > larl %r1,<&counter> # offset 6
- * > brasl %r14,_mcount # offset 12
+ * larl %r1,<&counter> # offset 6
+ * brasl %r14,_mcount # offset 12
* lg %r14,8(%r15) # offset 18
- * Total length is 24 bytes. The middle two instructions of the mcount
- * block get overwritten by ftrace_make_nop / ftrace_make_call.
- * The 64-bit enabled ftrace code block looks like this:
- * stg %r14,8(%r15) # offset 0
+ * Total length is 24 bytes. The complete mcount block initially gets replaced
+ * by ftrace_make_nop. Subsequent calls to ftrace_make_call / ftrace_make_nop
+ * only patch the jg/lg instruction within the block.
+ * Note: we do not patch the first instruction to an unconditional branch,
+ * since that would break kprobes/jprobes. It is easier to leave the larl
+ * instruction in and only modify the second instruction.
+ * The enabled ftrace code block looks like this:
+ * larl %r0,.+24 # offset 0
* > lg %r1,__LC_FTRACE_FUNC # offset 6
- * > lgr %r0,%r0 # offset 12
- * > basr %r14,%r1 # offset 16
- * lg %r14,8(%15) # offset 18
- * The return points of the mcount/ftrace function have the same offset 18.
- * The 64-bit disable ftrace code block looks like this:
- * stg %r14,8(%r15) # offset 0
+ * br %r1 # offset 12
+ * brcl 0,0 # offset 14
+ * brc 0,0 # offset 20
+ * The ftrace function gets called with a non-standard C function call ABI
+ * where r0 contains the return address. It is also expected that the called
+ * function only clobbers r0 and r1, but restores r2-r15.
+ * The return point of the ftrace function has offset 24, so execution
+ * continues behind the mcount block.
+ * The disabled ftrace code block looks like this:
+ * larl %r0,.+24 # offset 0
* > jg .+18 # offset 6
- * > lgr %r0,%r0 # offset 12
- * > basr %r14,%r1 # offset 16
- * lg %r14,8(%15) # offset 18
+ * br %r1 # offset 12
+ * brcl 0,0 # offset 14
+ * brc 0,0 # offset 20
* The jg instruction branches to offset 24 to skip as many instructions
* as possible.
*/
asm(
" .align 4\n"
+ "mcount_replace_code:\n"
+ " larl %r0,0f\n"
"ftrace_disable_code:\n"
" jg 0f\n"
- " lgr %r0,%r0\n"
- " basr %r14,%r1\n"
+ " br %r1\n"
+ " brcl 0,0\n"
+ " brc 0,0\n"
"0:\n"
" .align 4\n"
"ftrace_enable_insn:\n"
" lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n");
+#define MCOUNT_BLOCK_SIZE 24
+#define MCOUNT_INSN_OFFSET 6
#define FTRACE_INSN_SIZE 6
-#else /* CONFIG_64BIT */
-/*
- * The 31-bit mcount code looks like this:
- * st %r14,4(%r15) # offset 0
- * > bras %r1,0f # offset 4
- * > .long _mcount # offset 8
- * > .long <&counter> # offset 12
- * > 0: l %r14,0(%r1) # offset 16
- * > l %r1,4(%r1) # offset 20
- * basr %r14,%r14 # offset 24
- * l %r14,4(%r15) # offset 26
- * Total length is 30 bytes. The twenty bytes starting from offset 4
- * to offset 24 get overwritten by ftrace_make_nop / ftrace_make_call.
- * The 31-bit enabled ftrace code block looks like this:
- * st %r14,4(%r15) # offset 0
- * > l %r14,__LC_FTRACE_FUNC # offset 4
- * > j 0f # offset 8
- * > .fill 12,1,0x07 # offset 12
- * 0: basr %r14,%r14 # offset 24
- * l %r14,4(%r14) # offset 26
- * The return points of the mcount/ftrace function have the same offset 26.
- * The 31-bit disabled ftrace code block looks like this:
- * st %r14,4(%r15) # offset 0
- * > j .+26 # offset 4
- * > j 0f # offset 8
- * > .fill 12,1,0x07 # offset 12
- * 0: basr %r14,%r14 # offset 24
- * l %r14,4(%r14) # offset 26
- * The j instruction branches to offset 30 to skip as many instructions
- * as possible.
- */
-asm(
- " .align 4\n"
- "ftrace_disable_code:\n"
- " j 1f\n"
- " j 0f\n"
- " .fill 12,1,0x07\n"
- "0: basr %r14,%r14\n"
- "1:\n"
- " .align 4\n"
- "ftrace_enable_insn:\n"
- " l %r14,"__stringify(__LC_FTRACE_FUNC)"\n");
-
-#define FTRACE_INSN_SIZE 4
-
-#endif /* CONFIG_64BIT */
-
+int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+ unsigned long addr)
+{
+ return 0;
+}
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long addr)
{
+ /* Initial replacement of the whole mcount block */
+ if (addr == MCOUNT_ADDR) {
+ if (probe_kernel_write((void *) rec->ip - MCOUNT_INSN_OFFSET,
+ mcount_replace_code,
+ MCOUNT_BLOCK_SIZE))
+ return -EPERM;
+ return 0;
+ }
if (probe_kernel_write((void *) rec->ip, ftrace_disable_code,
MCOUNT_INSN_SIZE))
return -EPERM;
@@ -135,8 +111,6 @@ int __init ftrace_dyn_arch_init(void)
return 0;
}
-#endif /* CONFIG_DYNAMIC_FTRACE */
-
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
* Hook the return address and push it in the stack of return addresses
@@ -162,31 +136,26 @@ out:
return parent;
}
-#ifdef CONFIG_DYNAMIC_FTRACE
/*
* Patch the kernel code at ftrace_graph_caller location. The instruction
- * there is branch relative and save to prepare_ftrace_return. To disable
- * the call to prepare_ftrace_return we patch the bras offset to point
- * directly after the instructions. To enable the call we calculate
- * the original offset to prepare_ftrace_return and put it back.
+ * there is branch relative on condition. To enable the ftrace graph code
+ * block, we simply patch the mask field of the instruction to zero and
+ * turn the instruction into a nop.
+ * To disable the ftrace graph code the mask field will be patched to
+ * all ones, which turns the instruction into an unconditional branch.
*/
int ftrace_enable_ftrace_graph_caller(void)
{
- unsigned short offset;
+ u8 op = 0x04; /* set mask field to zero */
- offset = ((void *) prepare_ftrace_return -
- (void *) ftrace_graph_caller) / 2;
- return probe_kernel_write((void *) ftrace_graph_caller + 2,
- &offset, sizeof(offset));
+ return probe_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
}
int ftrace_disable_ftrace_graph_caller(void)
{
- static unsigned short offset = 0x0002;
+ u8 op = 0xf4; /* set mask field to all ones */
- return probe_kernel_write((void *) ftrace_graph_caller + 2,
- &offset, sizeof(offset));
+ return probe_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
}
-#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
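
The graph-caller patching above comes down to rewriting a single byte: "j label" assembles to brc 15,label (0xa7 0xf4 <offset>), and clearing the mask nibble yields brc 0,label, a nop that keeps the branch offset intact. A small illustration of the byte-level effect (the offset used here is arbitrary):

#include <stdio.h>

int main(void)
{
        unsigned char insn[4] = { 0xa7, 0xf4, 0x00, 0x20 }; /* brc 15,.+64 */

        insn[1] = 0x04;             /* enable graph tracing: brc 0 (nop) */
        printf("enabled:  %02x %02x\n", insn[0], insn[1]);

        insn[1] = 0xf4;             /* disable: back to unconditional jump */
        printf("disabled: %02x %02x\n", insn[0], insn[1]);
        return 0;
}
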
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index e88d35d74950..d62eee11f0b5 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -398,7 +398,7 @@ ENTRY(startup_kdump)
xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST
#ifndef CONFIG_MARCH_G5
# check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10}
- .insn s,0xb2b10000,__LC_STFL_FAC_LIST # store facility list
+ .insn s,0xb2b10000,0 # store facilities @ __LC_STFL_FAC_LIST
tm __LC_STFL_FAC_LIST,0x01 # stfle available ?
jz 0f
la %r0,1
diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c
new file mode 100644
index 000000000000..c846aee7372f
--- /dev/null
+++ b/arch/s390/kernel/idle.c
@@ -0,0 +1,124 @@
+/*
+ * Idle functions for s390.
+ *
+ * Copyright IBM Corp. 2014
+ *
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/kernel_stat.h>
+#include <linux/kprobes.h>
+#include <linux/notifier.h>
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <asm/cputime.h>
+#include <asm/nmi.h>
+#include <asm/smp.h>
+#include "entry.h"
+
+static DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
+
+void __kprobes enabled_wait(void)
+{
+ struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
+ unsigned long long idle_time;
+ unsigned long psw_mask;
+
+ trace_hardirqs_on();
+
+ /* Wait for external, I/O or machine check interrupt. */
+ psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT | PSW_MASK_DAT |
+ PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
+ clear_cpu_flag(CIF_NOHZ_DELAY);
+
+ /* Call the assembler magic in entry.S */
+ psw_idle(idle, psw_mask);
+
+ /* Account time spent with enabled wait psw loaded as idle time. */
+ idle->sequence++;
+ smp_wmb();
+ idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
+ idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
+ idle->idle_time += idle_time;
+ idle->idle_count++;
+ account_idle_time(idle_time);
+ smp_wmb();
+ idle->sequence++;
+}
+
+static ssize_t show_idle_count(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
+ unsigned long long idle_count;
+ unsigned int sequence;
+
+ do {
+ sequence = ACCESS_ONCE(idle->sequence);
+ idle_count = ACCESS_ONCE(idle->idle_count);
+ if (ACCESS_ONCE(idle->clock_idle_enter))
+ idle_count++;
+ } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
+ return sprintf(buf, "%llu\n", idle_count);
+}
+DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
+
+static ssize_t show_idle_time(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
+ unsigned long long now, idle_time, idle_enter, idle_exit;
+ unsigned int sequence;
+
+ do {
+ now = get_tod_clock();
+ sequence = ACCESS_ONCE(idle->sequence);
+ idle_time = ACCESS_ONCE(idle->idle_time);
+ idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
+ idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
+ } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
+ idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
+ return sprintf(buf, "%llu\n", idle_time >> 12);
+}
+DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
+
+cputime64_t arch_cpu_idle_time(int cpu)
+{
+ struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
+ unsigned long long now, idle_enter, idle_exit;
+ unsigned int sequence;
+
+ do {
+ now = get_tod_clock();
+ sequence = ACCESS_ONCE(idle->sequence);
+ idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
+ idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
+ } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
+ return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0;
+}
+
+void arch_cpu_idle_enter(void)
+{
+ local_mcck_disable();
+}
+
+void arch_cpu_idle(void)
+{
+ if (!test_cpu_flag(CIF_MCCK_PENDING))
+ /* Halt the cpu and keep track of cpu time accounting. */
+ enabled_wait();
+ local_irq_enable();
+}
+
+void arch_cpu_idle_exit(void)
+{
+ local_mcck_enable();
+ if (test_cpu_flag(CIF_MCCK_PENDING))
+ s390_handle_mcck();
+}
+
+void arch_cpu_idle_dead(void)
+{
+ cpu_die();
+}
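
The idle_count/idle_time readers in the new idle.c use a hand-rolled sequence counter: the writer bumps ->sequence to an odd value, updates the fields, then bumps it again, and readers retry on an odd or changed value. A minimal userspace-style sketch of the reader side, where volatile merely stands in for ACCESS_ONCE and the barriers the real code needs:

#include <stdint.h>

struct idle_data {
        unsigned int sequence;
        uint64_t idle_time;
};

static uint64_t read_idle_time(const volatile struct idle_data *idle)
{
        unsigned int seq;
        uint64_t val;

        do {
                seq = idle->sequence;
                val = idle->idle_time;
        } while ((seq & 1) || idle->sequence != seq);
        return val;
}

int main(void)
{
        static struct idle_data idle = { 0, 0 };

        return (int) read_idle_time(&idle);
}
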
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 8eb82443cfbd..1b8a38ab7861 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -70,6 +70,7 @@ static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = {
{.irq = IRQEXT_CMS, .name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"},
{.irq = IRQEXT_CMC, .name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"},
{.irq = IRQEXT_CMR, .name = "CMR", .desc = "[EXT] CPU-Measurement: RI"},
+ {.irq = IRQEXT_FTP, .name = "FTP", .desc = "[EXT] HMC FTP Service"},
{.irq = IRQIO_CIO, .name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"},
{.irq = IRQIO_QAI, .name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt"},
{.irq = IRQIO_DAS, .name = "DAS", .desc = "[I/O] DASD"},
@@ -258,7 +259,7 @@ static irqreturn_t do_ext_interrupt(int irq, void *dummy)
ext_code = *(struct ext_code *) &regs->int_code;
if (ext_code.code != EXT_IRQ_CLK_COMP)
- __get_cpu_var(s390_idle).nohz_delay = 1;
+ set_cpu_flag(CIF_NOHZ_DELAY);
index = ext_hash(ext_code.code);
rcu_read_lock();
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index bc71a7b95af5..27ae5433fe4d 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -58,161 +58,13 @@ struct kprobe_insn_cache kprobe_dmainsn_slots = {
.insn_size = MAX_INSN_SIZE,
};
-static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn)
-{
- if (!is_known_insn((unsigned char *)insn))
- return -EINVAL;
- switch (insn[0] >> 8) {
- case 0x0c: /* bassm */
- case 0x0b: /* bsm */
- case 0x83: /* diag */
- case 0x44: /* ex */
- case 0xac: /* stnsm */
- case 0xad: /* stosm */
- return -EINVAL;
- case 0xc6:
- switch (insn[0] & 0x0f) {
- case 0x00: /* exrl */
- return -EINVAL;
- }
- }
- switch (insn[0]) {
- case 0x0101: /* pr */
- case 0xb25a: /* bsa */
- case 0xb240: /* bakr */
- case 0xb258: /* bsg */
- case 0xb218: /* pc */
- case 0xb228: /* pt */
- case 0xb98d: /* epsw */
- return -EINVAL;
- }
- return 0;
-}
-
-static int __kprobes get_fixup_type(kprobe_opcode_t *insn)
-{
- /* default fixup method */
- int fixup = FIXUP_PSW_NORMAL;
-
- switch (insn[0] >> 8) {
- case 0x05: /* balr */
- case 0x0d: /* basr */
- fixup = FIXUP_RETURN_REGISTER;
- /* if r2 = 0, no branch will be taken */
- if ((insn[0] & 0x0f) == 0)
- fixup |= FIXUP_BRANCH_NOT_TAKEN;
- break;
- case 0x06: /* bctr */
- case 0x07: /* bcr */
- fixup = FIXUP_BRANCH_NOT_TAKEN;
- break;
- case 0x45: /* bal */
- case 0x4d: /* bas */
- fixup = FIXUP_RETURN_REGISTER;
- break;
- case 0x47: /* bc */
- case 0x46: /* bct */
- case 0x86: /* bxh */
- case 0x87: /* bxle */
- fixup = FIXUP_BRANCH_NOT_TAKEN;
- break;
- case 0x82: /* lpsw */
- fixup = FIXUP_NOT_REQUIRED;
- break;
- case 0xb2: /* lpswe */
- if ((insn[0] & 0xff) == 0xb2)
- fixup = FIXUP_NOT_REQUIRED;
- break;
- case 0xa7: /* bras */
- if ((insn[0] & 0x0f) == 0x05)
- fixup |= FIXUP_RETURN_REGISTER;
- break;
- case 0xc0:
- if ((insn[0] & 0x0f) == 0x05) /* brasl */
- fixup |= FIXUP_RETURN_REGISTER;
- break;
- case 0xeb:
- switch (insn[2] & 0xff) {
- case 0x44: /* bxhg */
- case 0x45: /* bxleg */
- fixup = FIXUP_BRANCH_NOT_TAKEN;
- break;
- }
- break;
- case 0xe3: /* bctg */
- if ((insn[2] & 0xff) == 0x46)
- fixup = FIXUP_BRANCH_NOT_TAKEN;
- break;
- case 0xec:
- switch (insn[2] & 0xff) {
- case 0xe5: /* clgrb */
- case 0xe6: /* cgrb */
- case 0xf6: /* crb */
- case 0xf7: /* clrb */
- case 0xfc: /* cgib */
- case 0xfd: /* cglib */
- case 0xfe: /* cib */
- case 0xff: /* clib */
- fixup = FIXUP_BRANCH_NOT_TAKEN;
- break;
- }
- break;
- }
- return fixup;
-}
-
-static int __kprobes is_insn_relative_long(kprobe_opcode_t *insn)
-{
- /* Check if we have a RIL-b or RIL-c format instruction which
- * we need to modify in order to avoid instruction emulation. */
- switch (insn[0] >> 8) {
- case 0xc0:
- if ((insn[0] & 0x0f) == 0x00) /* larl */
- return true;
- break;
- case 0xc4:
- switch (insn[0] & 0x0f) {
- case 0x02: /* llhrl */
- case 0x04: /* lghrl */
- case 0x05: /* lhrl */
- case 0x06: /* llghrl */
- case 0x07: /* sthrl */
- case 0x08: /* lgrl */
- case 0x0b: /* stgrl */
- case 0x0c: /* lgfrl */
- case 0x0d: /* lrl */
- case 0x0e: /* llgfrl */
- case 0x0f: /* strl */
- return true;
- }
- break;
- case 0xc6:
- switch (insn[0] & 0x0f) {
- case 0x02: /* pfdrl */
- case 0x04: /* cghrl */
- case 0x05: /* chrl */
- case 0x06: /* clghrl */
- case 0x07: /* clhrl */
- case 0x08: /* cgrl */
- case 0x0a: /* clgrl */
- case 0x0c: /* cgfrl */
- case 0x0d: /* crl */
- case 0x0e: /* clgfrl */
- case 0x0f: /* clrl */
- return true;
- }
- break;
- }
- return false;
-}
-
static void __kprobes copy_instruction(struct kprobe *p)
{
s64 disp, new_disp;
u64 addr, new_addr;
memcpy(p->ainsn.insn, p->addr, insn_length(p->opcode >> 8));
- if (!is_insn_relative_long(p->ainsn.insn))
+ if (!probe_is_insn_relative_long(p->ainsn.insn))
return;
/*
* For pc-relative instructions in RIL-b or RIL-c format patch the
@@ -276,7 +128,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
if ((unsigned long) p->addr & 0x01)
return -EINVAL;
/* Make sure the probe isn't going on a difficult instruction */
- if (is_prohibited_opcode(p->addr))
+ if (probe_is_prohibited_opcode(p->addr))
return -EINVAL;
if (s390_get_insn_slot(p))
return -ENOMEM;
@@ -605,7 +457,7 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
unsigned long ip = regs->psw.addr & PSW_ADDR_INSN;
- int fixup = get_fixup_type(p->ainsn.insn);
+ int fixup = probe_get_fixup_type(p->ainsn.insn);
if (fixup & FIXUP_PSW_NORMAL)
ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;
@@ -789,11 +641,6 @@ void __kprobes jprobe_return(void)
asm volatile(".word 0x0002");
}
-static void __used __kprobes jprobe_return_end(void)
-{
- asm volatile("bcr 0,0");
-}
-
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 719e27b2cf22..4685337fa7c6 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -25,6 +25,7 @@
#include <asm/elf.h>
#include <asm/asm-offsets.h>
#include <asm/os_info.h>
+#include <asm/switch_to.h>
typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long);
@@ -43,7 +44,7 @@ static void add_elf_notes(int cpu)
memcpy((void *) (4608UL + sa->pref_reg), sa, sizeof(*sa));
ptr = (u64 *) per_cpu_ptr(crash_notes, cpu);
- ptr = fill_cpu_elf_notes(ptr, sa);
+ ptr = fill_cpu_elf_notes(ptr, sa, NULL);
memset(ptr, 0, sizeof(struct elf_note));
}
@@ -53,8 +54,11 @@ static void add_elf_notes(int cpu)
static void setup_regs(void)
{
unsigned long sa = S390_lowcore.prefixreg_save_area + SAVE_AREA_BASE;
+ struct _lowcore *lc;
int cpu, this_cpu;
+ /* Get lowcore pointer from store status of this CPU (absolute zero) */
+ lc = (struct _lowcore *)(unsigned long)S390_lowcore.prefixreg_save_area;
this_cpu = smp_find_processor_id(stap());
add_elf_notes(this_cpu);
for_each_online_cpu(cpu) {
@@ -64,6 +68,8 @@ static void setup_regs(void)
continue;
add_elf_notes(cpu);
}
+ if (MACHINE_HAS_VX)
+ save_vx_regs_safe((void *) lc->vector_save_area_addr);
/* Copy dump CPU store status info to absolute zero */
memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area));
}
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index 433c6dbfa442..4300ea374826 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -8,62 +8,72 @@
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/ftrace.h>
+#include <asm/ptrace.h>
.section .kprobes.text, "ax"
ENTRY(ftrace_stub)
br %r14
+#define STACK_FRAME_SIZE (STACK_FRAME_OVERHEAD + __PT_SIZE)
+#define STACK_PTREGS (STACK_FRAME_OVERHEAD)
+#define STACK_PTREGS_GPRS (STACK_PTREGS + __PT_GPRS)
+#define STACK_PTREGS_PSW (STACK_PTREGS + __PT_PSW)
+
ENTRY(_mcount)
-#ifdef CONFIG_DYNAMIC_FTRACE
br %r14
ENTRY(ftrace_caller)
+ .globl ftrace_regs_caller
+ .set ftrace_regs_caller,ftrace_caller
+ lgr %r1,%r15
+ aghi %r15,-STACK_FRAME_SIZE
+ stg %r1,__SF_BACKCHAIN(%r15)
+ stg %r1,(STACK_PTREGS_GPRS+15*8)(%r15)
+ stg %r0,(STACK_PTREGS_PSW+8)(%r15)
+ stmg %r2,%r14,(STACK_PTREGS_GPRS+2*8)(%r15)
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+ aghik %r2,%r0,-MCOUNT_INSN_SIZE
+ lgrl %r4,function_trace_op
+ lgrl %r1,ftrace_trace_function
+#else
+ lgr %r2,%r0
+ aghi %r2,-MCOUNT_INSN_SIZE
+ larl %r4,function_trace_op
+ lg %r4,0(%r4)
+ larl %r1,ftrace_trace_function
+ lg %r1,0(%r1)
#endif
- stm %r2,%r5,16(%r15)
- bras %r1,1f
-0: .long ftrace_trace_function
-1: st %r14,56(%r15)
- lr %r0,%r15
- ahi %r15,-96
- l %r3,100(%r15)
- la %r2,0(%r14)
- st %r0,__SF_BACKCHAIN(%r15)
- la %r3,0(%r3)
- ahi %r2,-MCOUNT_INSN_SIZE
- l %r14,0b-0b(%r1)
- l %r14,0(%r14)
- basr %r14,%r14
+ lgr %r3,%r14
+ la %r5,STACK_PTREGS(%r15)
+ basr %r14,%r1
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- l %r2,100(%r15)
- l %r3,152(%r15)
+# The j instruction gets runtime patched to a nop instruction.
+# See ftrace_enable_ftrace_graph_caller.
ENTRY(ftrace_graph_caller)
-# The bras instruction gets runtime patched to call prepare_ftrace_return.
-# See ftrace_enable_ftrace_graph_caller. The patched instruction is:
-# bras %r14,prepare_ftrace_return
- bras %r14,0f
-0: st %r2,100(%r15)
+ j ftrace_graph_caller_end
+ lg %r2,(STACK_PTREGS_GPRS+14*8)(%r15)
+ lg %r3,(STACK_PTREGS_PSW+8)(%r15)
+ brasl %r14,prepare_ftrace_return
+ stg %r2,(STACK_PTREGS_GPRS+14*8)(%r15)
+ftrace_graph_caller_end:
+ .globl ftrace_graph_caller_end
#endif
- ahi %r15,96
- l %r14,56(%r15)
- lm %r2,%r5,16(%r15)
- br %r14
+ lg %r1,(STACK_PTREGS_PSW+8)(%r15)
+ lmg %r2,%r15,(STACK_PTREGS_GPRS+2*8)(%r15)
+ br %r1
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(return_to_handler)
- stm %r2,%r5,16(%r15)
- st %r14,56(%r15)
- lr %r0,%r15
- ahi %r15,-96
- st %r0,__SF_BACKCHAIN(%r15)
- bras %r1,0f
- .long ftrace_return_to_handler
-0: l %r2,0b-0b(%r1)
- basr %r14,%r2
- lr %r14,%r2
- ahi %r15,96
- lm %r2,%r5,16(%r15)
+ stmg %r2,%r5,32(%r15)
+ lgr %r1,%r15
+ aghi %r15,-STACK_FRAME_OVERHEAD
+ stg %r1,__SF_BACKCHAIN(%r15)
+ brasl %r14,ftrace_return_to_handler
+ aghi %r15,STACK_FRAME_OVERHEAD
+ lgr %r14,%r2
+ lmg %r2,%r5,32(%r15)
br %r14
#endif
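
For reference, the register setup in the rewritten ftrace_caller matches the generic ftrace callback shape: the traced ip in %r2, the caller in %r3, the ftrace_ops pointer in %r4 and the freshly built pt_regs in %r5. A hypothetical module-side consumer, sketched on the assumption that a regs-saving callback is wanted (the names below are illustrative, not part of this patch):

#include <linux/ftrace.h>
#include <linux/ptrace.h>

/* Hypothetical callback; the arguments correspond to what ftrace_caller
 * loads into %r2-%r5 before branching to the trace function. */
static void my_trace_func(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct pt_regs *regs)
{
        /* ip: traced location (%r0 - MCOUNT_INSN_SIZE), parent_ip: %r14,
         * regs: the pt_regs area built at STACK_PTREGS(%r15). */
}

static struct ftrace_ops my_trace_ops = {
        .func  = my_trace_func,
        .flags = FTRACE_OPS_FL_SAVE_REGS,
};

/* register_ftrace_function(&my_trace_ops) would hook it up. */
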
diff --git a/arch/s390/kernel/mcount64.S b/arch/s390/kernel/mcount64.S
deleted file mode 100644
index c67a8bf0fd9a..000000000000
--- a/arch/s390/kernel/mcount64.S
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright IBM Corp. 2008, 2009
- *
- * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
- *
- */
-
-#include <linux/linkage.h>
-#include <asm/asm-offsets.h>
-#include <asm/ftrace.h>
-
- .section .kprobes.text, "ax"
-
-ENTRY(ftrace_stub)
- br %r14
-
-ENTRY(_mcount)
-#ifdef CONFIG_DYNAMIC_FTRACE
- br %r14
-
-ENTRY(ftrace_caller)
-#endif
- stmg %r2,%r5,32(%r15)
- stg %r14,112(%r15)
- lgr %r1,%r15
- aghi %r15,-160
- stg %r1,__SF_BACKCHAIN(%r15)
- lgr %r2,%r14
- lg %r3,168(%r15)
- aghi %r2,-MCOUNT_INSN_SIZE
- larl %r14,ftrace_trace_function
- lg %r14,0(%r14)
- basr %r14,%r14
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- lg %r2,168(%r15)
- lg %r3,272(%r15)
-ENTRY(ftrace_graph_caller)
-# The bras instruction gets runtime patched to call prepare_ftrace_return.
-# See ftrace_enable_ftrace_graph_caller. The patched instruction is:
-# bras %r14,prepare_ftrace_return
- bras %r14,0f
-0: stg %r2,168(%r15)
-#endif
- aghi %r15,160
- lmg %r2,%r5,32(%r15)
- lg %r14,112(%r15)
- br %r14
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-
-ENTRY(return_to_handler)
- stmg %r2,%r5,32(%r15)
- lgr %r1,%r15
- aghi %r15,-160
- stg %r1,__SF_BACKCHAIN(%r15)
- brasl %r14,ftrace_return_to_handler
- aghi %r15,160
- lgr %r14,%r2
- lmg %r2,%r5,32(%r15)
- br %r14
-
-#endif
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 210e1285f75a..db96b418160a 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -20,6 +20,7 @@
#include <asm/cputime.h>
#include <asm/nmi.h>
#include <asm/crw.h>
+#include <asm/switch_to.h>
struct mcck_struct {
int kill_task;
@@ -163,6 +164,21 @@ static int notrace s390_revalidate_registers(struct mci *mci)
" ld 15,120(%0)\n"
: : "a" (fpt_save_area));
}
+
+#ifdef CONFIG_64BIT
+ /* Revalidate vector registers */
+ if (MACHINE_HAS_VX && current->thread.vxrs) {
+ if (!mci->vr) {
+ /*
+ * Vector registers can't be restored and therefore
+ * the process needs to be terminated.
+ */
+ kill_task = 1;
+ }
+ restore_vx_regs((__vector128 *)
+ S390_lowcore.vector_save_area_addr);
+ }
+#endif
/* Revalidate access registers */
asm volatile(
" lam 0,15,0(%0)"
diff --git a/arch/s390/kernel/pgm_check.S b/arch/s390/kernel/pgm_check.S
index 813ec7260878..f6f8886399f6 100644
--- a/arch/s390/kernel/pgm_check.S
+++ b/arch/s390/kernel/pgm_check.S
@@ -49,7 +49,7 @@ PGM_CHECK_DEFAULT /* 17 */
PGM_CHECK_64BIT(transaction_exception) /* 18 */
PGM_CHECK_DEFAULT /* 19 */
PGM_CHECK_DEFAULT /* 1a */
-PGM_CHECK_DEFAULT /* 1b */
+PGM_CHECK_64BIT(vector_exception) /* 1b */
PGM_CHECK(space_switch_exception) /* 1c */
PGM_CHECK(hfp_sqrt_exception) /* 1d */
PGM_CHECK_DEFAULT /* 1e */
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 93b9ca42e5c0..ed84cc224899 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -61,30 +61,6 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
return sf->gprs[8];
}
-void arch_cpu_idle(void)
-{
- local_mcck_disable();
- if (test_cpu_flag(CIF_MCCK_PENDING)) {
- local_mcck_enable();
- local_irq_enable();
- return;
- }
- /* Halt the cpu and keep track of cpu time accounting. */
- vtime_stop_cpu();
- local_irq_enable();
-}
-
-void arch_cpu_idle_exit(void)
-{
- if (test_cpu_flag(CIF_MCCK_PENDING))
- s390_handle_mcck();
-}
-
-void arch_cpu_idle_dead(void)
-{
- cpu_die();
-}
-
extern void __kprobes kernel_thread_starter(void);
/*
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 24612029f450..edefead3b43a 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -23,7 +23,6 @@ static DEFINE_PER_CPU(struct cpuid, cpu_id);
*/
void cpu_init(void)
{
- struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
struct cpuid *id = &__get_cpu_var(cpu_id);
get_cpu_id(id);
@@ -31,7 +30,6 @@ void cpu_init(void)
current->active_mm = &init_mm;
BUG_ON(current->mm);
enter_lazy_tlb(&init_mm, current);
- memset(idle, 0, sizeof(*idle));
}
/*
@@ -41,7 +39,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
{
static const char *hwcap_str[] = {
"esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
- "edat", "etf3eh", "highgprs", "te"
+ "edat", "etf3eh", "highgprs", "te", "vx"
};
unsigned long n = (unsigned long) v - 1;
int i;
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 5dc7ad9e2fbf..f537e937a988 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -38,15 +38,6 @@
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
-enum s390_regset {
- REGSET_GENERAL,
- REGSET_FP,
- REGSET_LAST_BREAK,
- REGSET_TDB,
- REGSET_SYSTEM_CALL,
- REGSET_GENERAL_EXTENDED,
-};
-
void update_cr_regs(struct task_struct *task)
{
struct pt_regs *regs = task_pt_regs(task);
@@ -55,27 +46,39 @@ void update_cr_regs(struct task_struct *task)
#ifdef CONFIG_64BIT
/* Take care of the enable/disable of transactional execution. */
- if (MACHINE_HAS_TE) {
+ if (MACHINE_HAS_TE || MACHINE_HAS_VX) {
unsigned long cr, cr_new;
__ctl_store(cr, 0, 0);
- /* Set or clear transaction execution TXC bit 8. */
- cr_new = cr | (1UL << 55);
- if (task->thread.per_flags & PER_FLAG_NO_TE)
- cr_new &= ~(1UL << 55);
+ cr_new = cr;
+ if (MACHINE_HAS_TE) {
+ /* Set or clear transaction execution TXC bit 8. */
+ cr_new |= (1UL << 55);
+ if (task->thread.per_flags & PER_FLAG_NO_TE)
+ cr_new &= ~(1UL << 55);
+ }
+ if (MACHINE_HAS_VX) {
+ /* Enable/disable of vector extension */
+ cr_new &= ~(1UL << 17);
+ if (task->thread.vxrs)
+ cr_new |= (1UL << 17);
+ }
if (cr_new != cr)
__ctl_load(cr_new, 0, 0);
- /* Set or clear transaction execution TDC bits 62 and 63. */
- __ctl_store(cr, 2, 2);
- cr_new = cr & ~3UL;
- if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
- if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
- cr_new |= 1UL;
- else
- cr_new |= 2UL;
+ if (MACHINE_HAS_TE) {
+ /* Set/clear transaction execution TDC bits 62/63. */
+ __ctl_store(cr, 2, 2);
+ cr_new = cr & ~3UL;
+ if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
+ if (task->thread.per_flags &
+ PER_FLAG_TE_ABORT_RAND_TEND)
+ cr_new |= 1UL;
+ else
+ cr_new |= 2UL;
+ }
+ if (cr_new != cr)
+ __ctl_load(cr_new, 2, 2);
}
- if (cr_new != cr)
- __ctl_load(cr_new, 2, 2);
}
#endif
/* Copy user specified PER registers */
@@ -84,7 +87,8 @@ void update_cr_regs(struct task_struct *task)
new.end = thread->per_user.end;
/* merge TIF_SINGLE_STEP into user specified PER registers. */
- if (test_tsk_thread_flag(task, TIF_SINGLE_STEP)) {
+ if (test_tsk_thread_flag(task, TIF_SINGLE_STEP) ||
+ test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) {
if (test_tsk_thread_flag(task, TIF_BLOCK_STEP))
new.control |= PER_EVENT_BRANCH;
else
@@ -93,6 +97,8 @@ void update_cr_regs(struct task_struct *task)
new.control |= PER_CONTROL_SUSPENSION;
new.control |= PER_EVENT_TRANSACTION_END;
#endif
+ if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
+ new.control |= PER_EVENT_IFETCH;
new.start = 0;
new.end = PSW_ADDR_INSN;
}
@@ -803,7 +809,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
long ret = 0;
/* Do the secure computing check first. */
- if (secure_computing(regs->gprs[2])) {
+ if (secure_computing()) {
/* seccomp failures shouldn't expose any additional code. */
ret = -1;
goto out;
@@ -923,7 +929,15 @@ static int s390_fpregs_get(struct task_struct *target,
save_fp_ctl(&target->thread.fp_regs.fpc);
save_fp_regs(target->thread.fp_regs.fprs);
}
+#ifdef CONFIG_64BIT
+ else if (target->thread.vxrs) {
+ int i;
+ for (i = 0; i < __NUM_VXRS_LOW; i++)
+ target->thread.fp_regs.fprs[i] =
+ *(freg_t *)(target->thread.vxrs + i);
+ }
+#endif
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.fp_regs, 0, -1);
}
@@ -957,9 +971,20 @@ static int s390_fpregs_set(struct task_struct *target,
target->thread.fp_regs.fprs,
offsetof(s390_fp_regs, fprs), -1);
- if (rc == 0 && target == current) {
- restore_fp_ctl(&target->thread.fp_regs.fpc);
- restore_fp_regs(target->thread.fp_regs.fprs);
+ if (rc == 0) {
+ if (target == current) {
+ restore_fp_ctl(&target->thread.fp_regs.fpc);
+ restore_fp_regs(target->thread.fp_regs.fprs);
+ }
+#ifdef CONFIG_64BIT
+ else if (target->thread.vxrs) {
+ int i;
+
+ for (i = 0; i < __NUM_VXRS_LOW; i++)
+ *(freg_t *)(target->thread.vxrs + i) =
+ target->thread.fp_regs.fprs[i];
+ }
+#endif
}
return rc;
@@ -1015,6 +1040,95 @@ static int s390_tdb_set(struct task_struct *target,
return 0;
}
+static int s390_vxrs_active(struct task_struct *target,
+ const struct user_regset *regset)
+{
+ return !!target->thread.vxrs;
+}
+
+static int s390_vxrs_low_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ __u64 vxrs[__NUM_VXRS_LOW];
+ int i;
+
+ if (target->thread.vxrs) {
+ if (target == current)
+ save_vx_regs(target->thread.vxrs);
+ for (i = 0; i < __NUM_VXRS_LOW; i++)
+ vxrs[i] = *((__u64 *)(target->thread.vxrs + i) + 1);
+ } else
+ memset(vxrs, 0, sizeof(vxrs));
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
+}
+
+static int s390_vxrs_low_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ __u64 vxrs[__NUM_VXRS_LOW];
+ int i, rc;
+
+ if (!target->thread.vxrs) {
+ rc = alloc_vector_registers(target);
+ if (rc)
+ return rc;
+ } else if (target == current)
+ save_vx_regs(target->thread.vxrs);
+
+ rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
+ if (rc == 0) {
+ for (i = 0; i < __NUM_VXRS_LOW; i++)
+ *((__u64 *)(target->thread.vxrs + i) + 1) = vxrs[i];
+ if (target == current)
+ restore_vx_regs(target->thread.vxrs);
+ }
+
+ return rc;
+}
+
+static int s390_vxrs_high_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ __vector128 vxrs[__NUM_VXRS_HIGH];
+
+ if (target->thread.vxrs) {
+ if (target == current)
+ save_vx_regs(target->thread.vxrs);
+ memcpy(vxrs, target->thread.vxrs + __NUM_VXRS_LOW,
+ sizeof(vxrs));
+ } else
+ memset(vxrs, 0, sizeof(vxrs));
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
+}
+
+static int s390_vxrs_high_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int rc;
+
+ if (!target->thread.vxrs) {
+ rc = alloc_vector_registers(target);
+ if (rc)
+ return rc;
+ } else if (target == current)
+ save_vx_regs(target->thread.vxrs);
+
+ rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ target->thread.vxrs + __NUM_VXRS_LOW, 0, -1);
+ if (rc == 0 && target == current)
+ restore_vx_regs(target->thread.vxrs);
+
+ return rc;
+}
+
#endif
static int s390_system_call_get(struct task_struct *target,
@@ -1038,7 +1152,7 @@ static int s390_system_call_set(struct task_struct *target,
}
static const struct user_regset s390_regsets[] = {
- [REGSET_GENERAL] = {
+ {
.core_note_type = NT_PRSTATUS,
.n = sizeof(s390_regs) / sizeof(long),
.size = sizeof(long),
@@ -1046,7 +1160,7 @@ static const struct user_regset s390_regsets[] = {
.get = s390_regs_get,
.set = s390_regs_set,
},
- [REGSET_FP] = {
+ {
.core_note_type = NT_PRFPREG,
.n = sizeof(s390_fp_regs) / sizeof(long),
.size = sizeof(long),
@@ -1054,8 +1168,16 @@ static const struct user_regset s390_regsets[] = {
.get = s390_fpregs_get,
.set = s390_fpregs_set,
},
+ {
+ .core_note_type = NT_S390_SYSTEM_CALL,
+ .n = 1,
+ .size = sizeof(unsigned int),
+ .align = sizeof(unsigned int),
+ .get = s390_system_call_get,
+ .set = s390_system_call_set,
+ },
#ifdef CONFIG_64BIT
- [REGSET_LAST_BREAK] = {
+ {
.core_note_type = NT_S390_LAST_BREAK,
.n = 1,
.size = sizeof(long),
@@ -1063,7 +1185,7 @@ static const struct user_regset s390_regsets[] = {
.get = s390_last_break_get,
.set = s390_last_break_set,
},
- [REGSET_TDB] = {
+ {
.core_note_type = NT_S390_TDB,
.n = 1,
.size = 256,
@@ -1071,15 +1193,25 @@ static const struct user_regset s390_regsets[] = {
.get = s390_tdb_get,
.set = s390_tdb_set,
},
-#endif
- [REGSET_SYSTEM_CALL] = {
- .core_note_type = NT_S390_SYSTEM_CALL,
- .n = 1,
- .size = sizeof(unsigned int),
- .align = sizeof(unsigned int),
- .get = s390_system_call_get,
- .set = s390_system_call_set,
+ {
+ .core_note_type = NT_S390_VXRS_LOW,
+ .n = __NUM_VXRS_LOW,
+ .size = sizeof(__u64),
+ .align = sizeof(__u64),
+ .active = s390_vxrs_active,
+ .get = s390_vxrs_low_get,
+ .set = s390_vxrs_low_set,
},
+ {
+ .core_note_type = NT_S390_VXRS_HIGH,
+ .n = __NUM_VXRS_HIGH,
+ .size = sizeof(__vector128),
+ .align = sizeof(__vector128),
+ .active = s390_vxrs_active,
+ .get = s390_vxrs_high_get,
+ .set = s390_vxrs_high_set,
+ },
+#endif
};
static const struct user_regset_view user_s390_view = {
@@ -1244,7 +1376,7 @@ static int s390_compat_last_break_set(struct task_struct *target,
}
static const struct user_regset s390_compat_regsets[] = {
- [REGSET_GENERAL] = {
+ {
.core_note_type = NT_PRSTATUS,
.n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
.size = sizeof(compat_long_t),
@@ -1252,7 +1384,7 @@ static const struct user_regset s390_compat_regsets[] = {
.get = s390_compat_regs_get,
.set = s390_compat_regs_set,
},
- [REGSET_FP] = {
+ {
.core_note_type = NT_PRFPREG,
.n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
.size = sizeof(compat_long_t),
@@ -1260,7 +1392,15 @@ static const struct user_regset s390_compat_regsets[] = {
.get = s390_fpregs_get,
.set = s390_fpregs_set,
},
- [REGSET_LAST_BREAK] = {
+ {
+ .core_note_type = NT_S390_SYSTEM_CALL,
+ .n = 1,
+ .size = sizeof(compat_uint_t),
+ .align = sizeof(compat_uint_t),
+ .get = s390_system_call_get,
+ .set = s390_system_call_set,
+ },
+ {
.core_note_type = NT_S390_LAST_BREAK,
.n = 1,
.size = sizeof(long),
@@ -1268,7 +1408,7 @@ static const struct user_regset s390_compat_regsets[] = {
.get = s390_compat_last_break_get,
.set = s390_compat_last_break_set,
},
- [REGSET_TDB] = {
+ {
.core_note_type = NT_S390_TDB,
.n = 1,
.size = 256,
@@ -1276,15 +1416,25 @@ static const struct user_regset s390_compat_regsets[] = {
.get = s390_tdb_get,
.set = s390_tdb_set,
},
- [REGSET_SYSTEM_CALL] = {
- .core_note_type = NT_S390_SYSTEM_CALL,
- .n = 1,
- .size = sizeof(compat_uint_t),
- .align = sizeof(compat_uint_t),
- .get = s390_system_call_get,
- .set = s390_system_call_set,
+ {
+ .core_note_type = NT_S390_VXRS_LOW,
+ .n = __NUM_VXRS_LOW,
+ .size = sizeof(__u64),
+ .align = sizeof(__u64),
+ .active = s390_vxrs_active,
+ .get = s390_vxrs_low_get,
+ .set = s390_vxrs_low_set,
+ },
+ {
+ .core_note_type = NT_S390_VXRS_HIGH,
+ .n = __NUM_VXRS_HIGH,
+ .size = sizeof(__vector128),
+ .align = sizeof(__vector128),
+ .active = s390_vxrs_active,
+ .get = s390_vxrs_high_get,
+ .set = s390_vxrs_high_set,
},
- [REGSET_GENERAL_EXTENDED] = {
+ {
.core_note_type = NT_S390_HIGH_GPRS,
.n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
.size = sizeof(compat_long_t),
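
The two NT_S390_VXRS_* regsets added above export the vector registers through the regset interface, so debuggers and core dumps can reach them. A minimal userspace sketch, assuming glibc's ptrace() wrapper and falling back to an assumed note number when <elf.h> does not yet define NT_S390_VXRS_LOW; it attaches to a pid and dumps the low halves of VR0-VR15:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>
    #include <elf.h>
    #include <sys/ptrace.h>
    #include <sys/uio.h>
    #include <sys/wait.h>
    #include <sys/types.h>

    #ifndef NT_S390_VXRS_LOW
    #define NT_S390_VXRS_LOW 0x309  /* assumed note number, see include/uapi/linux/elf.h */
    #endif

    int main(int argc, char *argv[])
    {
        uint64_t vxrs_low[16];
        struct iovec iov = { .iov_base = vxrs_low, .iov_len = sizeof(vxrs_low) };
        pid_t pid;
        int i;

        if (argc != 2)
            return 1;
        pid = (pid_t) atoi(argv[1]);
        if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) < 0)
            return 1;
        waitpid(pid, NULL, 0);
        /* Older kernels reject the regset; a task that never used VX reads back zeroes. */
        if (ptrace(PTRACE_GETREGSET, pid, NT_S390_VXRS_LOW, &iov) == 0)
            for (i = 0; i < 16; i++)
                printf("vr%-2d low half: %#018llx\n", i,
                       (unsigned long long) vxrs_low[i]);
        ptrace(PTRACE_DETACH, pid, NULL, NULL);
        return 0;
    }

The matching NT_S390_VXRS_HIGH regset returns VR16-VR31 as full 128-bit values.
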
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 82bc113e8c1d..e80d9ff9a56d 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -343,6 +343,9 @@ static void __init setup_lowcore(void)
__ctl_set_bit(14, 29);
}
#else
+ if (MACHINE_HAS_VX)
+ lc->vector_save_area_addr =
+ (unsigned long) &lc->vector_save_area;
lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
#endif
lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
@@ -452,8 +455,8 @@ static void __init setup_memory_end(void)
#ifdef CONFIG_64BIT
vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN;
tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE;
- tmp = tmp * (sizeof(struct page) + PAGE_SIZE) + vmalloc_size;
- if (tmp <= (1UL << 42))
+ tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
+ if (tmp + vmalloc_size + MODULES_LEN <= (1UL << 42))
vmax = 1UL << 42; /* 3-level kernel page table */
else
vmax = 1UL << 53; /* 4-level kernel page table */
@@ -765,6 +768,12 @@ static void __init setup_hwcaps(void)
*/
if (test_facility(50) && test_facility(73))
elf_hwcap |= HWCAP_S390_TE;
+
+ /*
+ * Vector extension HWCAP_S390_VXRS is bit 11.
+ */
+ if (test_facility(129))
+ elf_hwcap |= HWCAP_S390_VXRS;
#endif
get_cpu_id(&cpu_id);
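
The HWCAP bit set in setup_hwcaps() above is the intended way for userspace to detect the vector facility. A minimal sketch using glibc's getauxval(); the fallback define takes the bit-11 value from the comment above in case the toolchain headers predate the feature:

    #include <stdio.h>
    #include <sys/auxv.h>

    #ifndef HWCAP_S390_VXRS
    #define HWCAP_S390_VXRS (1UL << 11)  /* bit 11, per the comment above */
    #endif

    int main(void)
    {
        unsigned long hwcap = getauxval(AT_HWCAP);

        printf("vector extension: %s\n",
               (hwcap & HWCAP_S390_VXRS) ? "available" : "not available");
        return 0;
    }
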
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 469c4c6d9182..0c1a0ff0a558 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -31,30 +31,117 @@
#include <asm/switch_to.h>
#include "entry.h"
-typedef struct
+/*
+ * Layout of an old-style signal-frame:
+ * -----------------------------------------
+ * | save area (_SIGNAL_FRAMESIZE) |
+ * -----------------------------------------
+ * | struct sigcontext |
+ * | oldmask |
+ * | _sigregs * |
+ * -----------------------------------------
+ * | _sigregs with |
+ * | _s390_regs_common |
+ * | _s390_fp_regs |
+ * -----------------------------------------
+ * | int signo |
+ * -----------------------------------------
+ * | _sigregs_ext with |
+ * | gprs_high 64 byte (opt) |
+ * | vxrs_low 128 byte (opt) |
+ * | vxrs_high 256 byte (opt) |
+ * | reserved 128 byte (opt) |
+ * -----------------------------------------
+ * | __u16 svc_insn |
+ * -----------------------------------------
+ * The svc_insn entry with the sigreturn system call opcode does not
+ * have a fixed position and moves if gprs_high or vxrs exist.
+ * Future extensions will be added to _sigregs_ext.
+ */
+struct sigframe
{
__u8 callee_used_stack[__SIGNAL_FRAMESIZE];
struct sigcontext sc;
_sigregs sregs;
int signo;
- __u8 retcode[S390_SYSCALL_SIZE];
-} sigframe;
+ _sigregs_ext sregs_ext;
+ __u16 svc_insn; /* Offset of svc_insn is NOT fixed! */
+};
-typedef struct
+/*
+ * Layout of an rt signal-frame:
+ * -----------------------------------------
+ * | save area (_SIGNAL_FRAMESIZE) |
+ * -----------------------------------------
+ * | svc __NR_rt_sigreturn 2 byte |
+ * -----------------------------------------
+ * | struct siginfo |
+ * -----------------------------------------
+ * | struct ucontext_extended with |
+ * | unsigned long uc_flags |
+ * | struct ucontext *uc_link |
+ * | stack_t uc_stack |
+ * | _sigregs uc_mcontext with |
+ * | _s390_regs_common |
+ * | _s390_fp_regs |
+ * | sigset_t uc_sigmask |
+ * | _sigregs_ext uc_mcontext_ext |
+ * | gprs_high 64 byte (opt) |
+ * | vxrs_low 128 byte (opt) |
+ * | vxrs_high 256 byte (opt)|
+ * | reserved 128 byte (opt) |
+ * -----------------------------------------
+ * Future extensions will be added to _sigregs_ext.
+ */
+struct rt_sigframe
{
__u8 callee_used_stack[__SIGNAL_FRAMESIZE];
- __u8 retcode[S390_SYSCALL_SIZE];
+ __u16 svc_insn;
struct siginfo info;
- struct ucontext uc;
-} rt_sigframe;
+ struct ucontext_extended uc;
+};
+
+/* Store registers needed to create the signal frame */
+static void store_sigregs(void)
+{
+ save_access_regs(current->thread.acrs);
+ save_fp_ctl(&current->thread.fp_regs.fpc);
+#ifdef CONFIG_64BIT
+ if (current->thread.vxrs) {
+ int i;
+
+ save_vx_regs(current->thread.vxrs);
+ for (i = 0; i < __NUM_FPRS; i++)
+ current->thread.fp_regs.fprs[i] =
+ *(freg_t *)(current->thread.vxrs + i);
+ } else
+#endif
+ save_fp_regs(current->thread.fp_regs.fprs);
+}
+
+/* Load registers after signal return */
+static void load_sigregs(void)
+{
+ restore_access_regs(current->thread.acrs);
+ /* restore_fp_ctl is done in restore_sigregs */
+#ifdef CONFIG_64BIT
+ if (current->thread.vxrs) {
+ int i;
+
+ for (i = 0; i < __NUM_FPRS; i++)
+ *(freg_t *)(current->thread.vxrs + i) =
+ current->thread.fp_regs.fprs[i];
+ restore_vx_regs(current->thread.vxrs);
+ } else
+#endif
+ restore_fp_regs(current->thread.fp_regs.fprs);
+}
/* Returns non-zero on fault. */
static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
{
_sigregs user_sregs;
- save_access_regs(current->thread.acrs);
-
/* Copy a 'clean' PSW mask to the user to avoid leaking
information about whether PER is currently on. */
user_sregs.regs.psw.mask = PSW_USER_BITS |
@@ -63,12 +150,6 @@ static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
memcpy(&user_sregs.regs.gprs, &regs->gprs, sizeof(sregs->regs.gprs));
memcpy(&user_sregs.regs.acrs, current->thread.acrs,
sizeof(user_sregs.regs.acrs));
- /*
- * We have to store the fp registers to current->thread.fp_regs
- * to merge them with the emulated registers.
- */
- save_fp_ctl(&current->thread.fp_regs.fpc);
- save_fp_regs(current->thread.fp_regs.fprs);
memcpy(&user_sregs.fpregs, &current->thread.fp_regs,
sizeof(user_sregs.fpregs));
if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs)))
@@ -107,20 +188,64 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
memcpy(&regs->gprs, &user_sregs.regs.gprs, sizeof(sregs->regs.gprs));
memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
sizeof(current->thread.acrs));
- restore_access_regs(current->thread.acrs);
memcpy(&current->thread.fp_regs, &user_sregs.fpregs,
sizeof(current->thread.fp_regs));
- restore_fp_regs(current->thread.fp_regs.fprs);
clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */
return 0;
}
+/* Returns non-zero on fault. */
+static int save_sigregs_ext(struct pt_regs *regs,
+ _sigregs_ext __user *sregs_ext)
+{
+#ifdef CONFIG_64BIT
+ __u64 vxrs[__NUM_VXRS_LOW];
+ int i;
+
+ /* Save vector registers to signal stack */
+ if (current->thread.vxrs) {
+ for (i = 0; i < __NUM_VXRS_LOW; i++)
+ vxrs[i] = *((__u64 *)(current->thread.vxrs + i) + 1);
+ if (__copy_to_user(&sregs_ext->vxrs_low, vxrs,
+ sizeof(sregs_ext->vxrs_low)) ||
+ __copy_to_user(&sregs_ext->vxrs_high,
+ current->thread.vxrs + __NUM_VXRS_LOW,
+ sizeof(sregs_ext->vxrs_high)))
+ return -EFAULT;
+ }
+#endif
+ return 0;
+}
+
+static int restore_sigregs_ext(struct pt_regs *regs,
+ _sigregs_ext __user *sregs_ext)
+{
+#ifdef CONFIG_64BIT
+ __u64 vxrs[__NUM_VXRS_LOW];
+ int i;
+
+ /* Restore vector registers from signal stack */
+ if (current->thread.vxrs) {
+ if (__copy_from_user(vxrs, &sregs_ext->vxrs_low,
+ sizeof(sregs_ext->vxrs_low)) ||
+ __copy_from_user(current->thread.vxrs + __NUM_VXRS_LOW,
+ &sregs_ext->vxrs_high,
+ sizeof(sregs_ext->vxrs_high)))
+ return -EFAULT;
+ for (i = 0; i < __NUM_VXRS_LOW; i++)
+ *((__u64 *)(current->thread.vxrs + i) + 1) = vxrs[i];
+ }
+#endif
+ return 0;
+}
+
SYSCALL_DEFINE0(sigreturn)
{
struct pt_regs *regs = task_pt_regs(current);
- sigframe __user *frame = (sigframe __user *)regs->gprs[15];
+ struct sigframe __user *frame =
+ (struct sigframe __user *) regs->gprs[15];
sigset_t set;
if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE))
@@ -128,6 +253,9 @@ SYSCALL_DEFINE0(sigreturn)
set_current_blocked(&set);
if (restore_sigregs(regs, &frame->sregs))
goto badframe;
+ if (restore_sigregs_ext(regs, &frame->sregs_ext))
+ goto badframe;
+ load_sigregs();
return regs->gprs[2];
badframe:
force_sig(SIGSEGV, current);
@@ -137,16 +265,20 @@ badframe:
SYSCALL_DEFINE0(rt_sigreturn)
{
struct pt_regs *regs = task_pt_regs(current);
- rt_sigframe __user *frame = (rt_sigframe __user *)regs->gprs[15];
+ struct rt_sigframe __user *frame =
+ (struct rt_sigframe __user *)regs->gprs[15];
sigset_t set;
if (__copy_from_user(&set.sig, &frame->uc.uc_sigmask, sizeof(set)))
goto badframe;
set_current_blocked(&set);
+ if (restore_altstack(&frame->uc.uc_stack))
+ goto badframe;
if (restore_sigregs(regs, &frame->uc.uc_mcontext))
goto badframe;
- if (restore_altstack(&frame->uc.uc_stack))
+ if (restore_sigregs_ext(regs, &frame->uc.uc_mcontext_ext))
goto badframe;
+ load_sigregs();
return regs->gprs[2];
badframe:
force_sig(SIGSEGV, current);
@@ -154,11 +286,6 @@ badframe:
}
/*
- * Set up a signal frame.
- */
-
-
-/*
* Determine which stack to use..
*/
static inline void __user *
@@ -195,39 +322,63 @@ static inline int map_signal(int sig)
static int setup_frame(int sig, struct k_sigaction *ka,
sigset_t *set, struct pt_regs * regs)
{
- sigframe __user *frame;
-
- frame = get_sigframe(ka, regs, sizeof(sigframe));
+ struct sigframe __user *frame;
+ struct sigcontext sc;
+ unsigned long restorer;
+ size_t frame_size;
+ /*
+ * gprs_high are only present for a 31-bit task running on
+ * a 64-bit kernel (see compat_signal.c) but the space for
+	 * gprs_high needs to be allocated if vector registers are
+ * included in the signal frame on a 31-bit system.
+ */
+ frame_size = sizeof(*frame) - sizeof(frame->sregs_ext);
+ if (MACHINE_HAS_VX)
+ frame_size += sizeof(frame->sregs_ext);
+ frame = get_sigframe(ka, regs, frame_size);
if (frame == (void __user *) -1UL)
return -EFAULT;
- if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE))
+ /* Set up backchain. */
+ if (__put_user(regs->gprs[15], (addr_t __user *) frame))
return -EFAULT;
+ /* Create struct sigcontext on the signal stack */
+ memcpy(&sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE);
+ sc.sregs = (_sigregs __user __force *) &frame->sregs;
+ if (__copy_to_user(&frame->sc, &sc, sizeof(frame->sc)))
+ return -EFAULT;
+
+ /* Store registers needed to create the signal frame */
+ store_sigregs();
+
+ /* Create _sigregs on the signal stack */
if (save_sigregs(regs, &frame->sregs))
return -EFAULT;
- if (__put_user(&frame->sregs, &frame->sc.sregs))
+
+ /* Place signal number on stack to allow backtrace from handler. */
+ if (__put_user(regs->gprs[2], (int __user *) &frame->signo))
+ return -EFAULT;
+
+ /* Create _sigregs_ext on the signal stack */
+ if (save_sigregs_ext(regs, &frame->sregs_ext))
return -EFAULT;
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
if (ka->sa.sa_flags & SA_RESTORER) {
- regs->gprs[14] = (unsigned long)
- ka->sa.sa_restorer | PSW_ADDR_AMODE;
+ restorer = (unsigned long) ka->sa.sa_restorer | PSW_ADDR_AMODE;
} else {
- regs->gprs[14] = (unsigned long)
- frame->retcode | PSW_ADDR_AMODE;
- if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn,
- (u16 __user *)(frame->retcode)))
+		/* Signal frames without vector registers are short! */
+ __u16 __user *svc = (void *) frame + frame_size - 2;
+ if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, svc))
return -EFAULT;
+ restorer = (unsigned long) svc | PSW_ADDR_AMODE;
}
- /* Set up backchain. */
- if (__put_user(regs->gprs[15], (addr_t __user *) frame))
- return -EFAULT;
-
/* Set up registers for signal handler */
+ regs->gprs[14] = restorer;
regs->gprs[15] = (unsigned long) frame;
/* Force default amode and default user address space control. */
regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
@@ -247,54 +398,69 @@ static int setup_frame(int sig, struct k_sigaction *ka,
regs->gprs[5] = regs->int_parm_long;
regs->gprs[6] = task_thread_info(current)->last_break;
}
-
- /* Place signal number on stack to allow backtrace from handler. */
- if (__put_user(regs->gprs[2], (int __user *) &frame->signo))
- return -EFAULT;
return 0;
}
static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs)
{
- int err = 0;
- rt_sigframe __user *frame;
-
- frame = get_sigframe(&ksig->ka, regs, sizeof(rt_sigframe));
+ struct rt_sigframe __user *frame;
+ unsigned long uc_flags, restorer;
+ size_t frame_size;
+ frame_size = sizeof(struct rt_sigframe) - sizeof(_sigregs_ext);
+ /*
+ * gprs_high are only present for a 31-bit task running on
+ * a 64-bit kernel (see compat_signal.c) but the space for
+	 * gprs_high needs to be allocated if vector registers are
+ * included in the signal frame on a 31-bit system.
+ */
+ uc_flags = 0;
+#ifdef CONFIG_64BIT
+ if (MACHINE_HAS_VX) {
+ frame_size += sizeof(_sigregs_ext);
+ if (current->thread.vxrs)
+ uc_flags |= UC_VXRS;
+ }
+#endif
+ frame = get_sigframe(&ksig->ka, regs, frame_size);
if (frame == (void __user *) -1UL)
return -EFAULT;
- if (copy_siginfo_to_user(&frame->info, &ksig->info))
- return -EFAULT;
-
- /* Create the ucontext. */
- err |= __put_user(0, &frame->uc.uc_flags);
- err |= __put_user(NULL, &frame->uc.uc_link);
- err |= __save_altstack(&frame->uc.uc_stack, regs->gprs[15]);
- err |= save_sigregs(regs, &frame->uc.uc_mcontext);
- err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
- if (err)
+ /* Set up backchain. */
+ if (__put_user(regs->gprs[15], (addr_t __user *) frame))
return -EFAULT;
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
if (ksig->ka.sa.sa_flags & SA_RESTORER) {
- regs->gprs[14] = (unsigned long)
+ restorer = (unsigned long)
ksig->ka.sa.sa_restorer | PSW_ADDR_AMODE;
} else {
- regs->gprs[14] = (unsigned long)
- frame->retcode | PSW_ADDR_AMODE;
- if (__put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn,
- (u16 __user *)(frame->retcode)))
+ __u16 __user *svc = &frame->svc_insn;
+ if (__put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, svc))
return -EFAULT;
+ restorer = (unsigned long) svc | PSW_ADDR_AMODE;
}
- /* Set up backchain. */
- if (__put_user(regs->gprs[15], (addr_t __user *) frame))
+ /* Create siginfo on the signal stack */
+ if (copy_siginfo_to_user(&frame->info, &ksig->info))
+ return -EFAULT;
+
+ /* Store registers needed to create the signal frame */
+ store_sigregs();
+
+ /* Create ucontext on the signal stack. */
+ if (__put_user(uc_flags, &frame->uc.uc_flags) ||
+ __put_user(NULL, &frame->uc.uc_link) ||
+ __save_altstack(&frame->uc.uc_stack, regs->gprs[15]) ||
+ save_sigregs(regs, &frame->uc.uc_mcontext) ||
+ __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)) ||
+ save_sigregs_ext(regs, &frame->uc.uc_mcontext_ext))
return -EFAULT;
/* Set up registers for signal handler */
+ regs->gprs[14] = restorer;
regs->gprs[15] = (unsigned long) frame;
/* Force default amode and default user address space control. */
regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
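
The two layout comments above describe a signal frame whose tail is variable: the optional _sigregs_ext block is only allocated when the machine has the vector facility, so for the old-style frame the sigreturn svc opcode always ends up in the last two bytes and the restorer address is taken from frame + frame_size - 2. A standalone sketch of that sizing arithmetic; every byte count except the _sigregs_ext parts (which come from the layout comment) is a made-up stand-in, not a real s390 structure size:

    #include <stdio.h>

    /* All sizes are illustrative stand-ins except the _sigregs_ext parts,
     * which are taken from the layout comment in the patch above. */
    #define BASE_FRAME_SIZE  456U  /* save area + sigcontext + _sigregs + signo (made up) */
    #define SIGREGS_EXT_SIZE (64U + 128U + 256U + 128U)

    static unsigned int sigframe_size(int machine_has_vx)
    {
        unsigned int size = BASE_FRAME_SIZE;

        if (machine_has_vx)        /* _sigregs_ext is only allocated with VX */
            size += SIGREGS_EXT_SIZE;
        return size + 2;           /* trailing svc sigreturn opcode */
    }

    int main(void)
    {
        printf("no VX: svc_insn at offset %u\n", sigframe_size(0) - 2);
        printf("   VX: svc_insn at offset %u\n", sigframe_size(1) - 2);
        return 0;
    }

The rt frame follows the same pattern for its optional _sigregs_ext tail, but its svc_insn slot sits at a fixed offset right after the register save area, so no such arithmetic is needed there.
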
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 243c7e512600..6fd9e60101f1 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -45,6 +45,7 @@
#include <asm/debug.h>
#include <asm/os_info.h>
#include <asm/sigp.h>
+#include <asm/idle.h>
#include "entry.h"
enum {
@@ -82,7 +83,8 @@ DEFINE_MUTEX(smp_cpu_state_mutex);
/*
* Signal processor helper functions.
*/
-static inline int __pcpu_sigp_relax(u16 addr, u8 order, u32 parm, u32 *status)
+static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm,
+ u32 *status)
{
int cc;
@@ -178,6 +180,9 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
goto out;
}
#else
+ if (MACHINE_HAS_VX)
+ lc->vector_save_area_addr =
+ (unsigned long) &lc->vector_save_area;
if (vdso_alloc_per_cpu(lc))
goto out;
#endif
@@ -333,12 +338,6 @@ int smp_vcpu_scheduled(int cpu)
return pcpu_running(pcpu_devices + cpu);
}
-void smp_yield(void)
-{
- if (MACHINE_HAS_DIAG44)
- asm volatile("diag 0,0,0x44");
-}
-
void smp_yield_cpu(int cpu)
{
if (MACHINE_HAS_DIAG9C)
@@ -517,35 +516,53 @@ EXPORT_SYMBOL(smp_ctl_clear_bit);
static void __init smp_get_save_area(int cpu, u16 address)
{
void *lc = pcpu_devices[0].lowcore;
- struct save_area *save_area;
+ struct save_area_ext *sa_ext;
+ unsigned long vx_sa;
if (is_kdump_kernel())
return;
if (!OLDMEM_BASE && (address == boot_cpu_address ||
ipl_info.type != IPL_TYPE_FCP_DUMP))
return;
- save_area = dump_save_area_create(cpu);
- if (!save_area)
+ sa_ext = dump_save_area_create(cpu);
+ if (!sa_ext)
panic("could not allocate memory for save area\n");
if (address == boot_cpu_address) {
/* Copy the registers of the boot cpu. */
- copy_oldmem_page(1, (void *) save_area, sizeof(*save_area),
+ copy_oldmem_page(1, (void *) &sa_ext->sa, sizeof(sa_ext->sa),
SAVE_AREA_BASE - PAGE_SIZE, 0);
+ if (MACHINE_HAS_VX)
+ save_vx_regs_safe(sa_ext->vx_regs);
return;
}
/* Get the registers of a non-boot cpu. */
__pcpu_sigp_relax(address, SIGP_STOP_AND_STORE_STATUS, 0, NULL);
- memcpy_real(save_area, lc + SAVE_AREA_BASE, sizeof(*save_area));
+ memcpy_real(&sa_ext->sa, lc + SAVE_AREA_BASE, sizeof(sa_ext->sa));
+ if (!MACHINE_HAS_VX)
+ return;
+ /* Get the VX registers */
+ vx_sa = __get_free_page(GFP_KERNEL);
+ if (!vx_sa)
+ panic("could not allocate memory for VX save area\n");
+ __pcpu_sigp_relax(address, SIGP_STORE_ADDITIONAL_STATUS, vx_sa, NULL);
+ memcpy(sa_ext->vx_regs, (void *) vx_sa, sizeof(sa_ext->vx_regs));
+ free_page(vx_sa);
}
int smp_store_status(int cpu)
{
+ unsigned long vx_sa;
struct pcpu *pcpu;
pcpu = pcpu_devices + cpu;
if (__pcpu_sigp_relax(pcpu->address, SIGP_STOP_AND_STORE_STATUS,
0, NULL) != SIGP_CC_ORDER_CODE_ACCEPTED)
return -EIO;
+ if (!MACHINE_HAS_VX)
+ return 0;
+ vx_sa = __pa(pcpu->lowcore->vector_save_area_addr);
+ __pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
+ vx_sa, NULL);
return 0;
}
@@ -667,7 +684,7 @@ static void smp_start_secondary(void *cpuvoid)
cpu_init();
preempt_disable();
init_cpu_timer();
- init_cpu_vtimer();
+ vtime_init();
pfault_init();
notify_cpu_starting(smp_processor_id());
set_cpu_online(smp_processor_id(), true);
@@ -726,6 +743,7 @@ int __cpu_disable(void)
cregs[6] &= ~0xff000000UL; /* disable all I/O interrupts */
cregs[14] &= ~0x1f000000UL; /* disable most machine checks */
__ctl_load(cregs, 0, 15);
+ clear_cpu_flag(CIF_NOHZ_DELAY);
return 0;
}
@@ -898,42 +916,6 @@ static struct attribute_group cpu_common_attr_group = {
.attrs = cpu_common_attrs,
};
-static ssize_t show_idle_count(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
- unsigned long long idle_count;
- unsigned int sequence;
-
- do {
- sequence = ACCESS_ONCE(idle->sequence);
- idle_count = ACCESS_ONCE(idle->idle_count);
- if (ACCESS_ONCE(idle->clock_idle_enter))
- idle_count++;
- } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
- return sprintf(buf, "%llu\n", idle_count);
-}
-static DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
-
-static ssize_t show_idle_time(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
- unsigned long long now, idle_time, idle_enter, idle_exit;
- unsigned int sequence;
-
- do {
- now = get_tod_clock();
- sequence = ACCESS_ONCE(idle->sequence);
- idle_time = ACCESS_ONCE(idle->idle_time);
- idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
- idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
- } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
- idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
- return sprintf(buf, "%llu\n", idle_time >> 12);
-}
-static DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
-
static struct attribute *cpu_online_attrs[] = {
&dev_attr_idle_count.attr,
&dev_attr_idle_time_us.attr,
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 4cef607f3711..69e980de0f62 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -232,6 +232,19 @@ void update_vsyscall(struct timekeeper *tk)
vdso_data->wtom_clock_nsec -= nsecps;
vdso_data->wtom_clock_sec++;
}
+
+ vdso_data->xtime_coarse_sec = tk->xtime_sec;
+ vdso_data->xtime_coarse_nsec =
+ (long)(tk->tkr.xtime_nsec >> tk->tkr.shift);
+ vdso_data->wtom_coarse_sec =
+ vdso_data->xtime_coarse_sec + tk->wall_to_monotonic.tv_sec;
+ vdso_data->wtom_coarse_nsec =
+ vdso_data->xtime_coarse_nsec + tk->wall_to_monotonic.tv_nsec;
+ while (vdso_data->wtom_coarse_nsec >= NSEC_PER_SEC) {
+ vdso_data->wtom_coarse_nsec -= NSEC_PER_SEC;
+ vdso_data->wtom_coarse_sec++;
+ }
+
vdso_data->tk_mult = tk->tkr.mult;
vdso_data->tk_shift = tk->tkr.shift;
smp_wmb();
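
The new xtime_coarse/wtom_coarse fields in the vDSO data page let __kernel_clock_gettime serve CLOCK_REALTIME_COARSE and CLOCK_MONOTONIC_COARSE without a TOD read (see the vdso32/vdso64 hunks further down). Nothing s390-specific is needed on the consumer side; a minimal sketch (link with -lrt on older glibc):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
        struct timespec res, now;

        /* Coarse clocks resolve to the timer tick, not the TOD clock. */
        clock_getres(CLOCK_MONOTONIC_COARSE, &res);
        clock_gettime(CLOCK_MONOTONIC_COARSE, &now);
        printf("resolution %lld.%09ld s, now %lld.%09ld s\n",
               (long long) res.tv_sec, res.tv_nsec,
               (long long) now.tv_sec, now.tv_nsec);
        return 0;
    }
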
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 355a16c55702..b93bed76ea94 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -464,15 +464,17 @@ static struct sched_domain_topology_level s390_topology[] = {
static int __init topology_init(void)
{
- if (!MACHINE_HAS_TOPOLOGY) {
+ if (MACHINE_HAS_TOPOLOGY)
+ set_topology_timer();
+ else
topology_update_polarization_simple();
- goto out;
- }
- set_topology_timer();
-out:
-
- set_sched_topology(s390_topology);
-
return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
}
device_initcall(topology_init);
+
+static int __init early_topology_init(void)
+{
+ set_sched_topology(s390_topology);
+ return 0;
+}
+early_initcall(early_topology_init);
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index c5762324d9ee..9ff5ecba26ab 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -18,6 +18,8 @@
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/switch_to.h>
#include "entry.h"
int show_unhandled_signals = 1;
@@ -58,15 +60,10 @@ int is_valid_bugaddr(unsigned long addr)
return 1;
}
-static void __kprobes do_trap(struct pt_regs *regs,
- int si_signo, int si_code, char *str)
+void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str)
{
siginfo_t info;
- if (notify_die(DIE_TRAP, str, regs, 0,
- regs->int_code, si_signo) == NOTIFY_STOP)
- return;
-
if (user_mode(regs)) {
info.si_signo = si_signo;
info.si_errno = 0;
@@ -90,6 +87,15 @@ static void __kprobes do_trap(struct pt_regs *regs,
}
}
+static void __kprobes do_trap(struct pt_regs *regs, int si_signo, int si_code,
+ char *str)
+{
+ if (notify_die(DIE_TRAP, str, regs, 0,
+ regs->int_code, si_signo) == NOTIFY_STOP)
+ return;
+ do_report_trap(regs, si_signo, si_code, str);
+}
+
void __kprobes do_per_trap(struct pt_regs *regs)
{
siginfo_t info;
@@ -178,6 +184,7 @@ void __kprobes illegal_op(struct pt_regs *regs)
siginfo_t info;
__u8 opcode[6];
__u16 __user *location;
+ int is_uprobe_insn = 0;
int signal = 0;
location = get_trap_ip(regs);
@@ -194,6 +201,10 @@ void __kprobes illegal_op(struct pt_regs *regs)
force_sig_info(SIGTRAP, &info, current);
} else
signal = SIGILL;
+#ifdef CONFIG_UPROBES
+ } else if (*((__u16 *) opcode) == UPROBE_SWBP_INSN) {
+ is_uprobe_insn = 1;
+#endif
#ifdef CONFIG_MATHEMU
} else if (opcode[0] == 0xb3) {
if (get_user(*((__u16 *) (opcode+2)), location+1))
@@ -219,11 +230,13 @@ void __kprobes illegal_op(struct pt_regs *regs)
#endif
} else
signal = SIGILL;
- } else {
- /*
- * If we get an illegal op in kernel mode, send it through the
- * kprobes notifier. If kprobes doesn't pick it up, SIGILL
- */
+ }
+ /*
+ * We got either an illegal op in kernel mode, or user space trapped
+ * on a uprobes illegal instruction. See if kprobes or uprobes picks
+ * it up. If not, SIGILL.
+ */
+ if (is_uprobe_insn || !user_mode(regs)) {
if (notify_die(DIE_BPT, "bpt", regs, 0,
3, SIGTRAP) != NOTIFY_STOP)
signal = SIGILL;
@@ -292,6 +305,74 @@ DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
"specification exception");
#endif
+#ifdef CONFIG_64BIT
+int alloc_vector_registers(struct task_struct *tsk)
+{
+ __vector128 *vxrs;
+ int i;
+
+ /* Allocate vector register save area. */
+ vxrs = kzalloc(sizeof(__vector128) * __NUM_VXRS,
+ GFP_KERNEL|__GFP_REPEAT);
+ if (!vxrs)
+ return -ENOMEM;
+ preempt_disable();
+ if (tsk == current)
+ save_fp_regs(tsk->thread.fp_regs.fprs);
+ /* Copy the 16 floating point registers */
+ for (i = 0; i < 16; i++)
+ *(freg_t *) &vxrs[i] = tsk->thread.fp_regs.fprs[i];
+ tsk->thread.vxrs = vxrs;
+ if (tsk == current) {
+ __ctl_set_bit(0, 17);
+ restore_vx_regs(vxrs);
+ }
+ preempt_enable();
+ return 0;
+}
+
+void vector_exception(struct pt_regs *regs)
+{
+ int si_code, vic;
+
+ if (!MACHINE_HAS_VX) {
+ do_trap(regs, SIGILL, ILL_ILLOPN, "illegal operation");
+ return;
+ }
+
+ /* get vector interrupt code from fpc */
+ asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
+ vic = (current->thread.fp_regs.fpc & 0xf00) >> 8;
+ switch (vic) {
+ case 1: /* invalid vector operation */
+ si_code = FPE_FLTINV;
+ break;
+ case 2: /* division by zero */
+ si_code = FPE_FLTDIV;
+ break;
+ case 3: /* overflow */
+ si_code = FPE_FLTOVF;
+ break;
+ case 4: /* underflow */
+ si_code = FPE_FLTUND;
+ break;
+ case 5: /* inexact */
+ si_code = FPE_FLTRES;
+ break;
+ default: /* unknown cause */
+ si_code = 0;
+ }
+ do_trap(regs, SIGFPE, si_code, "vector exception");
+}
+
+static int __init disable_vector_extension(char *str)
+{
+ S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX;
+ return 1;
+}
+__setup("novx", disable_vector_extension);
+#endif
+
void data_exception(struct pt_regs *regs)
{
__u16 __user *location;
@@ -357,6 +438,18 @@ void data_exception(struct pt_regs *regs)
}
}
#endif
+#ifdef CONFIG_64BIT
+ /* Check for vector register enablement */
+ if (MACHINE_HAS_VX && !current->thread.vxrs &&
+ (current->thread.fp_regs.fpc & FPC_DXC_MASK) == 0xfe00) {
+ alloc_vector_registers(current);
+ /* Vector data exception is suppressing, rewind psw. */
+ regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
+ clear_pt_regs_flag(regs, PIF_PER_TRAP);
+ return;
+ }
+#endif
+
if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
signal = SIGFPE;
else
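
vector_exception() above translates the vector interruption code taken from the FPC into a SIGFPE si_code, so userspace sees the usual FPE_FLT* causes. A generic POSIX sketch of a handler that tells them apart; nothing here is s390-specific, and fprintf is kept only for brevity (a production handler would stick to async-signal-safe calls):

    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static void fpe_handler(int sig, siginfo_t *si, void *ctx)
    {
        const char *cause = "unknown";

        (void) sig; (void) ctx;
        switch (si->si_code) {
        case FPE_FLTINV: cause = "invalid operation"; break;
        case FPE_FLTDIV: cause = "division by zero";  break;
        case FPE_FLTOVF: cause = "overflow";          break;
        case FPE_FLTUND: cause = "underflow";         break;
        case FPE_FLTRES: cause = "inexact";           break;
        }
        /* Not async-signal-safe; fine for a demo, not for production. */
        fprintf(stderr, "SIGFPE: %s\n", cause);
        _exit(1);
    }

    int main(void)
    {
        struct sigaction sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_sigaction = fpe_handler;
        sa.sa_flags = SA_SIGINFO;
        sigaction(SIGFPE, &sa, NULL);
        /* ... run vector code that may raise an IEEE exception ... */
        return 0;
    }
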
diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c
new file mode 100644
index 000000000000..956f4f7a591c
--- /dev/null
+++ b/arch/s390/kernel/uprobes.c
@@ -0,0 +1,332 @@
+/*
+ * User-space Probes (UProbes) for s390
+ *
+ * Copyright IBM Corp. 2014
+ * Author(s): Jan Willeke,
+ */
+
+#include <linux/kprobes.h>
+#include <linux/uaccess.h>
+#include <linux/uprobes.h>
+#include <linux/compat.h>
+#include <linux/kdebug.h>
+#include <asm/switch_to.h>
+#include <asm/facility.h>
+#include <asm/dis.h>
+#include "entry.h"
+
+#define UPROBE_TRAP_NR UINT_MAX
+
+int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
+ unsigned long addr)
+{
+ return probe_is_prohibited_opcode(auprobe->insn);
+}
+
+int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ if (psw_bits(regs->psw).eaba == PSW_AMODE_24BIT)
+ return -EINVAL;
+ if (!is_compat_task() && psw_bits(regs->psw).eaba == PSW_AMODE_31BIT)
+ return -EINVAL;
+ clear_pt_regs_flag(regs, PIF_PER_TRAP);
+ auprobe->saved_per = psw_bits(regs->psw).r;
+ auprobe->saved_int_code = regs->int_code;
+ regs->int_code = UPROBE_TRAP_NR;
+ regs->psw.addr = current->utask->xol_vaddr;
+ set_tsk_thread_flag(current, TIF_UPROBE_SINGLESTEP);
+ update_cr_regs(current);
+ return 0;
+}
+
+bool arch_uprobe_xol_was_trapped(struct task_struct *tsk)
+{
+ struct pt_regs *regs = task_pt_regs(tsk);
+
+ if (regs->int_code != UPROBE_TRAP_NR)
+ return true;
+ return false;
+}
+
+int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ int fixup = probe_get_fixup_type(auprobe->insn);
+ struct uprobe_task *utask = current->utask;
+
+ clear_tsk_thread_flag(current, TIF_UPROBE_SINGLESTEP);
+ update_cr_regs(current);
+ psw_bits(regs->psw).r = auprobe->saved_per;
+ regs->int_code = auprobe->saved_int_code;
+
+ if (fixup & FIXUP_PSW_NORMAL)
+ regs->psw.addr += utask->vaddr - utask->xol_vaddr;
+ if (fixup & FIXUP_RETURN_REGISTER) {
+ int reg = (auprobe->insn[0] & 0xf0) >> 4;
+
+ regs->gprs[reg] += utask->vaddr - utask->xol_vaddr;
+ }
+ if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
+ int ilen = insn_length(auprobe->insn[0] >> 8);
+
+ if (regs->psw.addr - utask->xol_vaddr == ilen)
+ regs->psw.addr = utask->vaddr + ilen;
+ }
+	/* If PER tracing was active, generate a trap */
+ if (regs->psw.mask & PSW_MASK_PER)
+ do_per_trap(regs);
+ return 0;
+}
+
+int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
+ void *data)
+{
+ struct die_args *args = data;
+ struct pt_regs *regs = args->regs;
+
+ if (!user_mode(regs))
+ return NOTIFY_DONE;
+ if (regs->int_code & 0x200) /* Trap during transaction */
+ return NOTIFY_DONE;
+ switch (val) {
+ case DIE_BPT:
+ if (uprobe_pre_sstep_notifier(regs))
+ return NOTIFY_STOP;
+ break;
+ case DIE_SSTEP:
+ if (uprobe_post_sstep_notifier(regs))
+ return NOTIFY_STOP;
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ clear_thread_flag(TIF_UPROBE_SINGLESTEP);
+ regs->int_code = auprobe->saved_int_code;
+ regs->psw.addr = current->utask->vaddr;
+}
+
+unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
+ struct pt_regs *regs)
+{
+ unsigned long orig;
+
+ orig = regs->gprs[14];
+ regs->gprs[14] = trampoline;
+ return orig;
+}
+
+/* Instruction Emulation */
+
+static void adjust_psw_addr(psw_t *psw, unsigned long len)
+{
+ psw->addr = __rewind_psw(*psw, -len);
+}
+
+#define EMU_ILLEGAL_OP 1
+#define EMU_SPECIFICATION 2
+#define EMU_ADDRESSING 3
+
+#define emu_load_ril(ptr, output) \
+({ \
+ unsigned int mask = sizeof(*(ptr)) - 1; \
+ __typeof__(*(ptr)) input; \
+ int __rc = 0; \
+ \
+ if (!test_facility(34)) \
+ __rc = EMU_ILLEGAL_OP; \
+ else if ((u64 __force)ptr & mask) \
+ __rc = EMU_SPECIFICATION; \
+ else if (get_user(input, ptr)) \
+ __rc = EMU_ADDRESSING; \
+ else \
+ *(output) = input; \
+ __rc; \
+})
+
+#define emu_store_ril(ptr, input) \
+({ \
+ unsigned int mask = sizeof(*(ptr)) - 1; \
+ int __rc = 0; \
+ \
+ if (!test_facility(34)) \
+ __rc = EMU_ILLEGAL_OP; \
+ else if ((u64 __force)ptr & mask) \
+ __rc = EMU_SPECIFICATION; \
+ else if (put_user(*(input), ptr)) \
+ __rc = EMU_ADDRESSING; \
+ __rc; \
+})
+
+#define emu_cmp_ril(regs, ptr, cmp) \
+({ \
+ unsigned int mask = sizeof(*(ptr)) - 1; \
+ __typeof__(*(ptr)) input; \
+ int __rc = 0; \
+ \
+ if (!test_facility(34)) \
+ __rc = EMU_ILLEGAL_OP; \
+ else if ((u64 __force)ptr & mask) \
+ __rc = EMU_SPECIFICATION; \
+ else if (get_user(input, ptr)) \
+ __rc = EMU_ADDRESSING; \
+ else if (input > *(cmp)) \
+ psw_bits((regs)->psw).cc = 1; \
+ else if (input < *(cmp)) \
+ psw_bits((regs)->psw).cc = 2; \
+ else \
+ psw_bits((regs)->psw).cc = 0; \
+ __rc; \
+})
+
+struct insn_ril {
+ u8 opc0;
+ u8 reg : 4;
+ u8 opc1 : 4;
+ s32 disp;
+} __packed;
+
+union split_register {
+ u64 u64;
+ u32 u32[2];
+ u16 u16[4];
+ s64 s64;
+ s32 s32[2];
+ s16 s16[4];
+};
+
+/*
+ * pc relative instructions are emulated, since parameters may not be
+ * accessible from the xol area due to range limitations.
+ */
+static void handle_insn_ril(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ union split_register *rx;
+ struct insn_ril *insn;
+ unsigned int ilen;
+ void *uptr;
+ int rc = 0;
+
+ insn = (struct insn_ril *) &auprobe->insn;
+ rx = (union split_register *) &regs->gprs[insn->reg];
+ uptr = (void *)(regs->psw.addr + (insn->disp * 2));
+ ilen = insn_length(insn->opc0);
+
+ switch (insn->opc0) {
+ case 0xc0:
+ switch (insn->opc1) {
+ case 0x00: /* larl */
+ rx->u64 = (unsigned long)uptr;
+ break;
+ }
+ break;
+ case 0xc4:
+ switch (insn->opc1) {
+ case 0x02: /* llhrl */
+ rc = emu_load_ril((u16 __user *)uptr, &rx->u32[1]);
+ break;
+ case 0x04: /* lghrl */
+ rc = emu_load_ril((s16 __user *)uptr, &rx->u64);
+ break;
+ case 0x05: /* lhrl */
+ rc = emu_load_ril((s16 __user *)uptr, &rx->u32[1]);
+ break;
+ case 0x06: /* llghrl */
+ rc = emu_load_ril((u16 __user *)uptr, &rx->u64);
+ break;
+ case 0x08: /* lgrl */
+ rc = emu_load_ril((u64 __user *)uptr, &rx->u64);
+ break;
+ case 0x0c: /* lgfrl */
+ rc = emu_load_ril((s32 __user *)uptr, &rx->u64);
+ break;
+ case 0x0d: /* lrl */
+ rc = emu_load_ril((u32 __user *)uptr, &rx->u32[1]);
+ break;
+ case 0x0e: /* llgfrl */
+ rc = emu_load_ril((u32 __user *)uptr, &rx->u64);
+ break;
+ case 0x07: /* sthrl */
+ rc = emu_store_ril((u16 __user *)uptr, &rx->u16[3]);
+ break;
+ case 0x0b: /* stgrl */
+ rc = emu_store_ril((u64 __user *)uptr, &rx->u64);
+ break;
+ case 0x0f: /* strl */
+ rc = emu_store_ril((u32 __user *)uptr, &rx->u32[1]);
+ break;
+ }
+ break;
+ case 0xc6:
+ switch (insn->opc1) {
+ case 0x02: /* pfdrl */
+ if (!test_facility(34))
+ rc = EMU_ILLEGAL_OP;
+ break;
+ case 0x04: /* cghrl */
+ rc = emu_cmp_ril(regs, (s16 __user *)uptr, &rx->s64);
+ break;
+ case 0x05: /* chrl */
+ rc = emu_cmp_ril(regs, (s16 __user *)uptr, &rx->s32[1]);
+ break;
+ case 0x06: /* clghrl */
+ rc = emu_cmp_ril(regs, (u16 __user *)uptr, &rx->u64);
+ break;
+ case 0x07: /* clhrl */
+ rc = emu_cmp_ril(regs, (u16 __user *)uptr, &rx->u32[1]);
+ break;
+ case 0x08: /* cgrl */
+ rc = emu_cmp_ril(regs, (s64 __user *)uptr, &rx->s64);
+ break;
+ case 0x0a: /* clgrl */
+ rc = emu_cmp_ril(regs, (u64 __user *)uptr, &rx->u64);
+ break;
+ case 0x0c: /* cgfrl */
+ rc = emu_cmp_ril(regs, (s32 __user *)uptr, &rx->s64);
+ break;
+ case 0x0d: /* crl */
+ rc = emu_cmp_ril(regs, (s32 __user *)uptr, &rx->s32[1]);
+ break;
+ case 0x0e: /* clgfrl */
+ rc = emu_cmp_ril(regs, (u32 __user *)uptr, &rx->u64);
+ break;
+ case 0x0f: /* clrl */
+ rc = emu_cmp_ril(regs, (u32 __user *)uptr, &rx->u32[1]);
+ break;
+ }
+ break;
+ }
+ adjust_psw_addr(&regs->psw, ilen);
+ switch (rc) {
+ case EMU_ILLEGAL_OP:
+ regs->int_code = ilen << 16 | 0x0001;
+ do_report_trap(regs, SIGILL, ILL_ILLOPC, NULL);
+ break;
+ case EMU_SPECIFICATION:
+ regs->int_code = ilen << 16 | 0x0006;
+		do_report_trap(regs, SIGILL, ILL_ILLOPC, NULL);
+ break;
+ case EMU_ADDRESSING:
+ regs->int_code = ilen << 16 | 0x0005;
+ do_report_trap(regs, SIGSEGV, SEGV_MAPERR, NULL);
+ break;
+ }
+}
+
+bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ if ((psw_bits(regs->psw).eaba == PSW_AMODE_24BIT) ||
+ ((psw_bits(regs->psw).eaba == PSW_AMODE_31BIT) &&
+ !is_compat_task())) {
+ regs->psw.addr = __rewind_psw(regs->psw, UPROBE_SWBP_INSN_SIZE);
+ do_report_trap(regs, SIGILL, ILL_ILLADR, NULL);
+ return true;
+ }
+ if (probe_is_insn_relative_long(auprobe->insn)) {
+ handle_insn_ril(auprobe, regs);
+ return true;
+ }
+ return false;
+}
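
handle_insn_ril() above emulates RIL-format instructions because their PC-relative operands are unlikely to stay within reach once the instruction is copied to the xol slot. The format packs an 8-bit opcode, a 4-bit register, a 4-bit opcode extension and a signed 32-bit halfword displacement. A small host-side decoder sketch that mirrors that layout with plain shifts; the sample encoding is written out by hand as an assumption, not taken from an assembler:

    #include <stdio.h>
    #include <stdint.h>

    /* Decode a RIL-format instruction: 8-bit opcode, 4-bit register,
     * 4-bit opcode extension, signed 32-bit halfword displacement. */
    static void decode_ril(const unsigned char insn[6], uint64_t pc)
    {
        unsigned int opc0 = insn[0];
        unsigned int reg  = insn[1] >> 4;
        unsigned int opc1 = insn[1] & 0x0f;
        int32_t disp = (int32_t)((uint32_t) insn[2] << 24 | (uint32_t) insn[3] << 16 |
                                 (uint32_t) insn[4] << 8  | (uint32_t) insn[5]);

        /* The displacement counts halfwords, hence the * 2. */
        printf("opc0=%#x opc1=%#x reg=%u target=%#llx\n", opc0, opc1, reg,
               (unsigned long long)(pc + (int64_t) disp * 2));
    }

    int main(void)
    {
        /* larl %r1,.+0x1000  ==>  c0 10 00 00 08 00 (assumed encoding) */
        const unsigned char larl[6] = { 0xc0, 0x10, 0x00, 0x00, 0x08, 0x00 };

        decode_ril(larl, 0x400000);
        return 0;
    }

Decoding with shifts rather than the bit-field struct from the patch keeps the sketch independent of host endianness.
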
diff --git a/arch/s390/kernel/vdso32/clock_getres.S b/arch/s390/kernel/vdso32/clock_getres.S
index 36aaa25d05da..eca3f001f081 100644
--- a/arch/s390/kernel/vdso32/clock_getres.S
+++ b/arch/s390/kernel/vdso32/clock_getres.S
@@ -19,14 +19,20 @@
.type __kernel_clock_getres,@function
__kernel_clock_getres:
.cfi_startproc
+ basr %r1,0
+ la %r1,4f-.(%r1)
chi %r2,__CLOCK_REALTIME
je 0f
chi %r2,__CLOCK_MONOTONIC
+ je 0f
+ la %r1,5f-4f(%r1)
+ chi %r2,__CLOCK_REALTIME_COARSE
+ je 0f
+ chi %r2,__CLOCK_MONOTONIC_COARSE
jne 3f
0: ltr %r3,%r3
jz 2f /* res == NULL */
- basr %r1,0
-1: l %r0,4f-1b(%r1)
+1: l %r0,0(%r1)
xc 0(4,%r3),0(%r3) /* set tp->tv_sec to zero */
st %r0,4(%r3) /* store tp->tv_usec */
2: lhi %r2,0
@@ -35,5 +41,6 @@ __kernel_clock_getres:
svc 0
br %r14
4: .long __CLOCK_REALTIME_RES
+5: .long __CLOCK_COARSE_RES
.cfi_endproc
.size __kernel_clock_getres,.-__kernel_clock_getres
diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S
index 7cf18f8d4cb4..48c2206a3956 100644
--- a/arch/s390/kernel/vdso32/clock_gettime.S
+++ b/arch/s390/kernel/vdso32/clock_gettime.S
@@ -21,8 +21,12 @@ __kernel_clock_gettime:
.cfi_startproc
basr %r5,0
0: al %r5,21f-0b(%r5) /* get &_vdso_data */
+ chi %r2,__CLOCK_REALTIME_COARSE
+ je 10f
chi %r2,__CLOCK_REALTIME
je 11f
+ chi %r2,__CLOCK_MONOTONIC_COARSE
+ je 9f
chi %r2,__CLOCK_MONOTONIC
jne 19f
@@ -30,8 +34,8 @@ __kernel_clock_gettime:
1: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
tml %r4,0x0001 /* pending update ? loop */
jnz 1b
- stck 24(%r15) /* Store TOD clock */
- lm %r0,%r1,24(%r15)
+ stcke 24(%r15) /* Store TOD clock */
+ lm %r0,%r1,25(%r15)
s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
sl %r1,__VDSO_XTIME_STAMP+4(%r5)
brc 3,2f
@@ -68,12 +72,32 @@ __kernel_clock_gettime:
lhi %r2,0
br %r14
+ /* CLOCK_MONOTONIC_COARSE */
+9: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
+ tml %r4,0x0001 /* pending update ? loop */
+ jnz 9b
+ l %r2,__VDSO_WTOM_CRS_SEC+4(%r5)
+ l %r1,__VDSO_WTOM_CRS_NSEC+4(%r5)
+ cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
+ jne 9b
+ j 8b
+
+ /* CLOCK_REALTIME_COARSE */
+10: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
+ tml %r4,0x0001 /* pending update ? loop */
+ jnz 10b
+ l %r2,__VDSO_XTIME_CRS_SEC+4(%r5)
+ l %r1,__VDSO_XTIME_CRS_NSEC+4(%r5)
+ cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
+ jne 10b
+ j 17f
+
/* CLOCK_REALTIME */
11: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
tml %r4,0x0001 /* pending update ? loop */
jnz 11b
- stck 24(%r15) /* Store TOD clock */
- lm %r0,%r1,24(%r15)
+ stcke 24(%r15) /* Store TOD clock */
+ lm %r0,%r1,25(%r15)
s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
sl %r1,__VDSO_XTIME_STAMP+4(%r5)
brc 3,12f
diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S
index fd621a950f7c..60def5f562db 100644
--- a/arch/s390/kernel/vdso32/gettimeofday.S
+++ b/arch/s390/kernel/vdso32/gettimeofday.S
@@ -29,8 +29,8 @@ __kernel_gettimeofday:
l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
tml %r4,0x0001 /* pending update ? loop */
jnz 1b
- stck 24(%r15) /* Store TOD clock */
- lm %r0,%r1,24(%r15)
+ stcke 24(%r15) /* Store TOD clock */
+ lm %r0,%r1,25(%r15)
s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
sl %r1,__VDSO_XTIME_STAMP+4(%r5)
brc 3,3f
diff --git a/arch/s390/kernel/vdso64/clock_getres.S b/arch/s390/kernel/vdso64/clock_getres.S
index 34deba7c7ed1..c8513deb8c66 100644
--- a/arch/s390/kernel/vdso64/clock_getres.S
+++ b/arch/s390/kernel/vdso64/clock_getres.S
@@ -19,6 +19,12 @@
.type __kernel_clock_getres,@function
__kernel_clock_getres:
.cfi_startproc
+ larl %r1,4f
+ cghi %r2,__CLOCK_REALTIME_COARSE
+ je 0f
+ cghi %r2,__CLOCK_MONOTONIC_COARSE
+ je 0f
+ larl %r1,3f
cghi %r2,__CLOCK_REALTIME
je 0f
cghi %r2,__CLOCK_MONOTONIC
@@ -32,7 +38,6 @@ __kernel_clock_getres:
jz 2f
0: ltgr %r3,%r3
jz 1f /* res == NULL */
- larl %r1,3f
lg %r0,0(%r1)
xc 0(8,%r3),0(%r3) /* set tp->tv_sec to zero */
stg %r0,8(%r3) /* store tp->tv_usec */
@@ -42,5 +47,6 @@ __kernel_clock_getres:
svc 0
br %r14
3: .quad __CLOCK_REALTIME_RES
+4: .quad __CLOCK_COARSE_RES
.cfi_endproc
.size __kernel_clock_getres,.-__kernel_clock_getres
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index 3f34e09db5f4..9d9761f8e110 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -20,12 +20,16 @@
__kernel_clock_gettime:
.cfi_startproc
larl %r5,_vdso_data
+ cghi %r2,__CLOCK_REALTIME_COARSE
+ je 4f
cghi %r2,__CLOCK_REALTIME
je 5f
cghi %r2,__CLOCK_THREAD_CPUTIME_ID
je 9f
cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */
je 9f
+ cghi %r2,__CLOCK_MONOTONIC_COARSE
+ je 3f
cghi %r2,__CLOCK_MONOTONIC
jne 12f
@@ -33,10 +37,10 @@ __kernel_clock_gettime:
0: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
tmll %r4,0x0001 /* pending update ? loop */
jnz 0b
- stck 48(%r15) /* Store TOD clock */
+ stcke 48(%r15) /* Store TOD clock */
lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
lg %r0,__VDSO_WTOM_SEC(%r5)
- lg %r1,48(%r15)
+ lg %r1,49(%r15)
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
alg %r1,__VDSO_WTOM_NSEC(%r5)
@@ -54,13 +58,33 @@ __kernel_clock_gettime:
lghi %r2,0
br %r14
+ /* CLOCK_MONOTONIC_COARSE */
+3: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
+ tmll %r4,0x0001 /* pending update ? loop */
+ jnz 3b
+ lg %r0,__VDSO_WTOM_CRS_SEC(%r5)
+ lg %r1,__VDSO_WTOM_CRS_NSEC(%r5)
+ clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
+ jne 3b
+ j 2b
+
+ /* CLOCK_REALTIME_COARSE */
+4: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
+ tmll %r4,0x0001 /* pending update ? loop */
+ jnz 4b
+ lg %r0,__VDSO_XTIME_CRS_SEC(%r5)
+ lg %r1,__VDSO_XTIME_CRS_NSEC(%r5)
+ clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
+ jne 4b
+ j 7f
+
/* CLOCK_REALTIME */
5: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
tmll %r4,0x0001 /* pending update ? loop */
jnz 5b
- stck 48(%r15) /* Store TOD clock */
+ stcke 48(%r15) /* Store TOD clock */
lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
- lg %r1,48(%r15)
+ lg %r1,49(%r15)
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
diff --git a/arch/s390/kernel/vdso64/gettimeofday.S b/arch/s390/kernel/vdso64/gettimeofday.S
index d0860d1d0ccc..7a344995a97f 100644
--- a/arch/s390/kernel/vdso64/gettimeofday.S
+++ b/arch/s390/kernel/vdso64/gettimeofday.S
@@ -28,8 +28,8 @@ __kernel_gettimeofday:
lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
tmll %r4,0x0001 /* pending update ? loop */
jnz 0b
- stck 48(%r15) /* Store TOD clock */
- lg %r1,48(%r15)
+ stcke 48(%r15) /* Store TOD clock */
+ lg %r1,49(%r15)
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 8c34363d6f1e..416f2a323ba5 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -6,27 +6,18 @@
*/
#include <linux/kernel_stat.h>
-#include <linux/notifier.h>
-#include <linux/kprobes.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/timex.h>
#include <linux/types.h>
#include <linux/time.h>
-#include <linux/cpu.h>
-#include <linux/smp.h>
-#include <asm/irq_regs.h>
#include <asm/cputime.h>
#include <asm/vtimer.h>
#include <asm/vtime.h>
-#include <asm/irq.h>
-#include "entry.h"
static void virt_timer_expire(void);
-DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
-
static LIST_HEAD(virt_timer_list);
static DEFINE_SPINLOCK(virt_timer_lock);
static atomic64_t virt_timer_current;
@@ -152,49 +143,6 @@ void vtime_account_system(struct task_struct *tsk)
__attribute__((alias("vtime_account_irq_enter")));
EXPORT_SYMBOL_GPL(vtime_account_system);
-void __kprobes vtime_stop_cpu(void)
-{
- struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
- unsigned long long idle_time;
- unsigned long psw_mask;
-
- trace_hardirqs_on();
-
- /* Wait for external, I/O or machine check interrupt. */
- psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT | PSW_MASK_DAT |
- PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
- idle->nohz_delay = 0;
-
- /* Call the assembler magic in entry.S */
- psw_idle(idle, psw_mask);
-
- /* Account time spent with enabled wait psw loaded as idle time. */
- idle->sequence++;
- smp_wmb();
- idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
- idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
- idle->idle_time += idle_time;
- idle->idle_count++;
- account_idle_time(idle_time);
- smp_wmb();
- idle->sequence++;
-}
-
-cputime64_t s390_get_idle_time(int cpu)
-{
- struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
- unsigned long long now, idle_enter, idle_exit;
- unsigned int sequence;
-
- do {
- now = get_tod_clock();
- sequence = ACCESS_ONCE(idle->sequence);
- idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
- idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
- } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
- return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0;
-}
-
/*
* Sorted add to a list. List is linear searched until first bigger
* element is found.
@@ -372,31 +320,8 @@ EXPORT_SYMBOL(del_virt_timer);
/*
* Start the virtual CPU timer on the current CPU.
*/
-void init_cpu_vtimer(void)
+void vtime_init(void)
{
/* set initial cpu timer */
set_vtimer(VTIMER_MAX_SLICE);
}
-
-static int s390_nohz_notify(struct notifier_block *self, unsigned long action,
- void *hcpu)
-{
- struct s390_idle_data *idle;
- long cpu = (long) hcpu;
-
- idle = &per_cpu(s390_idle, cpu);
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_DYING:
- idle->nohz_delay = 0;
- default:
- break;
- }
- return NOTIFY_OK;
-}
-
-void __init vtime_init(void)
-{
- /* Enable cpu timer interrupts on the boot cpu. */
- init_cpu_vtimer();
- cpu_notifier(s390_nohz_notify, 0);
-}
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index c6d752e8bf28..a01df233856f 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -6,3 +6,5 @@ lib-y += delay.o string.o uaccess.o find.o
obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o
obj-$(CONFIG_64BIT) += mem64.o
lib-$(CONFIG_SMP) += spinlock.o
+lib-$(CONFIG_KPROBES) += probes.o
+lib-$(CONFIG_UPROBES) += probes.o
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index a9f3d0042d58..16dc42d83f93 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -43,7 +43,7 @@ static void __udelay_disabled(unsigned long long usecs)
lockdep_off();
do {
set_clock_comparator(end);
- vtime_stop_cpu();
+ enabled_wait();
} while (get_tod_clock_fast() < end);
lockdep_on();
__ctl_load(cr0, 0, 0);
@@ -62,7 +62,7 @@ static void __udelay_enabled(unsigned long long usecs)
clock_saved = local_tick_disable();
set_clock_comparator(end);
}
- vtime_stop_cpu();
+ enabled_wait();
if (clock_saved)
local_tick_enable(clock_saved);
} while (get_tod_clock_fast() < end);
diff --git a/arch/s390/lib/probes.c b/arch/s390/lib/probes.c
new file mode 100644
index 000000000000..c5d64a099719
--- /dev/null
+++ b/arch/s390/lib/probes.c
@@ -0,0 +1,159 @@
+/*
+ * Common helper functions for kprobes and uprobes
+ *
+ * Copyright IBM Corp. 2014
+ */
+
+#include <linux/kprobes.h>
+#include <asm/dis.h>
+
+int probe_is_prohibited_opcode(u16 *insn)
+{
+ if (!is_known_insn((unsigned char *)insn))
+ return -EINVAL;
+ switch (insn[0] >> 8) {
+ case 0x0c: /* bassm */
+ case 0x0b: /* bsm */
+ case 0x83: /* diag */
+ case 0x44: /* ex */
+ case 0xac: /* stnsm */
+ case 0xad: /* stosm */
+ return -EINVAL;
+ case 0xc6:
+ switch (insn[0] & 0x0f) {
+ case 0x00: /* exrl */
+ return -EINVAL;
+ }
+ }
+ switch (insn[0]) {
+ case 0x0101: /* pr */
+ case 0xb25a: /* bsa */
+ case 0xb240: /* bakr */
+ case 0xb258: /* bsg */
+ case 0xb218: /* pc */
+ case 0xb228: /* pt */
+ case 0xb98d: /* epsw */
+ case 0xe560: /* tbegin */
+ case 0xe561: /* tbeginc */
+ case 0xb2f8: /* tend */
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int probe_get_fixup_type(u16 *insn)
+{
+ /* default fixup method */
+ int fixup = FIXUP_PSW_NORMAL;
+
+ switch (insn[0] >> 8) {
+ case 0x05: /* balr */
+ case 0x0d: /* basr */
+ fixup = FIXUP_RETURN_REGISTER;
+ /* if r2 = 0, no branch will be taken */
+ if ((insn[0] & 0x0f) == 0)
+ fixup |= FIXUP_BRANCH_NOT_TAKEN;
+ break;
+ case 0x06: /* bctr */
+ case 0x07: /* bcr */
+ fixup = FIXUP_BRANCH_NOT_TAKEN;
+ break;
+ case 0x45: /* bal */
+ case 0x4d: /* bas */
+ fixup = FIXUP_RETURN_REGISTER;
+ break;
+ case 0x47: /* bc */
+ case 0x46: /* bct */
+ case 0x86: /* bxh */
+ case 0x87: /* bxle */
+ fixup = FIXUP_BRANCH_NOT_TAKEN;
+ break;
+ case 0x82: /* lpsw */
+ fixup = FIXUP_NOT_REQUIRED;
+ break;
+ case 0xb2: /* lpswe */
+ if ((insn[0] & 0xff) == 0xb2)
+ fixup = FIXUP_NOT_REQUIRED;
+ break;
+ case 0xa7: /* bras */
+ if ((insn[0] & 0x0f) == 0x05)
+ fixup |= FIXUP_RETURN_REGISTER;
+ break;
+ case 0xc0:
+ if ((insn[0] & 0x0f) == 0x05) /* brasl */
+ fixup |= FIXUP_RETURN_REGISTER;
+ break;
+ case 0xeb:
+ switch (insn[2] & 0xff) {
+ case 0x44: /* bxhg */
+ case 0x45: /* bxleg */
+ fixup = FIXUP_BRANCH_NOT_TAKEN;
+ break;
+ }
+ break;
+ case 0xe3: /* bctg */
+ if ((insn[2] & 0xff) == 0x46)
+ fixup = FIXUP_BRANCH_NOT_TAKEN;
+ break;
+ case 0xec:
+ switch (insn[2] & 0xff) {
+ case 0xe5: /* clgrb */
+ case 0xe6: /* cgrb */
+ case 0xf6: /* crb */
+ case 0xf7: /* clrb */
+ case 0xfc: /* cgib */
+ case 0xfd: /* cglib */
+ case 0xfe: /* cib */
+ case 0xff: /* clib */
+ fixup = FIXUP_BRANCH_NOT_TAKEN;
+ break;
+ }
+ break;
+ }
+ return fixup;
+}
+
+int probe_is_insn_relative_long(u16 *insn)
+{
+ /* Check if we have a RIL-b or RIL-c format instruction which
+ * we need to modify in order to avoid instruction emulation. */
+ switch (insn[0] >> 8) {
+ case 0xc0:
+ if ((insn[0] & 0x0f) == 0x00) /* larl */
+ return true;
+ break;
+ case 0xc4:
+ switch (insn[0] & 0x0f) {
+ case 0x02: /* llhrl */
+ case 0x04: /* lghrl */
+ case 0x05: /* lhrl */
+ case 0x06: /* llghrl */
+ case 0x07: /* sthrl */
+ case 0x08: /* lgrl */
+ case 0x0b: /* stgrl */
+ case 0x0c: /* lgfrl */
+ case 0x0d: /* lrl */
+ case 0x0e: /* llgfrl */
+ case 0x0f: /* strl */
+ return true;
+ }
+ break;
+ case 0xc6:
+ switch (insn[0] & 0x0f) {
+ case 0x02: /* pfdrl */
+ case 0x04: /* cghrl */
+ case 0x05: /* chrl */
+ case 0x06: /* clghrl */
+ case 0x07: /* clhrl */
+ case 0x08: /* cgrl */
+ case 0x0a: /* clgrl */
+ case 0x0c: /* cgfrl */
+ case 0x0d: /* crl */
+ case 0x0e: /* clgfrl */
+ case 0x0f: /* clrl */
+ return true;
+ }
+ break;
+ }
+ return false;
+}
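/*
 * Illustrative sketch, not part of the patch: how a kprobes/uprobes setup
 * path might combine the three helpers above.  check_probe_point() is a
 * hypothetical caller; the real users live in the s390 kprobes/uprobes code.
 */
static int check_probe_point(u16 *insn, int *fixup, bool *is_ril)
{
	if (probe_is_prohibited_opcode(insn))
		return -EINVAL;		/* opcode must not be single-stepped */
	/* PC-relative (RIL-b/RIL-c) instructions need their displacement
	 * rewritten before they can be executed out of line. */
	*is_ril = probe_is_insn_relative_long(insn);
	/* How to fix up the PSW after single-stepping out of line. */
	*fixup = probe_get_fixup_type(insn);
	return 0;
}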
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 5b0e445bc3f3..034a35a3e9c1 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -98,17 +98,6 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);
-void arch_spin_relax(arch_spinlock_t *lp)
-{
- unsigned int cpu = lp->lock;
- if (cpu != 0) {
- if (MACHINE_IS_VM || MACHINE_IS_KVM ||
- !smp_vcpu_scheduled(~cpu))
- smp_yield_cpu(~cpu);
- }
-}
-EXPORT_SYMBOL(arch_spin_relax);
-
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
int count;
@@ -122,15 +111,21 @@ EXPORT_SYMBOL(arch_spin_trylock_retry);
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
- unsigned int old;
+ unsigned int owner, old;
int count = spin_retry;
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+ __RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
+#endif
+ owner = 0;
while (1) {
if (count-- <= 0) {
- smp_yield();
+ if (owner && !smp_vcpu_scheduled(~owner))
+ smp_yield_cpu(~owner);
count = spin_retry;
}
old = ACCESS_ONCE(rw->lock);
+ owner = ACCESS_ONCE(rw->owner);
if ((int) old < 0)
continue;
if (_raw_compare_and_swap(&rw->lock, old, old + 1))
@@ -139,28 +134,6 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
}
EXPORT_SYMBOL(_raw_read_lock_wait);
-void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
-{
- unsigned int old;
- int count = spin_retry;
-
- local_irq_restore(flags);
- while (1) {
- if (count-- <= 0) {
- smp_yield();
- count = spin_retry;
- }
- old = ACCESS_ONCE(rw->lock);
- if ((int) old < 0)
- continue;
- local_irq_disable();
- if (_raw_compare_and_swap(&rw->lock, old, old + 1))
- return;
- local_irq_restore(flags);
- }
-}
-EXPORT_SYMBOL(_raw_read_lock_wait_flags);
-
int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
unsigned int old;
@@ -177,46 +150,62 @@ int _raw_read_trylock_retry(arch_rwlock_t *rw)
}
EXPORT_SYMBOL(_raw_read_trylock_retry);
-void _raw_write_lock_wait(arch_rwlock_t *rw)
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
{
- unsigned int old;
+ unsigned int owner, old;
int count = spin_retry;
+ owner = 0;
while (1) {
if (count-- <= 0) {
- smp_yield();
+ if (owner && !smp_vcpu_scheduled(~owner))
+ smp_yield_cpu(~owner);
count = spin_retry;
}
old = ACCESS_ONCE(rw->lock);
- if (old)
- continue;
- if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
- return;
+ owner = ACCESS_ONCE(rw->owner);
+ smp_rmb();
+ if ((int) old >= 0) {
+ prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
+ old = prev;
+ }
+ if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
+ break;
}
}
EXPORT_SYMBOL(_raw_write_lock_wait);
-void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+void _raw_write_lock_wait(arch_rwlock_t *rw)
{
- unsigned int old;
+ unsigned int owner, old, prev;
int count = spin_retry;
- local_irq_restore(flags);
+ prev = 0x80000000;
+ owner = 0;
while (1) {
if (count-- <= 0) {
- smp_yield();
+ if (owner && !smp_vcpu_scheduled(~owner))
+ smp_yield_cpu(~owner);
count = spin_retry;
}
old = ACCESS_ONCE(rw->lock);
- if (old)
- continue;
- local_irq_disable();
- if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
- return;
- local_irq_restore(flags);
+ owner = ACCESS_ONCE(rw->owner);
+ if ((int) old >= 0 &&
+ _raw_compare_and_swap(&rw->lock, old, old | 0x80000000))
+ prev = old;
+ else
+ smp_rmb();
+ if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
+ break;
}
}
-EXPORT_SYMBOL(_raw_write_lock_wait_flags);
+EXPORT_SYMBOL(_raw_write_lock_wait);
+
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
@@ -233,3 +222,13 @@ int _raw_write_trylock_retry(arch_rwlock_t *rw)
return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);
+
+void arch_lock_relax(unsigned int cpu)
+{
+ if (!cpu)
+ return;
+ if (MACHINE_IS_LPAR && smp_vcpu_scheduled(~cpu))
+ return;
+ smp_yield_cpu(~cpu);
+}
+EXPORT_SYMBOL(arch_lock_relax);
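/*
 * Illustrative note, not part of the patch: both the rwlock slow paths and
 * arch_lock_relax() treat the stored owner value as the bitwise complement
 * of the holding CPU, so 0 means "unowned" and ~owner recovers the CPU
 * number (CPU 0 encodes to 0xffffffff).  A hypothetical helper showing the
 * directed-yield decision used above:
 */
static inline void yield_to_owner(unsigned int owner)
{
	if (!owner)
		return;				/* nobody holds the lock */
	if (!smp_vcpu_scheduled(~owner))	/* holder's vcpu is preempted */
		smp_yield_cpu(~owner);		/* hand our timeslice to it */
}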
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index 46d517c3c763..d46cadeda204 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -54,7 +54,6 @@ static void print_prot(struct seq_file *m, unsigned int pr, int level)
return;
}
seq_printf(m, "%s", pr & _PAGE_PROTECT ? "RO " : "RW ");
- seq_printf(m, "%s", pr & _PAGE_CO ? "CO " : " ");
seq_putc(m, '\n');
}
@@ -129,7 +128,7 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st,
}
#ifdef CONFIG_64BIT
-#define _PMD_PROT_MASK (_SEGMENT_ENTRY_PROTECT | _SEGMENT_ENTRY_CO)
+#define _PMD_PROT_MASK _SEGMENT_ENTRY_PROTECT
#else
#define _PMD_PROT_MASK 0
#endif
@@ -157,7 +156,7 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
}
#ifdef CONFIG_64BIT
-#define _PUD_PROT_MASK (_REGION3_ENTRY_RO | _REGION3_ENTRY_CO)
+#define _PUD_PROT_MASK _REGION3_ENTRY_RO
#else
#define _PUD_PROT_MASK 0
#endif
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 389bc17934b7..3c80d2e38f03 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -88,7 +88,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
pmd_val(pmd) |= pte_page(pte)[1].index;
} else
- pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO;
+ pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
*(pmd_t *) ptep = pmd;
}
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 8400f494623f..3fef3b299665 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -6,6 +6,7 @@
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
+#include <asm/facility.h>
#include <asm/pgtable.h>
#include <asm/page.h>
@@ -103,27 +104,50 @@ int set_memory_x(unsigned long addr, int numpages)
}
#ifdef CONFIG_DEBUG_PAGEALLOC
+
+static void ipte_range(pte_t *pte, unsigned long address, int nr)
+{
+ int i;
+
+ if (test_facility(13) && IS_ENABLED(CONFIG_64BIT)) {
+ __ptep_ipte_range(address, nr - 1, pte);
+ return;
+ }
+ for (i = 0; i < nr; i++) {
+ __ptep_ipte(address, pte);
+ address += PAGE_SIZE;
+ pte++;
+ }
+}
+
void kernel_map_pages(struct page *page, int numpages, int enable)
{
unsigned long address;
+ int nr, i, j;
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
- int i;
- for (i = 0; i < numpages; i++) {
+ for (i = 0; i < numpages;) {
address = page_to_phys(page + i);
pgd = pgd_offset_k(address);
pud = pud_offset(pgd, address);
pmd = pmd_offset(pud, address);
pte = pte_offset_kernel(pmd, address);
- if (!enable) {
- __ptep_ipte(address, pte);
- pte_val(*pte) = _PAGE_INVALID;
- continue;
+ nr = (unsigned long)pte >> ilog2(sizeof(long));
+ nr = PTRS_PER_PTE - (nr & (PTRS_PER_PTE - 1));
+ nr = min(numpages - i, nr);
+ if (enable) {
+ for (j = 0; j < nr; j++) {
+ pte_val(*pte) = __pa(address);
+ address += PAGE_SIZE;
+ pte++;
+ }
+ } else {
+ ipte_range(pte, address, nr);
}
- pte_val(*pte) = __pa(address);
+ i += nr;
}
}
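/*
 * Illustrative sketch, not part of the patch: the new batching in
 * kernel_map_pages() derives, from the pte pointer itself, how many
 * consecutive entries remain in the current page table, so that
 * ipte_range() can invalidate the whole run at once when the facility
 * check permits __ptep_ipte_range().  The arithmetic in isolation:
 */
static int ptes_left_in_table(pte_t *pte, int pages_remaining)
{
	/* index of this entry within its pte table */
	unsigned long idx = ((unsigned long)pte >> ilog2(sizeof(long))) &
			    (PTRS_PER_PTE - 1);
	int nr = PTRS_PER_PTE - idx;	/* entries up to the table's end */

	return min(pages_remaining, nr);	/* never cross into the next table */
}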
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index fdbd7888cb07..b1593c2f751a 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -236,8 +236,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
if (!new_page)
goto out;
pmd_val(*pm_dir) = __pa(new_page) |
- _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
- _SEGMENT_ENTRY_CO;
+ _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE;
address = (address + PMD_SIZE) & PMD_MASK;
continue;
}
@@ -253,9 +252,9 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
pt_dir = pte_offset_kernel(pm_dir, address);
if (pte_none(*pt_dir)) {
- unsigned long new_page;
+ void *new_page;
- new_page =__pa(vmem_alloc_pages(0));
+ new_page = vmemmap_alloc_block(PAGE_SIZE, node);
if (!new_page)
goto out;
pte_val(*pt_dir) =
@@ -263,7 +262,6 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
}
address += PAGE_SIZE;
}
- memset((void *)start, 0, end - start);
ret = 0;
out:
return ret;
diff --git a/arch/sh/include/asm/atomic-grb.h b/arch/sh/include/asm/atomic-grb.h
index a273c88578fc..97a5fda83450 100644
--- a/arch/sh/include/asm/atomic-grb.h
+++ b/arch/sh/include/asm/atomic-grb.h
@@ -1,85 +1,56 @@
#ifndef __ASM_SH_ATOMIC_GRB_H
#define __ASM_SH_ATOMIC_GRB_H
-static inline void atomic_add(int i, atomic_t *v)
-{
- int tmp;
-
- __asm__ __volatile__ (
- " .align 2 \n\t"
- " mova 1f, r0 \n\t" /* r0 = end point */
- " mov r15, r1 \n\t" /* r1 = saved sp */
- " mov #-6, r15 \n\t" /* LOGIN: r15 = size */
- " mov.l @%1, %0 \n\t" /* load old value */
- " add %2, %0 \n\t" /* add */
- " mov.l %0, @%1 \n\t" /* store new value */
- "1: mov r1, r15 \n\t" /* LOGOUT */
- : "=&r" (tmp),
- "+r" (v)
- : "r" (i)
- : "memory" , "r0", "r1");
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
- int tmp;
-
- __asm__ __volatile__ (
- " .align 2 \n\t"
- " mova 1f, r0 \n\t" /* r0 = end point */
- " mov r15, r1 \n\t" /* r1 = saved sp */
- " mov #-6, r15 \n\t" /* LOGIN: r15 = size */
- " mov.l @%1, %0 \n\t" /* load old value */
- " sub %2, %0 \n\t" /* sub */
- " mov.l %0, @%1 \n\t" /* store new value */
- "1: mov r1, r15 \n\t" /* LOGOUT */
- : "=&r" (tmp),
- "+r" (v)
- : "r" (i)
- : "memory" , "r0", "r1");
-}
-
-static inline int atomic_add_return(int i, atomic_t *v)
-{
- int tmp;
+#define ATOMIC_OP(op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ int tmp; \
+ \
+ __asm__ __volatile__ ( \
+ " .align 2 \n\t" \
+ " mova 1f, r0 \n\t" /* r0 = end point */ \
+ " mov r15, r1 \n\t" /* r1 = saved sp */ \
+ " mov #-6, r15 \n\t" /* LOGIN: r15 = size */ \
+ " mov.l @%1, %0 \n\t" /* load old value */ \
+ " " #op " %2, %0 \n\t" /* $op */ \
+ " mov.l %0, @%1 \n\t" /* store new value */ \
+ "1: mov r1, r15 \n\t" /* LOGOUT */ \
+ : "=&r" (tmp), \
+ "+r" (v) \
+ : "r" (i) \
+ : "memory" , "r0", "r1"); \
+} \
- __asm__ __volatile__ (
- " .align 2 \n\t"
- " mova 1f, r0 \n\t" /* r0 = end point */
- " mov r15, r1 \n\t" /* r1 = saved sp */
- " mov #-6, r15 \n\t" /* LOGIN: r15 = size */
- " mov.l @%1, %0 \n\t" /* load old value */
- " add %2, %0 \n\t" /* add */
- " mov.l %0, @%1 \n\t" /* store new value */
- "1: mov r1, r15 \n\t" /* LOGOUT */
- : "=&r" (tmp),
- "+r" (v)
- : "r" (i)
- : "memory" , "r0", "r1");
-
- return tmp;
+#define ATOMIC_OP_RETURN(op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ int tmp; \
+ \
+ __asm__ __volatile__ ( \
+ " .align 2 \n\t" \
+ " mova 1f, r0 \n\t" /* r0 = end point */ \
+ " mov r15, r1 \n\t" /* r1 = saved sp */ \
+ " mov #-6, r15 \n\t" /* LOGIN: r15 = size */ \
+ " mov.l @%1, %0 \n\t" /* load old value */ \
+ " " #op " %2, %0 \n\t" /* $op */ \
+ " mov.l %0, @%1 \n\t" /* store new value */ \
+ "1: mov r1, r15 \n\t" /* LOGOUT */ \
+ : "=&r" (tmp), \
+ "+r" (v) \
+ : "r" (i) \
+ : "memory" , "r0", "r1"); \
+ \
+ return tmp; \
}
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
- int tmp;
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
- __asm__ __volatile__ (
- " .align 2 \n\t"
- " mova 1f, r0 \n\t" /* r0 = end point */
- " mov r15, r1 \n\t" /* r1 = saved sp */
- " mov #-6, r15 \n\t" /* LOGIN: r15 = size */
- " mov.l @%1, %0 \n\t" /* load old value */
- " sub %2, %0 \n\t" /* sub */
- " mov.l %0, @%1 \n\t" /* store new value */
- "1: mov r1, r15 \n\t" /* LOGOUT */
- : "=&r" (tmp),
- "+r" (v)
- : "r" (i)
- : "memory", "r0", "r1");
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
- return tmp;
-}
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
diff --git a/arch/sh/include/asm/atomic-irq.h b/arch/sh/include/asm/atomic-irq.h
index 9f7c56609e53..61d107523f06 100644
--- a/arch/sh/include/asm/atomic-irq.h
+++ b/arch/sh/include/asm/atomic-irq.h
@@ -8,49 +8,39 @@
* forward to code at the end of this object's .text section, then
* branch back to restart the operation.
*/
-static inline void atomic_add(int i, atomic_t *v)
-{
- unsigned long flags;
-
- raw_local_irq_save(flags);
- v->counter += i;
- raw_local_irq_restore(flags);
-}
-static inline void atomic_sub(int i, atomic_t *v)
-{
- unsigned long flags;
-
- raw_local_irq_save(flags);
- v->counter -= i;
- raw_local_irq_restore(flags);
+#define ATOMIC_OP(op, c_op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ v->counter c_op i; \
+ raw_local_irq_restore(flags); \
}
-static inline int atomic_add_return(int i, atomic_t *v)
-{
- unsigned long temp, flags;
-
- raw_local_irq_save(flags);
- temp = v->counter;
- temp += i;
- v->counter = temp;
- raw_local_irq_restore(flags);
-
- return temp;
+#define ATOMIC_OP_RETURN(op, c_op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ unsigned long temp, flags; \
+ \
+ raw_local_irq_save(flags); \
+ temp = v->counter; \
+ temp c_op i; \
+ v->counter = temp; \
+ raw_local_irq_restore(flags); \
+ \
+ return temp; \
}
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
- unsigned long temp, flags;
+#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)
- raw_local_irq_save(flags);
- temp = v->counter;
- temp -= i;
- v->counter = temp;
- raw_local_irq_restore(flags);
+ATOMIC_OPS(add, +=)
+ATOMIC_OPS(sub, -=)
- return temp;
-}
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
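/*
 * Illustrative expansion, not part of the patch: ATOMIC_OPS(add, +=) above
 * generates both variants that used to be open-coded.  After preprocessing
 * it is roughly equivalent to:
 */
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	v->counter += i;
	raw_local_irq_restore(flags);
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long temp, flags;

	raw_local_irq_save(flags);
	temp = v->counter;
	temp += i;
	v->counter = temp;
	raw_local_irq_restore(flags);

	return temp;
}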
diff --git a/arch/sh/include/asm/atomic-llsc.h b/arch/sh/include/asm/atomic-llsc.h
index 4b00b78e3f4f..8575dccb9ef7 100644
--- a/arch/sh/include/asm/atomic-llsc.h
+++ b/arch/sh/include/asm/atomic-llsc.h
@@ -2,39 +2,6 @@
#define __ASM_SH_ATOMIC_LLSC_H
/*
- * To get proper branch prediction for the main line, we must branch
- * forward to code at the end of this object's .text section, then
- * branch back to restart the operation.
- */
-static inline void atomic_add(int i, atomic_t *v)
-{
- unsigned long tmp;
-
- __asm__ __volatile__ (
-"1: movli.l @%2, %0 ! atomic_add \n"
-" add %1, %0 \n"
-" movco.l %0, @%2 \n"
-" bf 1b \n"
- : "=&z" (tmp)
- : "r" (i), "r" (&v->counter)
- : "t");
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
- unsigned long tmp;
-
- __asm__ __volatile__ (
-"1: movli.l @%2, %0 ! atomic_sub \n"
-" sub %1, %0 \n"
-" movco.l %0, @%2 \n"
-" bf 1b \n"
- : "=&z" (tmp)
- : "r" (i), "r" (&v->counter)
- : "t");
-}
-
-/*
* SH-4A note:
*
* We basically get atomic_xxx_return() for free compared with
@@ -42,39 +9,53 @@ static inline void atomic_sub(int i, atomic_t *v)
* encoding, so the retval is automatically set without having to
* do any special work.
*/
-static inline int atomic_add_return(int i, atomic_t *v)
-{
- unsigned long temp;
+/*
+ * To get proper branch prediction for the main line, we must branch
+ * forward to code at the end of this object's .text section, then
+ * branch back to restart the operation.
+ */
- __asm__ __volatile__ (
-"1: movli.l @%2, %0 ! atomic_add_return \n"
-" add %1, %0 \n"
-" movco.l %0, @%2 \n"
-" bf 1b \n"
-" synco \n"
- : "=&z" (temp)
- : "r" (i), "r" (&v->counter)
- : "t");
+#define ATOMIC_OP(op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ unsigned long tmp; \
+ \
+ __asm__ __volatile__ ( \
+"1: movli.l @%2, %0 ! atomic_" #op "\n" \
+" " #op " %1, %0 \n" \
+" movco.l %0, @%2 \n" \
+" bf 1b \n" \
+ : "=&z" (tmp) \
+ : "r" (i), "r" (&v->counter) \
+ : "t"); \
+}
- return temp;
+#define ATOMIC_OP_RETURN(op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ unsigned long temp; \
+ \
+ __asm__ __volatile__ ( \
+"1: movli.l @%2, %0 ! atomic_" #op "_return \n" \
+" " #op " %1, %0 \n" \
+" movco.l %0, @%2 \n" \
+" bf 1b \n" \
+" synco \n" \
+ : "=&z" (temp) \
+ : "r" (i), "r" (&v->counter) \
+ : "t"); \
+ \
+ return temp; \
}
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
- unsigned long temp;
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
- __asm__ __volatile__ (
-"1: movli.l @%2, %0 ! atomic_sub_return \n"
-" sub %1, %0 \n"
-" movco.l %0, @%2 \n"
-" bf 1b \n"
-" synco \n"
- : "=&z" (temp)
- : "r" (i), "r" (&v->counter)
- : "t");
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
- return temp;
-}
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h
index f57b8a6743b3..05b9f74ce2d5 100644
--- a/arch/sh/include/asm/atomic.h
+++ b/arch/sh/include/asm/atomic.h
@@ -14,7 +14,7 @@
#define ATOMIC_INIT(i) { (i) }
-#define atomic_read(v) (*(volatile int *)&(v)->counter)
+#define atomic_read(v) ACCESS_ONCE((v)->counter)
#define atomic_set(v,i) ((v)->counter = (i))
#if defined(CONFIG_GUSA_RB)
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index a537816613f9..96ac69c5eba0 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -67,6 +67,7 @@ config SPARC64
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_CONTEXT_TRACKING
select HAVE_DEBUG_KMEMLEAK
+ select SPARSE_IRQ
select RTC_DRV_CMOS
select RTC_DRV_BQ4802
select RTC_DRV_SUN4V
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index 7aed2be45b44..765c1776ec9f 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -20,23 +20,22 @@
#define ATOMIC_INIT(i) { (i) }
-int __atomic_add_return(int, atomic_t *);
+int atomic_add_return(int, atomic_t *);
int atomic_cmpxchg(atomic_t *, int, int);
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
int __atomic_add_unless(atomic_t *, int, int);
void atomic_set(atomic_t *, int);
-#define atomic_read(v) (*(volatile int *)&(v)->counter)
+#define atomic_read(v) ACCESS_ONCE((v)->counter)
-#define atomic_add(i, v) ((void)__atomic_add_return( (int)(i), (v)))
-#define atomic_sub(i, v) ((void)__atomic_add_return(-(int)(i), (v)))
-#define atomic_inc(v) ((void)__atomic_add_return( 1, (v)))
-#define atomic_dec(v) ((void)__atomic_add_return( -1, (v)))
+#define atomic_add(i, v) ((void)atomic_add_return( (int)(i), (v)))
+#define atomic_sub(i, v) ((void)atomic_add_return(-(int)(i), (v)))
+#define atomic_inc(v) ((void)atomic_add_return( 1, (v)))
+#define atomic_dec(v) ((void)atomic_add_return( -1, (v)))
-#define atomic_add_return(i, v) (__atomic_add_return( (int)(i), (v)))
-#define atomic_sub_return(i, v) (__atomic_add_return(-(int)(i), (v)))
-#define atomic_inc_return(v) (__atomic_add_return( 1, (v)))
-#define atomic_dec_return(v) (__atomic_add_return( -1, (v)))
+#define atomic_sub_return(i, v) (atomic_add_return(-(int)(i), (v)))
+#define atomic_inc_return(v) (atomic_add_return( 1, (v)))
+#define atomic_dec_return(v) (atomic_add_return( -1, (v)))
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index bb894c8bec56..4082749913ce 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -14,33 +14,34 @@
#define ATOMIC_INIT(i) { (i) }
#define ATOMIC64_INIT(i) { (i) }
-#define atomic_read(v) (*(volatile int *)&(v)->counter)
-#define atomic64_read(v) (*(volatile long *)&(v)->counter)
+#define atomic_read(v) ACCESS_ONCE((v)->counter)
+#define atomic64_read(v) ACCESS_ONCE((v)->counter)
#define atomic_set(v, i) (((v)->counter) = i)
#define atomic64_set(v, i) (((v)->counter) = i)
-void atomic_add(int, atomic_t *);
-void atomic64_add(long, atomic64_t *);
-void atomic_sub(int, atomic_t *);
-void atomic64_sub(long, atomic64_t *);
+#define ATOMIC_OP(op) \
+void atomic_##op(int, atomic_t *); \
+void atomic64_##op(long, atomic64_t *);
-int atomic_add_ret(int, atomic_t *);
-long atomic64_add_ret(long, atomic64_t *);
-int atomic_sub_ret(int, atomic_t *);
-long atomic64_sub_ret(long, atomic64_t *);
+#define ATOMIC_OP_RETURN(op) \
+int atomic_##op##_return(int, atomic_t *); \
+long atomic64_##op##_return(long, atomic64_t *);
-#define atomic_dec_return(v) atomic_sub_ret(1, v)
-#define atomic64_dec_return(v) atomic64_sub_ret(1, v)
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
-#define atomic_inc_return(v) atomic_add_ret(1, v)
-#define atomic64_inc_return(v) atomic64_add_ret(1, v)
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
-#define atomic_sub_return(i, v) atomic_sub_ret(i, v)
-#define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
-#define atomic_add_return(i, v) atomic_add_ret(i, v)
-#define atomic64_add_return(i, v) atomic64_add_ret(i, v)
+#define atomic_dec_return(v) atomic_sub_return(1, v)
+#define atomic64_dec_return(v) atomic64_sub_return(1, v)
+
+#define atomic_inc_return(v) atomic_add_return(1, v)
+#define atomic64_inc_return(v) atomic64_add_return(1, v)
/*
* atomic_inc_and_test - increment and test
@@ -53,11 +54,11 @@ long atomic64_sub_ret(long, atomic64_t *);
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-#define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
-#define atomic64_sub_and_test(i, v) (atomic64_sub_ret(i, v) == 0)
+#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+#define atomic64_sub_and_test(i, v) (atomic64_sub_return(i, v) == 0)
-#define atomic_dec_and_test(v) (atomic_sub_ret(1, v) == 0)
-#define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
+#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
+#define atomic64_dec_and_test(v) (atomic64_sub_return(1, v) == 0)
#define atomic_inc(v) atomic_add(1, v)
#define atomic64_inc(v) atomic64_add(1, v)
@@ -65,8 +66,8 @@ long atomic64_sub_ret(long, atomic64_t *);
#define atomic_dec(v) atomic_sub(1, v)
#define atomic64_dec(v) atomic64_sub(1, v)
-#define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
-#define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
+#define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
+#define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index 1ee02710b2dc..5b1b52a04ad6 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -20,10 +20,12 @@ extern struct bus_type pci_bus_type;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
-#if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
+#ifdef CONFIG_SPARC_LEON
if (sparc_cpu_model == sparc_leon)
return leon_dma_ops;
- else if (dev->bus == &pci_bus_type)
+#endif
+#if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
+ if (dev->bus == &pci_bus_type)
return &pci32_dma_ops;
#endif
return dma_ops;
diff --git a/arch/sparc/include/asm/hypervisor.h b/arch/sparc/include/asm/hypervisor.h
index 94b39caea3eb..4f6725ff4c33 100644
--- a/arch/sparc/include/asm/hypervisor.h
+++ b/arch/sparc/include/asm/hypervisor.h
@@ -2947,6 +2947,16 @@ unsigned long sun4v_vt_set_perfreg(unsigned long reg_num,
unsigned long reg_val);
#endif
+#define HV_FAST_T5_GET_PERFREG 0x1a8
+#define HV_FAST_T5_SET_PERFREG 0x1a9
+
+#ifndef __ASSEMBLY__
+unsigned long sun4v_t5_get_perfreg(unsigned long reg_num,
+ unsigned long *reg_val);
+unsigned long sun4v_t5_set_perfreg(unsigned long reg_num,
+ unsigned long reg_val);
+#endif
+
/* Function numbers for HV_CORE_TRAP. */
#define HV_CORE_SET_VER 0x00
#define HV_CORE_PUTCHAR 0x01
@@ -2978,6 +2988,7 @@ unsigned long sun4v_vt_set_perfreg(unsigned long reg_num,
#define HV_GRP_VF_CPU 0x0205
#define HV_GRP_KT_CPU 0x0209
#define HV_GRP_VT_CPU 0x020c
+#define HV_GRP_T5_CPU 0x0211
#define HV_GRP_DIAG 0x0300
#ifndef __ASSEMBLY__
diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h
index 91d219381306..3f70f900e834 100644
--- a/arch/sparc/include/asm/irq_64.h
+++ b/arch/sparc/include/asm/irq_64.h
@@ -37,7 +37,7 @@
*
* ino_bucket->irq allocation is made during {sun4v_,}build_irq().
*/
-#define NR_IRQS 255
+#define NR_IRQS (2048)
void irq_install_pre_handler(int irq,
void (*func)(unsigned int, void *, void *),
@@ -57,11 +57,8 @@ unsigned int sun4u_build_msi(u32 portid, unsigned int *irq_p,
unsigned long iclr_base);
void sun4u_destroy_msi(unsigned int irq);
-unsigned char irq_alloc(unsigned int dev_handle,
- unsigned int dev_ino);
-#ifdef CONFIG_PCI_MSI
+unsigned int irq_alloc(unsigned int dev_handle, unsigned int dev_ino);
void irq_free(unsigned int irq);
-#endif
void __init init_IRQ(void);
void fixup_irqs(void);
diff --git a/arch/sparc/include/asm/ldc.h b/arch/sparc/include/asm/ldc.h
index c8c67f621f4f..58ab64de25d2 100644
--- a/arch/sparc/include/asm/ldc.h
+++ b/arch/sparc/include/asm/ldc.h
@@ -53,13 +53,14 @@ struct ldc_channel;
/* Allocate state for a channel. */
struct ldc_channel *ldc_alloc(unsigned long id,
const struct ldc_channel_config *cfgp,
- void *event_arg);
+ void *event_arg,
+ const char *name);
/* Shut down and free state for a channel. */
void ldc_free(struct ldc_channel *lp);
/* Register TX and RX queues of the link with the hypervisor. */
-int ldc_bind(struct ldc_channel *lp, const char *name);
+int ldc_bind(struct ldc_channel *lp);
/* For non-RAW protocols we need to complete a handshake before
* communication can proceed. ldc_connect() does that, if the
diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h
index bf109984a032..8c2a8c937540 100644
--- a/arch/sparc/include/asm/page_64.h
+++ b/arch/sparc/include/asm/page_64.h
@@ -57,18 +57,21 @@ void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *topa
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long iopte; } iopte_t;
typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pud; } pud_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
#define pte_val(x) ((x).pte)
#define iopte_val(x) ((x).iopte)
#define pmd_val(x) ((x).pmd)
+#define pud_val(x) ((x).pud)
#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)
#define __pte(x) ((pte_t) { (x) } )
#define __iopte(x) ((iopte_t) { (x) } )
#define __pmd(x) ((pmd_t) { (x) } )
+#define __pud(x) ((pud_t) { (x) } )
#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x) ((pgprot_t) { (x) } )
@@ -77,18 +80,21 @@ typedef struct { unsigned long pgprot; } pgprot_t;
typedef unsigned long pte_t;
typedef unsigned long iopte_t;
typedef unsigned long pmd_t;
+typedef unsigned long pud_t;
typedef unsigned long pgd_t;
typedef unsigned long pgprot_t;
#define pte_val(x) (x)
#define iopte_val(x) (x)
#define pmd_val(x) (x)
+#define pud_val(x) (x)
#define pgd_val(x) (x)
#define pgprot_val(x) (x)
#define __pte(x) (x)
#define __iopte(x) (x)
#define __pmd(x) (x)
+#define __pud(x) (x)
#define __pgd(x) (x)
#define __pgprot(x) (x)
@@ -96,21 +102,14 @@ typedef unsigned long pgprot_t;
typedef pte_t *pgtable_t;
-/* These two values define the virtual address space range in which we
- * must forbid 64-bit user processes from making mappings. It used to
- * represent precisely the virtual address space hole present in most
- * early sparc64 chips including UltraSPARC-I. But now it also is
- * further constrained by the limits of our page tables, which is
- * 43-bits of virtual address.
- */
-#define SPARC64_VA_HOLE_TOP _AC(0xfffffc0000000000,UL)
-#define SPARC64_VA_HOLE_BOTTOM _AC(0x0000040000000000,UL)
+extern unsigned long sparc64_va_hole_top;
+extern unsigned long sparc64_va_hole_bottom;
/* The next two defines specify the actual exclusion region we
* enforce, wherein we use a 4GB red zone on each side of the VA hole.
*/
-#define VA_EXCLUDE_START (SPARC64_VA_HOLE_BOTTOM - (1UL << 32UL))
-#define VA_EXCLUDE_END (SPARC64_VA_HOLE_TOP + (1UL << 32UL))
+#define VA_EXCLUDE_START (sparc64_va_hole_bottom - (1UL << 32UL))
+#define VA_EXCLUDE_END (sparc64_va_hole_top + (1UL << 32UL))
#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_32BIT) ? \
_AC(0x0000000070000000,UL) : \
@@ -118,20 +117,16 @@ typedef pte_t *pgtable_t;
#include <asm-generic/memory_model.h>
-#define PAGE_OFFSET_BY_BITS(X) (-(_AC(1,UL) << (X)))
extern unsigned long PAGE_OFFSET;
#endif /* !(__ASSEMBLY__) */
-/* The maximum number of physical memory address bits we support, this
- * is used to size various tables used to manage kernel TLB misses and
- * also the sparsemem code.
+/* The maximum number of physical memory address bits we support. The
+ * largest value we can support is whatever "KPGD_SHIFT + KPTE_BITS"
+ * evaluates to.
*/
-#define MAX_PHYS_ADDRESS_BITS 47
+#define MAX_PHYS_ADDRESS_BITS 53
-/* These two shift counts are used when indexing sparc64_valid_addr_bitmap
- * and kpte_linear_bitmap.
- */
#define ILOG2_4MB 22
#define ILOG2_256MB 28
diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
index 39a7ac49b00c..5e3187185b4a 100644
--- a/arch/sparc/include/asm/pgalloc_64.h
+++ b/arch/sparc/include/asm/pgalloc_64.h
@@ -15,6 +15,13 @@
extern struct kmem_cache *pgtable_cache;
+static inline void __pgd_populate(pgd_t *pgd, pud_t *pud)
+{
+ pgd_set(pgd, pud);
+}
+
+#define pgd_populate(MM, PGD, PUD) __pgd_populate(PGD, PUD)
+
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
@@ -25,7 +32,23 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
kmem_cache_free(pgtable_cache, pgd);
}
-#define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
+static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
+{
+ pud_set(pud, pmd);
+}
+
+#define pud_populate(MM, PUD, PMD) __pud_populate(PUD, PMD)
+
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ return kmem_cache_alloc(pgtable_cache,
+ GFP_KERNEL|__GFP_REPEAT);
+}
+
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+{
+ kmem_cache_free(pgtable_cache, pud);
+}
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
@@ -91,4 +114,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pte_t *pte,
#define __pmd_free_tlb(tlb, pmd, addr) \
pgtable_free_tlb(tlb, pmd, false)
+#define __pud_free_tlb(tlb, pud, addr) \
+ pgtable_free_tlb(tlb, pud, false)
+
#endif /* _SPARC64_PGALLOC_H */
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 3770bf5c6e1b..bfeb626085ac 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -20,8 +20,6 @@
#include <asm/page.h>
#include <asm/processor.h>
-#include <asm-generic/pgtable-nopud.h>
-
/* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB).
* The page copy blockops can use 0x6000000 to 0x8000000.
* The 8K TSB is mapped in the 0x8000000 to 0x8400000 range.
@@ -42,10 +40,7 @@
#define LOW_OBP_ADDRESS _AC(0x00000000f0000000,UL)
#define HI_OBP_ADDRESS _AC(0x0000000100000000,UL)
#define VMALLOC_START _AC(0x0000000100000000,UL)
-#define VMALLOC_END _AC(0x0000010000000000,UL)
-#define VMEMMAP_BASE _AC(0x0000010000000000,UL)
-
-#define vmemmap ((struct page *)VMEMMAP_BASE)
+#define VMEMMAP_BASE VMALLOC_END
/* PMD_SHIFT determines the size of the area a second-level page
* table can map
@@ -55,13 +50,25 @@
#define PMD_MASK (~(PMD_SIZE-1))
#define PMD_BITS (PAGE_SHIFT - 3)
-/* PGDIR_SHIFT determines what a third-level page table entry can map */
-#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3) + PMD_BITS)
+/* PUD_SHIFT determines the size of the area a third-level page
+ * table can map
+ */
+#define PUD_SHIFT (PMD_SHIFT + PMD_BITS)
+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE-1))
+#define PUD_BITS (PAGE_SHIFT - 3)
+
+/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
+#define PGDIR_SHIFT (PUD_SHIFT + PUD_BITS)
#define PGDIR_SIZE (_AC(1,UL) << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#define PGDIR_BITS (PAGE_SHIFT - 3)
-#if (PGDIR_SHIFT + PGDIR_BITS) != 43
+#if (MAX_PHYS_ADDRESS_BITS > PGDIR_SHIFT + PGDIR_BITS)
+#error MAX_PHYS_ADDRESS_BITS exceeds what kernel page tables can support
+#endif
+
+#if (PGDIR_SHIFT + PGDIR_BITS) != 53
#error Page table parameters do not cover virtual address space properly.
#endif
@@ -71,28 +78,18 @@
#ifndef __ASSEMBLY__
-#include <linux/sched.h>
-
-extern unsigned long sparc64_valid_addr_bitmap[];
+extern unsigned long VMALLOC_END;
-/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
-static inline bool __kern_addr_valid(unsigned long paddr)
-{
- if ((paddr >> MAX_PHYS_ADDRESS_BITS) != 0UL)
- return false;
- return test_bit(paddr >> ILOG2_4MB, sparc64_valid_addr_bitmap);
-}
+#define vmemmap ((struct page *)VMEMMAP_BASE)
-static inline bool kern_addr_valid(unsigned long addr)
-{
- unsigned long paddr = __pa(addr);
+#include <linux/sched.h>
- return __kern_addr_valid(paddr);
-}
+bool kern_addr_valid(unsigned long addr);
/* Entries per page directory level. */
#define PTRS_PER_PTE (1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PMD (1UL << PMD_BITS)
+#define PTRS_PER_PUD (1UL << PUD_BITS)
#define PTRS_PER_PGD (1UL << PGDIR_BITS)
/* Kernel has a separate 44bit address space. */
@@ -101,6 +98,9 @@ static inline bool kern_addr_valid(unsigned long addr)
#define pmd_ERROR(e) \
pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n", \
__FILE__, __LINE__, &(e), pmd_val(e), __builtin_return_address(0))
+#define pud_ERROR(e) \
+ pr_err("%s:%d: bad pud %p(%016lx) seen at (%pS)\n", \
+ __FILE__, __LINE__, &(e), pud_val(e), __builtin_return_address(0))
#define pgd_ERROR(e) \
pr_err("%s:%d: bad pgd %p(%016lx) seen at (%pS)\n", \
__FILE__, __LINE__, &(e), pgd_val(e), __builtin_return_address(0))
@@ -112,6 +112,7 @@ static inline bool kern_addr_valid(unsigned long addr)
#define _PAGE_R _AC(0x8000000000000000,UL) /* Keep ref bit uptodate*/
#define _PAGE_SPECIAL _AC(0x0200000000000000,UL) /* Special page */
#define _PAGE_PMD_HUGE _AC(0x0100000000000000,UL) /* Huge page */
+#define _PAGE_PUD_HUGE _PAGE_PMD_HUGE
/* Advertise support for _PAGE_SPECIAL */
#define __HAVE_ARCH_PTE_SPECIAL
@@ -658,26 +659,26 @@ static inline unsigned long pmd_large(pmd_t pmd)
return pte_val(pte) & _PAGE_PMD_HUGE;
}
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline unsigned long pmd_young(pmd_t pmd)
+static inline unsigned long pmd_pfn(pmd_t pmd)
{
pte_t pte = __pte(pmd_val(pmd));
- return pte_young(pte);
+ return pte_pfn(pte);
}
-static inline unsigned long pmd_write(pmd_t pmd)
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline unsigned long pmd_young(pmd_t pmd)
{
pte_t pte = __pte(pmd_val(pmd));
- return pte_write(pte);
+ return pte_young(pte);
}
-static inline unsigned long pmd_pfn(pmd_t pmd)
+static inline unsigned long pmd_write(pmd_t pmd)
{
pte_t pte = __pte(pmd_val(pmd));
- return pte_pfn(pte);
+ return pte_write(pte);
}
static inline unsigned long pmd_trans_huge(pmd_t pmd)
@@ -771,13 +772,15 @@ static inline int pmd_present(pmd_t pmd)
* the top bits outside of the range of any physical address size we
* support are clear as well. We also validate the physical itself.
*/
-#define pmd_bad(pmd) ((pmd_val(pmd) & ~PAGE_MASK) || \
- !__kern_addr_valid(pmd_val(pmd)))
+#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK)
#define pud_none(pud) (!pud_val(pud))
-#define pud_bad(pud) ((pud_val(pud) & ~PAGE_MASK) || \
- !__kern_addr_valid(pud_val(pud)))
+#define pud_bad(pud) (pud_val(pud) & ~PAGE_MASK)
+
+#define pgd_none(pgd) (!pgd_val(pgd))
+
+#define pgd_bad(pgd) (pgd_val(pgd) & ~PAGE_MASK)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
@@ -815,10 +818,31 @@ static inline unsigned long __pmd_page(pmd_t pmd)
#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0UL)
#define pud_present(pud) (pud_val(pud) != 0U)
#define pud_clear(pudp) (pud_val(*(pudp)) = 0UL)
+#define pgd_page_vaddr(pgd) \
+ ((unsigned long) __va(pgd_val(pgd)))
+#define pgd_present(pgd) (pgd_val(pgd) != 0U)
+#define pgd_clear(pgdp) (pgd_val(*(pgd)) = 0UL)
+
+static inline unsigned long pud_large(pud_t pud)
+{
+ pte_t pte = __pte(pud_val(pud));
+
+ return pte_val(pte) & _PAGE_PMD_HUGE;
+}
+
+static inline unsigned long pud_pfn(pud_t pud)
+{
+ pte_t pte = __pte(pud_val(pud));
+
+ return pte_pfn(pte);
+}
/* Same in both SUN4V and SUN4U. */
#define pte_none(pte) (!pte_val(pte))
+#define pgd_set(pgdp, pudp) \
+ (pgd_val(*(pgdp)) = (__pa((unsigned long) (pudp))))
+
/* to find an entry in a page-table-directory. */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
@@ -826,6 +850,11 @@ static inline unsigned long __pmd_page(pmd_t pmd)
/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+/* Find an entry in the third-level page table.. */
+#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
+#define pud_offset(pgdp, address) \
+ ((pud_t *) pgd_page_vaddr(*(pgdp)) + pud_index(address))
+
/* Find an entry in the second-level page table.. */
#define pmd_offset(pudp, address) \
((pmd_t *) pud_page_vaddr(*(pudp)) + \
@@ -898,7 +927,6 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
#endif
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-extern pmd_t swapper_low_pmd_dir[PTRS_PER_PMD];
void paging_init(void);
unsigned long find_ecache_flush_span(unsigned long size);
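/*
 * Illustrative arithmetic, not part of the patch, assuming the usual 8K
 * base pages (PAGE_SHIFT = 13), which is what makes the "!= 53" check
 * above come out right:
 *
 *	PMD_SHIFT   = PAGE_SHIFT + (PAGE_SHIFT - 3) = 13 + 10 = 23
 *	PUD_SHIFT   = PMD_SHIFT + PMD_BITS          = 23 + 10 = 33
 *	PGDIR_SHIFT = PUD_SHIFT + PUD_BITS          = 33 + 10 = 43
 *	PGDIR_SHIFT + PGDIR_BITS                    = 43 + 10 = 53
 *
 * so the four levels span 53 bits of virtual address, which is also why
 * MAX_PHYS_ADDRESS_BITS in page_64.h may now be raised to 53 without
 * tripping the new #error.
 */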
diff --git a/arch/sparc/include/asm/spitfire.h b/arch/sparc/include/asm/spitfire.h
index 3fc58691dbd0..56f933816144 100644
--- a/arch/sparc/include/asm/spitfire.h
+++ b/arch/sparc/include/asm/spitfire.h
@@ -45,6 +45,8 @@
#define SUN4V_CHIP_NIAGARA3 0x03
#define SUN4V_CHIP_NIAGARA4 0x04
#define SUN4V_CHIP_NIAGARA5 0x05
+#define SUN4V_CHIP_SPARC_M6 0x06
+#define SUN4V_CHIP_SPARC_M7 0x07
#define SUN4V_CHIP_SPARC64X 0x8a
#define SUN4V_CHIP_UNKNOWN 0xff
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index a5f01ac6d0f1..f85dc8512ab3 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -102,6 +102,7 @@ struct thread_info {
#define FAULT_CODE_ITLB 0x04 /* Miss happened in I-TLB */
#define FAULT_CODE_WINFIXUP 0x08 /* Miss happened during spill/fill */
#define FAULT_CODE_BLKCOMMIT 0x10 /* Use blk-commit ASI in copy_page */
+#define FAULT_CODE_BAD_RA 0x20 /* Bad RA for sun4v */
#if PAGE_SHIFT == 13
#define THREAD_SIZE (2*PAGE_SIZE)
diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h
index 90916f955cac..ecb49cfa3be9 100644
--- a/arch/sparc/include/asm/tsb.h
+++ b/arch/sparc/include/asm/tsb.h
@@ -133,9 +133,24 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
sub TSB, 0x8, TSB; \
TSB_STORE(TSB, TAG);
- /* Do a kernel page table walk. Leaves physical PTE pointer in
- * REG1. Jumps to FAIL_LABEL on early page table walk termination.
- * VADDR will not be clobbered, but REG2 will.
+ /* Do a kernel page table walk. Leaves valid PTE value in
+ * REG1. Jumps to FAIL_LABEL on early page table walk
+ * termination. VADDR will not be clobbered, but REG2 will.
+ *
+ * There are two masks we must apply to propagate bits from
+ * the virtual address into the PTE physical address field
+ * when dealing with huge pages. This is because the page
+ * table boundaries do not match the huge page size(s) the
+ * hardware supports.
+ *
+ * In these cases we propagate the bits that are below the
+ * page table level where we saw the huge page mapping, but
+ * are still within the relevant physical bits for the huge
+ * page size in question. So for PMD mappings (which fall on
+ * bit 23, for 8MB per PMD) we must propagate bit 22 for a
+ * 4MB huge page. For huge PUDs (which fall on bit 33, for
+ * 8GB per PUD), we have to accommodate 256MB and 2GB huge
+ * pages. So for those we propagate bits 32 to 28.
*/
#define KERN_PGTABLE_WALK(VADDR, REG1, REG2, FAIL_LABEL) \
sethi %hi(swapper_pg_dir), REG1; \
@@ -145,15 +160,40 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
andn REG2, 0x7, REG2; \
ldx [REG1 + REG2], REG1; \
brz,pn REG1, FAIL_LABEL; \
- sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
+ sllx VADDR, 64 - (PUD_SHIFT + PUD_BITS), REG2; \
srlx REG2, 64 - PAGE_SHIFT, REG2; \
andn REG2, 0x7, REG2; \
ldxa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
brz,pn REG1, FAIL_LABEL; \
- sllx VADDR, 64 - PMD_SHIFT, REG2; \
+ sethi %uhi(_PAGE_PUD_HUGE), REG2; \
+ brz,pn REG1, FAIL_LABEL; \
+ sllx REG2, 32, REG2; \
+ andcc REG1, REG2, %g0; \
+ sethi %hi(0xf8000000), REG2; \
+ bne,pt %xcc, 697f; \
+ sllx REG2, 1, REG2; \
+ sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
srlx REG2, 64 - PAGE_SHIFT, REG2; \
andn REG2, 0x7, REG2; \
- add REG1, REG2, REG1;
+ ldxa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
+ sethi %uhi(_PAGE_PMD_HUGE), REG2; \
+ brz,pn REG1, FAIL_LABEL; \
+ sllx REG2, 32, REG2; \
+ andcc REG1, REG2, %g0; \
+ be,pn %xcc, 698f; \
+ sethi %hi(0x400000), REG2; \
+697: brgez,pn REG1, FAIL_LABEL; \
+ andn REG1, REG2, REG1; \
+ and VADDR, REG2, REG2; \
+ ba,pt %xcc, 699f; \
+ or REG1, REG2, REG1; \
+698: sllx VADDR, 64 - PMD_SHIFT, REG2; \
+ srlx REG2, 64 - PAGE_SHIFT, REG2; \
+ andn REG2, 0x7, REG2; \
+ ldxa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
+ brgez,pn REG1, FAIL_LABEL; \
+ nop; \
+699:
/* PMD has been loaded into REG1, interpret the value, seeing
* if it is a HUGE PMD or a normal one. If it is not valid
@@ -198,6 +238,11 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
andn REG2, 0x7, REG2; \
ldxa [PHYS_PGD + REG2] ASI_PHYS_USE_EC, REG1; \
brz,pn REG1, FAIL_LABEL; \
+ sllx VADDR, 64 - (PUD_SHIFT + PUD_BITS), REG2; \
+ srlx REG2, 64 - PAGE_SHIFT, REG2; \
+ andn REG2, 0x7, REG2; \
+ ldxa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
+ brz,pn REG1, FAIL_LABEL; \
sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
srlx REG2, 64 - PAGE_SHIFT, REG2; \
andn REG2, 0x7, REG2; \
@@ -246,8 +291,6 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
(KERNEL_TSB_SIZE_BYTES / 16)
#define KERNEL_TSB4M_NENTRIES 4096
-#define KTSB_PHYS_SHIFT 15
-
/* Do a kernel TSB lookup at tl>0 on VADDR+TAG, branch to OK_LABEL
* on TSB hit. REG1, REG2, REG3, and REG4 are used as temporaries
* and the found TTE will be left in REG1. REG3 and REG4 must
@@ -256,17 +299,15 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
* VADDR and TAG will be preserved and not clobbered by this macro.
*/
#define KERN_TSB_LOOKUP_TL1(VADDR, TAG, REG1, REG2, REG3, REG4, OK_LABEL) \
-661: sethi %hi(swapper_tsb), REG1; \
- or REG1, %lo(swapper_tsb), REG1; \
+661: sethi %uhi(swapper_tsb), REG1; \
+ sethi %hi(swapper_tsb), REG2; \
+ or REG1, %ulo(swapper_tsb), REG1; \
+ or REG2, %lo(swapper_tsb), REG2; \
.section .swapper_tsb_phys_patch, "ax"; \
.word 661b; \
.previous; \
-661: nop; \
- .section .tsb_ldquad_phys_patch, "ax"; \
- .word 661b; \
- sllx REG1, KTSB_PHYS_SHIFT, REG1; \
- sllx REG1, KTSB_PHYS_SHIFT, REG1; \
- .previous; \
+ sllx REG1, 32, REG1; \
+ or REG1, REG2, REG1; \
srlx VADDR, PAGE_SHIFT, REG2; \
and REG2, (KERNEL_TSB_NENTRIES - 1), REG2; \
sllx REG2, 4, REG2; \
@@ -281,17 +322,15 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
* we can make use of that for the index computation.
*/
#define KERN_TSB4M_LOOKUP_TL1(TAG, REG1, REG2, REG3, REG4, OK_LABEL) \
-661: sethi %hi(swapper_4m_tsb), REG1; \
- or REG1, %lo(swapper_4m_tsb), REG1; \
+661: sethi %uhi(swapper_4m_tsb), REG1; \
+ sethi %hi(swapper_4m_tsb), REG2; \
+ or REG1, %ulo(swapper_4m_tsb), REG1; \
+ or REG2, %lo(swapper_4m_tsb), REG2; \
.section .swapper_4m_tsb_phys_patch, "ax"; \
.word 661b; \
.previous; \
-661: nop; \
- .section .tsb_ldquad_phys_patch, "ax"; \
- .word 661b; \
- sllx REG1, KTSB_PHYS_SHIFT, REG1; \
- sllx REG1, KTSB_PHYS_SHIFT, REG1; \
- .previous; \
+ sllx REG1, 32, REG1; \
+ or REG1, REG2, REG1; \
and TAG, (KERNEL_TSB4M_NENTRIES - 1), REG2; \
sllx REG2, 4, REG2; \
add REG1, REG2, REG2; \
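/*
 * Illustrative sketch, not part of the patch: the reworked
 * KERN_PGTABLE_WALK propagates low virtual-address bits into the physical
 * address taken from a huge PMD or PUD entry, because the hardware huge
 * page sizes do not line up with the page-table boundaries.  In C, with
 * the same mask values the assembler builds (0x400000, and
 * 0xf8000000 << 1 = 0x1f0000000), that step looks roughly like:
 */
static unsigned long huge_entry_paddr(unsigned long entry_paddr,
				      unsigned long vaddr, bool pud_level)
{
	/* PUD level: 256MB and 2GB pages -> take VA bits 32..28
	 * PMD level: 4MB pages           -> take VA bit  22     */
	unsigned long mask = pud_level ? 0x1f0000000UL : 0x400000UL;

	return (entry_paddr & ~mask) | (vaddr & mask);
}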
diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h
index 6b135a8ab07b..d758c8d8f47d 100644
--- a/arch/sparc/include/asm/vio.h
+++ b/arch/sparc/include/asm/vio.h
@@ -121,12 +121,18 @@ struct vio_disk_attr_info {
u8 vdisk_type;
#define VD_DISK_TYPE_SLICE 0x01 /* Slice in block device */
#define VD_DISK_TYPE_DISK 0x02 /* Entire block device */
- u16 resv1;
+ u8 vdisk_mtype; /* v1.1 */
+#define VD_MEDIA_TYPE_FIXED 0x01 /* Fixed device */
+#define VD_MEDIA_TYPE_CD 0x02 /* CD Device */
+#define VD_MEDIA_TYPE_DVD 0x03 /* DVD Device */
+ u8 resv1;
u32 vdisk_block_size;
u64 operations;
- u64 vdisk_size;
+ u64 vdisk_size; /* v1.1 */
u64 max_xfer_size;
- u64 resv2[2];
+ u32 phys_block_size; /* v1.2 */
+ u32 resv2;
+ u64 resv3[1];
};
struct vio_disk_desc {
@@ -272,7 +278,7 @@ static inline u32 vio_dring_avail(struct vio_dring_state *dr,
unsigned int ring_size)
{
return (dr->pending -
- ((dr->prod - dr->cons) & (ring_size - 1)));
+ ((dr->prod - dr->cons) & (ring_size - 1)) - 1);
}
#define VIO_MAX_TYPE_LEN 32
@@ -292,6 +298,7 @@ struct vio_dev {
unsigned int tx_irq;
unsigned int rx_irq;
+ u64 rx_ino;
struct device dev;
};
@@ -447,5 +454,6 @@ int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev,
char *name);
void vio_port_up(struct vio_driver_state *vio);
+int vio_set_intr(unsigned long dev_ino, int state);
#endif /* _SPARC64_VIO_H */
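/*
 * Illustrative note, not part of the patch: subtracting one extra slot in
 * vio_dring_avail() is the usual way to distinguish a completely full ring
 * from an empty one when only producer and consumer indices are kept --
 * prod == cons must always mean "empty", so at most ring_size - 1 entries
 * may ever be outstanding.  A hypothetical generic helper:
 */
static inline u32 ring_space(u32 prod, u32 cons, u32 ring_size)
{
	u32 used = (prod - cons) & (ring_size - 1);	/* ring_size: power of two */

	return ring_size - 1 - used;			/* keep one slot unused */
}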
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
index 82a3a71c451e..dfad8b1aea9f 100644
--- a/arch/sparc/kernel/cpu.c
+++ b/arch/sparc/kernel/cpu.c
@@ -494,6 +494,18 @@ static void __init sun4v_cpu_probe(void)
sparc_pmu_type = "niagara5";
break;
+ case SUN4V_CHIP_SPARC_M6:
+ sparc_cpu_type = "SPARC-M6";
+ sparc_fpu_type = "SPARC-M6 integrated FPU";
+ sparc_pmu_type = "sparc-m6";
+ break;
+
+ case SUN4V_CHIP_SPARC_M7:
+ sparc_cpu_type = "SPARC-M7";
+ sparc_fpu_type = "SPARC-M7 integrated FPU";
+ sparc_pmu_type = "sparc-m7";
+ break;
+
case SUN4V_CHIP_SPARC64X:
sparc_cpu_type = "SPARC64-X";
sparc_fpu_type = "SPARC64-X integrated FPU";
diff --git a/arch/sparc/kernel/cpumap.c b/arch/sparc/kernel/cpumap.c
index de1c844dfabc..e69ec0e3f155 100644
--- a/arch/sparc/kernel/cpumap.c
+++ b/arch/sparc/kernel/cpumap.c
@@ -326,6 +326,8 @@ static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index)
case SUN4V_CHIP_NIAGARA3:
case SUN4V_CHIP_NIAGARA4:
case SUN4V_CHIP_NIAGARA5:
+ case SUN4V_CHIP_SPARC_M6:
+ case SUN4V_CHIP_SPARC_M7:
case SUN4V_CHIP_SPARC64X:
rover_inc_table = niagara_iterate_method;
break;
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index dff60abbea01..f87a55d77094 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -1200,14 +1200,14 @@ static int ds_probe(struct vio_dev *vdev, const struct vio_device_id *id)
ds_cfg.tx_irq = vdev->tx_irq;
ds_cfg.rx_irq = vdev->rx_irq;
- lp = ldc_alloc(vdev->channel_id, &ds_cfg, dp);
+ lp = ldc_alloc(vdev->channel_id, &ds_cfg, dp, "DS");
if (IS_ERR(lp)) {
err = PTR_ERR(lp);
goto out_free_ds_states;
}
dp->lp = lp;
- err = ldc_bind(lp, "DS");
+ err = ldc_bind(lp);
if (err)
goto out_free_ldc;
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index 452f04fe8da6..4fdeb8040d4d 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -427,6 +427,12 @@ sun4v_chip_type:
cmp %g2, '5'
be,pt %xcc, 5f
mov SUN4V_CHIP_NIAGARA5, %g4
+ cmp %g2, '6'
+ be,pt %xcc, 5f
+ mov SUN4V_CHIP_SPARC_M6, %g4
+ cmp %g2, '7'
+ be,pt %xcc, 5f
+ mov SUN4V_CHIP_SPARC_M7, %g4
ba,pt %xcc, 49f
nop
@@ -585,6 +591,12 @@ niagara_tlb_fixup:
cmp %g1, SUN4V_CHIP_NIAGARA5
be,pt %xcc, niagara4_patch
nop
+ cmp %g1, SUN4V_CHIP_SPARC_M6
+ be,pt %xcc, niagara4_patch
+ nop
+ cmp %g1, SUN4V_CHIP_SPARC_M7
+ be,pt %xcc, niagara4_patch
+ nop
call generic_patch_copyops
nop
diff --git a/arch/sparc/kernel/hvapi.c b/arch/sparc/kernel/hvapi.c
index c0a2de0fd624..5c55145bfbf0 100644
--- a/arch/sparc/kernel/hvapi.c
+++ b/arch/sparc/kernel/hvapi.c
@@ -46,6 +46,7 @@ static struct api_info api_table[] = {
{ .group = HV_GRP_VF_CPU, },
{ .group = HV_GRP_KT_CPU, },
{ .group = HV_GRP_VT_CPU, },
+ { .group = HV_GRP_T5_CPU, },
{ .group = HV_GRP_DIAG, .flags = FLAG_PRE_API },
};
diff --git a/arch/sparc/kernel/hvcalls.S b/arch/sparc/kernel/hvcalls.S
index f3ab509b76a8..caedf8320416 100644
--- a/arch/sparc/kernel/hvcalls.S
+++ b/arch/sparc/kernel/hvcalls.S
@@ -821,3 +821,19 @@ ENTRY(sun4v_vt_set_perfreg)
retl
nop
ENDPROC(sun4v_vt_set_perfreg)
+
+ENTRY(sun4v_t5_get_perfreg)
+ mov %o1, %o4
+ mov HV_FAST_T5_GET_PERFREG, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%o4]
+ retl
+ nop
+ENDPROC(sun4v_t5_get_perfreg)
+
+ENTRY(sun4v_t5_set_perfreg)
+ mov HV_FAST_T5_SET_PERFREG, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ENDPROC(sun4v_t5_set_perfreg)
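/*
 * Illustrative usage, not part of the patch: the stubs above follow the
 * usual sun4v fast-trap convention -- %o5 carries the function number,
 * %o0/%o1 the arguments, %o0 returns the hypervisor status, and the read
 * stub stores the register value through the pointer it saved from %o1.
 * A hypothetical C caller:
 */
static int read_t5_perfreg(unsigned long reg_num, unsigned long *val)
{
	unsigned long hv_err = sun4v_t5_get_perfreg(reg_num, val);

	return hv_err ? -EIO : 0;	/* nonzero is an HV_E* error code */
}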
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 7f08ec8a7c68..28fed53b13a0 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -278,7 +278,8 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len,
}
order = get_order(len_total);
- if ((va = __get_free_pages(GFP_KERNEL|__GFP_COMP, order)) == 0)
+ va = __get_free_pages(gfp, order);
+ if (va == 0)
goto err_nopages;
if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL)
@@ -443,7 +444,7 @@ static void *pci32_alloc_coherent(struct device *dev, size_t len,
}
order = get_order(len_total);
- va = (void *) __get_free_pages(GFP_KERNEL, order);
+ va = (void *) __get_free_pages(gfp, order);
if (va == NULL) {
printk("pci_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT);
goto err_nopages;
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 666193f4e8bb..4033c23bdfa6 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -47,8 +47,6 @@
#include "cpumap.h"
#include "kstack.h"
-#define NUM_IVECS (IMAP_INR + 1)
-
struct ino_bucket *ivector_table;
unsigned long ivector_table_pa;
@@ -107,55 +105,196 @@ static void bucket_set_irq(unsigned long bucket_pa, unsigned int irq)
#define irq_work_pa(__cpu) &(trap_block[(__cpu)].irq_worklist_pa)
-static struct {
- unsigned int dev_handle;
- unsigned int dev_ino;
- unsigned int in_use;
-} irq_table[NR_IRQS];
-static DEFINE_SPINLOCK(irq_alloc_lock);
+static unsigned long hvirq_major __initdata;
+static int __init early_hvirq_major(char *p)
+{
+ int rc = kstrtoul(p, 10, &hvirq_major);
+
+ return rc;
+}
+early_param("hvirq", early_hvirq_major);
+
+static int hv_irq_version;
+
+/* Major version 2.0 of HV_GRP_INTR added support for the VIRQ cookie
+ * based interfaces, but:
+ *
+ * 1) Several OSs, Solaris and Linux included, use them even when only
+ * negotiating version 1.0 (or failing to negotiate at all). So the
+ * hypervisor has a workaround that provides the VIRQ interfaces even
+ * when only version 1.0 of the API is in use.
+ *
+ * 2) Second, and more importantly, with major version 2.0 these VIRQ
+ * interfaces only were actually hooked up for LDC interrupts, even
+ * though the Hypervisor specification clearly stated:
+ *
+ * The new interrupt API functions will be available to a guest
+ * when it negotiates version 2.0 in the interrupt API group 0x2. When
+ * a guest negotiates version 2.0, all interrupt sources will only
+ * support using the cookie interface, and any attempt to use the
+ * version 1.0 interrupt APIs numbered 0xa0 to 0xa6 will result in the
+ * ENOTSUPPORTED error being returned.
+ *
+ * with an emphasis on "all interrupt sources".
+ *
+ * To correct this, major version 3.0 was created which does actually
+ * support VIRQs for all interrupt sources (not just LDC devices). So
+ * if we want to move completely over the cookie based VIRQs we must
+ * negotiate major version 3.0 or later of HV_GRP_INTR.
+ */
+static bool sun4v_cookie_only_virqs(void)
+{
+ if (hv_irq_version >= 3)
+ return true;
+ return false;
+}
-unsigned char irq_alloc(unsigned int dev_handle, unsigned int dev_ino)
+static void __init irq_init_hv(void)
{
- unsigned long flags;
- unsigned char ent;
+ unsigned long hv_error, major, minor = 0;
+
+ if (tlb_type != hypervisor)
+ return;
- BUILD_BUG_ON(NR_IRQS >= 256);
+ if (hvirq_major)
+ major = hvirq_major;
+ else
+ major = 3;
- spin_lock_irqsave(&irq_alloc_lock, flags);
+ hv_error = sun4v_hvapi_register(HV_GRP_INTR, major, &minor);
+ if (!hv_error)
+ hv_irq_version = major;
+ else
+ hv_irq_version = 1;
- for (ent = 1; ent < NR_IRQS; ent++) {
- if (!irq_table[ent].in_use)
+ pr_info("SUN4V: Using IRQ API major %d, cookie only virqs %s\n",
+ hv_irq_version,
+ sun4v_cookie_only_virqs() ? "enabled" : "disabled");
+}
+
+/* This function is for the timer interrupt. */
+int __init arch_probe_nr_irqs(void)
+{
+ return 1;
+}
+
+#define DEFAULT_NUM_IVECS (0xfffU)
+static unsigned int nr_ivec = DEFAULT_NUM_IVECS;
+#define NUM_IVECS (nr_ivec)
+
+static unsigned int __init size_nr_ivec(void)
+{
+ if (tlb_type == hypervisor) {
+ switch (sun4v_chip_type) {
+ /* Athena's devhandle|devino is large. */
+ case SUN4V_CHIP_SPARC64X:
+ nr_ivec = 0xffff;
break;
+ }
}
- if (ent >= NR_IRQS) {
- printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
- ent = 0;
- } else {
- irq_table[ent].dev_handle = dev_handle;
- irq_table[ent].dev_ino = dev_ino;
- irq_table[ent].in_use = 1;
- }
+ return nr_ivec;
+}
+
+struct irq_handler_data {
+ union {
+ struct {
+ unsigned int dev_handle;
+ unsigned int dev_ino;
+ };
+ unsigned long sysino;
+ };
+ struct ino_bucket bucket;
+ unsigned long iclr;
+ unsigned long imap;
+};
+
+static inline unsigned int irq_data_to_handle(struct irq_data *data)
+{
+ struct irq_handler_data *ihd = data->handler_data;
+
+ return ihd->dev_handle;
+}
+
+static inline unsigned int irq_data_to_ino(struct irq_data *data)
+{
+ struct irq_handler_data *ihd = data->handler_data;
- spin_unlock_irqrestore(&irq_alloc_lock, flags);
+ return ihd->dev_ino;
+}
+
+static inline unsigned long irq_data_to_sysino(struct irq_data *data)
+{
+ struct irq_handler_data *ihd = data->handler_data;
- return ent;
+ return ihd->sysino;
}
-#ifdef CONFIG_PCI_MSI
void irq_free(unsigned int irq)
{
- unsigned long flags;
+ void *data = irq_get_handler_data(irq);
- if (irq >= NR_IRQS)
- return;
+ kfree(data);
+ irq_set_handler_data(irq, NULL);
+ irq_free_descs(irq, 1);
+}
- spin_lock_irqsave(&irq_alloc_lock, flags);
+unsigned int irq_alloc(unsigned int dev_handle, unsigned int dev_ino)
+{
+ int irq;
- irq_table[irq].in_use = 0;
+ irq = __irq_alloc_descs(-1, 1, 1, numa_node_id(), NULL);
+ if (irq <= 0)
+ goto out;
- spin_unlock_irqrestore(&irq_alloc_lock, flags);
+ return irq;
+out:
+ return 0;
+}
+
+static unsigned int cookie_exists(u32 devhandle, unsigned int devino)
+{
+ unsigned long hv_err, cookie;
+ struct ino_bucket *bucket;
+ unsigned int irq = 0U;
+
+ hv_err = sun4v_vintr_get_cookie(devhandle, devino, &cookie);
+ if (hv_err) {
+ pr_err("HV get cookie failed hv_err = %ld\n", hv_err);
+ goto out;
+ }
+
+ if (cookie & ((1UL << 63UL))) {
+ cookie = ~cookie;
+ bucket = (struct ino_bucket *) __va(cookie);
+ irq = bucket->__irq;
+ }
+out:
+ return irq;
+}
+
+static unsigned int sysino_exists(u32 devhandle, unsigned int devino)
+{
+ unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);
+ struct ino_bucket *bucket;
+ unsigned int irq;
+
+ bucket = &ivector_table[sysino];
+ irq = bucket_get_irq(__pa(bucket));
+
+ return irq;
+}
+
+void ack_bad_irq(unsigned int irq)
+{
+ pr_crit("BAD IRQ ack %d\n", irq);
+}
+
+void irq_install_pre_handler(int irq,
+ void (*func)(unsigned int, void *, void *),
+ void *arg1, void *arg2)
+{
+ pr_warn("IRQ pre handler NOT supported.\n");
}
-#endif
/*
* /proc/interrupts printing:
@@ -206,15 +345,6 @@ static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
return tid;
}
-struct irq_handler_data {
- unsigned long iclr;
- unsigned long imap;
-
- void (*pre_handler)(unsigned int, void *, void *);
- void *arg1;
- void *arg2;
-};
-
#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int irq, const struct cpumask *affinity)
{
@@ -316,8 +446,8 @@ static void sun4u_irq_eoi(struct irq_data *data)
static void sun4v_irq_enable(struct irq_data *data)
{
- unsigned int ino = irq_table[data->irq].dev_ino;
unsigned long cpuid = irq_choose_cpu(data->irq, data->affinity);
+ unsigned int ino = irq_data_to_sysino(data);
int err;
err = sun4v_intr_settarget(ino, cpuid);
@@ -337,8 +467,8 @@ static void sun4v_irq_enable(struct irq_data *data)
static int sun4v_set_affinity(struct irq_data *data,
const struct cpumask *mask, bool force)
{
- unsigned int ino = irq_table[data->irq].dev_ino;
unsigned long cpuid = irq_choose_cpu(data->irq, mask);
+ unsigned int ino = irq_data_to_sysino(data);
int err;
err = sun4v_intr_settarget(ino, cpuid);
@@ -351,7 +481,7 @@ static int sun4v_set_affinity(struct irq_data *data,
static void sun4v_irq_disable(struct irq_data *data)
{
- unsigned int ino = irq_table[data->irq].dev_ino;
+ unsigned int ino = irq_data_to_sysino(data);
int err;
err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
@@ -362,7 +492,7 @@ static void sun4v_irq_disable(struct irq_data *data)
static void sun4v_irq_eoi(struct irq_data *data)
{
- unsigned int ino = irq_table[data->irq].dev_ino;
+ unsigned int ino = irq_data_to_sysino(data);
int err;
err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
@@ -373,14 +503,13 @@ static void sun4v_irq_eoi(struct irq_data *data)
static void sun4v_virq_enable(struct irq_data *data)
{
- unsigned long cpuid, dev_handle, dev_ino;
+ unsigned long dev_handle = irq_data_to_handle(data);
+ unsigned long dev_ino = irq_data_to_ino(data);
+ unsigned long cpuid;
int err;
cpuid = irq_choose_cpu(data->irq, data->affinity);
- dev_handle = irq_table[data->irq].dev_handle;
- dev_ino = irq_table[data->irq].dev_ino;
-
err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
if (err != HV_EOK)
printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
@@ -403,14 +532,13 @@ static void sun4v_virq_enable(struct irq_data *data)
static int sun4v_virt_set_affinity(struct irq_data *data,
const struct cpumask *mask, bool force)
{
- unsigned long cpuid, dev_handle, dev_ino;
+ unsigned long dev_handle = irq_data_to_handle(data);
+ unsigned long dev_ino = irq_data_to_ino(data);
+ unsigned long cpuid;
int err;
cpuid = irq_choose_cpu(data->irq, mask);
- dev_handle = irq_table[data->irq].dev_handle;
- dev_ino = irq_table[data->irq].dev_ino;
-
err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
if (err != HV_EOK)
printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
@@ -422,11 +550,10 @@ static int sun4v_virt_set_affinity(struct irq_data *data,
static void sun4v_virq_disable(struct irq_data *data)
{
- unsigned long dev_handle, dev_ino;
+ unsigned long dev_handle = irq_data_to_handle(data);
+ unsigned long dev_ino = irq_data_to_ino(data);
int err;
- dev_handle = irq_table[data->irq].dev_handle;
- dev_ino = irq_table[data->irq].dev_ino;
err = sun4v_vintr_set_valid(dev_handle, dev_ino,
HV_INTR_DISABLED);
@@ -438,12 +565,10 @@ static void sun4v_virq_disable(struct irq_data *data)
static void sun4v_virq_eoi(struct irq_data *data)
{
- unsigned long dev_handle, dev_ino;
+ unsigned long dev_handle = irq_data_to_handle(data);
+ unsigned long dev_ino = irq_data_to_ino(data);
int err;
- dev_handle = irq_table[data->irq].dev_handle;
- dev_ino = irq_table[data->irq].dev_ino;
-
err = sun4v_vintr_set_state(dev_handle, dev_ino,
HV_INTR_STATE_IDLE);
if (err != HV_EOK)
@@ -479,31 +604,10 @@ static struct irq_chip sun4v_virq = {
.flags = IRQCHIP_EOI_IF_HANDLED,
};
-static void pre_flow_handler(struct irq_data *d)
-{
- struct irq_handler_data *handler_data = irq_data_get_irq_handler_data(d);
- unsigned int ino = irq_table[d->irq].dev_ino;
-
- handler_data->pre_handler(ino, handler_data->arg1, handler_data->arg2);
-}
-
-void irq_install_pre_handler(int irq,
- void (*func)(unsigned int, void *, void *),
- void *arg1, void *arg2)
-{
- struct irq_handler_data *handler_data = irq_get_handler_data(irq);
-
- handler_data->pre_handler = func;
- handler_data->arg1 = arg1;
- handler_data->arg2 = arg2;
-
- __irq_set_preflow_handler(irq, pre_flow_handler);
-}
-
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
- struct ino_bucket *bucket;
struct irq_handler_data *handler_data;
+ struct ino_bucket *bucket;
unsigned int irq;
int ino;
@@ -537,119 +641,166 @@ out:
return irq;
}
-static unsigned int sun4v_build_common(unsigned long sysino,
- struct irq_chip *chip)
+static unsigned int sun4v_build_common(u32 devhandle, unsigned int devino,
+ void (*handler_data_init)(struct irq_handler_data *data,
+ u32 devhandle, unsigned int devino),
+ struct irq_chip *chip)
{
- struct ino_bucket *bucket;
- struct irq_handler_data *handler_data;
+ struct irq_handler_data *data;
unsigned int irq;
- BUG_ON(tlb_type != hypervisor);
+ irq = irq_alloc(devhandle, devino);
+ if (!irq)
+ goto out;
- bucket = &ivector_table[sysino];
- irq = bucket_get_irq(__pa(bucket));
- if (!irq) {
- irq = irq_alloc(0, sysino);
- bucket_set_irq(__pa(bucket), irq);
- irq_set_chip_and_handler_name(irq, chip, handle_fasteoi_irq,
- "IVEC");
+ data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
+ if (unlikely(!data)) {
+ pr_err("IRQ handler data allocation failed.\n");
+ irq_free(irq);
+ irq = 0;
+ goto out;
}
- handler_data = irq_get_handler_data(irq);
- if (unlikely(handler_data))
- goto out;
+ irq_set_handler_data(irq, data);
+ handler_data_init(data, devhandle, devino);
+ irq_set_chip_and_handler_name(irq, chip, handle_fasteoi_irq, "IVEC");
+ data->imap = ~0UL;
+ data->iclr = ~0UL;
+out:
+ return irq;
+}
- handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
- if (unlikely(!handler_data)) {
- prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
- prom_halt();
- }
- irq_set_handler_data(irq, handler_data);
+static unsigned long cookie_assign(unsigned int irq, u32 devhandle,
+ unsigned int devino)
+{
+ struct irq_handler_data *ihd = irq_get_handler_data(irq);
+ unsigned long hv_error, cookie;
- /* Catch accidental accesses to these things. IMAP/ICLR handling
- * is done by hypervisor calls on sun4v platforms, not by direct
- * register accesses.
+ /* handler_irq needs to find the irq. The cookie is seen as signed in
+ * sun4v_dev_mondo and treated as a non-ivector_table delivery.
*/
- handler_data->imap = ~0UL;
- handler_data->iclr = ~0UL;
+ ihd->bucket.__irq = irq;
+ cookie = ~__pa(&ihd->bucket);
-out:
- return irq;
+ hv_error = sun4v_vintr_set_cookie(devhandle, devino, cookie);
+ if (hv_error)
+ pr_err("HV vintr set cookie failed = %ld\n", hv_error);
+
+ return hv_error;
}
-unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
+static void cookie_handler_data(struct irq_handler_data *data,
+ u32 devhandle, unsigned int devino)
{
- unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);
+ data->dev_handle = devhandle;
+ data->dev_ino = devino;
+}
- return sun4v_build_common(sysino, &sun4v_irq);
+static unsigned int cookie_build_irq(u32 devhandle, unsigned int devino,
+ struct irq_chip *chip)
+{
+ unsigned long hv_error;
+ unsigned int irq;
+
+ irq = sun4v_build_common(devhandle, devino, cookie_handler_data, chip);
+
+ hv_error = cookie_assign(irq, devhandle, devino);
+ if (hv_error) {
+ irq_free(irq);
+ irq = 0;
+ }
+
+ return irq;
}
-unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
+static unsigned int sun4v_build_cookie(u32 devhandle, unsigned int devino)
{
- struct irq_handler_data *handler_data;
- unsigned long hv_err, cookie;
- struct ino_bucket *bucket;
unsigned int irq;
- bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
- if (unlikely(!bucket))
- return 0;
+ irq = cookie_exists(devhandle, devino);
+ if (irq)
+ goto out;
- /* The only reference we store to the IRQ bucket is
- * by physical address which kmemleak can't see, tell
- * it that this object explicitly is not a leak and
- * should be scanned.
- */
- kmemleak_not_leak(bucket);
+ irq = cookie_build_irq(devhandle, devino, &sun4v_virq);
- __flush_dcache_range((unsigned long) bucket,
- ((unsigned long) bucket +
- sizeof(struct ino_bucket)));
+out:
+ return irq;
+}
- irq = irq_alloc(devhandle, devino);
+static void sysino_set_bucket(unsigned int irq)
+{
+ struct irq_handler_data *ihd = irq_get_handler_data(irq);
+ struct ino_bucket *bucket;
+ unsigned long sysino;
+
+ sysino = sun4v_devino_to_sysino(ihd->dev_handle, ihd->dev_ino);
+ BUG_ON(sysino >= nr_ivec);
+ bucket = &ivector_table[sysino];
bucket_set_irq(__pa(bucket), irq);
+}
- irq_set_chip_and_handler_name(irq, &sun4v_virq, handle_fasteoi_irq,
- "IVEC");
+static void sysino_handler_data(struct irq_handler_data *data,
+ u32 devhandle, unsigned int devino)
+{
+ unsigned long sysino;
- handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
- if (unlikely(!handler_data))
- return 0;
+ sysino = sun4v_devino_to_sysino(devhandle, devino);
+ data->sysino = sysino;
+}
- /* In order to make the LDC channel startup sequence easier,
- * especially wrt. locking, we do not let request_irq() enable
- * the interrupt.
- */
- irq_set_status_flags(irq, IRQ_NOAUTOEN);
- irq_set_handler_data(irq, handler_data);
+static unsigned int sysino_build_irq(u32 devhandle, unsigned int devino,
+ struct irq_chip *chip)
+{
+ unsigned int irq;
- /* Catch accidental accesses to these things. IMAP/ICLR handling
- * is done by hypervisor calls on sun4v platforms, not by direct
- * register accesses.
- */
- handler_data->imap = ~0UL;
- handler_data->iclr = ~0UL;
+ irq = sun4v_build_common(devhandle, devino, sysino_handler_data, chip);
+ if (!irq)
+ goto out;
- cookie = ~__pa(bucket);
- hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
- if (hv_err) {
- prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
- "err=%lu\n", devhandle, devino, hv_err);
- prom_halt();
- }
+ sysino_set_bucket(irq);
+out:
+ return irq;
+}
+static int sun4v_build_sysino(u32 devhandle, unsigned int devino)
+{
+ int irq;
+
+ irq = sysino_exists(devhandle, devino);
+ if (irq)
+ goto out;
+
+ irq = sysino_build_irq(devhandle, devino, &sun4v_irq);
+out:
return irq;
}
-void ack_bad_irq(unsigned int irq)
+unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
- unsigned int ino = irq_table[irq].dev_ino;
+ unsigned int irq;
- if (!ino)
- ino = 0xdeadbeef;
+ if (sun4v_cookie_only_virqs())
+ irq = sun4v_build_cookie(devhandle, devino);
+ else
+ irq = sun4v_build_sysino(devhandle, devino);
- printk(KERN_CRIT "Unexpected IRQ from ino[%x] irq[%u]\n",
- ino, irq);
+ return irq;
+}
+
+unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
+{
+ int irq;
+
+ irq = cookie_build_irq(devhandle, devino, &sun4v_virq);
+ if (!irq)
+ goto out;
+
+ /* Borrowed from the original function: do not let request_irq()
+ * enable the interrupt.
+ */
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
+
+out:
+ return irq;
}
void *hardirq_stack[NR_CPUS];
@@ -720,9 +871,12 @@ void fixup_irqs(void)
for (irq = 0; irq < NR_IRQS; irq++) {
struct irq_desc *desc = irq_to_desc(irq);
- struct irq_data *data = irq_desc_get_irq_data(desc);
+ struct irq_data *data;
unsigned long flags;
+ if (!desc)
+ continue;
+ data = irq_desc_get_irq_data(desc);
raw_spin_lock_irqsave(&desc->lock, flags);
if (desc->action && !irqd_is_per_cpu(data)) {
if (data->chip->irq_set_affinity)
@@ -922,16 +1076,22 @@ static struct irqaction timer_irq_action = {
.name = "timer",
};
-/* Only invoked on boot processor. */
-void __init init_IRQ(void)
+static void __init irq_ivector_init(void)
{
- unsigned long size;
+ unsigned long size, order;
+ unsigned int ivecs;
- map_prom_timers();
- kill_prom_timer();
+ /* If we are doing cookie only VIRQs then we do not need the ivector
+ * table to process interrupts.
+ */
+ if (sun4v_cookie_only_virqs())
+ return;
- size = sizeof(struct ino_bucket) * NUM_IVECS;
- ivector_table = kzalloc(size, GFP_KERNEL);
+ ivecs = size_nr_ivec();
+ size = sizeof(struct ino_bucket) * ivecs;
+ order = get_order(size);
+ ivector_table = (struct ino_bucket *)
+ __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!ivector_table) {
prom_printf("Fatal error, cannot allocate ivector_table\n");
prom_halt();
@@ -940,6 +1100,15 @@ void __init init_IRQ(void)
((unsigned long) ivector_table) + size);
ivector_table_pa = __pa(ivector_table);
+}
+
+/* Only invoked on boot processor. */
+void __init init_IRQ(void)
+{
+ irq_init_hv();
+ irq_ivector_init();
+ map_prom_timers();
+ kill_prom_timer();
if (tlb_type == hypervisor)
sun4v_init_mondo_queues();
diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S
index 605d49204580..ef0d8e9e1210 100644
--- a/arch/sparc/kernel/ktlb.S
+++ b/arch/sparc/kernel/ktlb.S
@@ -47,14 +47,6 @@ kvmap_itlb_vmalloc_addr:
KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath)
TSB_LOCK_TAG(%g1, %g2, %g7)
-
- /* Load and check PTE. */
- ldxa [%g5] ASI_PHYS_USE_EC, %g5
- mov 1, %g7
- sllx %g7, TSB_TAG_INVALID_BIT, %g7
- brgez,a,pn %g5, kvmap_itlb_longpath
- TSB_STORE(%g1, %g7)
-
TSB_WRITE(%g1, %g5, %g6)
/* fallthrough to TLB load */
@@ -118,6 +110,12 @@ kvmap_dtlb_obp:
ba,pt %xcc, kvmap_dtlb_load
nop
+kvmap_linear_early:
+ sethi %hi(kern_linear_pte_xor), %g7
+ ldx [%g7 + %lo(kern_linear_pte_xor)], %g2
+ ba,pt %xcc, kvmap_dtlb_tsb4m_load
+ xor %g2, %g4, %g5
+
.align 32
kvmap_dtlb_tsb4m_load:
TSB_LOCK_TAG(%g1, %g2, %g7)
@@ -146,105 +144,17 @@ kvmap_dtlb_4v:
/* Correct TAG_TARGET is already in %g6, check 4mb TSB. */
KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#endif
- /* TSB entry address left in %g1, lookup linear PTE.
- * Must preserve %g1 and %g6 (TAG).
- */
-kvmap_dtlb_tsb4m_miss:
- /* Clear the PAGE_OFFSET top virtual bits, shift
- * down to get PFN, and make sure PFN is in range.
- */
-661: sllx %g4, 0, %g5
- .section .page_offset_shift_patch, "ax"
- .word 661b
- .previous
-
- /* Check to see if we know about valid memory at the 4MB
- * chunk this physical address will reside within.
+ /* Linear mapping TSB lookup failed. Fallthrough to kernel
+ * page table based lookup.
*/
-661: srlx %g5, MAX_PHYS_ADDRESS_BITS, %g2
- .section .page_offset_shift_patch, "ax"
- .word 661b
- .previous
-
- brnz,pn %g2, kvmap_dtlb_longpath
- nop
-
- /* This unconditional branch and delay-slot nop gets patched
- * by the sethi sequence once the bitmap is properly setup.
- */
- .globl valid_addr_bitmap_insn
-valid_addr_bitmap_insn:
- ba,pt %xcc, 2f
- nop
- .subsection 2
- .globl valid_addr_bitmap_patch
-valid_addr_bitmap_patch:
- sethi %hi(sparc64_valid_addr_bitmap), %g7
- or %g7, %lo(sparc64_valid_addr_bitmap), %g7
- .previous
-
-661: srlx %g5, ILOG2_4MB, %g2
- .section .page_offset_shift_patch, "ax"
- .word 661b
- .previous
-
- srlx %g2, 6, %g5
- and %g2, 63, %g2
- sllx %g5, 3, %g5
- ldx [%g7 + %g5], %g5
- mov 1, %g7
- sllx %g7, %g2, %g7
- andcc %g5, %g7, %g0
- be,pn %xcc, kvmap_dtlb_longpath
-
-2: sethi %hi(kpte_linear_bitmap), %g2
-
- /* Get the 256MB physical address index. */
-661: sllx %g4, 0, %g5
- .section .page_offset_shift_patch, "ax"
- .word 661b
- .previous
-
- or %g2, %lo(kpte_linear_bitmap), %g2
-
-661: srlx %g5, ILOG2_256MB, %g5
- .section .page_offset_shift_patch, "ax"
- .word 661b
- .previous
-
- and %g5, (32 - 1), %g7
-
- /* Divide by 32 to get the offset into the bitmask. */
- srlx %g5, 5, %g5
- add %g7, %g7, %g7
- sllx %g5, 3, %g5
-
- /* kern_linear_pte_xor[(mask >> shift) & 3)] */
- ldx [%g2 + %g5], %g2
- srlx %g2, %g7, %g7
- sethi %hi(kern_linear_pte_xor), %g5
- and %g7, 3, %g7
- or %g5, %lo(kern_linear_pte_xor), %g5
- sllx %g7, 3, %g7
- ldx [%g5 + %g7], %g2
-
.globl kvmap_linear_patch
kvmap_linear_patch:
- ba,pt %xcc, kvmap_dtlb_tsb4m_load
- xor %g2, %g4, %g5
+ ba,a,pt %xcc, kvmap_linear_early
kvmap_dtlb_vmalloc_addr:
KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)
TSB_LOCK_TAG(%g1, %g2, %g7)
-
- /* Load and check PTE. */
- ldxa [%g5] ASI_PHYS_USE_EC, %g5
- mov 1, %g7
- sllx %g7, TSB_TAG_INVALID_BIT, %g7
- brgez,a,pn %g5, kvmap_dtlb_longpath
- TSB_STORE(%g1, %g7)
-
TSB_WRITE(%g1, %g5, %g6)
/* fallthrough to TLB load */
@@ -276,13 +186,8 @@ kvmap_dtlb_load:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
kvmap_vmemmap:
- sub %g4, %g5, %g5
- srlx %g5, ILOG2_4MB, %g5
- sethi %hi(vmemmap_table), %g1
- sllx %g5, 3, %g5
- or %g1, %lo(vmemmap_table), %g1
- ba,pt %xcc, kvmap_dtlb_load
- ldx [%g1 + %g5], %g5
+ KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)
+ ba,a,pt %xcc, kvmap_dtlb_load
#endif
kvmap_dtlb_nonlinear:
@@ -294,8 +199,8 @@ kvmap_dtlb_nonlinear:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* Do not use the TSB for vmemmap. */
- mov (VMEMMAP_BASE >> 40), %g5
- sllx %g5, 40, %g5
+ sethi %hi(VMEMMAP_BASE), %g5
+ ldx [%g5 + %lo(VMEMMAP_BASE)], %g5
cmp %g4,%g5
bgeu,pn %xcc, kvmap_vmemmap
nop
@@ -307,8 +212,8 @@ kvmap_dtlb_tsbmiss:
sethi %hi(MODULES_VADDR), %g5
cmp %g4, %g5
blu,pn %xcc, kvmap_dtlb_longpath
- mov (VMALLOC_END >> 40), %g5
- sllx %g5, 40, %g5
+ sethi %hi(VMALLOC_END), %g5
+ ldx [%g5 + %lo(VMALLOC_END)], %g5
cmp %g4, %g5
bgeu,pn %xcc, kvmap_dtlb_longpath
nop
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index 0af28b984695..4310332872d4 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -1078,7 +1078,8 @@ static void ldc_iommu_release(struct ldc_channel *lp)
struct ldc_channel *ldc_alloc(unsigned long id,
const struct ldc_channel_config *cfgp,
- void *event_arg)
+ void *event_arg,
+ const char *name)
{
struct ldc_channel *lp;
const struct ldc_mode_ops *mops;
@@ -1093,6 +1094,8 @@ struct ldc_channel *ldc_alloc(unsigned long id,
err = -EINVAL;
if (!cfgp)
goto out_err;
+ if (!name)
+ goto out_err;
switch (cfgp->mode) {
case LDC_MODE_RAW:
@@ -1185,6 +1188,21 @@ struct ldc_channel *ldc_alloc(unsigned long id,
INIT_HLIST_HEAD(&lp->mh_list);
+ snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name);
+ snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);
+
+ err = request_irq(lp->cfg.rx_irq, ldc_rx, 0,
+ lp->rx_irq_name, lp);
+ if (err)
+ goto out_free_txq;
+
+ err = request_irq(lp->cfg.tx_irq, ldc_tx, 0,
+ lp->tx_irq_name, lp);
+ if (err) {
+ free_irq(lp->cfg.rx_irq, lp);
+ goto out_free_txq;
+ }
+
return lp;
out_free_txq:
@@ -1237,31 +1255,14 @@ EXPORT_SYMBOL(ldc_free);
* state. This does not initiate a handshake, ldc_connect() does
* that.
*/
-int ldc_bind(struct ldc_channel *lp, const char *name)
+int ldc_bind(struct ldc_channel *lp)
{
unsigned long hv_err, flags;
int err = -EINVAL;
- if (!name ||
- (lp->state != LDC_STATE_INIT))
+ if (lp->state != LDC_STATE_INIT)
return -EINVAL;
- snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name);
- snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);
-
- err = request_irq(lp->cfg.rx_irq, ldc_rx, 0,
- lp->rx_irq_name, lp);
- if (err)
- return err;
-
- err = request_irq(lp->cfg.tx_irq, ldc_tx, 0,
- lp->tx_irq_name, lp);
- if (err) {
- free_irq(lp->cfg.rx_irq, lp);
- return err;
- }
-
-
spin_lock_irqsave(&lp->lock, flags);
enable_irq(lp->cfg.rx_irq);
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
index 683c4af999de..9bbb8f2bbfcc 100644
--- a/arch/sparc/kernel/leon_kernel.c
+++ b/arch/sparc/kernel/leon_kernel.c
@@ -37,6 +37,7 @@ unsigned long amba_system_id;
static DEFINE_SPINLOCK(leon_irq_lock);
static unsigned long leon3_gptimer_idx; /* Timer Index (0..6) within Timer Core */
+static unsigned long leon3_gptimer_ackmask; /* For clearing pending bit */
unsigned long leon3_gptimer_irq; /* interrupt controller irq number */
unsigned int sparc_leon_eirq;
#define LEON_IMASK(cpu) (&leon3_irqctrl_regs->mask[cpu])
@@ -260,11 +261,19 @@ void leon_update_virq_handling(unsigned int virq,
static u32 leon_cycles_offset(void)
{
- u32 rld, val, off;
+ u32 rld, val, ctrl, off;
+
rld = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].rld);
val = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].val);
- off = rld - val;
- return rld - val;
+ ctrl = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl);
+ if (LEON3_GPTIMER_CTRL_ISPENDING(ctrl)) {
+ val = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].val);
+ off = 2 * rld - val;
+ } else {
+ off = rld - val;
+ }
+
+ return off;
}
#ifdef CONFIG_SMP
@@ -302,6 +311,7 @@ void __init leon_init_timers(void)
int ampopts;
int err;
u32 config;
+ u32 ctrl;
sparc_config.get_cycles_offset = leon_cycles_offset;
sparc_config.cs_period = 1000000 / HZ;
@@ -374,6 +384,16 @@ void __init leon_init_timers(void)
if (!(leon3_gptimer_regs && leon3_irqctrl_regs && leon3_gptimer_irq))
goto bad;
+ ctrl = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl);
+ LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl,
+ ctrl | LEON3_GPTIMER_CTRL_PENDING);
+ ctrl = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl);
+
+ if ((ctrl & LEON3_GPTIMER_CTRL_PENDING) != 0)
+ leon3_gptimer_ackmask = ~LEON3_GPTIMER_CTRL_PENDING;
+ else
+ leon3_gptimer_ackmask = ~0;
+
LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].val, 0);
LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].rld,
(((1000000 / HZ) - 1)));
@@ -452,6 +472,11 @@ bad:
static void leon_clear_clock_irq(void)
{
+ u32 ctrl;
+
+ ctrl = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl);
+ LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl,
+ ctrl & leon3_gptimer_ackmask);
}
static void leon_load_profile_irq(int cpu, unsigned int limit)
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index 269af58497aa..7e967c8018c8 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -191,12 +191,41 @@ static const struct pcr_ops n4_pcr_ops = {
.pcr_nmi_disable = PCR_N4_PICNPT,
};
+static u64 n5_pcr_read(unsigned long reg_num)
+{
+ unsigned long val;
+
+ (void) sun4v_t5_get_perfreg(reg_num, &val);
+
+ return val;
+}
+
+static void n5_pcr_write(unsigned long reg_num, u64 val)
+{
+ (void) sun4v_t5_set_perfreg(reg_num, val);
+}
+
+static const struct pcr_ops n5_pcr_ops = {
+ .read_pcr = n5_pcr_read,
+ .write_pcr = n5_pcr_write,
+ .read_pic = n4_pic_read,
+ .write_pic = n4_pic_write,
+ .nmi_picl_value = n4_picl_value,
+ .pcr_nmi_enable = (PCR_N4_PICNPT | PCR_N4_STRACE |
+ PCR_N4_UTRACE | PCR_N4_TOE |
+ (26 << PCR_N4_SL_SHIFT)),
+ .pcr_nmi_disable = PCR_N4_PICNPT,
+};
+
+
static unsigned long perf_hsvc_group;
static unsigned long perf_hsvc_major;
static unsigned long perf_hsvc_minor;
static int __init register_perf_hsvc(void)
{
+ unsigned long hverror;
+
if (tlb_type == hypervisor) {
switch (sun4v_chip_type) {
case SUN4V_CHIP_NIAGARA1:
@@ -215,6 +244,10 @@ static int __init register_perf_hsvc(void)
perf_hsvc_group = HV_GRP_VT_CPU;
break;
+ case SUN4V_CHIP_NIAGARA5:
+ perf_hsvc_group = HV_GRP_T5_CPU;
+ break;
+
default:
return -ENODEV;
}
@@ -222,10 +255,12 @@ static int __init register_perf_hsvc(void)
perf_hsvc_major = 1;
perf_hsvc_minor = 0;
- if (sun4v_hvapi_register(perf_hsvc_group,
- perf_hsvc_major,
- &perf_hsvc_minor)) {
- printk("perfmon: Could not register hvapi.\n");
+ hverror = sun4v_hvapi_register(perf_hsvc_group,
+ perf_hsvc_major,
+ &perf_hsvc_minor);
+ if (hverror) {
+ pr_err("perfmon: Could not register hvapi(0x%lx).\n",
+ hverror);
return -ENODEV;
}
}
@@ -254,6 +289,10 @@ static int __init setup_sun4v_pcr_ops(void)
pcr_ops = &n4_pcr_ops;
break;
+ case SUN4V_CHIP_NIAGARA5:
+ pcr_ops = &n5_pcr_ops;
+ break;
+
default:
ret = -ENODEV;
break;
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index d35c490a91cb..c9759ad3f34a 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1662,7 +1662,8 @@ static bool __init supported_pmu(void)
sparc_pmu = &niagara2_pmu;
return true;
}
- if (!strcmp(sparc_pmu_type, "niagara4")) {
+ if (!strcmp(sparc_pmu_type, "niagara4") ||
+ !strcmp(sparc_pmu_type, "niagara5")) {
sparc_pmu = &niagara4_pmu;
return true;
}
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 3fdb455e3318..e629b8377587 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -141,21 +141,9 @@ static void __init boot_flags_init(char *commands)
process_switch(*commands++);
continue;
}
- if (!strncmp(commands, "mem=", 4)) {
- /*
- * "mem=XXX[kKmM]" overrides the PROM-reported
- * memory size.
- */
- cmdline_memory_size = simple_strtoul(commands + 4,
- &commands, 0);
- if (*commands == 'K' || *commands == 'k') {
- cmdline_memory_size <<= 10;
- commands++;
- } else if (*commands=='M' || *commands=='m') {
- cmdline_memory_size <<= 20;
- commands++;
- }
- }
+ if (!strncmp(commands, "mem=", 4))
+ cmdline_memory_size = memparse(commands + 4, &commands);
+
while (*commands && *commands != ' ')
commands++;
}
@@ -500,12 +488,16 @@ static void __init init_sparc64_elf_hwcap(void)
sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
sun4v_chip_type == SUN4V_CHIP_SPARC64X)
cap |= HWCAP_SPARC_BLKINIT;
if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
sun4v_chip_type == SUN4V_CHIP_SPARC64X)
cap |= HWCAP_SPARC_N2;
}
@@ -533,6 +525,8 @@ static void __init init_sparc64_elf_hwcap(void)
sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
sun4v_chip_type == SUN4V_CHIP_SPARC64X)
cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
AV_SPARC_ASI_BLK_INIT |
@@ -540,6 +534,8 @@ static void __init init_sparc64_elf_hwcap(void)
if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
sun4v_chip_type == SUN4V_CHIP_SPARC64X)
cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
AV_SPARC_FMAF);
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index f7ba87543e5f..302c476413d5 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1138,7 +1138,7 @@ static unsigned long penguins_are_doing_time;
void smp_capture(void)
{
- int result = atomic_add_ret(1, &smp_capture_depth);
+ int result = atomic_add_return(1, &smp_capture_depth);
if (result == 1) {
int ncpus = num_online_cpus();
@@ -1467,6 +1467,13 @@ static void __init pcpu_populate_pte(unsigned long addr)
pud_t *pud;
pmd_t *pmd;
+ if (pgd_none(*pgd)) {
+ pud_t *new;
+
+ new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ pgd_populate(&init_mm, pgd, new);
+ }
+
pud = pud_offset(pgd, addr);
if (pud_none(*pud)) {
pmd_t *new;
diff --git a/arch/sparc/kernel/sun4v_tlb_miss.S b/arch/sparc/kernel/sun4v_tlb_miss.S
index e0c09bf85610..6179e19bc9b9 100644
--- a/arch/sparc/kernel/sun4v_tlb_miss.S
+++ b/arch/sparc/kernel/sun4v_tlb_miss.S
@@ -195,6 +195,11 @@ sun4v_tsb_miss_common:
ldx [%g2 + TRAP_PER_CPU_PGD_PADDR], %g7
sun4v_itlb_error:
+ rdpr %tl, %g1
+ cmp %g1, 1
+ ble,pt %icc, sun4v_bad_ra
+ or %g0, FAULT_CODE_BAD_RA | FAULT_CODE_ITLB, %g1
+
sethi %hi(sun4v_err_itlb_vaddr), %g1
stx %g4, [%g1 + %lo(sun4v_err_itlb_vaddr)]
sethi %hi(sun4v_err_itlb_ctx), %g1
@@ -206,15 +211,10 @@ sun4v_itlb_error:
sethi %hi(sun4v_err_itlb_error), %g1
stx %o0, [%g1 + %lo(sun4v_err_itlb_error)]
+ sethi %hi(1f), %g7
rdpr %tl, %g4
- cmp %g4, 1
- ble,pt %icc, 1f
- sethi %hi(2f), %g7
ba,pt %xcc, etraptl1
- or %g7, %lo(2f), %g7
-
-1: ba,pt %xcc, etrap
-2: or %g7, %lo(2b), %g7
+1: or %g7, %lo(1f), %g7
mov %l4, %o1
call sun4v_itlb_error_report
add %sp, PTREGS_OFF, %o0
@@ -222,6 +222,11 @@ sun4v_itlb_error:
/* NOTREACHED */
sun4v_dtlb_error:
+ rdpr %tl, %g1
+ cmp %g1, 1
+ ble,pt %icc, sun4v_bad_ra
+ or %g0, FAULT_CODE_BAD_RA | FAULT_CODE_DTLB, %g1
+
sethi %hi(sun4v_err_dtlb_vaddr), %g1
stx %g4, [%g1 + %lo(sun4v_err_dtlb_vaddr)]
sethi %hi(sun4v_err_dtlb_ctx), %g1
@@ -233,21 +238,23 @@ sun4v_dtlb_error:
sethi %hi(sun4v_err_dtlb_error), %g1
stx %o0, [%g1 + %lo(sun4v_err_dtlb_error)]
+ sethi %hi(1f), %g7
rdpr %tl, %g4
- cmp %g4, 1
- ble,pt %icc, 1f
- sethi %hi(2f), %g7
ba,pt %xcc, etraptl1
- or %g7, %lo(2f), %g7
-
-1: ba,pt %xcc, etrap
-2: or %g7, %lo(2b), %g7
+1: or %g7, %lo(1f), %g7
mov %l4, %o1
call sun4v_dtlb_error_report
add %sp, PTREGS_OFF, %o0
/* NOTREACHED */
+sun4v_bad_ra:
+ or %g0, %g4, %g5
+ ba,pt %xcc, sparc64_realfault_common
+ or %g1, %g0, %g4
+
+ /* NOTREACHED */
+
/* Instruction Access Exception, tl0. */
sun4v_iacc:
ldxa [%g0] ASI_SCRATCHPAD, %g2
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index fb6640ec8557..981a769b9558 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2104,6 +2104,11 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
atomic_inc(&sun4v_nonresum_oflow_cnt);
}
+static void sun4v_tlb_error(struct pt_regs *regs)
+{
+ die_if_kernel("TLB/TSB error", regs);
+}
+
unsigned long sun4v_err_itlb_vaddr;
unsigned long sun4v_err_itlb_ctx;
unsigned long sun4v_err_itlb_pte;
@@ -2111,8 +2116,7 @@ unsigned long sun4v_err_itlb_error;
void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
{
- if (tl > 1)
- dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+ dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
regs->tpc, tl);
@@ -2125,7 +2129,7 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx,
sun4v_err_itlb_pte, sun4v_err_itlb_error);
- prom_halt();
+ sun4v_tlb_error(regs);
}
unsigned long sun4v_err_dtlb_vaddr;
@@ -2135,8 +2139,7 @@ unsigned long sun4v_err_dtlb_error;
void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
{
- if (tl > 1)
- dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+ dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
regs->tpc, tl);
@@ -2149,7 +2152,7 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx,
sun4v_err_dtlb_pte, sun4v_err_dtlb_error);
- prom_halt();
+ sun4v_tlb_error(regs);
}
void hypervisor_tlbop_error(unsigned long err, unsigned long op)
diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c
index 8647fcc5ca6c..cb5789c9f961 100644
--- a/arch/sparc/kernel/vio.c
+++ b/arch/sparc/kernel/vio.c
@@ -180,8 +180,10 @@ static void vio_fill_channel_info(struct mdesc_handle *hp, u64 mp,
vdev->tx_irq = sun4v_build_virq(cdev_cfg_handle, *irq);
irq = mdesc_get_property(hp, target, "rx-ino", NULL);
- if (irq)
+ if (irq) {
vdev->rx_irq = sun4v_build_virq(cdev_cfg_handle, *irq);
+ vdev->rx_ino = *irq;
+ }
chan_id = mdesc_get_property(hp, target, "id", NULL);
if (chan_id)
@@ -189,6 +191,15 @@ static void vio_fill_channel_info(struct mdesc_handle *hp, u64 mp,
}
}
+int vio_set_intr(unsigned long dev_ino, int state)
+{
+ int err;
+
+ err = sun4v_vintr_set_valid(cdev_cfg_handle, dev_ino, state);
+ return err;
+}
+EXPORT_SYMBOL(vio_set_intr);
+
static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
struct device *parent)
{
diff --git a/arch/sparc/kernel/viohs.c b/arch/sparc/kernel/viohs.c
index 7ef081a185b1..526fcb5d8ce9 100644
--- a/arch/sparc/kernel/viohs.c
+++ b/arch/sparc/kernel/viohs.c
@@ -724,7 +724,7 @@ int vio_ldc_alloc(struct vio_driver_state *vio,
cfg.tx_irq = vio->vdev->tx_irq;
cfg.rx_irq = vio->vdev->rx_irq;
- lp = ldc_alloc(vio->vdev->channel_id, &cfg, event_arg);
+ lp = ldc_alloc(vio->vdev->channel_id, &cfg, event_arg, vio->name);
if (IS_ERR(lp))
return PTR_ERR(lp);
@@ -756,7 +756,7 @@ void vio_port_up(struct vio_driver_state *vio)
err = 0;
if (state == LDC_STATE_INIT) {
- err = ldc_bind(vio->lp, vio->name);
+ err = ldc_bind(vio->lp);
if (err)
printk(KERN_WARNING "%s: Port %lu bind failed, "
"err=%d\n",
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index 932ff90fd760..09243057cb0b 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -35,8 +35,9 @@ jiffies = jiffies_64;
SECTIONS
{
- /* swapper_low_pmd_dir is sparc64 only */
- swapper_low_pmd_dir = 0x0000000000402000;
+#ifdef CONFIG_SPARC64
+ swapper_pg_dir = 0x0000000000402000;
+#endif
. = INITIAL_ADDRESS;
.text TEXTSTART :
{
@@ -122,11 +123,6 @@ SECTIONS
*(.swapper_4m_tsb_phys_patch)
__swapper_4m_tsb_phys_patch_end = .;
}
- .page_offset_shift_patch : {
- __page_offset_shift_patch = .;
- *(.page_offset_shift_patch)
- __page_offset_shift_patch_end = .;
- }
.popc_3insn_patch : {
__popc_3insn_patch = .;
*(.popc_3insn_patch)
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index 1d32b54089aa..a7c418ac26af 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -27,18 +27,23 @@ static DEFINE_SPINLOCK(dummy);
#endif /* SMP */
-int __atomic_add_return(int i, atomic_t *v)
-{
- int ret;
- unsigned long flags;
- spin_lock_irqsave(ATOMIC_HASH(v), flags);
-
- ret = (v->counter += i);
-
- spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
- return ret;
-}
-EXPORT_SYMBOL(__atomic_add_return);
+#define ATOMIC_OP(op, cop) \
+int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ int ret; \
+ unsigned long flags; \
+ spin_lock_irqsave(ATOMIC_HASH(v), flags); \
+ \
+ ret = (v->counter cop i); \
+ \
+ spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \
+ return ret; \
+} \
+EXPORT_SYMBOL(atomic_##op##_return);
+
+ATOMIC_OP(add, +=)
+
+#undef ATOMIC_OP
int atomic_cmpxchg(atomic_t *v, int old, int new)
{
diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
index 85c233d0a340..05dac43907d1 100644
--- a/arch/sparc/lib/atomic_64.S
+++ b/arch/sparc/lib/atomic_64.S
@@ -14,109 +14,80 @@
* memory barriers, and a second which returns
* a value and does the barriers.
*/
-ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
- BACKOFF_SETUP(%o2)
-1: lduw [%o1], %g1
- add %g1, %o0, %g7
- cas [%o1], %g1, %g7
- cmp %g1, %g7
- bne,pn %icc, BACKOFF_LABEL(2f, 1b)
- nop
- retl
- nop
-2: BACKOFF_SPIN(%o2, %o3, 1b)
-ENDPROC(atomic_add)
-ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
- BACKOFF_SETUP(%o2)
-1: lduw [%o1], %g1
- sub %g1, %o0, %g7
- cas [%o1], %g1, %g7
- cmp %g1, %g7
- bne,pn %icc, BACKOFF_LABEL(2f, 1b)
- nop
- retl
- nop
-2: BACKOFF_SPIN(%o2, %o3, 1b)
-ENDPROC(atomic_sub)
+#define ATOMIC_OP(op) \
+ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
+ BACKOFF_SETUP(%o2); \
+1: lduw [%o1], %g1; \
+ op %g1, %o0, %g7; \
+ cas [%o1], %g1, %g7; \
+ cmp %g1, %g7; \
+ bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
+ nop; \
+ retl; \
+ nop; \
+2: BACKOFF_SPIN(%o2, %o3, 1b); \
+ENDPROC(atomic_##op); \
-ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
- BACKOFF_SETUP(%o2)
-1: lduw [%o1], %g1
- add %g1, %o0, %g7
- cas [%o1], %g1, %g7
- cmp %g1, %g7
- bne,pn %icc, BACKOFF_LABEL(2f, 1b)
- add %g1, %o0, %g1
- retl
- sra %g1, 0, %o0
-2: BACKOFF_SPIN(%o2, %o3, 1b)
-ENDPROC(atomic_add_ret)
+#define ATOMIC_OP_RETURN(op) \
+ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
+ BACKOFF_SETUP(%o2); \
+1: lduw [%o1], %g1; \
+ op %g1, %o0, %g7; \
+ cas [%o1], %g1, %g7; \
+ cmp %g1, %g7; \
+ bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
+ op %g1, %o0, %g1; \
+ retl; \
+ sra %g1, 0, %o0; \
+2: BACKOFF_SPIN(%o2, %o3, 1b); \
+ENDPROC(atomic_##op##_return);
-ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
- BACKOFF_SETUP(%o2)
-1: lduw [%o1], %g1
- sub %g1, %o0, %g7
- cas [%o1], %g1, %g7
- cmp %g1, %g7
- bne,pn %icc, BACKOFF_LABEL(2f, 1b)
- sub %g1, %o0, %g1
- retl
- sra %g1, 0, %o0
-2: BACKOFF_SPIN(%o2, %o3, 1b)
-ENDPROC(atomic_sub_ret)
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
-ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
- BACKOFF_SETUP(%o2)
-1: ldx [%o1], %g1
- add %g1, %o0, %g7
- casx [%o1], %g1, %g7
- cmp %g1, %g7
- bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
- nop
- retl
- nop
-2: BACKOFF_SPIN(%o2, %o3, 1b)
-ENDPROC(atomic64_add)
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
-ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
- BACKOFF_SETUP(%o2)
-1: ldx [%o1], %g1
- sub %g1, %o0, %g7
- casx [%o1], %g1, %g7
- cmp %g1, %g7
- bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
- nop
- retl
- nop
-2: BACKOFF_SPIN(%o2, %o3, 1b)
-ENDPROC(atomic64_sub)
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
-ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
- BACKOFF_SETUP(%o2)
-1: ldx [%o1], %g1
- add %g1, %o0, %g7
- casx [%o1], %g1, %g7
- cmp %g1, %g7
- bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
- nop
- retl
- add %g1, %o0, %o0
-2: BACKOFF_SPIN(%o2, %o3, 1b)
-ENDPROC(atomic64_add_ret)
+#define ATOMIC64_OP(op) \
+ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
+ BACKOFF_SETUP(%o2); \
+1: ldx [%o1], %g1; \
+ op %g1, %o0, %g7; \
+ casx [%o1], %g1, %g7; \
+ cmp %g1, %g7; \
+ bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
+ nop; \
+ retl; \
+ nop; \
+2: BACKOFF_SPIN(%o2, %o3, 1b); \
+ENDPROC(atomic64_##op); \
-ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
- BACKOFF_SETUP(%o2)
-1: ldx [%o1], %g1
- sub %g1, %o0, %g7
- casx [%o1], %g1, %g7
- cmp %g1, %g7
- bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
- nop
- retl
- sub %g1, %o0, %o0
-2: BACKOFF_SPIN(%o2, %o3, 1b)
-ENDPROC(atomic64_sub_ret)
+#define ATOMIC64_OP_RETURN(op) \
+ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
+ BACKOFF_SETUP(%o2); \
+1: ldx [%o1], %g1; \
+ op %g1, %o0, %g7; \
+ casx [%o1], %g1, %g7; \
+ cmp %g1, %g7; \
+ bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
+ nop; \
+ retl; \
+ op %g1, %o0, %o0; \
+2: BACKOFF_SPIN(%o2, %o3, 1b); \
+ENDPROC(atomic64_##op##_return);
+
+#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
+
+ATOMIC64_OPS(add)
+ATOMIC64_OPS(sub)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
BACKOFF_SETUP(%o2)
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
index 323335b9cd2b..1d649a95660c 100644
--- a/arch/sparc/lib/ksyms.c
+++ b/arch/sparc/lib/ksyms.c
@@ -99,14 +99,23 @@ EXPORT_SYMBOL(___copy_in_user);
EXPORT_SYMBOL(__clear_user);
/* Atomic counter implementation. */
-EXPORT_SYMBOL(atomic_add);
-EXPORT_SYMBOL(atomic_add_ret);
-EXPORT_SYMBOL(atomic_sub);
-EXPORT_SYMBOL(atomic_sub_ret);
-EXPORT_SYMBOL(atomic64_add);
-EXPORT_SYMBOL(atomic64_add_ret);
-EXPORT_SYMBOL(atomic64_sub);
-EXPORT_SYMBOL(atomic64_sub_ret);
+#define ATOMIC_OP(op) \
+EXPORT_SYMBOL(atomic_##op); \
+EXPORT_SYMBOL(atomic64_##op);
+
+#define ATOMIC_OP_RETURN(op) \
+EXPORT_SYMBOL(atomic_##op##_return); \
+EXPORT_SYMBOL(atomic64_##op##_return);
+
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
+
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
EXPORT_SYMBOL(atomic64_dec_if_positive);
/* Atomic bit operations. */
diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S
index 99c017be8719..f75e6906df14 100644
--- a/arch/sparc/lib/memset.S
+++ b/arch/sparc/lib/memset.S
@@ -3,8 +3,9 @@
* Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*
- * Returns 0, if ok, and number of bytes not yet set if exception
- * occurs and we were called as clear_user.
+ * Calls to memset return the initial %o0. Calls to bzero return 0 if ok,
+ * and the number of bytes not yet set if an exception occurs and we were
+ * called as clear_user.
*/
#include <asm/ptrace.h>
@@ -65,6 +66,8 @@ __bzero_begin:
.globl __memset_start, __memset_end
__memset_start:
memset:
+ mov %o0, %g1
+ mov 1, %g4
and %o1, 0xff, %g3
sll %g3, 8, %g2
or %g3, %g2, %g3
@@ -89,6 +92,7 @@ memset:
sub %o0, %o2, %o0
__bzero:
+ clr %g4
mov %g0, %g3
1:
cmp %o1, 7
@@ -151,8 +155,8 @@ __bzero:
bne,a 8f
EX(stb %g3, [%o0], and %o1, 1)
8:
- retl
- clr %o0
+ b 0f
+ nop
7:
be 13b
orcc %o1, 0, %g0
@@ -164,6 +168,12 @@ __bzero:
bne 8b
EX(stb %g3, [%o0 - 1], add %o1, 1)
0:
+ andcc %g4, 1, %g0
+ be 5f
+ nop
+ retl
+ mov %g1, %o0
+5:
retl
clr %o0
__memset_end:
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 587cd0565128..18fcd7167095 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -346,6 +346,9 @@ retry:
down_read(&mm->mmap_sem);
}
+ if (fault_code & FAULT_CODE_BAD_RA)
+ goto do_sigbus;
+
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 98ac8e80adae..2d91c62f7f5f 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -75,7 +75,6 @@ unsigned long kern_linear_pte_xor[4] __read_mostly;
* 'cpu' properties, but we need to have this table setup before the
* MDESC is initialized.
*/
-unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
#ifndef CONFIG_DEBUG_PAGEALLOC
/* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings.
@@ -84,10 +83,11 @@ unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
*/
extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
#endif
+extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
static unsigned long cpu_pgsz_mask;
-#define MAX_BANKS 32
+#define MAX_BANKS 1024
static struct linux_prom64_registers pavail[MAX_BANKS];
static int pavail_ents;
@@ -165,10 +165,6 @@ static void __init read_obp_memory(const char *property,
cmp_p64, NULL);
}
-unsigned long sparc64_valid_addr_bitmap[VALID_ADDR_BITMAP_BYTES /
- sizeof(unsigned long)];
-EXPORT_SYMBOL(sparc64_valid_addr_bitmap);
-
/* Kernel physical address base and size in bytes. */
unsigned long kern_base __read_mostly;
unsigned long kern_size __read_mostly;
@@ -840,7 +836,10 @@ static int find_node(unsigned long addr)
if ((addr & p->mask) == p->val)
return i;
}
- return -1;
+ /* The following condition has been observed on LDOM guests.*/
+ WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node"
+ " rule. Some physical memory will be owned by node 0.");
+ return 0;
}
static u64 memblock_nid_range(u64 start, u64 end, int *nid)
@@ -1366,9 +1365,144 @@ static unsigned long __init bootmem_init(unsigned long phys_base)
static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
static int pall_ents __initdata;
-#ifdef CONFIG_DEBUG_PAGEALLOC
+static unsigned long max_phys_bits = 40;
+
+bool kern_addr_valid(unsigned long addr)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ if ((long)addr < 0L) {
+ unsigned long pa = __pa(addr);
+
+ if ((addr >> max_phys_bits) != 0UL)
+ return false;
+
+ return pfn_valid(pa >> PAGE_SHIFT);
+ }
+
+ if (addr >= (unsigned long) KERNBASE &&
+ addr < (unsigned long)&_end)
+ return true;
+
+ pgd = pgd_offset_k(addr);
+ if (pgd_none(*pgd))
+ return 0;
+
+ pud = pud_offset(pgd, addr);
+ if (pud_none(*pud))
+ return 0;
+
+ if (pud_large(*pud))
+ return pfn_valid(pud_pfn(*pud));
+
+ pmd = pmd_offset(pud, addr);
+ if (pmd_none(*pmd))
+ return 0;
+
+ if (pmd_large(*pmd))
+ return pfn_valid(pmd_pfn(*pmd));
+
+ pte = pte_offset_kernel(pmd, addr);
+ if (pte_none(*pte))
+ return 0;
+
+ return pfn_valid(pte_pfn(*pte));
+}
+EXPORT_SYMBOL(kern_addr_valid);
+
+static unsigned long __ref kernel_map_hugepud(unsigned long vstart,
+ unsigned long vend,
+ pud_t *pud)
+{
+ const unsigned long mask16gb = (1UL << 34) - 1UL;
+ u64 pte_val = vstart;
+
+ /* Each PUD is 8GB */
+ if ((vstart & mask16gb) ||
+ (vend - vstart <= mask16gb)) {
+ pte_val ^= kern_linear_pte_xor[2];
+ pud_val(*pud) = pte_val | _PAGE_PUD_HUGE;
+
+ return vstart + PUD_SIZE;
+ }
+
+ pte_val ^= kern_linear_pte_xor[3];
+ pte_val |= _PAGE_PUD_HUGE;
+
+ vend = vstart + mask16gb + 1UL;
+ while (vstart < vend) {
+ pud_val(*pud) = pte_val;
+
+ pte_val += PUD_SIZE;
+ vstart += PUD_SIZE;
+ pud++;
+ }
+ return vstart;
+}
+
+static bool kernel_can_map_hugepud(unsigned long vstart, unsigned long vend,
+ bool guard)
+{
+ if (guard && !(vstart & ~PUD_MASK) && (vend - vstart) >= PUD_SIZE)
+ return true;
+
+ return false;
+}
+
+static unsigned long __ref kernel_map_hugepmd(unsigned long vstart,
+ unsigned long vend,
+ pmd_t *pmd)
+{
+ const unsigned long mask256mb = (1UL << 28) - 1UL;
+ const unsigned long mask2gb = (1UL << 31) - 1UL;
+ u64 pte_val = vstart;
+
+ /* Each PMD is 8MB */
+ if ((vstart & mask256mb) ||
+ (vend - vstart <= mask256mb)) {
+ pte_val ^= kern_linear_pte_xor[0];
+ pmd_val(*pmd) = pte_val | _PAGE_PMD_HUGE;
+
+ return vstart + PMD_SIZE;
+ }
+
+ if ((vstart & mask2gb) ||
+ (vend - vstart <= mask2gb)) {
+ pte_val ^= kern_linear_pte_xor[1];
+ pte_val |= _PAGE_PMD_HUGE;
+ vend = vstart + mask256mb + 1UL;
+ } else {
+ pte_val ^= kern_linear_pte_xor[2];
+ pte_val |= _PAGE_PMD_HUGE;
+ vend = vstart + mask2gb + 1UL;
+ }
+
+ while (vstart < vend) {
+ pmd_val(*pmd) = pte_val;
+
+ pte_val += PMD_SIZE;
+ vstart += PMD_SIZE;
+ pmd++;
+ }
+
+ return vstart;
+}
+
+static bool kernel_can_map_hugepmd(unsigned long vstart, unsigned long vend,
+ bool guard)
+{
+ if (guard && !(vstart & ~PMD_MASK) && (vend - vstart) >= PMD_SIZE)
+ return true;
+
+ return false;
+}
+
static unsigned long __ref kernel_map_range(unsigned long pstart,
- unsigned long pend, pgprot_t prot)
+ unsigned long pend, pgprot_t prot,
+ bool use_huge)
{
unsigned long vstart = PAGE_OFFSET + pstart;
unsigned long vend = PAGE_OFFSET + pend;
@@ -1387,19 +1521,34 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
pmd_t *pmd;
pte_t *pte;
+ if (pgd_none(*pgd)) {
+ pud_t *new;
+
+ new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ alloc_bytes += PAGE_SIZE;
+ pgd_populate(&init_mm, pgd, new);
+ }
pud = pud_offset(pgd, vstart);
if (pud_none(*pud)) {
pmd_t *new;
+ if (kernel_can_map_hugepud(vstart, vend, use_huge)) {
+ vstart = kernel_map_hugepud(vstart, vend, pud);
+ continue;
+ }
new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
alloc_bytes += PAGE_SIZE;
pud_populate(&init_mm, pud, new);
}
pmd = pmd_offset(pud, vstart);
- if (!pmd_present(*pmd)) {
+ if (pmd_none(*pmd)) {
pte_t *new;
+ if (kernel_can_map_hugepmd(vstart, vend, use_huge)) {
+ vstart = kernel_map_hugepmd(vstart, vend, pmd);
+ continue;
+ }
new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
alloc_bytes += PAGE_SIZE;
pmd_populate_kernel(&init_mm, pmd, new);
@@ -1422,100 +1571,34 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
return alloc_bytes;
}
-extern unsigned int kvmap_linear_patch[1];
-#endif /* CONFIG_DEBUG_PAGEALLOC */
-
-static void __init kpte_set_val(unsigned long index, unsigned long val)
-{
- unsigned long *ptr = kpte_linear_bitmap;
-
- val <<= ((index % (BITS_PER_LONG / 2)) * 2);
- ptr += (index / (BITS_PER_LONG / 2));
-
- *ptr |= val;
-}
-
-static const unsigned long kpte_shift_min = 28; /* 256MB */
-static const unsigned long kpte_shift_max = 34; /* 16GB */
-static const unsigned long kpte_shift_incr = 3;
-
-static unsigned long kpte_mark_using_shift(unsigned long start, unsigned long end,
- unsigned long shift)
+static void __init flush_all_kernel_tsbs(void)
{
- unsigned long size = (1UL << shift);
- unsigned long mask = (size - 1UL);
- unsigned long remains = end - start;
- unsigned long val;
-
- if (remains < size || (start & mask))
- return start;
-
- /* VAL maps:
- *
- * shift 28 --> kern_linear_pte_xor index 1
- * shift 31 --> kern_linear_pte_xor index 2
- * shift 34 --> kern_linear_pte_xor index 3
- */
- val = ((shift - kpte_shift_min) / kpte_shift_incr) + 1;
-
- remains &= ~mask;
- if (shift != kpte_shift_max)
- remains = size;
-
- while (remains) {
- unsigned long index = start >> kpte_shift_min;
+ int i;
- kpte_set_val(index, val);
+ for (i = 0; i < KERNEL_TSB_NENTRIES; i++) {
+ struct tsb *ent = &swapper_tsb[i];
- start += 1UL << kpte_shift_min;
- remains -= 1UL << kpte_shift_min;
+ ent->tag = (1UL << TSB_TAG_INVALID_BIT);
}
+#ifndef CONFIG_DEBUG_PAGEALLOC
+ for (i = 0; i < KERNEL_TSB4M_NENTRIES; i++) {
+ struct tsb *ent = &swapper_4m_tsb[i];
- return start;
-}
-
-static void __init mark_kpte_bitmap(unsigned long start, unsigned long end)
-{
- unsigned long smallest_size, smallest_mask;
- unsigned long s;
-
- smallest_size = (1UL << kpte_shift_min);
- smallest_mask = (smallest_size - 1UL);
-
- while (start < end) {
- unsigned long orig_start = start;
-
- for (s = kpte_shift_max; s >= kpte_shift_min; s -= kpte_shift_incr) {
- start = kpte_mark_using_shift(start, end, s);
-
- if (start != orig_start)
- break;
- }
-
- if (start == orig_start)
- start = (start + smallest_size) & ~smallest_mask;
+ ent->tag = (1UL << TSB_TAG_INVALID_BIT);
}
+#endif
}
-static void __init init_kpte_bitmap(void)
-{
- unsigned long i;
-
- for (i = 0; i < pall_ents; i++) {
- unsigned long phys_start, phys_end;
-
- phys_start = pall[i].phys_addr;
- phys_end = phys_start + pall[i].reg_size;
-
- mark_kpte_bitmap(phys_start, phys_end);
- }
-}
+extern unsigned int kvmap_linear_patch[1];
static void __init kernel_physical_mapping_init(void)
{
-#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned long i, mem_alloced = 0UL;
+ bool use_huge = true;
+#ifdef CONFIG_DEBUG_PAGEALLOC
+ use_huge = false;
+#endif
for (i = 0; i < pall_ents; i++) {
unsigned long phys_start, phys_end;
@@ -1523,7 +1606,7 @@ static void __init kernel_physical_mapping_init(void)
phys_end = phys_start + pall[i].reg_size;
mem_alloced += kernel_map_range(phys_start, phys_end,
- PAGE_KERNEL);
+ PAGE_KERNEL, use_huge);
}
printk("Allocated %ld bytes for kernel page tables.\n",
@@ -1532,8 +1615,9 @@ static void __init kernel_physical_mapping_init(void)
kvmap_linear_patch[0] = 0x01000000; /* nop */
flushi(&kvmap_linear_patch[0]);
+ flush_all_kernel_tsbs();
+
__flush_tlb_all();
-#endif
}
#ifdef CONFIG_DEBUG_PAGEALLOC
@@ -1543,7 +1627,7 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
kernel_map_range(phys_start, phys_end,
- (enable ? PAGE_KERNEL : __pgprot(0)));
+ (enable ? PAGE_KERNEL : __pgprot(0)), false);
flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
PAGE_OFFSET + phys_end);
@@ -1571,76 +1655,56 @@ unsigned long __init find_ecache_flush_span(unsigned long size)
unsigned long PAGE_OFFSET;
EXPORT_SYMBOL(PAGE_OFFSET);
-static void __init page_offset_shift_patch_one(unsigned int *insn, unsigned long phys_bits)
-{
- unsigned long final_shift;
- unsigned int val = *insn;
- unsigned int cnt;
-
- /* We are patching in ilog2(max_supported_phys_address), and
- * we are doing so in a manner similar to a relocation addend.
- * That is, we are adding the shift value to whatever value
- * is in the shift instruction count field already.
- */
- cnt = (val & 0x3f);
- val &= ~0x3f;
-
- /* If we are trying to shift >= 64 bits, clear the destination
- * register. This can happen when phys_bits ends up being equal
- * to MAX_PHYS_ADDRESS_BITS.
- */
- final_shift = (cnt + (64 - phys_bits));
- if (final_shift >= 64) {
- unsigned int rd = (val >> 25) & 0x1f;
-
- val = 0x80100000 | (rd << 25);
- } else {
- val |= final_shift;
- }
- *insn = val;
-
- __asm__ __volatile__("flush %0"
- : /* no outputs */
- : "r" (insn));
-}
-
-static void __init page_offset_shift_patch(unsigned long phys_bits)
-{
- extern unsigned int __page_offset_shift_patch;
- extern unsigned int __page_offset_shift_patch_end;
- unsigned int *p;
-
- p = &__page_offset_shift_patch;
- while (p < &__page_offset_shift_patch_end) {
- unsigned int *insn = (unsigned int *)(unsigned long)*p;
+unsigned long VMALLOC_END = 0x0000010000000000UL;
+EXPORT_SYMBOL(VMALLOC_END);
- page_offset_shift_patch_one(insn, phys_bits);
-
- p++;
- }
-}
+unsigned long sparc64_va_hole_top = 0xfffff80000000000UL;
+unsigned long sparc64_va_hole_bottom = 0x0000080000000000UL;
static void __init setup_page_offset(void)
{
- unsigned long max_phys_bits = 40;
-
if (tlb_type == cheetah || tlb_type == cheetah_plus) {
+ /* Cheetah/Panther support a full 64-bit virtual
+ * address, so we can use all that our page tables
+ * support.
+ */
+ sparc64_va_hole_top = 0xfff0000000000000UL;
+ sparc64_va_hole_bottom = 0x0010000000000000UL;
+
max_phys_bits = 42;
} else if (tlb_type == hypervisor) {
switch (sun4v_chip_type) {
case SUN4V_CHIP_NIAGARA1:
case SUN4V_CHIP_NIAGARA2:
+ /* T1 and T2 support 48-bit virtual addresses. */
+ sparc64_va_hole_top = 0xffff800000000000UL;
+ sparc64_va_hole_bottom = 0x0000800000000000UL;
+
max_phys_bits = 39;
break;
case SUN4V_CHIP_NIAGARA3:
+ /* T3 supports 48-bit virtual addresses. */
+ sparc64_va_hole_top = 0xffff800000000000UL;
+ sparc64_va_hole_bottom = 0x0000800000000000UL;
+
max_phys_bits = 43;
break;
case SUN4V_CHIP_NIAGARA4:
case SUN4V_CHIP_NIAGARA5:
case SUN4V_CHIP_SPARC64X:
- default:
+ case SUN4V_CHIP_SPARC_M6:
+ /* T4 and later support 52-bit virtual addresses. */
+ sparc64_va_hole_top = 0xfff8000000000000UL;
+ sparc64_va_hole_bottom = 0x0008000000000000UL;
max_phys_bits = 47;
break;
+ case SUN4V_CHIP_SPARC_M7:
+ default:
+ /* M7 and later support 52-bit virtual addresses. */
+ sparc64_va_hole_top = 0xfff8000000000000UL;
+ sparc64_va_hole_bottom = 0x0008000000000000UL;
+ max_phys_bits = 49;
+ break;
}
}
@@ -1650,12 +1714,16 @@ static void __init setup_page_offset(void)
prom_halt();
}
- PAGE_OFFSET = PAGE_OFFSET_BY_BITS(max_phys_bits);
+ PAGE_OFFSET = sparc64_va_hole_top;
+ VMALLOC_END = ((sparc64_va_hole_bottom >> 1) +
+ (sparc64_va_hole_bottom >> 2));
- pr_info("PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n",
+ pr_info("MM: PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n",
PAGE_OFFSET, max_phys_bits);
-
- page_offset_shift_patch(max_phys_bits);
+ pr_info("MM: VMALLOC [0x%016lx --> 0x%016lx]\n",
+ VMALLOC_START, VMALLOC_END);
+ pr_info("MM: VMEMMAP [0x%016lx --> 0x%016lx]\n",
+ VMEMMAP_BASE, VMEMMAP_BASE << 1);
}
static void __init tsb_phys_patch(void)
@@ -1700,21 +1768,42 @@ static void __init tsb_phys_patch(void)
#define NUM_KTSB_DESCR 1
#endif
static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
-extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
+
+/* The swapper TSBs are loaded with a base sequence of:
+ *
+ * sethi %uhi(SYMBOL), REG1
+ * sethi %hi(SYMBOL), REG2
+ * or REG1, %ulo(SYMBOL), REG1
+ * or REG2, %lo(SYMBOL), REG2
+ * sllx REG1, 32, REG1
+ * or REG1, REG2, REG1
+ *
+ * When we use physical addressing for the TSB accesses, we patch the
+ * first four instructions in the above sequence.
+ */
static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
{
- pa >>= KTSB_PHYS_SHIFT;
+ unsigned long high_bits, low_bits;
+
+ high_bits = (pa >> 32) & 0xffffffff;
+ low_bits = (pa >> 0) & 0xffffffff;
while (start < end) {
unsigned int *ia = (unsigned int *)(unsigned long)*start;
- ia[0] = (ia[0] & ~0x3fffff) | (pa >> 10);
+ ia[0] = (ia[0] & ~0x3fffff) | (high_bits >> 10);
__asm__ __volatile__("flush %0" : : "r" (ia));
- ia[1] = (ia[1] & ~0x3ff) | (pa & 0x3ff);
+ ia[1] = (ia[1] & ~0x3fffff) | (low_bits >> 10);
__asm__ __volatile__("flush %0" : : "r" (ia + 1));
+ ia[2] = (ia[2] & ~0x1fff) | (high_bits & 0x3ff);
+ __asm__ __volatile__("flush %0" : : "r" (ia + 2));
+
+ ia[3] = (ia[3] & ~0x1fff) | (low_bits & 0x3ff);
+ __asm__ __volatile__("flush %0" : : "r" (ia + 3));
+
start++;
}
}
@@ -1853,11 +1942,56 @@ static void __init sun4v_linear_pte_xor_finalize(void)
/* paging_init() sets up the page tables */
static unsigned long last_valid_pfn;
-pgd_t swapper_pg_dir[PTRS_PER_PGD];
static void sun4u_pgprot_init(void);
static void sun4v_pgprot_init(void);
+static phys_addr_t __init available_memory(void)
+{
+ phys_addr_t available = 0ULL;
+ phys_addr_t pa_start, pa_end;
+ u64 i;
+
+ for_each_free_mem_range(i, NUMA_NO_NODE, &pa_start, &pa_end, NULL)
+ available = available + (pa_end - pa_start);
+
+ return available;
+}
+
+/* We need to exclude reserved regions. This exclusion will include
+ * vmlinux and initrd. To be more precise the initrd size could be used to
+ * vmlinux and initrd. To be more precise, the initrd size could be used to
+ */
+static void __init reduce_memory(phys_addr_t limit_ram)
+{
+ phys_addr_t avail_ram = available_memory();
+ phys_addr_t pa_start, pa_end;
+ u64 i;
+
+ if (limit_ram >= avail_ram)
+ return;
+
+ for_each_free_mem_range(i, NUMA_NO_NODE, &pa_start, &pa_end, NULL) {
+ phys_addr_t region_size = pa_end - pa_start;
+ phys_addr_t clip_start = pa_start;
+
+ avail_ram = avail_ram - region_size;
+ /* Are we consuming too much? */
+ if (avail_ram < limit_ram) {
+ phys_addr_t give_back = limit_ram - avail_ram;
+
+ region_size = region_size - give_back;
+ clip_start = clip_start + give_back;
+ }
+
+ memblock_remove(clip_start, region_size);
+
+ if (avail_ram <= limit_ram)
+ break;
+ i = 0UL;
+ }
+}
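A stand-alone sketch of the clipping arithmetic with made-up numbers (two 512MB free ranges and a 768MB "mem=" limit); it mirrors the give_back/clip_start logic above and is not kernel code:

	#include <stdio.h>

	int main(void)
	{
		unsigned long regions[2] = { 512, 512 };        /* sizes in MB, hypothetical */
		unsigned long limit = 768;
		unsigned long avail = regions[0] + regions[1];
		unsigned long removed = 0;

		for (int i = 0; i < 2; i++) {
			unsigned long size = regions[i], clip_off = 0;

			avail -= size;
			if (avail < limit) {                    /* keep give_back MB */
				unsigned long give_back = limit - avail;

				size -= give_back;
				clip_off = give_back;
			}
			removed += size;                        /* memblock_remove() happens here */
			printf("range %d: remove %luMB starting %luMB in\n", i, size, clip_off);
			if (avail <= limit)
				break;
		}
		printf("removed %luMB, %luMB left\n", removed, regions[0] + regions[1] - removed);
		return 0;
	}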
+
void __init paging_init(void)
{
unsigned long end_pfn, shift, phys_base;
@@ -1937,7 +2071,8 @@ void __init paging_init(void)
find_ramdisk(phys_base);
- memblock_enforce_memory_limit(cmdline_memory_size);
+ if (cmdline_memory_size)
+ reduce_memory(cmdline_memory_size);
memblock_allow_resize();
memblock_dump_all();
@@ -1956,16 +2091,10 @@ void __init paging_init(void)
*/
init_mm.pgd += ((shift) / (sizeof(pgd_t)));
- memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));
+ memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
- /* Now can init the kernel/bad page tables. */
- pud_set(pud_offset(&swapper_pg_dir[0], 0),
- swapper_low_pmd_dir + (shift / sizeof(pgd_t)));
-
inherit_prom_mappings();
- init_kpte_bitmap();
-
/* Ok, we can use our TLB miss and window trap handlers safely. */
setup_tba();
@@ -2072,70 +2201,6 @@ int page_in_phys_avail(unsigned long paddr)
return 0;
}
-static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
-static int pavail_rescan_ents __initdata;
-
-/* Certain OBP calls, such as fetching "available" properties, can
- * claim physical memory. So, along with initializing the valid
- * address bitmap, what we do here is refetch the physical available
- * memory list again, and make sure it provides at least as much
- * memory as 'pavail' does.
- */
-static void __init setup_valid_addr_bitmap_from_pavail(unsigned long *bitmap)
-{
- int i;
-
- read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);
-
- for (i = 0; i < pavail_ents; i++) {
- unsigned long old_start, old_end;
-
- old_start = pavail[i].phys_addr;
- old_end = old_start + pavail[i].reg_size;
- while (old_start < old_end) {
- int n;
-
- for (n = 0; n < pavail_rescan_ents; n++) {
- unsigned long new_start, new_end;
-
- new_start = pavail_rescan[n].phys_addr;
- new_end = new_start +
- pavail_rescan[n].reg_size;
-
- if (new_start <= old_start &&
- new_end >= (old_start + PAGE_SIZE)) {
- set_bit(old_start >> ILOG2_4MB, bitmap);
- goto do_next_page;
- }
- }
-
- prom_printf("mem_init: Lost memory in pavail\n");
- prom_printf("mem_init: OLD start[%lx] size[%lx]\n",
- pavail[i].phys_addr,
- pavail[i].reg_size);
- prom_printf("mem_init: NEW start[%lx] size[%lx]\n",
- pavail_rescan[i].phys_addr,
- pavail_rescan[i].reg_size);
- prom_printf("mem_init: Cannot continue, aborting.\n");
- prom_halt();
-
- do_next_page:
- old_start += PAGE_SIZE;
- }
- }
-}
-
-static void __init patch_tlb_miss_handler_bitmap(void)
-{
- extern unsigned int valid_addr_bitmap_insn[];
- extern unsigned int valid_addr_bitmap_patch[];
-
- valid_addr_bitmap_insn[1] = valid_addr_bitmap_patch[1];
- mb();
- valid_addr_bitmap_insn[0] = valid_addr_bitmap_patch[0];
- flushi(&valid_addr_bitmap_insn[0]);
-}
-
static void __init register_page_bootmem_info(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
@@ -2148,18 +2213,6 @@ static void __init register_page_bootmem_info(void)
}
void __init mem_init(void)
{
- unsigned long addr, last;
-
- addr = PAGE_OFFSET + kern_base;
- last = PAGE_ALIGN(kern_size) + addr;
- while (addr < last) {
- set_bit(__pa(addr) >> ILOG2_4MB, sparc64_valid_addr_bitmap);
- addr += PAGE_SIZE;
- }
-
- setup_valid_addr_bitmap_from_pavail(sparc64_valid_addr_bitmap);
- patch_tlb_miss_handler_bitmap();
-
high_memory = __va(last_valid_pfn << PAGE_SHIFT);
register_page_bootmem_info();
@@ -2249,18 +2302,9 @@ unsigned long _PAGE_CACHE __read_mostly;
EXPORT_SYMBOL(_PAGE_CACHE);
#ifdef CONFIG_SPARSEMEM_VMEMMAP
-unsigned long vmemmap_table[VMEMMAP_SIZE];
-
-static long __meminitdata addr_start, addr_end;
-static int __meminitdata node_start;
-
int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
int node)
{
- unsigned long phys_start = (vstart - VMEMMAP_BASE);
- unsigned long phys_end = (vend - VMEMMAP_BASE);
- unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK;
- unsigned long end = VMEMMAP_ALIGN(phys_end);
unsigned long pte_base;
pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
@@ -2271,47 +2315,52 @@ int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
_PAGE_CP_4V | _PAGE_CV_4V |
_PAGE_P_4V | _PAGE_W_4V);
- for (; addr < end; addr += VMEMMAP_CHUNK) {
- unsigned long *vmem_pp =
- vmemmap_table + (addr >> VMEMMAP_CHUNK_SHIFT);
- void *block;
+ pte_base |= _PAGE_PMD_HUGE;
- if (!(*vmem_pp & _PAGE_VALID)) {
- block = vmemmap_alloc_block(1UL << ILOG2_4MB, node);
- if (!block)
+ vstart = vstart & PMD_MASK;
+ vend = ALIGN(vend, PMD_SIZE);
+ for (; vstart < vend; vstart += PMD_SIZE) {
+ pgd_t *pgd = pgd_offset_k(vstart);
+ unsigned long pte;
+ pud_t *pud;
+ pmd_t *pmd;
+
+ if (pgd_none(*pgd)) {
+ pud_t *new = vmemmap_alloc_block(PAGE_SIZE, node);
+
+ if (!new)
return -ENOMEM;
+ pgd_populate(&init_mm, pgd, new);
+ }
- *vmem_pp = pte_base | __pa(block);
+ pud = pud_offset(pgd, vstart);
+ if (pud_none(*pud)) {
+ pmd_t *new = vmemmap_alloc_block(PAGE_SIZE, node);
- /* check to see if we have contiguous blocks */
- if (addr_end != addr || node_start != node) {
- if (addr_start)
- printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
- addr_start, addr_end-1, node_start);
- addr_start = addr;
- node_start = node;
- }
- addr_end = addr + VMEMMAP_CHUNK;
+ if (!new)
+ return -ENOMEM;
+ pud_populate(&init_mm, pud, new);
}
- }
- return 0;
-}
-void __meminit vmemmap_populate_print_last(void)
-{
- if (addr_start) {
- printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
- addr_start, addr_end-1, node_start);
- addr_start = 0;
- addr_end = 0;
- node_start = 0;
+ pmd = pmd_offset(pud, vstart);
+
+ pte = pmd_val(*pmd);
+ if (!(pte & _PAGE_VALID)) {
+ void *block = vmemmap_alloc_block(PMD_SIZE, node);
+
+ if (!block)
+ return -ENOMEM;
+
+ pmd_val(*pmd) = pte_base | __pa(block);
+ }
}
+
+ return 0;
}
void vmemmap_free(unsigned long start, unsigned long end)
{
}
-
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
static void prot_init_common(unsigned long page_none,
@@ -2787,8 +2836,8 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS);
}
if (end > HI_OBP_ADDRESS) {
- flush_tsb_kernel_range(end, HI_OBP_ADDRESS);
- do_flush_tlb_kernel_range(end, HI_OBP_ADDRESS);
+ flush_tsb_kernel_range(HI_OBP_ADDRESS, end);
+ do_flush_tlb_kernel_range(HI_OBP_ADDRESS, end);
}
} else {
flush_tsb_kernel_range(start, end);
diff --git a/arch/sparc/mm/init_64.h b/arch/sparc/mm/init_64.h
index 0668b364f44d..a4c09603b05c 100644
--- a/arch/sparc/mm/init_64.h
+++ b/arch/sparc/mm/init_64.h
@@ -8,15 +8,8 @@
*/
#define MAX_PHYS_ADDRESS (1UL << MAX_PHYS_ADDRESS_BITS)
-#define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL)
-#define KPTE_BITMAP_BYTES \
- ((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 4)
-#define VALID_ADDR_BITMAP_CHUNK_SZ (4UL * 1024UL * 1024UL)
-#define VALID_ADDR_BITMAP_BYTES \
- ((MAX_PHYS_ADDRESS / VALID_ADDR_BITMAP_CHUNK_SZ) / 8)
extern unsigned long kern_linear_pte_xor[4];
-extern unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
extern unsigned int sparc64_highest_unlocked_tlb_ent;
extern unsigned long sparc64_kern_pri_context;
extern unsigned long sparc64_kern_pri_nuc_bits;
@@ -38,15 +31,4 @@ extern unsigned long kern_locked_tte_data;
void prom_world(int enter);
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
-#define VMEMMAP_CHUNK_SHIFT 22
-#define VMEMMAP_CHUNK (1UL << VMEMMAP_CHUNK_SHIFT)
-#define VMEMMAP_CHUNK_MASK ~(VMEMMAP_CHUNK - 1UL)
-#define VMEMMAP_ALIGN(x) (((x)+VMEMMAP_CHUNK-1UL)&VMEMMAP_CHUNK_MASK)
-
-#define VMEMMAP_SIZE ((((1UL << MAX_PHYSADDR_BITS) >> PAGE_SHIFT) * \
- sizeof(struct page)) >> VMEMMAP_CHUNK_SHIFT)
-extern unsigned long vmemmap_table[VMEMMAP_SIZE];
-#endif
-
#endif /* _SPARC64_MM_INIT_H */
diff --git a/arch/sparc/power/hibernate_asm.S b/arch/sparc/power/hibernate_asm.S
index 79942166df84..d7d9017dcb15 100644
--- a/arch/sparc/power/hibernate_asm.S
+++ b/arch/sparc/power/hibernate_asm.S
@@ -54,8 +54,8 @@ ENTRY(swsusp_arch_resume)
nop
/* Write PAGE_OFFSET to %g7 */
- sethi %uhi(PAGE_OFFSET), %g7
- sllx %g7, 32, %g7
+ sethi %hi(PAGE_OFFSET), %g7
+ ldx [%g7 + %lo(PAGE_OFFSET)], %g7
setuw (PAGE_SIZE-8), %g3
diff --git a/arch/sparc/prom/bootstr_64.c b/arch/sparc/prom/bootstr_64.c
index ab9ccc63b388..7149e77714a4 100644
--- a/arch/sparc/prom/bootstr_64.c
+++ b/arch/sparc/prom/bootstr_64.c
@@ -14,7 +14,10 @@
* the .bss section or it will break things.
*/
-#define BARG_LEN 256
+/* We limit BARG_LEN to 1024 because this is the size of the
+ * 'barg_out' command line buffer in the SILO bootloader.
+ */
+#define BARG_LEN 1024
struct {
int bootstr_len;
int bootstr_valid;
diff --git a/arch/sparc/prom/p1275.c b/arch/sparc/prom/p1275.c
index e58b81726319..b2340f008ae0 100644
--- a/arch/sparc/prom/p1275.c
+++ b/arch/sparc/prom/p1275.c
@@ -9,6 +9,7 @@
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/spinlock.h>
+#include <linux/irqflags.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
@@ -36,8 +37,8 @@ void p1275_cmd_direct(unsigned long *args)
{
unsigned long flags;
- raw_local_save_flags(flags);
- raw_local_irq_restore((unsigned long)PIL_NMI);
+ local_save_flags(flags);
+ local_irq_restore((unsigned long)PIL_NMI);
raw_spin_lock(&prom_entry_lock);
prom_world(1);
@@ -45,7 +46,7 @@ void p1275_cmd_direct(unsigned long *args)
prom_world(0);
raw_spin_unlock(&prom_entry_lock);
- raw_local_irq_restore(flags);
+ local_irq_restore(flags);
}
void prom_cif_init(void *cif_handler, void *cif_stack)
diff --git a/arch/um/drivers/random.c b/arch/um/drivers/random.c
index 9e3a72205827..dd16c902ff70 100644
--- a/arch/um/drivers/random.c
+++ b/arch/um/drivers/random.c
@@ -79,7 +79,6 @@ static ssize_t rng_dev_read (struct file *filp, char __user *buf, size_t size,
set_task_state(current, TASK_INTERRUPTIBLE);
schedule();
- set_task_state(current, TASK_RUNNING);
remove_wait_queue(&host_read_wait, &wait);
if (atomic_dec_and_test(&host_sleep_count)) {
diff --git a/arch/x86/.gitignore b/arch/x86/.gitignore
index 7cab8c08e6d1..aff152c87cf4 100644
--- a/arch/x86/.gitignore
+++ b/arch/x86/.gitignore
@@ -1,4 +1,6 @@
boot/compressed/vmlinux
tools/test_get_len
tools/insn_sanity
+purgatory/kexec-purgatory.c
+purgatory/purgatory.ro
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 3eb8a41509b3..f2327e88e07c 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -491,6 +491,36 @@ config X86_INTEL_LPSS
things like clock tree (common clock framework) and pincontrol
which are needed by the LPSS peripheral drivers.
+config IOSF_MBI
+ tristate "Intel SoC IOSF Sideband support for SoC platforms"
+ depends on PCI
+ ---help---
+ This option enables sideband register access support for Intel SoC
+ platforms. On these platforms the IOSF sideband is used in lieu of
+ MSR's for some register accesses, mostly but not limited to thermal
+ and power. Drivers may query the availability of this device to
+ determine if they need the sideband in order to work on these
+ platforms. The sideband is available on the following SoC products.
+	  This list is not meant to be exhaustive.
+ - BayTrail
+ - Braswell
+ - Quark
+
+ You should say Y if you are running a kernel on one of these SoC's.
+
+config IOSF_MBI_DEBUG
+ bool "Enable IOSF sideband access through debugfs"
+ depends on IOSF_MBI && DEBUG_FS
+ ---help---
+ Select this option to expose the IOSF sideband access registers (MCR,
+ MDR, MCRX) through debugfs to write and read register information from
+ different units on the SoC. This is most useful for obtaining device
+ state information for debug and analysis. As this is a general access
+ mechanism, users of this option would have specific knowledge of the
+ device they want to access.
+
+ If you don't require the option or are in doubt, say N.
+
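A hedged sketch of the "drivers may query availability" pattern described in the help text above; iosf_mbi_available() and iosf_mbi_read() are the helpers declared in arch/x86/include/asm/iosf_mbi.h, but the port, opcode and offset values below are placeholders, not real register addresses:

	#include <linux/types.h>
	#include <linux/errno.h>
	#include <asm/iosf_mbi.h>

	static int example_read_sideband_reg(u32 *val)
	{
		if (!iosf_mbi_available())
			return -ENODEV;

		return iosf_mbi_read(0x04 /* unit port */, 0x10 /* read opcode */,
				     0x00 /* register offset */, val);
	}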
config X86_RDC321X
bool "RDC R-321x SoC"
depends on X86_32
@@ -2454,11 +2484,6 @@ config X86_DMA_REMAP
bool
depends on STA2X11
-config IOSF_MBI
- tristate
- default m
- depends on PCI
-
config PMC_ATOM
def_bool y
depends on PCI
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 5692d6ac0f18..920e6160c535 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -50,9 +50,6 @@ ifeq ($(CONFIG_X86_32),y)
KBUILD_CFLAGS += -msoft-float -mregparm=3 -freg-struct-return
- # Don't autogenerate MMX or SSE instructions
- KBUILD_CFLAGS += -mno-mmx -mno-sse
-
# Never want PIC in a 32-bit kernel, prevent breakage with GCC built
# with nonstandard options
KBUILD_CFLAGS += -fno-pic
@@ -80,8 +77,7 @@ else
KBUILD_AFLAGS += -m64
KBUILD_CFLAGS += -m64
- # Don't autogenerate traditional x87, MMX or SSE instructions
- KBUILD_CFLAGS += -mno-mmx -mno-sse
+ # Don't autogenerate traditional x87 instructions
KBUILD_CFLAGS += $(call cc-option,-mno-80387)
KBUILD_CFLAGS += $(call cc-option,-mno-fp-ret-in-387)
@@ -168,7 +164,7 @@ KBUILD_CFLAGS += -Wno-sign-compare
#
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
# prevent gcc from generating any FP code by mistake
-KBUILD_CFLAGS += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,)
+KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow
KBUILD_CFLAGS += $(call cc-option,-mno-avx,)
KBUILD_CFLAGS += $(mflags-y)
diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c
index 7c68808edeb7..bb1376381985 100644
--- a/arch/x86/boot/compressed/aslr.c
+++ b/arch/x86/boot/compressed/aslr.c
@@ -194,7 +194,7 @@ static bool mem_avoid_overlap(struct mem_vector *img)
while (ptr) {
struct mem_vector avoid;
- avoid.start = (u64)ptr;
+ avoid.start = (unsigned long)ptr;
avoid.size = sizeof(*ptr) + ptr->len;
if (mem_overlaps(img, &avoid))
diff --git a/arch/x86/boot/mkcpustr.c b/arch/x86/boot/mkcpustr.c
index 4579eff0ef4d..637097e66a62 100644
--- a/arch/x86/boot/mkcpustr.c
+++ b/arch/x86/boot/mkcpustr.c
@@ -16,6 +16,7 @@
#include <stdio.h>
#include "../include/asm/required-features.h"
+#include "../include/asm/disabled-features.h"
#include "../include/asm/cpufeature.h"
#include "../kernel/cpu/capflags.c"
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index d21ff89207cd..df91466f973d 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -308,11 +308,8 @@ static int load_aout_binary(struct linux_binprm *bprm)
(current->mm->start_brk = N_BSSADDR(ex));
retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
- if (retval < 0) {
- /* Someone check-me: is this error path enough? */
- send_sig(SIGKILL, current, 0);
+ if (retval < 0)
return retval;
- }
install_exec_creds(bprm);
@@ -324,17 +321,13 @@ static int load_aout_binary(struct linux_binprm *bprm)
error = vm_brk(text_addr & PAGE_MASK, map_size);
- if (error != (text_addr & PAGE_MASK)) {
- send_sig(SIGKILL, current, 0);
+ if (error != (text_addr & PAGE_MASK))
return error;
- }
error = read_code(bprm->file, text_addr, 32,
ex.a_text + ex.a_data);
- if ((signed long)error < 0) {
- send_sig(SIGKILL, current, 0);
+ if ((signed long)error < 0)
return error;
- }
} else {
#ifdef WARN_OLD
static unsigned long error_time, error_time2;
@@ -368,20 +361,16 @@ static int load_aout_binary(struct linux_binprm *bprm)
MAP_EXECUTABLE | MAP_32BIT,
fd_offset);
- if (error != N_TXTADDR(ex)) {
- send_sig(SIGKILL, current, 0);
+ if (error != N_TXTADDR(ex))
return error;
- }
error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE |
MAP_EXECUTABLE | MAP_32BIT,
fd_offset + ex.a_text);
- if (error != N_DATADDR(ex)) {
- send_sig(SIGKILL, current, 0);
+ if (error != N_DATADDR(ex))
return error;
- }
}
beyond_if:
set_binfmt(&aout_format);
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 4299eb05023c..711de084ab57 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -151,6 +151,16 @@ ENTRY(ia32_sysenter_target)
1: movl (%rbp),%ebp
_ASM_EXTABLE(1b,ia32_badarg)
ASM_CLAC
+
+ /*
+ * Sysenter doesn't filter flags, so we need to clear NT
+ * ourselves. To save a few cycles, we can check whether
+ * NT was set instead of doing an unconditional popfq.
+ */
+ testl $X86_EFLAGS_NT,EFLAGS(%rsp) /* saved EFLAGS match cpu */
+ jnz sysenter_fix_flags
+sysenter_flags_fixed:
+
orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
CFI_REMEMBER_STATE
@@ -184,6 +194,8 @@ sysexit_from_sys_call:
TRACE_IRQS_ON
ENABLE_INTERRUPTS_SYSEXIT32
+ CFI_RESTORE_STATE
+
#ifdef CONFIG_AUDITSYSCALL
.macro auditsys_entry_common
movl %esi,%r9d /* 6th arg: 4th syscall arg */
@@ -226,7 +238,6 @@ sysexit_from_sys_call:
.endm
sysenter_auditsys:
- CFI_RESTORE_STATE
auditsys_entry_common
movl %ebp,%r9d /* reload 6th syscall arg */
jmp sysenter_dispatch
@@ -235,6 +246,11 @@ sysexit_audit:
auditsys_exit sysexit_from_sys_call
#endif
+sysenter_fix_flags:
+ pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED)
+ popfq_cfi
+ jmp sysenter_flags_fixed
+
sysenter_tracesys:
#ifdef CONFIG_AUDITSYSCALL
testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 6dd1c7dd0473..5e5cd123fdfb 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -24,7 +24,7 @@
*/
static inline int atomic_read(const atomic_t *v)
{
- return (*(volatile int *)&(v)->counter);
+ return ACCESS_ONCE((v)->counter);
}
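For context (hedged; as defined in linux/compiler.h of this era), ACCESS_ONCE() expands to the same volatile access the removed open-coded cast performed, so atomic_read() still compiles to a single plain load of ->counter:

	#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))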
/**
@@ -219,21 +219,6 @@ static inline short int atomic_inc_short(short int *v)
return *v;
}
-#ifdef CONFIG_X86_64
-/**
- * atomic_or_long - OR of two long integers
- * @v1: pointer to type unsigned long
- * @v2: pointer to type unsigned long
- *
- * Atomically ORs @v1 and @v2
- * Returns the result of the OR
- */
-static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
-{
- asm(LOCK_PREFIX "orq %1, %0" : "+m" (*v1) : "r" (v2));
-}
-#endif
-
/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
asm volatile(LOCK_PREFIX "andl %0,%1" \
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 46e9052bbd28..f8d273e18516 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -18,7 +18,7 @@
*/
static inline long atomic64_read(const atomic64_t *v)
{
- return (*(volatile long *)&(v)->counter);
+ return ACCESS_ONCE((v)->counter);
}
/**
diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
index cb4c73bfeb48..76659b67fd11 100644
--- a/arch/x86/include/asm/calling.h
+++ b/arch/x86/include/asm/calling.h
@@ -85,7 +85,7 @@ For 32-bit we have the following conventions - kernel is built with
#define ARGOFFSET R11
#define SWFRAME ORIG_RAX
- .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1
+ .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1, rax_enosys=0
subq $9*8+\addskip, %rsp
CFI_ADJUST_CFA_OFFSET 9*8+\addskip
movq_cfi rdi, 8*8
@@ -96,7 +96,11 @@ For 32-bit we have the following conventions - kernel is built with
movq_cfi rcx, 5*8
.endif
+ .if \rax_enosys
+ movq $-ENOSYS, 4*8(%rsp)
+ .else
movq_cfi rax, 4*8
+ .endif
.if \save_r891011
movq_cfi r8, 3*8
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 094292a63e74..0bb1335313b2 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -8,6 +8,10 @@
#include <asm/required-features.h>
#endif
+#ifndef _ASM_X86_DISABLED_FEATURES_H
+#include <asm/disabled-features.h>
+#endif
+
#define NCAPINTS 11 /* N 32-bit words worth of info */
#define NBUGINTS 1 /* N 32-bit bug flags */
@@ -282,6 +286,18 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
(((bit)>>5)==8 && (1UL<<((bit)&31) & REQUIRED_MASK8)) || \
(((bit)>>5)==9 && (1UL<<((bit)&31) & REQUIRED_MASK9)) )
+#define DISABLED_MASK_BIT_SET(bit) \
+ ( (((bit)>>5)==0 && (1UL<<((bit)&31) & DISABLED_MASK0)) || \
+ (((bit)>>5)==1 && (1UL<<((bit)&31) & DISABLED_MASK1)) || \
+ (((bit)>>5)==2 && (1UL<<((bit)&31) & DISABLED_MASK2)) || \
+ (((bit)>>5)==3 && (1UL<<((bit)&31) & DISABLED_MASK3)) || \
+ (((bit)>>5)==4 && (1UL<<((bit)&31) & DISABLED_MASK4)) || \
+ (((bit)>>5)==5 && (1UL<<((bit)&31) & DISABLED_MASK5)) || \
+ (((bit)>>5)==6 && (1UL<<((bit)&31) & DISABLED_MASK6)) || \
+ (((bit)>>5)==7 && (1UL<<((bit)&31) & DISABLED_MASK7)) || \
+ (((bit)>>5)==8 && (1UL<<((bit)&31) & DISABLED_MASK8)) || \
+ (((bit)>>5)==9 && (1UL<<((bit)&31) & DISABLED_MASK9)) )
+
#define cpu_has(c, bit) \
(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
test_cpu_cap(c, bit))
@@ -290,6 +306,18 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
x86_this_cpu_test_bit(bit, (unsigned long *)&cpu_info.x86_capability))
+/*
+ * This macro is for detection of features which need kernel
+ * infrastructure to be used. It may *not* directly test the CPU
+ * itself. Use the cpu_has() family if you want true runtime
+ * testing of CPU features, like in hypervisor code where you are
+ * supporting a possible guest feature where host support for it
+ * is not relevant.
+ */
+#define cpu_feature_enabled(bit) \
+ (__builtin_constant_p(bit) && DISABLED_MASK_BIT_SET(bit) ? 0 : \
+ cpu_has(&boot_cpu_data, bit))
+
#define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit)
#define set_cpu_cap(c, bit) set_bit(bit, (unsigned long *)((c)->x86_capability))
@@ -304,11 +332,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
} while (0)
#define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU)
-#define cpu_has_vme boot_cpu_has(X86_FEATURE_VME)
#define cpu_has_de boot_cpu_has(X86_FEATURE_DE)
#define cpu_has_pse boot_cpu_has(X86_FEATURE_PSE)
#define cpu_has_tsc boot_cpu_has(X86_FEATURE_TSC)
-#define cpu_has_pae boot_cpu_has(X86_FEATURE_PAE)
#define cpu_has_pge boot_cpu_has(X86_FEATURE_PGE)
#define cpu_has_apic boot_cpu_has(X86_FEATURE_APIC)
#define cpu_has_sep boot_cpu_has(X86_FEATURE_SEP)
@@ -324,9 +350,6 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
#define cpu_has_avx2 boot_cpu_has(X86_FEATURE_AVX2)
#define cpu_has_ht boot_cpu_has(X86_FEATURE_HT)
#define cpu_has_nx boot_cpu_has(X86_FEATURE_NX)
-#define cpu_has_k6_mtrr boot_cpu_has(X86_FEATURE_K6_MTRR)
-#define cpu_has_cyrix_arr boot_cpu_has(X86_FEATURE_CYRIX_ARR)
-#define cpu_has_centaur_mcr boot_cpu_has(X86_FEATURE_CENTAUR_MCR)
#define cpu_has_xstore boot_cpu_has(X86_FEATURE_XSTORE)
#define cpu_has_xstore_enabled boot_cpu_has(X86_FEATURE_XSTORE_EN)
#define cpu_has_xcrypt boot_cpu_has(X86_FEATURE_XCRYPT)
@@ -361,25 +384,6 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
#define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
#define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT)
-#ifdef CONFIG_X86_64
-
-#undef cpu_has_vme
-#define cpu_has_vme 0
-
-#undef cpu_has_pae
-#define cpu_has_pae ___BUG___
-
-#undef cpu_has_k6_mtrr
-#define cpu_has_k6_mtrr 0
-
-#undef cpu_has_cyrix_arr
-#define cpu_has_cyrix_arr 0
-
-#undef cpu_has_centaur_mcr
-#define cpu_has_centaur_mcr 0
-
-#endif /* CONFIG_X86_64 */
-
#if __GNUC__ >= 4
extern void warn_pre_alternatives(void);
extern bool __static_cpu_has_safe(u16 bit);
diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
new file mode 100644
index 000000000000..97534a7d38e3
--- /dev/null
+++ b/arch/x86/include/asm/disabled-features.h
@@ -0,0 +1,39 @@
+#ifndef _ASM_X86_DISABLED_FEATURES_H
+#define _ASM_X86_DISABLED_FEATURES_H
+
+/* These features, although they might be available in a CPU,
+ * will not be used because the compile options to support
+ * them are not present.
+ *
+ * This code allows them to be checked and disabled at
+ * compile time without an explicit #ifdef. Use
+ * cpu_feature_enabled().
+ */
+
+#ifdef CONFIG_X86_64
+# define DISABLE_VME (1<<(X86_FEATURE_VME & 31))
+# define DISABLE_K6_MTRR (1<<(X86_FEATURE_K6_MTRR & 31))
+# define DISABLE_CYRIX_ARR (1<<(X86_FEATURE_CYRIX_ARR & 31))
+# define DISABLE_CENTAUR_MCR (1<<(X86_FEATURE_CENTAUR_MCR & 31))
+#else
+# define DISABLE_VME 0
+# define DISABLE_K6_MTRR 0
+# define DISABLE_CYRIX_ARR 0
+# define DISABLE_CENTAUR_MCR 0
+#endif /* CONFIG_X86_64 */
+
+/*
+ * Make sure to add features to the correct mask
+ */
+#define DISABLED_MASK0 (DISABLE_VME)
+#define DISABLED_MASK1 0
+#define DISABLED_MASK2 0
+#define DISABLED_MASK3 (DISABLE_CYRIX_ARR|DISABLE_CENTAUR_MCR|DISABLE_K6_MTRR)
+#define DISABLED_MASK4 0
+#define DISABLED_MASK5 0
+#define DISABLED_MASK6 0
+#define DISABLED_MASK7 0
+#define DISABLED_MASK8 0
+#define DISABLED_MASK9 0
+
+#endif /* _ASM_X86_DISABLED_FEATURES_H */
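A short usage sketch, mirroring the 32-bit cpu_init() hunk later in this diff rather than new API: on 64-bit builds DISABLE_VME is set in DISABLED_MASK0, so cpu_feature_enabled(X86_FEATURE_VME) constant-folds to 0 and the guarded code is dropped at compile time, while 32-bit builds fall back to the runtime cpu_has() test:

	if (cpu_feature_enabled(X86_FEATURE_VME) || cpu_has_tsc || cpu_has_de)
		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);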
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 1a055c81d864..ca3347a9dab5 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -160,8 +160,9 @@ do { \
#define elf_check_arch(x) \
((x)->e_machine == EM_X86_64)
-#define compat_elf_check_arch(x) \
- (elf_check_arch_ia32(x) || (x)->e_machine == EM_X86_64)
+#define compat_elf_check_arch(x) \
+ (elf_check_arch_ia32(x) || \
+ (IS_ENABLED(CONFIG_X86_X32_ABI) && (x)->e_machine == EM_X86_64))
#if __USER32_DS != __USER_DS
# error "The following code assumes __USER32_DS == __USER_DS"
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index 412ececa00b9..e97622f57722 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -344,7 +344,7 @@ static inline void __thread_fpu_end(struct task_struct *tsk)
static inline void __thread_fpu_begin(struct task_struct *tsk)
{
- if (!static_cpu_has_safe(X86_FEATURE_EAGER_FPU))
+ if (!use_eager_fpu())
clts();
__thread_set_has_fpu(tsk);
}
diff --git a/arch/x86/include/asm/microcode_intel.h b/arch/x86/include/asm/microcode_intel.h
index 9067166409bf..bbe296e0bce1 100644
--- a/arch/x86/include/asm/microcode_intel.h
+++ b/arch/x86/include/asm/microcode_intel.h
@@ -43,7 +43,7 @@ struct extended_sigtable {
#define DWSIZE (sizeof(u32))
#define get_totalsize(mc) \
- (((struct microcode_intel *)mc)->hdr.totalsize ? \
+ (((struct microcode_intel *)mc)->hdr.datasize ? \
((struct microcode_intel *)mc)->hdr.totalsize : \
DEFAULT_UCODE_TOTALSIZE)
diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h
index 4064acae625d..01b493e5a99b 100644
--- a/arch/x86/include/asm/numa.h
+++ b/arch/x86/include/asm/numa.h
@@ -9,7 +9,6 @@
#ifdef CONFIG_NUMA
#define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
-#define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT))
/*
* Too small node sizes may confuse the VM badly. Usually they
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 8249df45d2f2..8dfc9fd094a3 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -51,6 +51,14 @@
ARCH_PERFMON_EVENTSEL_EDGE | \
ARCH_PERFMON_EVENTSEL_INV | \
ARCH_PERFMON_EVENTSEL_CMASK)
+#define X86_ALL_EVENT_FLAGS \
+ (ARCH_PERFMON_EVENTSEL_EDGE | \
+ ARCH_PERFMON_EVENTSEL_INV | \
+ ARCH_PERFMON_EVENTSEL_CMASK | \
+ ARCH_PERFMON_EVENTSEL_ANY | \
+ ARCH_PERFMON_EVENTSEL_PIN_CONTROL | \
+ HSW_IN_TX | \
+ HSW_IN_TX_CHECKPOINTED)
#define AMD64_RAW_EVENT_MASK \
(X86_RAW_EVENT_MASK | \
AMD64_EVENTSEL_EVENT)
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index 9ee322103c6d..b6c0b404898a 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -32,9 +32,6 @@ static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }
void paging_init(void);
-extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
-
-
/*
* Define this if things work differently on an i386 and an i486:
* it will (on an i486) warn about kernel memory accesses that are
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 3874693c0e53..4572b2f30237 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -116,7 +116,8 @@ static inline void native_pgd_clear(pgd_t *pgd)
native_set_pgd(pgd, native_make_pgd(0));
}
-extern void sync_global_pgds(unsigned long start, unsigned long end);
+extern void sync_global_pgds(unsigned long start, unsigned long end,
+ int removed);
/*
* Conversion functions: convert a page and protection to a page entry,
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 0f9724c9c510..07789647bf33 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -23,7 +23,6 @@
#define _PAGE_BIT_SPECIAL _PAGE_BIT_SOFTW1
#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SOFTW1
#define _PAGE_BIT_SPLITTING _PAGE_BIT_SOFTW2 /* only valid on a PSE pmd */
-#define _PAGE_BIT_IOMAP _PAGE_BIT_SOFTW2 /* flag used to indicate IO mapping */
#define _PAGE_BIT_HIDDEN _PAGE_BIT_SOFTW3 /* hidden by kmemcheck */
#define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_SOFTW3 /* software dirty tracking */
#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
@@ -52,7 +51,7 @@
#define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
#define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
#define _PAGE_SOFTW1 (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW1)
-#define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
+#define _PAGE_SOFTW2 (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW2)
#define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
@@ -168,10 +167,10 @@
#define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
-#define __PAGE_KERNEL_IO (__PAGE_KERNEL | _PAGE_IOMAP)
-#define __PAGE_KERNEL_IO_NOCACHE (__PAGE_KERNEL_NOCACHE | _PAGE_IOMAP)
-#define __PAGE_KERNEL_IO_UC_MINUS (__PAGE_KERNEL_UC_MINUS | _PAGE_IOMAP)
-#define __PAGE_KERNEL_IO_WC (__PAGE_KERNEL_WC | _PAGE_IOMAP)
+#define __PAGE_KERNEL_IO (__PAGE_KERNEL)
+#define __PAGE_KERNEL_IO_NOCACHE (__PAGE_KERNEL_NOCACHE)
+#define __PAGE_KERNEL_IO_UC_MINUS (__PAGE_KERNEL_UC_MINUS)
+#define __PAGE_KERNEL_IO_WC (__PAGE_KERNEL_WC)
#define PAGE_KERNEL __pgprot(__PAGE_KERNEL)
#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO)
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 6205f0c434db..86fc2bb82287 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -75,6 +75,11 @@ convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);
extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
int error_code, int si_code);
+
+extern unsigned long syscall_trace_enter_phase1(struct pt_regs *, u32 arch);
+extern long syscall_trace_enter_phase2(struct pt_regs *, u32 arch,
+ unsigned long phase1_result);
+
extern long syscall_trace_enter(struct pt_regs *);
extern void syscall_trace_leave(struct pt_regs *);
diff --git a/arch/x86/include/asm/rwlock.h b/arch/x86/include/asm/rwlock.h
deleted file mode 100644
index a5370a03d90c..000000000000
--- a/arch/x86/include/asm/rwlock.h
+++ /dev/null
@@ -1,49 +0,0 @@
-#ifndef _ASM_X86_RWLOCK_H
-#define _ASM_X86_RWLOCK_H
-
-#include <asm/asm.h>
-
-#if CONFIG_NR_CPUS <= 2048
-
-#ifndef __ASSEMBLY__
-typedef union {
- s32 lock;
- s32 write;
-} arch_rwlock_t;
-#endif
-
-#define RW_LOCK_BIAS 0x00100000
-#define READ_LOCK_SIZE(insn) __ASM_FORM(insn##l)
-#define READ_LOCK_ATOMIC(n) atomic_##n
-#define WRITE_LOCK_ADD(n) __ASM_FORM_COMMA(addl n)
-#define WRITE_LOCK_SUB(n) __ASM_FORM_COMMA(subl n)
-#define WRITE_LOCK_CMP RW_LOCK_BIAS
-
-#else /* CONFIG_NR_CPUS > 2048 */
-
-#include <linux/const.h>
-
-#ifndef __ASSEMBLY__
-typedef union {
- s64 lock;
- struct {
- u32 read;
- s32 write;
- };
-} arch_rwlock_t;
-#endif
-
-#define RW_LOCK_BIAS (_AC(1,L) << 32)
-#define READ_LOCK_SIZE(insn) __ASM_FORM(insn##q)
-#define READ_LOCK_ATOMIC(n) atomic64_##n
-#define WRITE_LOCK_ADD(n) __ASM_FORM(incl)
-#define WRITE_LOCK_SUB(n) __ASM_FORM(decl)
-#define WRITE_LOCK_CMP 1
-
-#endif /* CONFIG_NR_CPUS */
-
-#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
-
-/* Actual code is in asm/spinlock.h or in arch/x86/lib/rwlock.S */
-
-#endif /* _ASM_X86_RWLOCK_H */
diff --git a/arch/x86/include/asm/serial.h b/arch/x86/include/asm/serial.h
index 628c801535ea..460b84f64556 100644
--- a/arch/x86/include/asm/serial.h
+++ b/arch/x86/include/asm/serial.h
@@ -6,24 +6,24 @@
*
* It'd be nice if someone built a serial card with a 24.576 MHz
* clock, since the 16550A is capable of handling a top speed of 1.5
- * megabits/second; but this requires the faster clock.
+ * megabits/second; but this requires a faster clock.
*/
-#define BASE_BAUD ( 1843200 / 16 )
+#define BASE_BAUD (1843200/16)
/* Standard COM flags (except for COM4, because of the 8514 problem) */
#ifdef CONFIG_SERIAL_DETECT_IRQ
-#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ)
-#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ)
+# define STD_COMX_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ)
+# define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | 0 | ASYNC_AUTO_IRQ)
#else
-#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
-#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF
+# define STD_COMX_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | 0 )
+# define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | 0 | 0 )
#endif
-#define SERIAL_PORT_DFNS \
- /* UART CLK PORT IRQ FLAGS */ \
- { 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \
- { 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \
- { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \
- { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */
+#define SERIAL_PORT_DFNS \
+ /* UART CLK PORT IRQ FLAGS */ \
+ { .uart = 0, BASE_BAUD, 0x3F8, 4, STD_COMX_FLAGS }, /* ttyS0 */ \
+ { .uart = 0, BASE_BAUD, 0x2F8, 3, STD_COMX_FLAGS }, /* ttyS1 */ \
+ { .uart = 0, BASE_BAUD, 0x3E8, 4, STD_COMX_FLAGS }, /* ttyS2 */ \
+ { .uart = 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */
#endif /* _ASM_X86_SERIAL_H */
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 54f1c8068c02..9295016485c9 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -187,7 +187,6 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
cpu_relax();
}
-#ifndef CONFIG_QUEUE_RWLOCK
/*
* Read-write spinlocks, allowing multiple readers
* but only one writer.
@@ -198,91 +197,15 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
* irq-safe write-lock, but readers can get non-irqsafe
* read-locks.
*
- * On x86, we implement read-write locks as a 32-bit counter
- * with the high bit (sign) being the "contended" bit.
+ * On x86, we implement read-write locks using the generic qrwlock with
+ * x86 specific optimization.
*/
-/**
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-static inline int arch_read_can_lock(arch_rwlock_t *lock)
-{
- return lock->lock > 0;
-}
-
-/**
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-static inline int arch_write_can_lock(arch_rwlock_t *lock)
-{
- return lock->write == WRITE_LOCK_CMP;
-}
-
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
- "jns 1f\n"
- "call __read_lock_failed\n\t"
- "1:\n"
- ::LOCK_PTR_REG (rw) : "memory");
-}
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
- asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
- "jz 1f\n"
- "call __write_lock_failed\n\t"
- "1:\n"
- ::LOCK_PTR_REG (&rw->write), "i" (RW_LOCK_BIAS)
- : "memory");
-}
-
-static inline int arch_read_trylock(arch_rwlock_t *lock)
-{
- READ_LOCK_ATOMIC(t) *count = (READ_LOCK_ATOMIC(t) *)lock;
-
- if (READ_LOCK_ATOMIC(dec_return)(count) >= 0)
- return 1;
- READ_LOCK_ATOMIC(inc)(count);
- return 0;
-}
-
-static inline int arch_write_trylock(arch_rwlock_t *lock)
-{
- atomic_t *count = (atomic_t *)&lock->write;
-
- if (atomic_sub_and_test(WRITE_LOCK_CMP, count))
- return 1;
- atomic_add(WRITE_LOCK_CMP, count);
- return 0;
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
-{
- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
- :"+m" (rw->lock) : : "memory");
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
- : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
-}
-#else
#include <asm/qrwlock.h>
-#endif /* CONFIG_QUEUE_RWLOCK */
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-#undef READ_LOCK_SIZE
-#undef READ_LOCK_ATOMIC
-#undef WRITE_LOCK_ADD
-#undef WRITE_LOCK_SUB
-#undef WRITE_LOCK_CMP
-
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()
diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h
index 73c4c007200f..5f9d7572d82b 100644
--- a/arch/x86/include/asm/spinlock_types.h
+++ b/arch/x86/include/asm/spinlock_types.h
@@ -34,10 +34,6 @@ typedef struct arch_spinlock {
#define __ARCH_SPIN_LOCK_UNLOCKED { { 0 } }
-#ifdef CONFIG_QUEUE_RWLOCK
#include <asm-generic/qrwlock_types.h>
-#else
-#include <asm/rwlock.h>
-#endif
#endif /* _ASM_X86_SPINLOCK_TYPES_H */
diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
index bbae02470701..d993e33f5236 100644
--- a/arch/x86/include/uapi/asm/e820.h
+++ b/arch/x86/include/uapi/asm/e820.h
@@ -21,11 +21,6 @@
* this size.
*/
-/*
- * Odd: 'make headers_check' complains about numa.h if I try
- * to collapse the next two #ifdef lines to a single line:
- * #if defined(__KERNEL__) && defined(CONFIG_EFI)
- */
#ifndef __KERNEL__
#define E820_X_MAX E820MAX
#endif
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index ada2e2d6be3e..8f1e77440b2b 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -39,8 +39,6 @@ obj-y += tsc.o tsc_msr.o io_delay.o rtc.o
obj-y += pci-iommu_table.o
obj-y += resource.o
-obj-$(CONFIG_PREEMPT) += preempt.o
-
obj-y += process.o
obj-y += i387.o xsave.o
obj-y += ptrace.o
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index ae915391ebec..4128b5fcb559 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -32,7 +32,7 @@
static int numachip_system __read_mostly;
-static const struct apic apic_numachip __read_mostly;
+static const struct apic apic_numachip;
static unsigned int get_apic_id(unsigned long x)
{
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 004f017aa7b9..8e9dcfd630e4 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -204,7 +204,6 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second);
static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
{
-#ifdef CONFIG_SMP
unsigned long val;
int pnode;
@@ -223,7 +222,6 @@ static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
atomic_set(&init_deasserted, 1);
-#endif
return 0;
}
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 77dcab277710..01d5453b5502 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -39,7 +39,9 @@ obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd_iommu.o
endif
obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_knc.o perf_event_p4.o
obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
-obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore.o perf_event_intel_rapl.o
+obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore.o perf_event_intel_uncore_snb.o
+obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore_snbep.o perf_event_intel_uncore_nhmex.o
+obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_rapl.o
endif
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c649f236e288..700f958652f8 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1184,7 +1184,7 @@ void syscall_init(void)
/* Flags to clear on syscall */
wrmsrl(MSR_SYSCALL_MASK,
X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|
- X86_EFLAGS_IOPL|X86_EFLAGS_AC);
+ X86_EFLAGS_IOPL|X86_EFLAGS_AC|X86_EFLAGS_NT);
}
/*
@@ -1266,6 +1266,19 @@ static void dbg_restore_debug_regs(void)
#define dbg_restore_debug_regs()
#endif /* ! CONFIG_KGDB */
+static void wait_for_master_cpu(int cpu)
+{
+#ifdef CONFIG_SMP
+ /*
+ * wait for ACK from master CPU before continuing
+ * with AP initialization
+ */
+ WARN_ON(cpumask_test_and_set_cpu(cpu, cpu_initialized_mask));
+ while (!cpumask_test_cpu(cpu, cpu_callout_mask))
+ cpu_relax();
+#endif
+}
+
/*
* cpu_init() initializes state that is per-CPU. Some data is already
* initialized (naturally) in the bootstrap process, such as the GDT
@@ -1281,16 +1294,17 @@ void cpu_init(void)
struct task_struct *me;
struct tss_struct *t;
unsigned long v;
- int cpu;
+ int cpu = stack_smp_processor_id();
int i;
+ wait_for_master_cpu(cpu);
+
/*
* Load microcode on this cpu if a valid microcode is available.
* This is early microcode loading procedure.
*/
load_ucode_ap();
- cpu = stack_smp_processor_id();
t = &per_cpu(init_tss, cpu);
oist = &per_cpu(orig_ist, cpu);
@@ -1302,9 +1316,6 @@ void cpu_init(void)
me = current;
- if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask))
- panic("CPU#%d already initialized!\n", cpu);
-
pr_debug("Initializing CPU#%d\n", cpu);
clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
@@ -1381,17 +1392,13 @@ void cpu_init(void)
struct tss_struct *t = &per_cpu(init_tss, cpu);
struct thread_struct *thread = &curr->thread;
- show_ucode_info_early();
+ wait_for_master_cpu(cpu);
- if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
- printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
- for (;;)
- local_irq_enable();
- }
+ show_ucode_info_early();
printk(KERN_INFO "Initializing CPU#%d\n", cpu);
- if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
+ if (cpu_feature_enabled(X86_FEATURE_VME) || cpu_has_tsc || cpu_has_de)
clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
load_current_idt();
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 74e804ddc5c7..1ef456273172 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -144,6 +144,21 @@ static void early_init_intel(struct cpuinfo_x86 *c)
setup_clear_cpu_cap(X86_FEATURE_ERMS);
}
}
+
+ /*
+ * Intel Quark Core DevMan_001.pdf section 6.4.11
+ * "The operating system also is required to invalidate (i.e., flush)
+ * the TLB when any changes are made to any of the page table entries.
+ * The operating system must reload CR3 to cause the TLB to be flushed"
+ *
+	 * As a result, cpu_has_pge() in arch/x86/include/asm/tlbflush.h should
+	 * be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE
+	 * to be modified.
+ */
+ if (c->x86 == 5 && c->x86_model == 9) {
+ pr_info("Disabling PGE capability bit\n");
+ setup_clear_cpu_cap(X86_FEATURE_PGE);
+ }
}
#ifdef CONFIG_X86_32
@@ -382,6 +397,13 @@ static void init_intel(struct cpuinfo_x86 *c)
}
l2 = init_intel_cacheinfo(c);
+
+ /* Detect legacy cache sizes if init_intel_cacheinfo did not */
+ if (l2 == 0) {
+ cpu_detect_cache_sizes(c);
+ l2 = c->x86_cache_size;
+ }
+
if (c->cpuid_level > 9) {
unsigned eax = cpuid_eax(10);
/* Check for version and the number of counters */
@@ -485,6 +507,13 @@ static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
*/
if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
size = 256;
+
+ /*
+ * Intel Quark SoC X1000 contains a 4-way set associative
+ * 16K cache with a 16 byte cache line and 256 lines per tag
+ */
+ if ((c->x86 == 5) && (c->x86_model == 9))
+ size = 16;
return size;
}
#endif
@@ -686,7 +715,8 @@ static const struct cpu_dev intel_cpu_dev = {
[3] = "OverDrive PODP5V83",
[4] = "Pentium MMX",
[7] = "Mobile Pentium 75 - 200",
- [8] = "Mobile Pentium MMX"
+ [8] = "Mobile Pentium MMX",
+ [9] = "Quark SoC X1000",
}
},
{ .family = 6, .model_names =
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 36a1bb6d1ee0..1af51b1586d7 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -498,8 +498,8 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
- printk(KERN_DEBUG
- "CPU%d: Thermal monitoring handled by SMI\n", cpu);
+ if (system_state == SYSTEM_BOOTING)
+ printk(KERN_DEBUG "CPU%d: Thermal monitoring handled by SMI\n", cpu);
return;
}
diff --git a/arch/x86/kernel/cpu/microcode/amd_early.c b/arch/x86/kernel/cpu/microcode/amd_early.c
index 617a9e284245..7aa1acc79789 100644
--- a/arch/x86/kernel/cpu/microcode/amd_early.c
+++ b/arch/x86/kernel/cpu/microcode/amd_early.c
@@ -27,7 +27,7 @@ static u32 ucode_new_rev;
u8 amd_ucode_patch[PATCH_MAX_SIZE];
static u16 this_equiv_id;
-struct cpio_data ucode_cpio;
+static struct cpio_data ucode_cpio;
/*
* Microcode patch container file is prepended to the initrd in cpio format.
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index a276fa75d9b5..c6826d1e8082 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -127,7 +127,7 @@ static int get_matching_mc(struct microcode_intel *mc_intel, int cpu)
return get_matching_microcode(csig, cpf, mc_intel, crev);
}
-int apply_microcode(int cpu)
+static int apply_microcode_intel(int cpu)
{
struct microcode_intel *mc_intel;
struct ucode_cpu_info *uci;
@@ -314,7 +314,7 @@ static struct microcode_ops microcode_intel_ops = {
.request_microcode_user = request_microcode_user,
.request_microcode_fw = request_microcode_fw,
.collect_cpu_info = collect_cpu_info,
- .apply_microcode = apply_microcode,
+ .apply_microcode = apply_microcode_intel,
.microcode_fini_cpu = microcode_fini_cpu,
};
diff --git a/arch/x86/kernel/cpu/microcode/intel_early.c b/arch/x86/kernel/cpu/microcode/intel_early.c
index 18f739129e72..b88343f7a3b3 100644
--- a/arch/x86/kernel/cpu/microcode/intel_early.c
+++ b/arch/x86/kernel/cpu/microcode/intel_early.c
@@ -28,8 +28,8 @@
#include <asm/tlbflush.h>
#include <asm/setup.h>
-unsigned long mc_saved_in_initrd[MAX_UCODE_COUNT];
-struct mc_saved_data {
+static unsigned long mc_saved_in_initrd[MAX_UCODE_COUNT];
+static struct mc_saved_data {
unsigned int mc_saved_count;
struct microcode_intel **mc_saved;
} mc_saved_data;
@@ -415,7 +415,7 @@ static void __ref show_saved_mc(void)
struct ucode_cpu_info uci;
if (mc_saved_data.mc_saved_count == 0) {
- pr_debug("no micorcode data saved.\n");
+ pr_debug("no microcode data saved.\n");
return;
}
pr_debug("Total microcode saved: %d\n", mc_saved_data.mc_saved_count);
@@ -506,7 +506,7 @@ int save_mc_for_early(u8 *mc)
if (mc_saved && mc_saved_count)
memcpy(mc_saved_tmp, mc_saved,
- mc_saved_count * sizeof(struct mirocode_intel *));
+ mc_saved_count * sizeof(struct microcode_intel *));
/*
* Save the microcode patch mc in mc_save_tmp structure if it's a newer
* version.
@@ -526,7 +526,7 @@ int save_mc_for_early(u8 *mc)
show_saved_mc();
/*
- * Free old saved microcod data.
+ * Free old saved microcode data.
*/
if (mc_saved) {
for (i = 0; i < mc_saved_count_init; i++)
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index f961de9964c7..ea5f363a1948 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -707,7 +707,7 @@ void __init mtrr_bp_init(void)
} else {
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD:
- if (cpu_has_k6_mtrr) {
+ if (cpu_feature_enabled(X86_FEATURE_K6_MTRR)) {
/* Pre-Athlon (K6) AMD CPU MTRRs */
mtrr_if = mtrr_ops[X86_VENDOR_AMD];
size_or_mask = SIZE_OR_MASK_BITS(32);
@@ -715,14 +715,14 @@ void __init mtrr_bp_init(void)
}
break;
case X86_VENDOR_CENTAUR:
- if (cpu_has_centaur_mcr) {
+ if (cpu_feature_enabled(X86_FEATURE_CENTAUR_MCR)) {
mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
size_or_mask = SIZE_OR_MASK_BITS(32);
size_and_mask = 0;
}
break;
case X86_VENDOR_CYRIX:
- if (cpu_has_cyrix_arr) {
+ if (cpu_feature_enabled(X86_FEATURE_CYRIX_ARR)) {
mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
size_or_mask = SIZE_OR_MASK_BITS(32);
size_and_mask = 0;
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 2879ecdaac43..16c73022306e 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -243,7 +243,8 @@ static bool check_hw_exists(void)
msr_fail:
printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n");
- printk(KERN_ERR "Failed to access perfctr msr (MSR %x is %Lx)\n", reg, val_new);
+ printk(boot_cpu_has(X86_FEATURE_HYPERVISOR) ? KERN_INFO : KERN_ERR
+ "Failed to access perfctr msr (MSR %x is %Lx)\n", reg, val_new);
return false;
}
@@ -387,7 +388,7 @@ int x86_pmu_hw_config(struct perf_event *event)
precise++;
/* Support for IP fixup */
- if (x86_pmu.lbr_nr)
+ if (x86_pmu.lbr_nr || x86_pmu.intel_cap.pebs_format >= 2)
precise++;
}
@@ -443,6 +444,12 @@ int x86_pmu_hw_config(struct perf_event *event)
if (event->attr.type == PERF_TYPE_RAW)
event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
+ if (event->attr.sample_period && x86_pmu.limit_period) {
+ if (x86_pmu.limit_period(event, event->attr.sample_period) >
+ event->attr.sample_period)
+ return -EINVAL;
+ }
+
return x86_setup_perfctr(event);
}
@@ -980,6 +987,9 @@ int x86_perf_event_set_period(struct perf_event *event)
if (left > x86_pmu.max_period)
left = x86_pmu.max_period;
+ if (x86_pmu.limit_period)
+ left = x86_pmu.limit_period(event, left);
+
per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
/*
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 8ade93111e03..d98a34d435d7 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -67,8 +67,10 @@ struct event_constraint {
*/
#define PERF_X86_EVENT_PEBS_LDLAT 0x1 /* ld+ldlat data address sampling */
#define PERF_X86_EVENT_PEBS_ST 0x2 /* st data address sampling */
-#define PERF_X86_EVENT_PEBS_ST_HSW 0x4 /* haswell style st data sampling */
+#define PERF_X86_EVENT_PEBS_ST_HSW 0x4 /* haswell style datala, store */
#define PERF_X86_EVENT_COMMITTED 0x8 /* event passed commit_txn */
+#define PERF_X86_EVENT_PEBS_LD_HSW 0x10 /* haswell style datala, load */
+#define PERF_X86_EVENT_PEBS_NA_HSW 0x20 /* haswell style datala, unknown */
struct amd_nb {
int nb_id; /* NorthBridge id */
@@ -252,18 +254,52 @@ struct cpu_hw_events {
EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
#define INTEL_PLD_CONSTRAINT(c, n) \
- __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
+ __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)
#define INTEL_PST_CONSTRAINT(c, n) \
- __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
+ __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST)
-/* DataLA version of store sampling without extra enable bit. */
-#define INTEL_PST_HSW_CONSTRAINT(c, n) \
- __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
+/* Event constraint, but match on all event flags too. */
+#define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
+ EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)
+
+/* Check only flags, but allow all event/umask */
+#define INTEL_ALL_EVENT_CONSTRAINT(code, n) \
+ EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS)
+
+/* Check flags and event code, and set the HSW store flag */
+#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_ST(code, n) \
+ __EVENT_CONSTRAINT(code, n, \
+ ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
+ HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)
+
+/* Check flags and event code, and set the HSW load flag */
+#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n) \
+ __EVENT_CONSTRAINT(code, n, \
+ ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
+ HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)
+
+/* Check flags and event code/umask, and set the HSW store flag */
+#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) \
+ __EVENT_CONSTRAINT(code, n, \
+ INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)
+/* Check flags and event code/umask, and set the HSW load flag */
+#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) \
+ __EVENT_CONSTRAINT(code, n, \
+ INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
+ HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)
+
+/* Check flags and event code/umask, and set the HSW N/A flag */
+#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
+ __EVENT_CONSTRAINT(code, n, \
+			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
+ HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)
+
+
/*
* We define the end marker as having a weight of -1
* to enable blacklisting of events using a counter bitmask
@@ -409,6 +445,7 @@ struct x86_pmu {
struct x86_pmu_quirk *quirks;
int perfctr_second_write;
bool late_ack;
+ unsigned (*limit_period)(struct perf_event *event, unsigned l);
/*
* sysfs attrs
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 2502d0d9d246..3851def5057c 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -220,6 +220,15 @@ static struct event_constraint intel_hsw_event_constraints[] = {
EVENT_CONSTRAINT_END
};
+static struct event_constraint intel_bdw_event_constraints[] = {
+ FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+ FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
+ INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
+ INTEL_EVENT_CONSTRAINT(0xa3, 0x4), /* CYCLE_ACTIVITY.* */
+ EVENT_CONSTRAINT_END
+};
+
static u64 intel_pmu_event_map(int hw_event)
{
return intel_perfmon_event_map[hw_event];
@@ -415,6 +424,126 @@ static __initconst const u64 snb_hw_cache_event_ids
};
+static __initconst const u64 hsw_hw_cache_event_ids
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(L1D ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
+ [ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
+ [ C(RESULT_MISS) ] = 0x0,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0,
+ [ C(RESULT_MISS) ] = 0x0,
+ },
+ },
+ [ C(L1I ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0,
+ [ C(RESULT_MISS) ] = 0x280, /* ICACHE.MISSES */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0,
+ [ C(RESULT_MISS) ] = 0x0,
+ },
+ },
+ [ C(LL ) ] = {
+ [ C(OP_READ) ] = {
+ /* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD */
+ [ C(RESULT_ACCESS) ] = 0x1b7,
+ /* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD|SUPPLIER_NONE|
+ L3_MISS|ANY_SNOOP */
+ [ C(RESULT_MISS) ] = 0x1b7,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE:ALL_RFO */
+ /* OFFCORE_RESPONSE:ALL_RFO|SUPPLIER_NONE|L3_MISS|ANY_SNOOP */
+ [ C(RESULT_MISS) ] = 0x1b7,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0,
+ [ C(RESULT_MISS) ] = 0x0,
+ },
+ },
+ [ C(DTLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
+ [ C(RESULT_MISS) ] = 0x108, /* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
+ [ C(RESULT_MISS) ] = 0x149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0,
+ [ C(RESULT_MISS) ] = 0x0,
+ },
+ },
+ [ C(ITLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x6085, /* ITLB_MISSES.STLB_HIT */
+ [ C(RESULT_MISS) ] = 0x185, /* ITLB_MISSES.MISS_CAUSES_A_WALK */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+ [ C(BPU ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
+ [ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+};
+
+static __initconst const u64 hsw_hw_cache_extra_regs
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(LL ) ] = {
+ [ C(OP_READ) ] = {
+ /* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD */
+ [ C(RESULT_ACCESS) ] = 0x2d5,
+ /* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD|SUPPLIER_NONE|
+ L3_MISS|ANY_SNOOP */
+ [ C(RESULT_MISS) ] = 0x3fbc0202d5ull,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x122, /* OFFCORE_RESPONSE:ALL_RFO */
+ /* OFFCORE_RESPONSE:ALL_RFO|SUPPLIER_NONE|L3_MISS|ANY_SNOOP */
+ [ C(RESULT_MISS) ] = 0x3fbc020122ull,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0,
+ [ C(RESULT_MISS) ] = 0x0,
+ },
+ },
+};
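
The cache-table values above pack the architectural umask into bits 15:8 and the event select into bits 7:0, so the L1D read-access entry 0x81d0 is event 0xd0 with umask 0x81 (MEM_UOPS_RETIRED.ALL_LOADS); the 0x1b7 slots are OFFCORE_RESPONSE placeholders whose full encoding comes from the extra_regs table. A minimal stand-alone sketch of that decoding (the helper is illustrative, not a kernel API):

#include <stdio.h>
#include <stdint.h>

/* Illustrative only: split a packed cache-table value into its event
 * select (bits 7:0) and umask (bits 15:8), the layout used by entries
 * such as 0x81d0 above.  Not a kernel helper. */
static void decode_cache_entry(uint64_t val)
{
	unsigned int event = val & 0xff;
	unsigned int umask = (val >> 8) & 0xff;

	printf("raw 0x%llx -> event=0x%02x umask=0x%02x\n",
	       (unsigned long long)val, event, umask);
}

int main(void)
{
	decode_cache_entry(0x81d0);	/* MEM_UOPS_RETIRED.ALL_LOADS */
	decode_cache_entry(0x151);	/* L1D.REPLACEMENT            */
	decode_cache_entry(0x280);	/* ICACHE.MISSES              */
	return 0;
}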
+
static __initconst const u64 westmere_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
@@ -1905,6 +2034,24 @@ hsw_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
return c;
}
+/*
+ * Broadwell:
+ * The INST_RETIRED.ALL period always needs to have the lowest
+ * 6 bits cleared (BDM57). It must not use a period smaller
+ * than 100 (BDM11). We combine the two to enforce
+ * a min-period of 128.
+ */
+static unsigned bdw_limit_period(struct perf_event *event, unsigned left)
+{
+ if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
+ X86_CONFIG(.event=0xc0, .umask=0x01)) {
+ if (left < 128)
+ left = 128;
+ left &= ~0x3fu;
+ }
+ return left;
+}
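
The helper above is easy to check by hand: periods below 128 are raised to 128 and the low 6 bits are then cleared, so 100 -> 128, 190 -> 128 and 200 -> 192. A user-space sketch of the same arithmetic (the INST_RETIRED event/umask check is omitted; this is illustrative, not the kernel function):

#include <stdio.h>

/* Illustrative restatement of the BDM11/BDM57 workaround above:
 * enforce a minimum period of 128, then clear the low 6 bits. */
static unsigned int limit_period_example(unsigned int left)
{
	if (left < 128)
		left = 128;
	return left & ~0x3fu;
}

int main(void)
{
	unsigned int samples[] = { 100, 128, 190, 200, 1000 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%u -> %u\n", samples[i], limit_period_example(samples[i]));
	return 0;
}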
+
PMU_FORMAT_ATTR(event, "config:0-7" );
PMU_FORMAT_ATTR(umask, "config:8-15" );
PMU_FORMAT_ATTR(edge, "config:18" );
@@ -2367,15 +2514,15 @@ __init int intel_pmu_init(void)
* Install the hw-cache-events table:
*/
switch (boot_cpu_data.x86_model) {
- case 14: /* 65 nm core solo/duo, "Yonah" */
+ case 14: /* 65nm Core "Yonah" */
pr_cont("Core events, ");
break;
- case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
+ case 15: /* 65nm Core2 "Merom" */
x86_add_quirk(intel_clovertown_quirk);
- case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
- case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
- case 29: /* six-core 45 nm xeon "Dunnington" */
+ case 22: /* 65nm Core2 "Merom-L" */
+ case 23: /* 45nm Core2 "Penryn" */
+ case 29: /* 45nm Core2 "Dunnington (MP)" */
memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
@@ -2386,9 +2533,9 @@ __init int intel_pmu_init(void)
pr_cont("Core2 events, ");
break;
- case 26: /* 45 nm nehalem, "Bloomfield" */
- case 30: /* 45 nm nehalem, "Lynnfield" */
- case 46: /* 45 nm nehalem-ex, "Beckton" */
+ case 30: /* 45nm Nehalem */
+ case 26: /* 45nm Nehalem-EP */
+ case 46: /* 45nm Nehalem-EX */
memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
@@ -2415,11 +2562,11 @@ __init int intel_pmu_init(void)
pr_cont("Nehalem events, ");
break;
- case 28: /* Atom */
- case 38: /* Lincroft */
- case 39: /* Penwell */
- case 53: /* Cloverview */
- case 54: /* Cedarview */
+ case 28: /* 45nm Atom "Pineview" */
+ case 38: /* 45nm Atom "Lincroft" */
+ case 39: /* 32nm Atom "Penwell" */
+ case 53: /* 32nm Atom "Cloverview" */
+ case 54: /* 32nm Atom "Cedarview" */
memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
@@ -2430,8 +2577,8 @@ __init int intel_pmu_init(void)
pr_cont("Atom events, ");
break;
- case 55: /* Atom 22nm "Silvermont" */
- case 77: /* Avoton "Silvermont" */
+ case 55: /* 22nm Atom "Silvermont" */
+ case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
@@ -2446,9 +2593,9 @@ __init int intel_pmu_init(void)
pr_cont("Silvermont events, ");
break;
- case 37: /* 32 nm nehalem, "Clarkdale" */
- case 44: /* 32 nm nehalem, "Gulftown" */
- case 47: /* 32 nm Xeon E7 */
+ case 37: /* 32nm Westmere */
+ case 44: /* 32nm Westmere-EP */
+ case 47: /* 32nm Westmere-EX */
memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
@@ -2474,8 +2621,8 @@ __init int intel_pmu_init(void)
pr_cont("Westmere events, ");
break;
- case 42: /* SandyBridge */
- case 45: /* SandyBridge, "Romely-EP" */
+ case 42: /* 32nm SandyBridge */
+ case 45: /* 32nm SandyBridge-E/EN/EP */
x86_add_quirk(intel_sandybridge_quirk);
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
@@ -2506,8 +2653,9 @@ __init int intel_pmu_init(void)
pr_cont("SandyBridge events, ");
break;
- case 58: /* IvyBridge */
- case 62: /* IvyBridge EP */
+
+ case 58: /* 22nm IvyBridge */
+ case 62: /* 22nm IvyBridge-EP/EX */
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
/* dTLB-load-misses on IVB is different than SNB */
@@ -2539,20 +2687,19 @@ __init int intel_pmu_init(void)
break;
- case 60: /* Haswell Client */
- case 70:
- case 71:
- case 63:
- case 69:
+ case 60: /* 22nm Haswell Core */
+ case 63: /* 22nm Haswell Server */
+ case 69: /* 22nm Haswell ULT */
+ case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
x86_pmu.late_ack = true;
- memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids));
- memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
+ memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
+ memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
intel_pmu_lbr_init_snb();
x86_pmu.event_constraints = intel_hsw_event_constraints;
x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
- x86_pmu.extra_regs = intel_snb_extra_regs;
+ x86_pmu.extra_regs = intel_snbep_extra_regs;
x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
/* all extra regs are per-cpu when HT is on */
x86_pmu.er_flags |= ERF_HAS_RSP_1;
@@ -2565,6 +2712,28 @@ __init int intel_pmu_init(void)
pr_cont("Haswell events, ");
break;
+ case 61: /* 14nm Broadwell Core-M */
+ x86_pmu.late_ack = true;
+ memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
+ memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
+
+ intel_pmu_lbr_init_snb();
+
+ x86_pmu.event_constraints = intel_bdw_event_constraints;
+ x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
+ x86_pmu.extra_regs = intel_snbep_extra_regs;
+ x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
+ /* all extra regs are per-cpu when HT is on */
+ x86_pmu.er_flags |= ERF_HAS_RSP_1;
+ x86_pmu.er_flags |= ERF_NO_HT_SHARING;
+
+ x86_pmu.hw_config = hsw_hw_config;
+ x86_pmu.get_event_constraints = hsw_get_event_constraints;
+ x86_pmu.cpu_events = hsw_events_attrs;
+ x86_pmu.limit_period = bdw_limit_period;
+ pr_cont("Broadwell events, ");
+ break;
+
default:
switch (x86_pmu.version) {
case 1:
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 696ade311ded..b1553d05a5cb 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -108,14 +108,16 @@ static u64 precise_store_data(u64 status)
return val;
}
-static u64 precise_store_data_hsw(struct perf_event *event, u64 status)
+static u64 precise_datala_hsw(struct perf_event *event, u64 status)
{
union perf_mem_data_src dse;
- u64 cfg = event->hw.config & INTEL_ARCH_EVENT_MASK;
- dse.val = 0;
- dse.mem_op = PERF_MEM_OP_STORE;
- dse.mem_lvl = PERF_MEM_LVL_NA;
+ dse.val = PERF_MEM_NA;
+
+ if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW)
+ dse.mem_op = PERF_MEM_OP_STORE;
+ else if (event->hw.flags & PERF_X86_EVENT_PEBS_LD_HSW)
+ dse.mem_op = PERF_MEM_OP_LOAD;
/*
* L1 info only valid for following events:
@@ -125,15 +127,12 @@ static u64 precise_store_data_hsw(struct perf_event *event, u64 status)
* MEM_UOPS_RETIRED.SPLIT_STORES
* MEM_UOPS_RETIRED.ALL_STORES
*/
- if (cfg != 0x12d0 && cfg != 0x22d0 && cfg != 0x42d0 && cfg != 0x82d0)
- return dse.mem_lvl;
-
- if (status & 1)
- dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
- else
- dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_MISS;
-
- /* Nothing else supported. Sorry. */
+ if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) {
+ if (status & 1)
+ dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
+ else
+ dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_MISS;
+ }
return dse.val;
}
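
In the rewritten helper the event flags select the memory operation and, for the precise-store flavour, bit 0 of the PEBS status selects L1 hit versus miss. A simplified stand-alone mirror of that decision structure (the flag constants and strings are placeholders, not the real perf_mem_data_src encoding):

#include <stdio.h>

#define EX_PEBS_ST_HSW	0x1	/* illustrative stand-ins for the        */
#define EX_PEBS_LD_HSW	0x2	/* PERF_X86_EVENT_PEBS_{ST,LD}_HSW flags */

/* Mirrors the decision structure of precise_datala_hsw() above: the
 * event flags pick the memory operation, and for the store flavour
 * bit 0 of the PEBS status picks L1 hit vs. miss. */
static void datala_example(unsigned int flags, unsigned long long status)
{
	const char *op = "n/a", *lvl = "n/a";

	if (flags & EX_PEBS_ST_HSW) {
		op  = "store";
		lvl = (status & 1) ? "L1 hit" : "L1 miss";
	} else if (flags & EX_PEBS_LD_HSW) {
		op  = "load";
	}
	printf("flags=%#x status=%#llx -> op=%s level=%s\n", flags, status, op, lvl);
}

int main(void)
{
	datala_example(EX_PEBS_ST_HSW, 0x1);
	datala_example(EX_PEBS_ST_HSW, 0x0);
	datala_example(EX_PEBS_LD_HSW, 0x0);
	return 0;
}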
@@ -569,28 +568,10 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
};
struct event_constraint intel_slm_pebs_event_constraints[] = {
- INTEL_UEVENT_CONSTRAINT(0x0103, 0x1), /* REHABQ.LD_BLOCK_ST_FORWARD_PS */
- INTEL_UEVENT_CONSTRAINT(0x0803, 0x1), /* REHABQ.LD_SPLITS_PS */
- INTEL_UEVENT_CONSTRAINT(0x0204, 0x1), /* MEM_UOPS_RETIRED.L2_HIT_LOADS_PS */
- INTEL_UEVENT_CONSTRAINT(0x0404, 0x1), /* MEM_UOPS_RETIRED.L2_MISS_LOADS_PS */
- INTEL_UEVENT_CONSTRAINT(0x0804, 0x1), /* MEM_UOPS_RETIRED.DTLB_MISS_LOADS_PS */
- INTEL_UEVENT_CONSTRAINT(0x2004, 0x1), /* MEM_UOPS_RETIRED.HITM_PS */
- INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY_PS */
- INTEL_UEVENT_CONSTRAINT(0x00c4, 0x1), /* BR_INST_RETIRED.ALL_BRANCHES_PS */
- INTEL_UEVENT_CONSTRAINT(0x7ec4, 0x1), /* BR_INST_RETIRED.JCC_PS */
- INTEL_UEVENT_CONSTRAINT(0xbfc4, 0x1), /* BR_INST_RETIRED.FAR_BRANCH_PS */
- INTEL_UEVENT_CONSTRAINT(0xebc4, 0x1), /* BR_INST_RETIRED.NON_RETURN_IND_PS */
- INTEL_UEVENT_CONSTRAINT(0xf7c4, 0x1), /* BR_INST_RETIRED.RETURN_PS */
- INTEL_UEVENT_CONSTRAINT(0xf9c4, 0x1), /* BR_INST_RETIRED.CALL_PS */
- INTEL_UEVENT_CONSTRAINT(0xfbc4, 0x1), /* BR_INST_RETIRED.IND_CALL_PS */
- INTEL_UEVENT_CONSTRAINT(0xfdc4, 0x1), /* BR_INST_RETIRED.REL_CALL_PS */
- INTEL_UEVENT_CONSTRAINT(0xfec4, 0x1), /* BR_INST_RETIRED.TAKEN_JCC_PS */
- INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_MISP_RETIRED.ALL_BRANCHES_PS */
- INTEL_UEVENT_CONSTRAINT(0x7ec5, 0x1), /* BR_INST_MISP_RETIRED.JCC_PS */
- INTEL_UEVENT_CONSTRAINT(0xebc5, 0x1), /* BR_INST_MISP_RETIRED.NON_RETURN_IND_PS */
- INTEL_UEVENT_CONSTRAINT(0xf7c5, 0x1), /* BR_INST_MISP_RETIRED.RETURN_PS */
- INTEL_UEVENT_CONSTRAINT(0xfbc5, 0x1), /* BR_INST_MISP_RETIRED.IND_CALL_PS */
- INTEL_UEVENT_CONSTRAINT(0xfec5, 0x1), /* BR_INST_MISP_RETIRED.TAKEN_JCC_PS */
+ /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
+ INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+ /* Allow all events as PEBS with no flags */
+ INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
EVENT_CONSTRAINT_END
};
@@ -626,68 +607,44 @@ struct event_constraint intel_westmere_pebs_event_constraints[] = {
struct event_constraint intel_snb_pebs_event_constraints[] = {
INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
- INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
- INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
- INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */
INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */
- INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
- INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */
+ /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
+ INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+ /* Allow all events as PEBS with no flags */
+ INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
EVENT_CONSTRAINT_END
};
struct event_constraint intel_ivb_pebs_event_constraints[] = {
INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
- INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
- INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
- INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */
INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */
- INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
+ /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
+ INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+ /* Allow all events as PEBS with no flags */
+ INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
EVENT_CONSTRAINT_END
};
struct event_constraint intel_hsw_pebs_event_constraints[] = {
INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
- INTEL_PST_HSW_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
- INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
- INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */
- INTEL_UEVENT_CONSTRAINT(0x01c5, 0xf), /* BR_MISP_RETIRED.CONDITIONAL */
- INTEL_UEVENT_CONSTRAINT(0x04c5, 0xf), /* BR_MISP_RETIRED.ALL_BRANCHES */
- INTEL_UEVENT_CONSTRAINT(0x20c5, 0xf), /* BR_MISP_RETIRED.NEAR_TAKEN */
- INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.* */
- /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
- INTEL_UEVENT_CONSTRAINT(0x11d0, 0xf),
- /* MEM_UOPS_RETIRED.STLB_MISS_STORES */
- INTEL_UEVENT_CONSTRAINT(0x12d0, 0xf),
- INTEL_UEVENT_CONSTRAINT(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
- INTEL_UEVENT_CONSTRAINT(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */
- /* MEM_UOPS_RETIRED.SPLIT_STORES */
- INTEL_UEVENT_CONSTRAINT(0x42d0, 0xf),
- INTEL_UEVENT_CONSTRAINT(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */
- INTEL_PST_HSW_CONSTRAINT(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */
- INTEL_UEVENT_CONSTRAINT(0x01d1, 0xf), /* MEM_LOAD_UOPS_RETIRED.L1_HIT */
- INTEL_UEVENT_CONSTRAINT(0x02d1, 0xf), /* MEM_LOAD_UOPS_RETIRED.L2_HIT */
- INTEL_UEVENT_CONSTRAINT(0x04d1, 0xf), /* MEM_LOAD_UOPS_RETIRED.L3_HIT */
- /* MEM_LOAD_UOPS_RETIRED.HIT_LFB */
- INTEL_UEVENT_CONSTRAINT(0x40d1, 0xf),
- /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS */
- INTEL_UEVENT_CONSTRAINT(0x01d2, 0xf),
- /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT */
- INTEL_UEVENT_CONSTRAINT(0x02d2, 0xf),
- /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM */
- INTEL_UEVENT_CONSTRAINT(0x01d3, 0xf),
- INTEL_UEVENT_CONSTRAINT(0x04c8, 0xf), /* HLE_RETIRED.Abort */
- INTEL_UEVENT_CONSTRAINT(0x04c9, 0xf), /* RTM_RETIRED.Abort */
-
+ INTEL_PLD_CONSTRAINT(0x01cd, 0xf), /* MEM_TRANS_RETIRED.* */
+ /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
+ INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */
+ INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
+ INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf), /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */
+ INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf), /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */
+ /* Allow all events as PEBS with no flags */
+ INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
EVENT_CONSTRAINT_END
};
@@ -864,6 +821,10 @@ static inline u64 intel_hsw_transaction(struct pebs_record_hsw *pebs)
static void __intel_pmu_pebs_event(struct perf_event *event,
struct pt_regs *iregs, void *__pebs)
{
+#define PERF_X86_EVENT_PEBS_HSW_PREC \
+ (PERF_X86_EVENT_PEBS_ST_HSW | \
+ PERF_X86_EVENT_PEBS_LD_HSW | \
+ PERF_X86_EVENT_PEBS_NA_HSW)
/*
* We cast to the biggest pebs_record but are careful not to
* unconditionally access the 'extra' entries.
@@ -873,42 +834,40 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
struct perf_sample_data data;
struct pt_regs regs;
u64 sample_type;
- int fll, fst;
+ int fll, fst, dsrc;
+ int fl = event->hw.flags;
if (!intel_pmu_save_and_restart(event))
return;
- fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT;
- fst = event->hw.flags & (PERF_X86_EVENT_PEBS_ST |
- PERF_X86_EVENT_PEBS_ST_HSW);
+ sample_type = event->attr.sample_type;
+ dsrc = sample_type & PERF_SAMPLE_DATA_SRC;
+
+ fll = fl & PERF_X86_EVENT_PEBS_LDLAT;
+ fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC);
perf_sample_data_init(&data, 0, event->hw.last_period);
data.period = event->hw.last_period;
- sample_type = event->attr.sample_type;
/*
- * if PEBS-LL or PreciseStore
+ * Use latency for weight (only avail with PEBS-LL)
*/
- if (fll || fst) {
- /*
- * Use latency for weight (only avail with PEBS-LL)
- */
- if (fll && (sample_type & PERF_SAMPLE_WEIGHT))
- data.weight = pebs->lat;
-
- /*
- * data.data_src encodes the data source
- */
- if (sample_type & PERF_SAMPLE_DATA_SRC) {
- if (fll)
- data.data_src.val = load_latency_data(pebs->dse);
- else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW)
- data.data_src.val =
- precise_store_data_hsw(event, pebs->dse);
- else
- data.data_src.val = precise_store_data(pebs->dse);
- }
+ if (fll && (sample_type & PERF_SAMPLE_WEIGHT))
+ data.weight = pebs->lat;
+
+ /*
+ * data.data_src encodes the data source
+ */
+ if (dsrc) {
+ u64 val = PERF_MEM_NA;
+ if (fll)
+ val = load_latency_data(pebs->dse);
+ else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC))
+ val = precise_datala_hsw(event, pebs->dse);
+ else if (fst)
+ val = precise_store_data(pebs->dse);
+ data.data_src.val = val;
}
/*
@@ -935,16 +894,16 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
else
regs.flags &= ~PERF_EFLAGS_EXACT;
- if ((event->attr.sample_type & PERF_SAMPLE_ADDR) &&
+ if ((sample_type & PERF_SAMPLE_ADDR) &&
x86_pmu.intel_cap.pebs_format >= 1)
data.addr = pebs->dla;
if (x86_pmu.intel_cap.pebs_format >= 2) {
/* Only set the TSX weight when no memory weight. */
- if ((event->attr.sample_type & PERF_SAMPLE_WEIGHT) && !fll)
+ if ((sample_type & PERF_SAMPLE_WEIGHT) && !fll)
data.weight = intel_hsw_weight(pebs);
- if (event->attr.sample_type & PERF_SAMPLE_TRANSACTION)
+ if (sample_type & PERF_SAMPLE_TRANSACTION)
data.txn = intel_hsw_transaction(pebs);
}
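
The data-source selection in the reworked __intel_pmu_pebs_event() above is a straight priority chain: load-latency events first, then the Haswell data-linear-address flavours, then the legacy precise-store path, with PERF_MEM_NA as the fallback. A condensed restatement under invented flag values (the decoder names are used only as labels):

#include <stdio.h>

/* Invented flag bits standing in for PERF_X86_EVENT_PEBS_LDLAT,
 * PERF_X86_EVENT_PEBS_ST and the combined *_HSW precise flags. */
enum { FL_LDLAT = 1, FL_ST = 2, FL_HSW_PREC = 4 };

/* Same priority order the hunk above uses to fill data.data_src;
 * the returned strings are just labels for the kernel decoders. */
static const char *pick_data_src_decoder(int flags)
{
	int fll = flags & FL_LDLAT;
	int fst = flags & (FL_ST | FL_HSW_PREC);

	if (fll)
		return "load_latency_data()";
	if (fst && (flags & FL_HSW_PREC))
		return "precise_datala_hsw()";
	if (fst)
		return "precise_store_data()";
	return "PERF_MEM_NA";
}

int main(void)
{
	printf("%s\n", pick_data_src_decoder(FL_LDLAT));
	printf("%s\n", pick_data_src_decoder(FL_HSW_PREC));
	printf("%s\n", pick_data_src_decoder(FL_ST));
	printf("%s\n", pick_data_src_decoder(0));
	return 0;
}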
@@ -1055,7 +1014,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
* BTS, PEBS probe and setup
*/
-void intel_ds_init(void)
+void __init intel_ds_init(void)
{
/*
* No support for 32bit formats
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index 9dd2459a4c73..4af10617de33 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -697,7 +697,7 @@ static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX] = {
};
/* core */
-void intel_pmu_lbr_init_core(void)
+void __init intel_pmu_lbr_init_core(void)
{
x86_pmu.lbr_nr = 4;
x86_pmu.lbr_tos = MSR_LBR_TOS;
@@ -712,7 +712,7 @@ void intel_pmu_lbr_init_core(void)
}
/* nehalem/westmere */
-void intel_pmu_lbr_init_nhm(void)
+void __init intel_pmu_lbr_init_nhm(void)
{
x86_pmu.lbr_nr = 16;
x86_pmu.lbr_tos = MSR_LBR_TOS;
@@ -733,7 +733,7 @@ void intel_pmu_lbr_init_nhm(void)
}
/* sandy bridge */
-void intel_pmu_lbr_init_snb(void)
+void __init intel_pmu_lbr_init_snb(void)
{
x86_pmu.lbr_nr = 16;
x86_pmu.lbr_tos = MSR_LBR_TOS;
@@ -753,7 +753,7 @@ void intel_pmu_lbr_init_snb(void)
}
/* atom */
-void intel_pmu_lbr_init_atom(void)
+void __init intel_pmu_lbr_init_atom(void)
{
/*
* only models starting at stepping 10 seems
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 0939f86f543d..9762dbd9f3f7 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -1,83 +1,39 @@
#include "perf_event_intel_uncore.h"
static struct intel_uncore_type *empty_uncore[] = { NULL, };
-static struct intel_uncore_type **msr_uncores = empty_uncore;
-static struct intel_uncore_type **pci_uncores = empty_uncore;
-/* pci bus to socket mapping */
-static int pcibus_to_physid[256] = { [0 ... 255] = -1, };
+struct intel_uncore_type **uncore_msr_uncores = empty_uncore;
+struct intel_uncore_type **uncore_pci_uncores = empty_uncore;
-static struct pci_dev *extra_pci_dev[UNCORE_SOCKET_MAX][UNCORE_EXTRA_PCI_DEV_MAX];
+static bool pcidrv_registered;
+struct pci_driver *uncore_pci_driver;
+/* pci bus to socket mapping */
+int uncore_pcibus_to_physid[256] = { [0 ... 255] = -1, };
+struct pci_dev *uncore_extra_pci_dev[UNCORE_SOCKET_MAX][UNCORE_EXTRA_PCI_DEV_MAX];
static DEFINE_RAW_SPINLOCK(uncore_box_lock);
-
/* mask of cpus that collect uncore events */
static cpumask_t uncore_cpu_mask;
/* constraint for the fixed counter */
-static struct event_constraint constraint_fixed =
+static struct event_constraint uncore_constraint_fixed =
EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
-static struct event_constraint constraint_empty =
+struct event_constraint uncore_constraint_empty =
EVENT_CONSTRAINT(0, 0, 0);
-#define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \
- ((1ULL << (n)) - 1)))
-
-DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
-DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
-DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
-DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
-DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
-DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
-DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
-DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
-DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
-DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
-DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
-DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
-DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
-DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
-DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
-DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
-DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
-DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
-DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
-DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
-DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
-DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
-DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
-DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
-DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
-DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
-DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
-DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
-DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
-DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
-DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
-DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
-DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
-DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
-DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
-DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
-DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
-DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
-DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
-DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
-DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
-DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
-DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
-
-static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
-static void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
-static void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event);
-static void uncore_pmu_event_read(struct perf_event *event);
-
-static struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
+ssize_t uncore_event_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct uncore_event_desc *event =
+ container_of(attr, struct uncore_event_desc, attr);
+ return sprintf(buf, "%s", event->config);
+}
+
+struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
{
return container_of(event->pmu, struct intel_uncore_pmu, pmu);
}
-static struct intel_uncore_box *
-uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
+struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
{
struct intel_uncore_box *box;
@@ -86,6 +42,9 @@ uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
return box;
raw_spin_lock(&uncore_box_lock);
+ /* Recheck in lock to handle races. */
+ if (*per_cpu_ptr(pmu->box, cpu))
+ goto out;
list_for_each_entry(box, &pmu->box_list, list) {
if (box->phys_id == topology_physical_package_id(cpu)) {
atomic_inc(&box->refcnt);
@@ -93,12 +52,13 @@ uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
break;
}
}
+out:
raw_spin_unlock(&uncore_box_lock);
return *per_cpu_ptr(pmu->box, cpu);
}
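
The added recheck is the usual check/lock/recheck idiom: another CPU may have installed the box between the lockless per-cpu lookup and taking uncore_box_lock. A generic pthread-based sketch of the idiom, purely illustrative (the kernel path looks the box up in box_list and takes a reference rather than allocating, and real lockless reads also need proper memory ordering):

#include <pthread.h>
#include <stdlib.h>
#include <stdio.h>

static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;
static void *slot;	/* stands in for the per-cpu box pointer */

/* Illustrative check/lock/recheck lookup.  Without the recheck under
 * the lock, two racing callers could both "install" a box. */
static void *get_or_install_slot(void)
{
	void *p = slot;

	if (p)				/* lockless fast path     */
		return p;

	pthread_mutex_lock(&slot_lock);
	if (!slot)			/* recheck under the lock */
		slot = malloc(64);
	p = slot;
	pthread_mutex_unlock(&slot_lock);
	return p;
}

int main(void)
{
	printf("slot=%p\n", get_or_install_slot());
	return 0;
}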
-static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
+struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
{
/*
* perf core schedules event on the basis of cpu, uncore events are
@@ -107,7 +67,7 @@ static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id());
}
-static u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
+u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
u64 count;
@@ -119,7 +79,7 @@ static u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_eve
/*
* generic get constraint function for shared match/mask registers.
*/
-static struct event_constraint *
+struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
struct intel_uncore_extra_reg *er;
@@ -154,10 +114,10 @@ uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
return NULL;
}
- return &constraint_empty;
+ return &uncore_constraint_empty;
}
-static void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
+void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
struct intel_uncore_extra_reg *er;
struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
@@ -178,7 +138,7 @@ static void uncore_put_constraint(struct intel_uncore_box *box, struct perf_even
reg1->alloc = 0;
}
-static u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
+u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
{
struct intel_uncore_extra_reg *er;
unsigned long flags;
@@ -193,2936 +153,6 @@ static u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
return config;
}
-/* Sandy Bridge-EP uncore support */
-static struct intel_uncore_type snbep_uncore_cbox;
-static struct intel_uncore_type snbep_uncore_pcu;
-
-static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
-{
- struct pci_dev *pdev = box->pci_dev;
- int box_ctl = uncore_pci_box_ctl(box);
- u32 config = 0;
-
- if (!pci_read_config_dword(pdev, box_ctl, &config)) {
- config |= SNBEP_PMON_BOX_CTL_FRZ;
- pci_write_config_dword(pdev, box_ctl, config);
- }
-}
-
-static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
-{
- struct pci_dev *pdev = box->pci_dev;
- int box_ctl = uncore_pci_box_ctl(box);
- u32 config = 0;
-
- if (!pci_read_config_dword(pdev, box_ctl, &config)) {
- config &= ~SNBEP_PMON_BOX_CTL_FRZ;
- pci_write_config_dword(pdev, box_ctl, config);
- }
-}
-
-static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
-{
- struct pci_dev *pdev = box->pci_dev;
- struct hw_perf_event *hwc = &event->hw;
-
- pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
-}
-
-static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
-{
- struct pci_dev *pdev = box->pci_dev;
- struct hw_perf_event *hwc = &event->hw;
-
- pci_write_config_dword(pdev, hwc->config_base, hwc->config);
-}
-
-static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
-{
- struct pci_dev *pdev = box->pci_dev;
- struct hw_perf_event *hwc = &event->hw;
- u64 count = 0;
-
- pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
- pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
-
- return count;
-}
-
-static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
-{
- struct pci_dev *pdev = box->pci_dev;
-
- pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, SNBEP_PMON_BOX_CTL_INT);
-}
-
-static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
-{
- u64 config;
- unsigned msr;
-
- msr = uncore_msr_box_ctl(box);
- if (msr) {
- rdmsrl(msr, config);
- config |= SNBEP_PMON_BOX_CTL_FRZ;
- wrmsrl(msr, config);
- }
-}
-
-static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
-{
- u64 config;
- unsigned msr;
-
- msr = uncore_msr_box_ctl(box);
- if (msr) {
- rdmsrl(msr, config);
- config &= ~SNBEP_PMON_BOX_CTL_FRZ;
- wrmsrl(msr, config);
- }
-}
-
-static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
-
- if (reg1->idx != EXTRA_REG_NONE)
- wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));
-
- wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
-}
-
-static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
- struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
-
- wrmsrl(hwc->config_base, hwc->config);
-}
-
-static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
-{
- unsigned msr = uncore_msr_box_ctl(box);
-
- if (msr)
- wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
-}
-
-static struct attribute *snbep_uncore_formats_attr[] = {
- &format_attr_event.attr,
- &format_attr_umask.attr,
- &format_attr_edge.attr,
- &format_attr_inv.attr,
- &format_attr_thresh8.attr,
- NULL,
-};
-
-static struct attribute *snbep_uncore_ubox_formats_attr[] = {
- &format_attr_event.attr,
- &format_attr_umask.attr,
- &format_attr_edge.attr,
- &format_attr_inv.attr,
- &format_attr_thresh5.attr,
- NULL,
-};
-
-static struct attribute *snbep_uncore_cbox_formats_attr[] = {
- &format_attr_event.attr,
- &format_attr_umask.attr,
- &format_attr_edge.attr,
- &format_attr_tid_en.attr,
- &format_attr_inv.attr,
- &format_attr_thresh8.attr,
- &format_attr_filter_tid.attr,
- &format_attr_filter_nid.attr,
- &format_attr_filter_state.attr,
- &format_attr_filter_opc.attr,
- NULL,
-};
-
-static struct attribute *snbep_uncore_pcu_formats_attr[] = {
- &format_attr_event_ext.attr,
- &format_attr_occ_sel.attr,
- &format_attr_edge.attr,
- &format_attr_inv.attr,
- &format_attr_thresh5.attr,
- &format_attr_occ_invert.attr,
- &format_attr_occ_edge.attr,
- &format_attr_filter_band0.attr,
- &format_attr_filter_band1.attr,
- &format_attr_filter_band2.attr,
- &format_attr_filter_band3.attr,
- NULL,
-};
-
-static struct attribute *snbep_uncore_qpi_formats_attr[] = {
- &format_attr_event_ext.attr,
- &format_attr_umask.attr,
- &format_attr_edge.attr,
- &format_attr_inv.attr,
- &format_attr_thresh8.attr,
- &format_attr_match_rds.attr,
- &format_attr_match_rnid30.attr,
- &format_attr_match_rnid4.attr,
- &format_attr_match_dnid.attr,
- &format_attr_match_mc.attr,
- &format_attr_match_opc.attr,
- &format_attr_match_vnw.attr,
- &format_attr_match0.attr,
- &format_attr_match1.attr,
- &format_attr_mask_rds.attr,
- &format_attr_mask_rnid30.attr,
- &format_attr_mask_rnid4.attr,
- &format_attr_mask_dnid.attr,
- &format_attr_mask_mc.attr,
- &format_attr_mask_opc.attr,
- &format_attr_mask_vnw.attr,
- &format_attr_mask0.attr,
- &format_attr_mask1.attr,
- NULL,
-};
-
-static struct uncore_event_desc snbep_uncore_imc_events[] = {
- INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
- INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"),
- INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
- { /* end: all zeroes */ },
-};
-
-static struct uncore_event_desc snbep_uncore_qpi_events[] = {
- INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"),
- INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
- INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x102,umask=0x08"),
- INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x103,umask=0x04"),
- { /* end: all zeroes */ },
-};
-
-static struct attribute_group snbep_uncore_format_group = {
- .name = "format",
- .attrs = snbep_uncore_formats_attr,
-};
-
-static struct attribute_group snbep_uncore_ubox_format_group = {
- .name = "format",
- .attrs = snbep_uncore_ubox_formats_attr,
-};
-
-static struct attribute_group snbep_uncore_cbox_format_group = {
- .name = "format",
- .attrs = snbep_uncore_cbox_formats_attr,
-};
-
-static struct attribute_group snbep_uncore_pcu_format_group = {
- .name = "format",
- .attrs = snbep_uncore_pcu_formats_attr,
-};
-
-static struct attribute_group snbep_uncore_qpi_format_group = {
- .name = "format",
- .attrs = snbep_uncore_qpi_formats_attr,
-};
-
-#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \
- .init_box = snbep_uncore_msr_init_box, \
- .disable_box = snbep_uncore_msr_disable_box, \
- .enable_box = snbep_uncore_msr_enable_box, \
- .disable_event = snbep_uncore_msr_disable_event, \
- .enable_event = snbep_uncore_msr_enable_event, \
- .read_counter = uncore_msr_read_counter
-
-static struct intel_uncore_ops snbep_uncore_msr_ops = {
- SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
-};
-
-#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT() \
- .init_box = snbep_uncore_pci_init_box, \
- .disable_box = snbep_uncore_pci_disable_box, \
- .enable_box = snbep_uncore_pci_enable_box, \
- .disable_event = snbep_uncore_pci_disable_event, \
- .read_counter = snbep_uncore_pci_read_counter
-
-static struct intel_uncore_ops snbep_uncore_pci_ops = {
- SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
- .enable_event = snbep_uncore_pci_enable_event, \
-};
-
-static struct event_constraint snbep_uncore_cbox_constraints[] = {
- UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
- UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
- UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
- UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
- UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
- UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
- EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff),
- UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
- UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
- EVENT_CONSTRAINT_END
-};
-
-static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
- UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
- UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
- EVENT_CONSTRAINT_END
-};
-
-static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
- UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
- UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
- UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
- EVENT_CONSTRAINT_END
-};
-
-static struct intel_uncore_type snbep_uncore_ubox = {
- .name = "ubox",
- .num_counters = 2,
- .num_boxes = 1,
- .perf_ctr_bits = 44,
- .fixed_ctr_bits = 48,
- .perf_ctr = SNBEP_U_MSR_PMON_CTR0,
- .event_ctl = SNBEP_U_MSR_PMON_CTL0,
- .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
- .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
- .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
- .ops = &snbep_uncore_msr_ops,
- .format_group = &snbep_uncore_ubox_format_group,
-};
-
-static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
- SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
- SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
- SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
- SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
- SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
- SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
- SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
- SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
- SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
- SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
- SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
- SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
- SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
- SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
- SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
- SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
- SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
- SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
- SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
- SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
- SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
- SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
- SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
- SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
- SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
- EVENT_EXTRA_END
-};
-
-static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
-{
- struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
- struct intel_uncore_extra_reg *er = &box->shared_regs[0];
- int i;
-
- if (uncore_box_is_fake(box))
- return;
-
- for (i = 0; i < 5; i++) {
- if (reg1->alloc & (0x1 << i))
- atomic_sub(1 << (i * 6), &er->ref);
- }
- reg1->alloc = 0;
-}
-
-static struct event_constraint *
-__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
- u64 (*cbox_filter_mask)(int fields))
-{
- struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
- struct intel_uncore_extra_reg *er = &box->shared_regs[0];
- int i, alloc = 0;
- unsigned long flags;
- u64 mask;
-
- if (reg1->idx == EXTRA_REG_NONE)
- return NULL;
-
- raw_spin_lock_irqsave(&er->lock, flags);
- for (i = 0; i < 5; i++) {
- if (!(reg1->idx & (0x1 << i)))
- continue;
- if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
- continue;
-
- mask = cbox_filter_mask(0x1 << i);
- if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
- !((reg1->config ^ er->config) & mask)) {
- atomic_add(1 << (i * 6), &er->ref);
- er->config &= ~mask;
- er->config |= reg1->config & mask;
- alloc |= (0x1 << i);
- } else {
- break;
- }
- }
- raw_spin_unlock_irqrestore(&er->lock, flags);
- if (i < 5)
- goto fail;
-
- if (!uncore_box_is_fake(box))
- reg1->alloc |= alloc;
-
- return NULL;
-fail:
- for (; i >= 0; i--) {
- if (alloc & (0x1 << i))
- atomic_sub(1 << (i * 6), &er->ref);
- }
- return &constraint_empty;
-}
-
-static u64 snbep_cbox_filter_mask(int fields)
-{
- u64 mask = 0;
-
- if (fields & 0x1)
- mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
- if (fields & 0x2)
- mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
- if (fields & 0x4)
- mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
- if (fields & 0x8)
- mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
-
- return mask;
-}
-
-static struct event_constraint *
-snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
-{
- return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
-}
-
-static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
-{
- struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
- struct extra_reg *er;
- int idx = 0;
-
- for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
- if (er->event != (event->hw.config & er->config_mask))
- continue;
- idx |= er->idx;
- }
-
- if (idx) {
- reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
- SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
- reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
- reg1->idx = idx;
- }
- return 0;
-}
-
-static struct intel_uncore_ops snbep_uncore_cbox_ops = {
- SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
- .hw_config = snbep_cbox_hw_config,
- .get_constraint = snbep_cbox_get_constraint,
- .put_constraint = snbep_cbox_put_constraint,
-};
-
-static struct intel_uncore_type snbep_uncore_cbox = {
- .name = "cbox",
- .num_counters = 4,
- .num_boxes = 8,
- .perf_ctr_bits = 44,
- .event_ctl = SNBEP_C0_MSR_PMON_CTL0,
- .perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
- .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
- .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
- .msr_offset = SNBEP_CBO_MSR_OFFSET,
- .num_shared_regs = 1,
- .constraints = snbep_uncore_cbox_constraints,
- .ops = &snbep_uncore_cbox_ops,
- .format_group = &snbep_uncore_cbox_format_group,
-};
-
-static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
-{
- struct hw_perf_event *hwc = &event->hw;
- struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
- u64 config = reg1->config;
-
- if (new_idx > reg1->idx)
- config <<= 8 * (new_idx - reg1->idx);
- else
- config >>= 8 * (reg1->idx - new_idx);
-
- if (modify) {
- hwc->config += new_idx - reg1->idx;
- reg1->config = config;
- reg1->idx = new_idx;
- }
- return config;
-}
-
-static struct event_constraint *
-snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
-{
- struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
- struct intel_uncore_extra_reg *er = &box->shared_regs[0];
- unsigned long flags;
- int idx = reg1->idx;
- u64 mask, config1 = reg1->config;
- bool ok = false;
-
- if (reg1->idx == EXTRA_REG_NONE ||
- (!uncore_box_is_fake(box) && reg1->alloc))
- return NULL;
-again:
- mask = 0xffULL << (idx * 8);
- raw_spin_lock_irqsave(&er->lock, flags);
- if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
- !((config1 ^ er->config) & mask)) {
- atomic_add(1 << (idx * 8), &er->ref);
- er->config &= ~mask;
- er->config |= config1 & mask;
- ok = true;
- }
- raw_spin_unlock_irqrestore(&er->lock, flags);
-
- if (!ok) {
- idx = (idx + 1) % 4;
- if (idx != reg1->idx) {
- config1 = snbep_pcu_alter_er(event, idx, false);
- goto again;
- }
- return &constraint_empty;
- }
-
- if (!uncore_box_is_fake(box)) {
- if (idx != reg1->idx)
- snbep_pcu_alter_er(event, idx, true);
- reg1->alloc = 1;
- }
- return NULL;
-}
-
-static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
-{
- struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
- struct intel_uncore_extra_reg *er = &box->shared_regs[0];
-
- if (uncore_box_is_fake(box) || !reg1->alloc)
- return;
-
- atomic_sub(1 << (reg1->idx * 8), &er->ref);
- reg1->alloc = 0;
-}
-
-static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
- int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
-
- if (ev_sel >= 0xb && ev_sel <= 0xe) {
- reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
- reg1->idx = ev_sel - 0xb;
- reg1->config = event->attr.config1 & (0xff << reg1->idx);
- }
- return 0;
-}
-
-static struct intel_uncore_ops snbep_uncore_pcu_ops = {
- SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
- .hw_config = snbep_pcu_hw_config,
- .get_constraint = snbep_pcu_get_constraint,
- .put_constraint = snbep_pcu_put_constraint,
-};
-
-static struct intel_uncore_type snbep_uncore_pcu = {
- .name = "pcu",
- .num_counters = 4,
- .num_boxes = 1,
- .perf_ctr_bits = 48,
- .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
- .event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
- .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
- .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
- .num_shared_regs = 1,
- .ops = &snbep_uncore_pcu_ops,
- .format_group = &snbep_uncore_pcu_format_group,
-};
-
-static struct intel_uncore_type *snbep_msr_uncores[] = {
- &snbep_uncore_ubox,
- &snbep_uncore_cbox,
- &snbep_uncore_pcu,
- NULL,
-};
-
-enum {
- SNBEP_PCI_QPI_PORT0_FILTER,
- SNBEP_PCI_QPI_PORT1_FILTER,
-};
-
-static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
- struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
-
- if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
- reg1->idx = 0;
- reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
- reg1->config = event->attr.config1;
- reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
- reg2->config = event->attr.config2;
- }
- return 0;
-}
-
-static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
-{
- struct pci_dev *pdev = box->pci_dev;
- struct hw_perf_event *hwc = &event->hw;
- struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
- struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
-
- if (reg1->idx != EXTRA_REG_NONE) {
- int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
- struct pci_dev *filter_pdev = extra_pci_dev[box->phys_id][idx];
- WARN_ON_ONCE(!filter_pdev);
- if (filter_pdev) {
- pci_write_config_dword(filter_pdev, reg1->reg,
- (u32)reg1->config);
- pci_write_config_dword(filter_pdev, reg1->reg + 4,
- (u32)(reg1->config >> 32));
- pci_write_config_dword(filter_pdev, reg2->reg,
- (u32)reg2->config);
- pci_write_config_dword(filter_pdev, reg2->reg + 4,
- (u32)(reg2->config >> 32));
- }
- }
-
- pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
-}
-
-static struct intel_uncore_ops snbep_uncore_qpi_ops = {
- SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
- .enable_event = snbep_qpi_enable_event,
- .hw_config = snbep_qpi_hw_config,
- .get_constraint = uncore_get_constraint,
- .put_constraint = uncore_put_constraint,
-};
-
-#define SNBEP_UNCORE_PCI_COMMON_INIT() \
- .perf_ctr = SNBEP_PCI_PMON_CTR0, \
- .event_ctl = SNBEP_PCI_PMON_CTL0, \
- .event_mask = SNBEP_PMON_RAW_EVENT_MASK, \
- .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \
- .ops = &snbep_uncore_pci_ops, \
- .format_group = &snbep_uncore_format_group
-
-static struct intel_uncore_type snbep_uncore_ha = {
- .name = "ha",
- .num_counters = 4,
- .num_boxes = 1,
- .perf_ctr_bits = 48,
- SNBEP_UNCORE_PCI_COMMON_INIT(),
-};
-
-static struct intel_uncore_type snbep_uncore_imc = {
- .name = "imc",
- .num_counters = 4,
- .num_boxes = 4,
- .perf_ctr_bits = 48,
- .fixed_ctr_bits = 48,
- .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
- .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
- .event_descs = snbep_uncore_imc_events,
- SNBEP_UNCORE_PCI_COMMON_INIT(),
-};
-
-static struct intel_uncore_type snbep_uncore_qpi = {
- .name = "qpi",
- .num_counters = 4,
- .num_boxes = 2,
- .perf_ctr_bits = 48,
- .perf_ctr = SNBEP_PCI_PMON_CTR0,
- .event_ctl = SNBEP_PCI_PMON_CTL0,
- .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
- .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
- .num_shared_regs = 1,
- .ops = &snbep_uncore_qpi_ops,
- .event_descs = snbep_uncore_qpi_events,
- .format_group = &snbep_uncore_qpi_format_group,
-};
-
-
-static struct intel_uncore_type snbep_uncore_r2pcie = {
- .name = "r2pcie",
- .num_counters = 4,
- .num_boxes = 1,
- .perf_ctr_bits = 44,
- .constraints = snbep_uncore_r2pcie_constraints,
- SNBEP_UNCORE_PCI_COMMON_INIT(),
-};
-
-static struct intel_uncore_type snbep_uncore_r3qpi = {
- .name = "r3qpi",
- .num_counters = 3,
- .num_boxes = 2,
- .perf_ctr_bits = 44,
- .constraints = snbep_uncore_r3qpi_constraints,
- SNBEP_UNCORE_PCI_COMMON_INIT(),
-};
-
-enum {
- SNBEP_PCI_UNCORE_HA,
- SNBEP_PCI_UNCORE_IMC,
- SNBEP_PCI_UNCORE_QPI,
- SNBEP_PCI_UNCORE_R2PCIE,
- SNBEP_PCI_UNCORE_R3QPI,
-};
-
-static struct intel_uncore_type *snbep_pci_uncores[] = {
- [SNBEP_PCI_UNCORE_HA] = &snbep_uncore_ha,
- [SNBEP_PCI_UNCORE_IMC] = &snbep_uncore_imc,
- [SNBEP_PCI_UNCORE_QPI] = &snbep_uncore_qpi,
- [SNBEP_PCI_UNCORE_R2PCIE] = &snbep_uncore_r2pcie,
- [SNBEP_PCI_UNCORE_R3QPI] = &snbep_uncore_r3qpi,
- NULL,
-};
-
-static const struct pci_device_id snbep_uncore_pci_ids[] = {
- { /* Home Agent */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
- .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
- },
- { /* MC Channel 0 */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
- .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
- },
- { /* MC Channel 1 */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
- .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
- },
- { /* MC Channel 2 */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
- .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
- },
- { /* MC Channel 3 */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
- .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
- },
- { /* QPI Port 0 */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
- .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
- },
- { /* QPI Port 1 */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
- .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
- },
- { /* R2PCIe */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
- .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
- },
- { /* R3QPI Link 0 */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
- .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
- },
- { /* R3QPI Link 1 */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
- .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
- },
- { /* QPI Port 0 filter */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
- .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
- SNBEP_PCI_QPI_PORT0_FILTER),
- },
- { /* QPI Port 0 filter */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
- .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
- SNBEP_PCI_QPI_PORT1_FILTER),
- },
- { /* end: all zeroes */ }
-};
-
-static struct pci_driver snbep_uncore_pci_driver = {
- .name = "snbep_uncore",
- .id_table = snbep_uncore_pci_ids,
-};
-
-/*
- * build pci bus to socket mapping
- */
-static int snbep_pci2phy_map_init(int devid)
-{
- struct pci_dev *ubox_dev = NULL;
- int i, bus, nodeid;
- int err = 0;
- u32 config = 0;
-
- while (1) {
- /* find the UBOX device */
- ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
- if (!ubox_dev)
- break;
- bus = ubox_dev->bus->number;
- /* get the Node ID of the local register */
- err = pci_read_config_dword(ubox_dev, 0x40, &config);
- if (err)
- break;
- nodeid = config;
- /* get the Node ID mapping */
- err = pci_read_config_dword(ubox_dev, 0x54, &config);
- if (err)
- break;
- /*
- * every three bits in the Node ID mapping register maps
- * to a particular node.
- */
- for (i = 0; i < 8; i++) {
- if (nodeid == ((config >> (3 * i)) & 0x7)) {
- pcibus_to_physid[bus] = i;
- break;
- }
- }
- }
-
- if (!err) {
- /*
- * For PCI bus with no UBOX device, find the next bus
- * that has UBOX device and use its mapping.
- */
- i = -1;
- for (bus = 255; bus >= 0; bus--) {
- if (pcibus_to_physid[bus] >= 0)
- i = pcibus_to_physid[bus];
- else
- pcibus_to_physid[bus] = i;
- }
- }
-
- if (ubox_dev)
- pci_dev_put(ubox_dev);
-
- return err ? pcibios_err_to_errno(err) : 0;
-}
-/* end of Sandy Bridge-EP uncore support */
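
For reference, the Node ID mapping register walked in snbep_pci2phy_map_init() above packs eight 3-bit node IDs; the slot whose value matches the local node ID (read at offset 0x40) becomes the physical package id for that bus. A small stand-alone decode of that layout, with an invented register value:

#include <stdio.h>
#include <stdint.h>

/* Decode the 8 x 3-bit Node ID mapping layout described above and
 * return the slot index whose value matches nodeid, or -1.  The
 * register value used below is invented for illustration: it encodes
 * node IDs 0..7 in slots 0..7. */
static int nodeid_to_slot(uint32_t mapping, uint32_t nodeid)
{
	int i;

	for (i = 0; i < 8; i++)
		if (((mapping >> (3 * i)) & 0x7) == nodeid)
			return i;
	return -1;
}

int main(void)
{
	uint32_t mapping = 0x00fac688;

	printf("node 1 -> slot %d\n", nodeid_to_slot(mapping, 1));
	printf("node 5 -> slot %d\n", nodeid_to_slot(mapping, 5));
	return 0;
}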
-
-/* IvyTown uncore support */
-static void ivt_uncore_msr_init_box(struct intel_uncore_box *box)
-{
- unsigned msr = uncore_msr_box_ctl(box);
- if (msr)
- wrmsrl(msr, IVT_PMON_BOX_CTL_INT);
-}
-
-static void ivt_uncore_pci_init_box(struct intel_uncore_box *box)
-{
- struct pci_dev *pdev = box->pci_dev;
-
- pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVT_PMON_BOX_CTL_INT);
-}
-
-#define IVT_UNCORE_MSR_OPS_COMMON_INIT() \
- .init_box = ivt_uncore_msr_init_box, \
- .disable_box = snbep_uncore_msr_disable_box, \
- .enable_box = snbep_uncore_msr_enable_box, \
- .disable_event = snbep_uncore_msr_disable_event, \
- .enable_event = snbep_uncore_msr_enable_event, \
- .read_counter = uncore_msr_read_counter
-
-static struct intel_uncore_ops ivt_uncore_msr_ops = {
- IVT_UNCORE_MSR_OPS_COMMON_INIT(),
-};
-
-static struct intel_uncore_ops ivt_uncore_pci_ops = {
- .init_box = ivt_uncore_pci_init_box,
- .disable_box = snbep_uncore_pci_disable_box,
- .enable_box = snbep_uncore_pci_enable_box,
- .disable_event = snbep_uncore_pci_disable_event,
- .enable_event = snbep_uncore_pci_enable_event,
- .read_counter = snbep_uncore_pci_read_counter,
-};
-
-#define IVT_UNCORE_PCI_COMMON_INIT() \
- .perf_ctr = SNBEP_PCI_PMON_CTR0, \
- .event_ctl = SNBEP_PCI_PMON_CTL0, \
- .event_mask = IVT_PMON_RAW_EVENT_MASK, \
- .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \
- .ops = &ivt_uncore_pci_ops, \
- .format_group = &ivt_uncore_format_group
-
-static struct attribute *ivt_uncore_formats_attr[] = {
- &format_attr_event.attr,
- &format_attr_umask.attr,
- &format_attr_edge.attr,
- &format_attr_inv.attr,
- &format_attr_thresh8.attr,
- NULL,
-};
-
-static struct attribute *ivt_uncore_ubox_formats_attr[] = {
- &format_attr_event.attr,
- &format_attr_umask.attr,
- &format_attr_edge.attr,
- &format_attr_inv.attr,
- &format_attr_thresh5.attr,
- NULL,
-};
-
-static struct attribute *ivt_uncore_cbox_formats_attr[] = {
- &format_attr_event.attr,
- &format_attr_umask.attr,
- &format_attr_edge.attr,
- &format_attr_tid_en.attr,
- &format_attr_thresh8.attr,
- &format_attr_filter_tid.attr,
- &format_attr_filter_link.attr,
- &format_attr_filter_state2.attr,
- &format_attr_filter_nid2.attr,
- &format_attr_filter_opc2.attr,
- NULL,
-};
-
-static struct attribute *ivt_uncore_pcu_formats_attr[] = {
- &format_attr_event_ext.attr,
- &format_attr_occ_sel.attr,
- &format_attr_edge.attr,
- &format_attr_thresh5.attr,
- &format_attr_occ_invert.attr,
- &format_attr_occ_edge.attr,
- &format_attr_filter_band0.attr,
- &format_attr_filter_band1.attr,
- &format_attr_filter_band2.attr,
- &format_attr_filter_band3.attr,
- NULL,
-};
-
-static struct attribute *ivt_uncore_qpi_formats_attr[] = {
- &format_attr_event_ext.attr,
- &format_attr_umask.attr,
- &format_attr_edge.attr,
- &format_attr_thresh8.attr,
- &format_attr_match_rds.attr,
- &format_attr_match_rnid30.attr,
- &format_attr_match_rnid4.attr,
- &format_attr_match_dnid.attr,
- &format_attr_match_mc.attr,
- &format_attr_match_opc.attr,
- &format_attr_match_vnw.attr,
- &format_attr_match0.attr,
- &format_attr_match1.attr,
- &format_attr_mask_rds.attr,
- &format_attr_mask_rnid30.attr,
- &format_attr_mask_rnid4.attr,
- &format_attr_mask_dnid.attr,
- &format_attr_mask_mc.attr,
- &format_attr_mask_opc.attr,
- &format_attr_mask_vnw.attr,
- &format_attr_mask0.attr,
- &format_attr_mask1.attr,
- NULL,
-};
-
-static struct attribute_group ivt_uncore_format_group = {
- .name = "format",
- .attrs = ivt_uncore_formats_attr,
-};
-
-static struct attribute_group ivt_uncore_ubox_format_group = {
- .name = "format",
- .attrs = ivt_uncore_ubox_formats_attr,
-};
-
-static struct attribute_group ivt_uncore_cbox_format_group = {
- .name = "format",
- .attrs = ivt_uncore_cbox_formats_attr,
-};
-
-static struct attribute_group ivt_uncore_pcu_format_group = {
- .name = "format",
- .attrs = ivt_uncore_pcu_formats_attr,
-};
-
-static struct attribute_group ivt_uncore_qpi_format_group = {
- .name = "format",
- .attrs = ivt_uncore_qpi_formats_attr,
-};
-
-static struct intel_uncore_type ivt_uncore_ubox = {
- .name = "ubox",
- .num_counters = 2,
- .num_boxes = 1,
- .perf_ctr_bits = 44,
- .fixed_ctr_bits = 48,
- .perf_ctr = SNBEP_U_MSR_PMON_CTR0,
- .event_ctl = SNBEP_U_MSR_PMON_CTL0,
- .event_mask = IVT_U_MSR_PMON_RAW_EVENT_MASK,
- .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
- .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
- .ops = &ivt_uncore_msr_ops,
- .format_group = &ivt_uncore_ubox_format_group,
-};
-
-static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
- SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
- SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
- SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
-
- SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
- SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
- SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
- SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
- SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
- SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
- SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
- SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
- SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
- SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
- SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
- SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
- SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
- SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
- SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
- SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
- SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
- SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
- SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
- SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
- SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
- SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
- SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
- SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
- SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
- SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
- SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
- SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
- SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
- SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
- SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
- SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
- SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
- SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
- EVENT_EXTRA_END
-};
-
-static u64 ivt_cbox_filter_mask(int fields)
-{
- u64 mask = 0;
-
- if (fields & 0x1)
- mask |= IVT_CB0_MSR_PMON_BOX_FILTER_TID;
- if (fields & 0x2)
- mask |= IVT_CB0_MSR_PMON_BOX_FILTER_LINK;
- if (fields & 0x4)
- mask |= IVT_CB0_MSR_PMON_BOX_FILTER_STATE;
- if (fields & 0x8)
- mask |= IVT_CB0_MSR_PMON_BOX_FILTER_NID;
- if (fields & 0x10)
- mask |= IVT_CB0_MSR_PMON_BOX_FILTER_OPC;
-
- return mask;
-}
-
-static struct event_constraint *
-ivt_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
-{
- return __snbep_cbox_get_constraint(box, event, ivt_cbox_filter_mask);
-}
-
-static int ivt_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
-{
- struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
- struct extra_reg *er;
- int idx = 0;
-
- for (er = ivt_uncore_cbox_extra_regs; er->msr; er++) {
- if (er->event != (event->hw.config & er->config_mask))
- continue;
- idx |= er->idx;
- }
-
- if (idx) {
- reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
- SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
- reg1->config = event->attr.config1 & ivt_cbox_filter_mask(idx);
- reg1->idx = idx;
- }
- return 0;
-}
-
-static void ivt_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
-
- if (reg1->idx != EXTRA_REG_NONE) {
- u64 filter = uncore_shared_reg_config(box, 0);
- wrmsrl(reg1->reg, filter & 0xffffffff);
- wrmsrl(reg1->reg + 6, filter >> 32);
- }
-
- wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
-}
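The cbox filter value is 64 bits wide but is programmed as two 32-bit MSR writes; judging by the code, the second half goes to a filter MSR six MSR numbers above the first. A minimal user-space sketch of that split, with wrmsrl() replaced by a printing stub and a placeholder MSR number:

	#include <stdint.h>
	#include <stdio.h>

	/* stand-in for wrmsrl(): prints instead of touching hardware */
	static void msr_write(uint32_t msr, uint64_t val)
	{
		printf("wrmsr %#x <- %#llx\n", msr, (unsigned long long)val);
	}

	static void write_split_filter(uint32_t filter_msr, uint64_t filter)
	{
		msr_write(filter_msr,     filter & 0xffffffffULL); /* low 32 bits  */
		msr_write(filter_msr + 6, filter >> 32);           /* high 32 bits */
	}

	int main(void)
	{
		write_split_filter(0x100 /* placeholder MSR */, 0x123456789abcdef0ULL);
		return 0;
	}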
-
-static struct intel_uncore_ops ivt_uncore_cbox_ops = {
- .init_box = ivt_uncore_msr_init_box,
- .disable_box = snbep_uncore_msr_disable_box,
- .enable_box = snbep_uncore_msr_enable_box,
- .disable_event = snbep_uncore_msr_disable_event,
- .enable_event = ivt_cbox_enable_event,
- .read_counter = uncore_msr_read_counter,
- .hw_config = ivt_cbox_hw_config,
- .get_constraint = ivt_cbox_get_constraint,
- .put_constraint = snbep_cbox_put_constraint,
-};
-
-static struct intel_uncore_type ivt_uncore_cbox = {
- .name = "cbox",
- .num_counters = 4,
- .num_boxes = 15,
- .perf_ctr_bits = 44,
- .event_ctl = SNBEP_C0_MSR_PMON_CTL0,
- .perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
- .event_mask = IVT_CBO_MSR_PMON_RAW_EVENT_MASK,
- .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
- .msr_offset = SNBEP_CBO_MSR_OFFSET,
- .num_shared_regs = 1,
- .constraints = snbep_uncore_cbox_constraints,
- .ops = &ivt_uncore_cbox_ops,
- .format_group = &ivt_uncore_cbox_format_group,
-};
-
-static struct intel_uncore_ops ivt_uncore_pcu_ops = {
- IVT_UNCORE_MSR_OPS_COMMON_INIT(),
- .hw_config = snbep_pcu_hw_config,
- .get_constraint = snbep_pcu_get_constraint,
- .put_constraint = snbep_pcu_put_constraint,
-};
-
-static struct intel_uncore_type ivt_uncore_pcu = {
- .name = "pcu",
- .num_counters = 4,
- .num_boxes = 1,
- .perf_ctr_bits = 48,
- .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
- .event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
- .event_mask = IVT_PCU_MSR_PMON_RAW_EVENT_MASK,
- .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
- .num_shared_regs = 1,
- .ops = &ivt_uncore_pcu_ops,
- .format_group = &ivt_uncore_pcu_format_group,
-};
-
-static struct intel_uncore_type *ivt_msr_uncores[] = {
- &ivt_uncore_ubox,
- &ivt_uncore_cbox,
- &ivt_uncore_pcu,
- NULL,
-};
-
-static struct intel_uncore_type ivt_uncore_ha = {
- .name = "ha",
- .num_counters = 4,
- .num_boxes = 2,
- .perf_ctr_bits = 48,
- IVT_UNCORE_PCI_COMMON_INIT(),
-};
-
-static struct intel_uncore_type ivt_uncore_imc = {
- .name = "imc",
- .num_counters = 4,
- .num_boxes = 8,
- .perf_ctr_bits = 48,
- .fixed_ctr_bits = 48,
- .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
- .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
- IVT_UNCORE_PCI_COMMON_INIT(),
-};
-
-/* registers in IRP boxes are not properly aligned */
-static unsigned ivt_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
-static unsigned ivt_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
-
-static void ivt_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
-{
- struct pci_dev *pdev = box->pci_dev;
- struct hw_perf_event *hwc = &event->hw;
-
- pci_write_config_dword(pdev, ivt_uncore_irp_ctls[hwc->idx],
- hwc->config | SNBEP_PMON_CTL_EN);
-}
-
-static void ivt_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
-{
- struct pci_dev *pdev = box->pci_dev;
- struct hw_perf_event *hwc = &event->hw;
-
- pci_write_config_dword(pdev, ivt_uncore_irp_ctls[hwc->idx], hwc->config);
-}
-
-static u64 ivt_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
-{
- struct pci_dev *pdev = box->pci_dev;
- struct hw_perf_event *hwc = &event->hw;
- u64 count = 0;
-
- pci_read_config_dword(pdev, ivt_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
- pci_read_config_dword(pdev, ivt_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
-
- return count;
-}
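The IRP counter is 64 bits wide but config space is read one dword at a time; the code above stores the two dwords into the halves of a u64 through pointer casts, which relies on the little-endian layout of x86. An endian-neutral sketch of the same composition, with a canned stand-in for pci_read_config_dword():

	#include <stdint.h>

	/* stand-in for pci_read_config_dword(): returns canned values here */
	static void cfg_read32(int where, uint32_t *val)
	{
		*val = (where & 4) ? 0x00000001u : 0x89abcdefu;
	}

	/* compose the 64-bit counter from the low and high dwords explicitly */
	static uint64_t read_counter64(int base)
	{
		uint32_t lo, hi;

		cfg_read32(base, &lo);
		cfg_read32(base + 4, &hi);
		return ((uint64_t)hi << 32) | lo;
	}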
-
-static struct intel_uncore_ops ivt_uncore_irp_ops = {
- .init_box = ivt_uncore_pci_init_box,
- .disable_box = snbep_uncore_pci_disable_box,
- .enable_box = snbep_uncore_pci_enable_box,
- .disable_event = ivt_uncore_irp_disable_event,
- .enable_event = ivt_uncore_irp_enable_event,
- .read_counter = ivt_uncore_irp_read_counter,
-};
-
-static struct intel_uncore_type ivt_uncore_irp = {
- .name = "irp",
- .num_counters = 4,
- .num_boxes = 1,
- .perf_ctr_bits = 48,
- .event_mask = IVT_PMON_RAW_EVENT_MASK,
- .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
- .ops = &ivt_uncore_irp_ops,
- .format_group = &ivt_uncore_format_group,
-};
-
-static struct intel_uncore_ops ivt_uncore_qpi_ops = {
- .init_box = ivt_uncore_pci_init_box,
- .disable_box = snbep_uncore_pci_disable_box,
- .enable_box = snbep_uncore_pci_enable_box,
- .disable_event = snbep_uncore_pci_disable_event,
- .enable_event = snbep_qpi_enable_event,
- .read_counter = snbep_uncore_pci_read_counter,
- .hw_config = snbep_qpi_hw_config,
- .get_constraint = uncore_get_constraint,
- .put_constraint = uncore_put_constraint,
-};
-
-static struct intel_uncore_type ivt_uncore_qpi = {
- .name = "qpi",
- .num_counters = 4,
- .num_boxes = 3,
- .perf_ctr_bits = 48,
- .perf_ctr = SNBEP_PCI_PMON_CTR0,
- .event_ctl = SNBEP_PCI_PMON_CTL0,
- .event_mask = IVT_QPI_PCI_PMON_RAW_EVENT_MASK,
- .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
- .num_shared_regs = 1,
- .ops = &ivt_uncore_qpi_ops,
- .format_group = &ivt_uncore_qpi_format_group,
-};
-
-static struct intel_uncore_type ivt_uncore_r2pcie = {
- .name = "r2pcie",
- .num_counters = 4,
- .num_boxes = 1,
- .perf_ctr_bits = 44,
- .constraints = snbep_uncore_r2pcie_constraints,
- IVT_UNCORE_PCI_COMMON_INIT(),
-};
-
-static struct intel_uncore_type ivt_uncore_r3qpi = {
- .name = "r3qpi",
- .num_counters = 3,
- .num_boxes = 2,
- .perf_ctr_bits = 44,
- .constraints = snbep_uncore_r3qpi_constraints,
- IVT_UNCORE_PCI_COMMON_INIT(),
-};
-
-enum {
- IVT_PCI_UNCORE_HA,
- IVT_PCI_UNCORE_IMC,
- IVT_PCI_UNCORE_IRP,
- IVT_PCI_UNCORE_QPI,
- IVT_PCI_UNCORE_R2PCIE,
- IVT_PCI_UNCORE_R3QPI,
-};
-
-static struct intel_uncore_type *ivt_pci_uncores[] = {
- [IVT_PCI_UNCORE_HA] = &ivt_uncore_ha,
- [IVT_PCI_UNCORE_IMC] = &ivt_uncore_imc,
- [IVT_PCI_UNCORE_IRP] = &ivt_uncore_irp,
- [IVT_PCI_UNCORE_QPI] = &ivt_uncore_qpi,
- [IVT_PCI_UNCORE_R2PCIE] = &ivt_uncore_r2pcie,
- [IVT_PCI_UNCORE_R3QPI] = &ivt_uncore_r3qpi,
- NULL,
-};
-
-static const struct pci_device_id ivt_uncore_pci_ids[] = {
- { /* Home Agent 0 */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
- .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 0),
- },
- { /* Home Agent 1 */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
- .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 1),
- },
- { /* MC0 Channel 0 */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
- .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 0),
- },
- { /* MC0 Channel 1 */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
- .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 1),
- },
- { /* MC0 Channel 3 */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
- .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 2),
- },
- { /* MC0 Channel 4 */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
- .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 3),
- },
- { /* MC1 Channel 0 */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
- .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 4),
- },
- { /* MC1 Channel 1 */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
- .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 5),
- },
- { /* MC1 Channel 3 */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
- .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 6),
- },
- { /* MC1 Channel 4 */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
- .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 7),
- },
- { /* IRP */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
- .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IRP, 0),
- },
- { /* QPI0 Port 0 */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
- .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 0),
- },
- { /* QPI0 Port 1 */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
- .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 1),
- },
- { /* QPI1 Port 2 */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
- .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 2),
- },
- { /* R2PCIe */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
- .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R2PCIE, 0),
- },
- { /* R3QPI0 Link 0 */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
- .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 0),
- },
- { /* R3QPI0 Link 1 */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
- .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 1),
- },
- { /* R3QPI1 Link 2 */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
- .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 2),
- },
- { /* QPI Port 0 filter */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
- .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
- SNBEP_PCI_QPI_PORT0_FILTER),
- },
- { /* QPI Port 0 filter */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
- .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
- SNBEP_PCI_QPI_PORT1_FILTER),
- },
- { /* end: all zeroes */ }
-};
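Each entry's driver_data encodes both which uncore type the device implements and which box instance it is; uncore_pci_probe() later splits it back out with UNCORE_PCI_DEV_TYPE()/UNCORE_PCI_DEV_IDX(). The real macros live in the uncore header; the pack/unpack below is only an illustrative guess at that layout.

	/* Illustrative only -- the real UNCORE_PCI_DEV_* macros may differ. */
	#define DEV_DATA(type, idx)	(((unsigned long)(type) << 8) | (idx))
	#define DEV_TYPE(data)		(((data) >> 8) & 0xff)
	#define DEV_IDX(data)		((data) & 0xff)

	/* e.g. DEV_DATA(IVT_PCI_UNCORE_QPI, 1) would tag "QPI box, instance 1";
	 * the probe path recovers the pieces with DEV_TYPE()/DEV_IDX(). */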
-
-static struct pci_driver ivt_uncore_pci_driver = {
- .name = "ivt_uncore",
- .id_table = ivt_uncore_pci_ids,
-};
-/* end of IvyTown uncore support */
-
-/* Sandy Bridge uncore support */
-static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
-
- if (hwc->idx < UNCORE_PMC_IDX_FIXED)
- wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
- else
- wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
-}
-
-static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
-{
- wrmsrl(event->hw.config_base, 0);
-}
-
-static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
-{
- if (box->pmu->pmu_idx == 0) {
- wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
- SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
- }
-}
-
-static struct uncore_event_desc snb_uncore_events[] = {
- INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
- { /* end: all zeroes */ },
-};
-
-static struct attribute *snb_uncore_formats_attr[] = {
- &format_attr_event.attr,
- &format_attr_umask.attr,
- &format_attr_edge.attr,
- &format_attr_inv.attr,
- &format_attr_cmask5.attr,
- NULL,
-};
-
-static struct attribute_group snb_uncore_format_group = {
- .name = "format",
- .attrs = snb_uncore_formats_attr,
-};
-
-static struct intel_uncore_ops snb_uncore_msr_ops = {
- .init_box = snb_uncore_msr_init_box,
- .disable_event = snb_uncore_msr_disable_event,
- .enable_event = snb_uncore_msr_enable_event,
- .read_counter = uncore_msr_read_counter,
-};
-
-static struct event_constraint snb_uncore_cbox_constraints[] = {
- UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
- UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
- EVENT_CONSTRAINT_END
-};
-
-static struct intel_uncore_type snb_uncore_cbox = {
- .name = "cbox",
- .num_counters = 2,
- .num_boxes = 4,
- .perf_ctr_bits = 44,
- .fixed_ctr_bits = 48,
- .perf_ctr = SNB_UNC_CBO_0_PER_CTR0,
- .event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0,
- .fixed_ctr = SNB_UNC_FIXED_CTR,
- .fixed_ctl = SNB_UNC_FIXED_CTR_CTRL,
- .single_fixed = 1,
- .event_mask = SNB_UNC_RAW_EVENT_MASK,
- .msr_offset = SNB_UNC_CBO_MSR_OFFSET,
- .constraints = snb_uncore_cbox_constraints,
- .ops = &snb_uncore_msr_ops,
- .format_group = &snb_uncore_format_group,
- .event_descs = snb_uncore_events,
-};
-
-static struct intel_uncore_type *snb_msr_uncores[] = {
- &snb_uncore_cbox,
- NULL,
-};
-
-enum {
- SNB_PCI_UNCORE_IMC,
-};
-
-static struct uncore_event_desc snb_uncore_imc_events[] = {
- INTEL_UNCORE_EVENT_DESC(data_reads, "event=0x01"),
- INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
- INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),
-
- INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
- INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
- INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),
-
- { /* end: all zeroes */ },
-};
-
-#define SNB_UNCORE_PCI_IMC_EVENT_MASK 0xff
-#define SNB_UNCORE_PCI_IMC_BAR_OFFSET 0x48
-
-/* page size multiple covering all config regs */
-#define SNB_UNCORE_PCI_IMC_MAP_SIZE 0x6000
-
-#define SNB_UNCORE_PCI_IMC_DATA_READS 0x1
-#define SNB_UNCORE_PCI_IMC_DATA_READS_BASE 0x5050
-#define SNB_UNCORE_PCI_IMC_DATA_WRITES 0x2
-#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE 0x5054
-#define SNB_UNCORE_PCI_IMC_CTR_BASE SNB_UNCORE_PCI_IMC_DATA_READS_BASE
-
-static struct attribute *snb_uncore_imc_formats_attr[] = {
- &format_attr_event.attr,
- NULL,
-};
-
-static struct attribute_group snb_uncore_imc_format_group = {
- .name = "format",
- .attrs = snb_uncore_imc_formats_attr,
-};
-
-static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
-{
- struct pci_dev *pdev = box->pci_dev;
- int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
- resource_size_t addr;
- u32 pci_dword;
-
- pci_read_config_dword(pdev, where, &pci_dword);
- addr = pci_dword;
-
-#ifdef CONFIG_PHYS_ADDR_T_64BIT
- pci_read_config_dword(pdev, where + 4, &pci_dword);
- addr |= ((resource_size_t)pci_dword << 32);
-#endif
-
- addr &= ~(PAGE_SIZE - 1);
-
- box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE);
- box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
-}
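The client IMC counters are memory mapped rather than MSR or config-space based: the BAR is read as one dword (plus a high dword when physical addresses are 64-bit), the low flag bits are dropped by page-aligning the value, and the result is ioremap()ed. A sketch of just the address math, assuming a 4 KiB page size:

	#include <stdint.h>

	#define PAGE_SZ 4096ULL	/* assumption for the sketch */

	/* rebuild the MMIO base from the two BAR dwords and page-align it */
	static uint64_t imc_bar_base(uint32_t bar_lo, uint32_t bar_hi)
	{
		uint64_t addr = ((uint64_t)bar_hi << 32) | bar_lo;

		return addr & ~(PAGE_SZ - 1);	/* strip flag/offset bits */
	}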
-
-static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
-{}
-
-static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
-{}
-
-static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
-{}
-
-static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
-{}
-
-static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
-
- return (u64)*(unsigned int *)(box->io_addr + hwc->event_base);
-}
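The read returns only the low 32 bits because the IMC counters are free running and just 32 bits wide (fixed_ctr_bits = 32 below); the hrtimer whose interval is set in init_box() and which event_start() arms re-reads them often enough for the generic update code to accumulate wrap-safe deltas. A sketch of that delta computation for an N-bit counter, mirroring what uncore_perf_event_update() is expected to do:

	#include <stdint.h>

	/* Wrap-safe delta between two snapshots of an N-bit up-counter: shift
	 * both values so the counter's top bit lands in bit 63, subtract, then
	 * shift back.  The generic uncore_perf_event_update() is assumed to do
	 * the equivalent with perf_ctr_bits/fixed_ctr_bits. */
	static uint64_t counter_delta(uint64_t prev, uint64_t now, int bits)
	{
		int shift = 64 - bits;

		return ((now << shift) - (prev << shift)) >> shift;
	}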
-
-/*
- * Custom event_init() function because we define our own fixed,
- * free-running counters, so we do not want to conflict with the
- * generic uncore logic. This also simplifies processing.
- */
-static int snb_uncore_imc_event_init(struct perf_event *event)
-{
- struct intel_uncore_pmu *pmu;
- struct intel_uncore_box *box;
- struct hw_perf_event *hwc = &event->hw;
- u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
- int idx, base;
-
- if (event->attr.type != event->pmu->type)
- return -ENOENT;
-
- pmu = uncore_event_to_pmu(event);
- /* no device found for this pmu */
- if (pmu->func_id < 0)
- return -ENOENT;
-
- /* Sampling not supported yet */
- if (hwc->sample_period)
- return -EINVAL;
-
- /* unsupported modes and filters */
- if (event->attr.exclude_user ||
- event->attr.exclude_kernel ||
- event->attr.exclude_hv ||
- event->attr.exclude_idle ||
- event->attr.exclude_host ||
- event->attr.exclude_guest ||
- event->attr.sample_period) /* no sampling */
- return -EINVAL;
-
- /*
- * Place all uncore events for a particular physical package
- * onto a single cpu
- */
- if (event->cpu < 0)
- return -EINVAL;
-
- /* check only supported bits are set */
- if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
- return -EINVAL;
-
- box = uncore_pmu_to_box(pmu, event->cpu);
- if (!box || box->cpu < 0)
- return -EINVAL;
-
- event->cpu = box->cpu;
-
- event->hw.idx = -1;
- event->hw.last_tag = ~0ULL;
- event->hw.extra_reg.idx = EXTRA_REG_NONE;
- event->hw.branch_reg.idx = EXTRA_REG_NONE;
- /*
- * check event is known (whitelist, determines counter)
- */
- switch (cfg) {
- case SNB_UNCORE_PCI_IMC_DATA_READS:
- base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
- idx = UNCORE_PMC_IDX_FIXED;
- break;
- case SNB_UNCORE_PCI_IMC_DATA_WRITES:
- base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
- idx = UNCORE_PMC_IDX_FIXED + 1;
- break;
- default:
- return -EINVAL;
- }
-
- /* must be done before validate_group */
- event->hw.event_base = base;
- event->hw.config = cfg;
- event->hw.idx = idx;
-
- /* no group validation needed, we have free running counters */
-
- return 0;
-}
-
-static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
-{
- return 0;
-}
-
-static void snb_uncore_imc_event_start(struct perf_event *event, int flags)
-{
- struct intel_uncore_box *box = uncore_event_to_box(event);
- u64 count;
-
- if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
- return;
-
- event->hw.state = 0;
- box->n_active++;
-
- list_add_tail(&event->active_entry, &box->active_list);
-
- count = snb_uncore_imc_read_counter(box, event);
- local64_set(&event->hw.prev_count, count);
-
- if (box->n_active == 1)
- uncore_pmu_start_hrtimer(box);
-}
-
-static void snb_uncore_imc_event_stop(struct perf_event *event, int flags)
-{
- struct intel_uncore_box *box = uncore_event_to_box(event);
- struct hw_perf_event *hwc = &event->hw;
-
- if (!(hwc->state & PERF_HES_STOPPED)) {
- box->n_active--;
-
- WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
- hwc->state |= PERF_HES_STOPPED;
-
- list_del(&event->active_entry);
-
- if (box->n_active == 0)
- uncore_pmu_cancel_hrtimer(box);
- }
-
- if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
- /*
- * Drain the remaining delta count out of an event
- * that we are disabling:
- */
- uncore_perf_event_update(box, event);
- hwc->state |= PERF_HES_UPTODATE;
- }
-}
-
-static int snb_uncore_imc_event_add(struct perf_event *event, int flags)
-{
- struct intel_uncore_box *box = uncore_event_to_box(event);
- struct hw_perf_event *hwc = &event->hw;
-
- if (!box)
- return -ENODEV;
-
- hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
- if (!(flags & PERF_EF_START))
- hwc->state |= PERF_HES_ARCH;
-
- snb_uncore_imc_event_start(event, 0);
-
- box->n_events++;
-
- return 0;
-}
-
-static void snb_uncore_imc_event_del(struct perf_event *event, int flags)
-{
- struct intel_uncore_box *box = uncore_event_to_box(event);
- int i;
-
- snb_uncore_imc_event_stop(event, PERF_EF_UPDATE);
-
- for (i = 0; i < box->n_events; i++) {
- if (event == box->event_list[i]) {
- --box->n_events;
- break;
- }
- }
-}
-
-static int snb_pci2phy_map_init(int devid)
-{
- struct pci_dev *dev = NULL;
- int bus;
-
- dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
- if (!dev)
- return -ENOTTY;
-
- bus = dev->bus->number;
-
- pcibus_to_physid[bus] = 0;
-
- pci_dev_put(dev);
-
- return 0;
-}
-
-static struct pmu snb_uncore_imc_pmu = {
- .task_ctx_nr = perf_invalid_context,
- .event_init = snb_uncore_imc_event_init,
- .add = snb_uncore_imc_event_add,
- .del = snb_uncore_imc_event_del,
- .start = snb_uncore_imc_event_start,
- .stop = snb_uncore_imc_event_stop,
- .read = uncore_pmu_event_read,
-};
-
-static struct intel_uncore_ops snb_uncore_imc_ops = {
- .init_box = snb_uncore_imc_init_box,
- .enable_box = snb_uncore_imc_enable_box,
- .disable_box = snb_uncore_imc_disable_box,
- .disable_event = snb_uncore_imc_disable_event,
- .enable_event = snb_uncore_imc_enable_event,
- .hw_config = snb_uncore_imc_hw_config,
- .read_counter = snb_uncore_imc_read_counter,
-};
-
-static struct intel_uncore_type snb_uncore_imc = {
- .name = "imc",
- .num_counters = 2,
- .num_boxes = 1,
- .fixed_ctr_bits = 32,
- .fixed_ctr = SNB_UNCORE_PCI_IMC_CTR_BASE,
- .event_descs = snb_uncore_imc_events,
- .format_group = &snb_uncore_imc_format_group,
- .perf_ctr = SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
- .event_mask = SNB_UNCORE_PCI_IMC_EVENT_MASK,
- .ops = &snb_uncore_imc_ops,
- .pmu = &snb_uncore_imc_pmu,
-};
-
-static struct intel_uncore_type *snb_pci_uncores[] = {
- [SNB_PCI_UNCORE_IMC] = &snb_uncore_imc,
- NULL,
-};
-
-static const struct pci_device_id snb_uncore_pci_ids[] = {
- { /* IMC */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC),
- .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
- },
- { /* end: all zeroes */ },
-};
-
-static const struct pci_device_id ivb_uncore_pci_ids[] = {
- { /* IMC */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC),
- .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
- },
- { /* end: all zeroes */ },
-};
-
-static const struct pci_device_id hsw_uncore_pci_ids[] = {
- { /* IMC */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
- .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
- },
- { /* end: all zeroes */ },
-};
-
-static struct pci_driver snb_uncore_pci_driver = {
- .name = "snb_uncore",
- .id_table = snb_uncore_pci_ids,
-};
-
-static struct pci_driver ivb_uncore_pci_driver = {
- .name = "ivb_uncore",
- .id_table = ivb_uncore_pci_ids,
-};
-
-static struct pci_driver hsw_uncore_pci_driver = {
- .name = "hsw_uncore",
- .id_table = hsw_uncore_pci_ids,
-};
-
-/* end of Sandy Bridge uncore support */
-
-/* Nehalem uncore support */
-static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
-{
- wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
-}
-
-static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
-{
- wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
-}
-
-static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
-
- if (hwc->idx < UNCORE_PMC_IDX_FIXED)
- wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
- else
- wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
-}
-
-static struct attribute *nhm_uncore_formats_attr[] = {
- &format_attr_event.attr,
- &format_attr_umask.attr,
- &format_attr_edge.attr,
- &format_attr_inv.attr,
- &format_attr_cmask8.attr,
- NULL,
-};
-
-static struct attribute_group nhm_uncore_format_group = {
- .name = "format",
- .attrs = nhm_uncore_formats_attr,
-};
-
-static struct uncore_event_desc nhm_uncore_events[] = {
- INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
- INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any, "event=0x2f,umask=0x0f"),
- INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any, "event=0x2c,umask=0x0f"),
- INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads, "event=0x20,umask=0x01"),
- INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes, "event=0x20,umask=0x02"),
- INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads, "event=0x20,umask=0x04"),
- INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
- INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads, "event=0x20,umask=0x10"),
- INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes, "event=0x20,umask=0x20"),
- { /* end: all zeroes */ },
-};
-
-static struct intel_uncore_ops nhm_uncore_msr_ops = {
- .disable_box = nhm_uncore_msr_disable_box,
- .enable_box = nhm_uncore_msr_enable_box,
- .disable_event = snb_uncore_msr_disable_event,
- .enable_event = nhm_uncore_msr_enable_event,
- .read_counter = uncore_msr_read_counter,
-};
-
-static struct intel_uncore_type nhm_uncore = {
- .name = "",
- .num_counters = 8,
- .num_boxes = 1,
- .perf_ctr_bits = 48,
- .fixed_ctr_bits = 48,
- .event_ctl = NHM_UNC_PERFEVTSEL0,
- .perf_ctr = NHM_UNC_UNCORE_PMC0,
- .fixed_ctr = NHM_UNC_FIXED_CTR,
- .fixed_ctl = NHM_UNC_FIXED_CTR_CTRL,
- .event_mask = NHM_UNC_RAW_EVENT_MASK,
- .event_descs = nhm_uncore_events,
- .ops = &nhm_uncore_msr_ops,
- .format_group = &nhm_uncore_format_group,
-};
-
-static struct intel_uncore_type *nhm_msr_uncores[] = {
- &nhm_uncore,
- NULL,
-};
-/* end of Nehalem uncore support */
-
-/* Nehalem-EX uncore support */
-DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
-DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
-DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");
-DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63");
-
-static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box)
-{
- wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL);
-}
-
-static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box)
-{
- unsigned msr = uncore_msr_box_ctl(box);
- u64 config;
-
- if (msr) {
- rdmsrl(msr, config);
- config &= ~((1ULL << uncore_num_counters(box)) - 1);
- /* WBox has a fixed counter */
- if (uncore_msr_fixed_ctl(box))
- config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN;
- wrmsrl(msr, config);
- }
-}
-
-static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box)
-{
- unsigned msr = uncore_msr_box_ctl(box);
- u64 config;
-
- if (msr) {
- rdmsrl(msr, config);
- config |= (1ULL << uncore_num_counters(box)) - 1;
- /* WBox has a fixed counter */
- if (uncore_msr_fixed_ctl(box))
- config |= NHMEX_W_PMON_GLOBAL_FIXED_EN;
- wrmsrl(msr, config);
- }
-}
-
-static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
-{
- wrmsrl(event->hw.config_base, 0);
-}
-
-static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
-
- if (hwc->idx >= UNCORE_PMC_IDX_FIXED)
- wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
- else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
- wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
- else
- wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
-}
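Which bit enables a counter differs per NHM-EX box: if the box type's raw event mask already claims bit 0 for the event encoding, the enable bit is bit 22, otherwise it is bit 0, and fixed counters are enabled with bit 0 alone. A compact restatement of that decision:

	#include <stdint.h>
	#include <stdbool.h>

	#define EN_BIT0		(1ULL << 0)
	#define EN_BIT22	(1ULL << 22)

	/* mirrors the branch above: pick the enable bit the box layout allows */
	static uint64_t nhmex_ctl_value(uint64_t type_event_mask, uint64_t config,
					bool fixed_counter)
	{
		if (fixed_counter)
			return EN_BIT0;
		if (type_event_mask & EN_BIT0)
			return config | EN_BIT22;
		return config | EN_BIT0;
	}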
-
-#define NHMEX_UNCORE_OPS_COMMON_INIT() \
- .init_box = nhmex_uncore_msr_init_box, \
- .disable_box = nhmex_uncore_msr_disable_box, \
- .enable_box = nhmex_uncore_msr_enable_box, \
- .disable_event = nhmex_uncore_msr_disable_event, \
- .read_counter = uncore_msr_read_counter
-
-static struct intel_uncore_ops nhmex_uncore_ops = {
- NHMEX_UNCORE_OPS_COMMON_INIT(),
- .enable_event = nhmex_uncore_msr_enable_event,
-};
-
-static struct attribute *nhmex_uncore_ubox_formats_attr[] = {
- &format_attr_event.attr,
- &format_attr_edge.attr,
- NULL,
-};
-
-static struct attribute_group nhmex_uncore_ubox_format_group = {
- .name = "format",
- .attrs = nhmex_uncore_ubox_formats_attr,
-};
-
-static struct intel_uncore_type nhmex_uncore_ubox = {
- .name = "ubox",
- .num_counters = 1,
- .num_boxes = 1,
- .perf_ctr_bits = 48,
- .event_ctl = NHMEX_U_MSR_PMON_EV_SEL,
- .perf_ctr = NHMEX_U_MSR_PMON_CTR,
- .event_mask = NHMEX_U_PMON_RAW_EVENT_MASK,
- .box_ctl = NHMEX_U_MSR_PMON_GLOBAL_CTL,
- .ops = &nhmex_uncore_ops,
- .format_group = &nhmex_uncore_ubox_format_group
-};
-
-static struct attribute *nhmex_uncore_cbox_formats_attr[] = {
- &format_attr_event.attr,
- &format_attr_umask.attr,
- &format_attr_edge.attr,
- &format_attr_inv.attr,
- &format_attr_thresh8.attr,
- NULL,
-};
-
-static struct attribute_group nhmex_uncore_cbox_format_group = {
- .name = "format",
- .attrs = nhmex_uncore_cbox_formats_attr,
-};
-
-/* MSR offset for each instance of cbox */
-static unsigned nhmex_cbox_msr_offsets[] = {
- 0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0,
-};
-
-static struct intel_uncore_type nhmex_uncore_cbox = {
- .name = "cbox",
- .num_counters = 6,
- .num_boxes = 10,
- .perf_ctr_bits = 48,
- .event_ctl = NHMEX_C0_MSR_PMON_EV_SEL0,
- .perf_ctr = NHMEX_C0_MSR_PMON_CTR0,
- .event_mask = NHMEX_PMON_RAW_EVENT_MASK,
- .box_ctl = NHMEX_C0_MSR_PMON_GLOBAL_CTL,
- .msr_offsets = nhmex_cbox_msr_offsets,
- .pair_ctr_ctl = 1,
- .ops = &nhmex_uncore_ops,
- .format_group = &nhmex_uncore_cbox_format_group
-};
-
-static struct uncore_event_desc nhmex_uncore_wbox_events[] = {
- INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0"),
- { /* end: all zeroes */ },
-};
-
-static struct intel_uncore_type nhmex_uncore_wbox = {
- .name = "wbox",
- .num_counters = 4,
- .num_boxes = 1,
- .perf_ctr_bits = 48,
- .event_ctl = NHMEX_W_MSR_PMON_CNT0,
- .perf_ctr = NHMEX_W_MSR_PMON_EVT_SEL0,
- .fixed_ctr = NHMEX_W_MSR_PMON_FIXED_CTR,
- .fixed_ctl = NHMEX_W_MSR_PMON_FIXED_CTL,
- .event_mask = NHMEX_PMON_RAW_EVENT_MASK,
- .box_ctl = NHMEX_W_MSR_GLOBAL_CTL,
- .pair_ctr_ctl = 1,
- .event_descs = nhmex_uncore_wbox_events,
- .ops = &nhmex_uncore_ops,
- .format_group = &nhmex_uncore_cbox_format_group
-};
-
-static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
- struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
- int ctr, ev_sel;
-
- ctr = (hwc->config & NHMEX_B_PMON_CTR_MASK) >>
- NHMEX_B_PMON_CTR_SHIFT;
- ev_sel = (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK) >>
- NHMEX_B_PMON_CTL_EV_SEL_SHIFT;
-
- /* events that do not use the match/mask registers */
- if ((ctr == 0 && ev_sel > 0x3) || (ctr == 1 && ev_sel > 0x6) ||
- (ctr == 2 && ev_sel != 0x4) || ctr == 3)
- return 0;
-
- if (box->pmu->pmu_idx == 0)
- reg1->reg = NHMEX_B0_MSR_MATCH;
- else
- reg1->reg = NHMEX_B1_MSR_MATCH;
- reg1->idx = 0;
- reg1->config = event->attr.config1;
- reg2->config = event->attr.config2;
- return 0;
-}
-
-static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
- struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
-
- if (reg1->idx != EXTRA_REG_NONE) {
- wrmsrl(reg1->reg, reg1->config);
- wrmsrl(reg1->reg + 1, reg2->config);
- }
- wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
- (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK));
-}
-
-/*
- * The Bbox has 4 counters, but each counter monitors different events.
- * Use bits 6-7 in the event config to select the counter.
- */
-static struct event_constraint nhmex_uncore_bbox_constraints[] = {
-	EVENT_CONSTRAINT(0, 1, 0xc0),
- EVENT_CONSTRAINT(0x40, 2, 0xc0),
- EVENT_CONSTRAINT(0x80, 4, 0xc0),
- EVENT_CONSTRAINT(0xc0, 8, 0xc0),
- EVENT_CONSTRAINT_END,
-};
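Read together with the comment above: the selector in config bits 6-7 picks which of the four B-box counters an event may use, and each constraint row maps one selector value to a single-counter bitmask. The EVENT_CONSTRAINT() field order lives in the perf headers; the simplified stand-in below only shows the lookup idea and assumes the rows read (code, counter mask, compared bits).

	#include <stdint.h>
	#include <stddef.h>

	/* simplified stand-in for struct event_constraint */
	struct constraint {
		uint64_t code;		/* value of config bits 6-7 */
		uint64_t cmask;		/* bits compared (0xc0)     */
		uint64_t idxmsk;	/* allowed counters         */
	};

	static const struct constraint bbox_constraints[] = {
		{ 0x00, 0xc0, 0x1 },
		{ 0x40, 0xc0, 0x2 },
		{ 0x80, 0xc0, 0x4 },
		{ 0xc0, 0xc0, 0x8 },
	};

	/* return the allowed-counter mask for a raw config, 0 if none matches */
	static uint64_t allowed_counters(uint64_t config)
	{
		size_t i;

		for (i = 0; i < sizeof(bbox_constraints) / sizeof(bbox_constraints[0]); i++)
			if ((config & bbox_constraints[i].cmask) == bbox_constraints[i].code)
				return bbox_constraints[i].idxmsk;
		return 0;
	}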
-
-static struct attribute *nhmex_uncore_bbox_formats_attr[] = {
- &format_attr_event5.attr,
- &format_attr_counter.attr,
- &format_attr_match.attr,
- &format_attr_mask.attr,
- NULL,
-};
-
-static struct attribute_group nhmex_uncore_bbox_format_group = {
- .name = "format",
- .attrs = nhmex_uncore_bbox_formats_attr,
-};
-
-static struct intel_uncore_ops nhmex_uncore_bbox_ops = {
- NHMEX_UNCORE_OPS_COMMON_INIT(),
- .enable_event = nhmex_bbox_msr_enable_event,
- .hw_config = nhmex_bbox_hw_config,
- .get_constraint = uncore_get_constraint,
- .put_constraint = uncore_put_constraint,
-};
-
-static struct intel_uncore_type nhmex_uncore_bbox = {
- .name = "bbox",
- .num_counters = 4,
- .num_boxes = 2,
- .perf_ctr_bits = 48,
- .event_ctl = NHMEX_B0_MSR_PMON_CTL0,
- .perf_ctr = NHMEX_B0_MSR_PMON_CTR0,
- .event_mask = NHMEX_B_PMON_RAW_EVENT_MASK,
- .box_ctl = NHMEX_B0_MSR_PMON_GLOBAL_CTL,
- .msr_offset = NHMEX_B_MSR_OFFSET,
- .pair_ctr_ctl = 1,
- .num_shared_regs = 1,
- .constraints = nhmex_uncore_bbox_constraints,
- .ops = &nhmex_uncore_bbox_ops,
- .format_group = &nhmex_uncore_bbox_format_group
-};
-
-static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
- struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
-
- /* only TO_R_PROG_EV event uses the match/mask register */
- if ((hwc->config & NHMEX_PMON_CTL_EV_SEL_MASK) !=
- NHMEX_S_EVENT_TO_R_PROG_EV)
- return 0;
-
- if (box->pmu->pmu_idx == 0)
- reg1->reg = NHMEX_S0_MSR_MM_CFG;
- else
- reg1->reg = NHMEX_S1_MSR_MM_CFG;
- reg1->idx = 0;
- reg1->config = event->attr.config1;
- reg2->config = event->attr.config2;
- return 0;
-}
-
-static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
- struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
-
- if (reg1->idx != EXTRA_REG_NONE) {
- wrmsrl(reg1->reg, 0);
- wrmsrl(reg1->reg + 1, reg1->config);
- wrmsrl(reg1->reg + 2, reg2->config);
- wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN);
- }
- wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
-}
-
-static struct attribute *nhmex_uncore_sbox_formats_attr[] = {
- &format_attr_event.attr,
- &format_attr_umask.attr,
- &format_attr_edge.attr,
- &format_attr_inv.attr,
- &format_attr_thresh8.attr,
- &format_attr_match.attr,
- &format_attr_mask.attr,
- NULL,
-};
-
-static struct attribute_group nhmex_uncore_sbox_format_group = {
- .name = "format",
- .attrs = nhmex_uncore_sbox_formats_attr,
-};
-
-static struct intel_uncore_ops nhmex_uncore_sbox_ops = {
- NHMEX_UNCORE_OPS_COMMON_INIT(),
- .enable_event = nhmex_sbox_msr_enable_event,
- .hw_config = nhmex_sbox_hw_config,
- .get_constraint = uncore_get_constraint,
- .put_constraint = uncore_put_constraint,
-};
-
-static struct intel_uncore_type nhmex_uncore_sbox = {
- .name = "sbox",
- .num_counters = 4,
- .num_boxes = 2,
- .perf_ctr_bits = 48,
- .event_ctl = NHMEX_S0_MSR_PMON_CTL0,
- .perf_ctr = NHMEX_S0_MSR_PMON_CTR0,
- .event_mask = NHMEX_PMON_RAW_EVENT_MASK,
- .box_ctl = NHMEX_S0_MSR_PMON_GLOBAL_CTL,
- .msr_offset = NHMEX_S_MSR_OFFSET,
- .pair_ctr_ctl = 1,
- .num_shared_regs = 1,
- .ops = &nhmex_uncore_sbox_ops,
- .format_group = &nhmex_uncore_sbox_format_group
-};
-
-enum {
- EXTRA_REG_NHMEX_M_FILTER,
- EXTRA_REG_NHMEX_M_DSP,
- EXTRA_REG_NHMEX_M_ISS,
- EXTRA_REG_NHMEX_M_MAP,
- EXTRA_REG_NHMEX_M_MSC_THR,
- EXTRA_REG_NHMEX_M_PGT,
- EXTRA_REG_NHMEX_M_PLD,
- EXTRA_REG_NHMEX_M_ZDP_CTL_FVC,
-};
-
-static struct extra_reg nhmex_uncore_mbox_extra_regs[] = {
- MBOX_INC_SEL_EXTAR_REG(0x0, DSP),
- MBOX_INC_SEL_EXTAR_REG(0x4, MSC_THR),
- MBOX_INC_SEL_EXTAR_REG(0x5, MSC_THR),
- MBOX_INC_SEL_EXTAR_REG(0x9, ISS),
- /* event 0xa uses two extra registers */
- MBOX_INC_SEL_EXTAR_REG(0xa, ISS),
- MBOX_INC_SEL_EXTAR_REG(0xa, PLD),
- MBOX_INC_SEL_EXTAR_REG(0xb, PLD),
- /* events 0xd ~ 0x10 use the same extra register */
- MBOX_INC_SEL_EXTAR_REG(0xd, ZDP_CTL_FVC),
- MBOX_INC_SEL_EXTAR_REG(0xe, ZDP_CTL_FVC),
- MBOX_INC_SEL_EXTAR_REG(0xf, ZDP_CTL_FVC),
- MBOX_INC_SEL_EXTAR_REG(0x10, ZDP_CTL_FVC),
- MBOX_INC_SEL_EXTAR_REG(0x16, PGT),
- MBOX_SET_FLAG_SEL_EXTRA_REG(0x0, DSP),
- MBOX_SET_FLAG_SEL_EXTRA_REG(0x1, ISS),
- MBOX_SET_FLAG_SEL_EXTRA_REG(0x5, PGT),
- MBOX_SET_FLAG_SEL_EXTRA_REG(0x6, MAP),
- EVENT_EXTRA_END
-};
-
-/* Nehalem-EX or Westmere-EX? */
-static bool uncore_nhmex;
-
-static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)
-{
- struct intel_uncore_extra_reg *er;
- unsigned long flags;
- bool ret = false;
- u64 mask;
-
- if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
- er = &box->shared_regs[idx];
- raw_spin_lock_irqsave(&er->lock, flags);
- if (!atomic_read(&er->ref) || er->config == config) {
- atomic_inc(&er->ref);
- er->config = config;
- ret = true;
- }
- raw_spin_unlock_irqrestore(&er->lock, flags);
-
- return ret;
- }
- /*
- * The ZDP_CTL_FVC MSR has 4 fields which are used to control
- * events 0xd ~ 0x10. Besides these 4 fields, there are additional
- * fields which are shared.
- */
- idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
- if (WARN_ON_ONCE(idx >= 4))
- return false;
-
- /* mask of the shared fields */
- if (uncore_nhmex)
- mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK;
- else
- mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK;
- er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
-
- raw_spin_lock_irqsave(&er->lock, flags);
- /* add mask of the non-shared field if it's in use */
- if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) {
- if (uncore_nhmex)
- mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
- else
- mask |= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
- }
-
- if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) {
- atomic_add(1 << (idx * 8), &er->ref);
- if (uncore_nhmex)
- mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK |
- NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
- else
- mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK |
- WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
- er->config &= ~mask;
- er->config |= (config & mask);
- ret = true;
- }
- raw_spin_unlock_irqrestore(&er->lock, flags);
-
- return ret;
-}
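The four ZDP_CTL_FVC event fields share one extra-register slot, so its reference count is split into four 8-bit lanes of a single atomic_t: lane idx is bumped with atomic_add(1 << (idx * 8)) and inspected with __BITS_VALUE(..., idx, 8). A user-space sketch of that packed-lane refcount, with a plain unsigned int in place of atomic_t and without the spinlock the real code holds:

	static unsigned int packed_ref;	/* four 8-bit refcounts in one word */

	static unsigned int ref_lane(int idx)
	{
		return (packed_ref >> (idx * 8)) & 0xff;
	}

	static void ref_get(int idx)
	{
		packed_ref += 1u << (idx * 8);
	}

	static void ref_put(int idx)
	{
		packed_ref -= 1u << (idx * 8);
	}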
-
-static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx)
-{
- struct intel_uncore_extra_reg *er;
-
- if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
- er = &box->shared_regs[idx];
- atomic_dec(&er->ref);
- return;
- }
-
- idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
- er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
- atomic_sub(1 << (idx * 8), &er->ref);
-}
-
-static u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
-{
- struct hw_perf_event *hwc = &event->hw;
- struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
- u64 idx, orig_idx = __BITS_VALUE(reg1->idx, 0, 8);
- u64 config = reg1->config;
-
- /* get the non-shared control bits and shift them */
- idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
- if (uncore_nhmex)
- config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
- else
- config &= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
- if (new_idx > orig_idx) {
- idx = new_idx - orig_idx;
- config <<= 3 * idx;
- } else {
- idx = orig_idx - new_idx;
- config >>= 3 * idx;
- }
-
- /* add the shared control bits back */
- if (uncore_nhmex)
- config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
- else
- config |= WSMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
- config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
- if (modify) {
- /* adjust the main event selector */
- if (new_idx > orig_idx)
- hwc->config += idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
- else
- hwc->config -= idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
- reg1->config = config;
- reg1->idx = ~0xff | new_idx;
- }
- return config;
-}
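When an event has to migrate from one ZDP_CTL_FVC field to another, its non-shared selector bits are moved by 3 bits per step (the per-event fields are evidently 3 bits apart) while the shared bits stay put, and the inc_sel in the main config is bumped by the same distance. A generic sketch of relocating such a sub-field between evenly spaced slots; the stride and masks are illustrative:

	#include <stdint.h>

	#define STRIDE		3			/* bits per slot (illustrative) */
	#define SLOT_MASK(i)	(0x7ULL << ((i) * STRIDE))

	/* move the per-slot field from slot 'from' to slot 'to', keep shared bits */
	static uint64_t move_slot(uint64_t config, uint64_t shared_mask,
				  int from, int to)
	{
		uint64_t field = config & SLOT_MASK(from);

		if (to > from)
			field <<= STRIDE * (to - from);
		else
			field >>= STRIDE * (from - to);

		return (config & shared_mask) | field;
	}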
-
-static struct event_constraint *
-nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
-{
- struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
- struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
- int i, idx[2], alloc = 0;
- u64 config1 = reg1->config;
-
- idx[0] = __BITS_VALUE(reg1->idx, 0, 8);
- idx[1] = __BITS_VALUE(reg1->idx, 1, 8);
-again:
- for (i = 0; i < 2; i++) {
- if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
- idx[i] = 0xff;
-
- if (idx[i] == 0xff)
- continue;
-
- if (!nhmex_mbox_get_shared_reg(box, idx[i],
- __BITS_VALUE(config1, i, 32)))
- goto fail;
- alloc |= (0x1 << i);
- }
-
- /* for the match/mask registers */
- if (reg2->idx != EXTRA_REG_NONE &&
- (uncore_box_is_fake(box) || !reg2->alloc) &&
- !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config))
- goto fail;
-
- /*
- * If it's a fake box -- as per validate_{group,event}() -- we
- * shouldn't touch event state, and we can avoid doing so
- * since both will only call get_event_constraints() once
- * per event; this avoids the need for reg->alloc.
- */
- if (!uncore_box_is_fake(box)) {
- if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8))
- nhmex_mbox_alter_er(event, idx[0], true);
- reg1->alloc |= alloc;
- if (reg2->idx != EXTRA_REG_NONE)
- reg2->alloc = 1;
- }
- return NULL;
-fail:
- if (idx[0] != 0xff && !(alloc & 0x1) &&
- idx[0] >= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
- /*
- * events 0xd ~ 0x10 are functionally identical, but are
- * controlled by different fields in the ZDP_CTL_FVC
- * register. If we failed to take one field, try the
- * remaining 3 choices.
- */
- BUG_ON(__BITS_VALUE(reg1->idx, 1, 8) != 0xff);
- idx[0] -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
- idx[0] = (idx[0] + 1) % 4;
- idx[0] += EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
- if (idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) {
- config1 = nhmex_mbox_alter_er(event, idx[0], false);
- goto again;
- }
- }
-
- if (alloc & 0x1)
- nhmex_mbox_put_shared_reg(box, idx[0]);
- if (alloc & 0x2)
- nhmex_mbox_put_shared_reg(box, idx[1]);
- return &constraint_empty;
-}
-
-static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
-{
- struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
- struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
-
- if (uncore_box_is_fake(box))
- return;
-
- if (reg1->alloc & 0x1)
- nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 0, 8));
- if (reg1->alloc & 0x2)
- nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 1, 8));
- reg1->alloc = 0;
-
- if (reg2->alloc) {
- nhmex_mbox_put_shared_reg(box, reg2->idx);
- reg2->alloc = 0;
- }
-}
-
-static int nhmex_mbox_extra_reg_idx(struct extra_reg *er)
-{
- if (er->idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
- return er->idx;
- return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd;
-}
-
-static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
-{
- struct intel_uncore_type *type = box->pmu->type;
- struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
- struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
- struct extra_reg *er;
- unsigned msr;
- int reg_idx = 0;
- /*
- * The mbox events may require at most 2 extra MSRs. Only the
- * lower 32 bits of these MSRs are significant, so we can use
- * config1 to pass both MSRs' config.
- */
- for (er = nhmex_uncore_mbox_extra_regs; er->msr; er++) {
- if (er->event != (event->hw.config & er->config_mask))
- continue;
- if (event->attr.config1 & ~er->valid_mask)
- return -EINVAL;
-
- msr = er->msr + type->msr_offset * box->pmu->pmu_idx;
- if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff))
- return -EINVAL;
-
-		/* always use bits 32~63 to pass the PLD config */
- if (er->idx == EXTRA_REG_NHMEX_M_PLD)
- reg_idx = 1;
- else if (WARN_ON_ONCE(reg_idx > 0))
- return -EINVAL;
-
- reg1->idx &= ~(0xff << (reg_idx * 8));
- reg1->reg &= ~(0xffff << (reg_idx * 16));
- reg1->idx |= nhmex_mbox_extra_reg_idx(er) << (reg_idx * 8);
- reg1->reg |= msr << (reg_idx * 16);
- reg1->config = event->attr.config1;
- reg_idx++;
- }
- /*
- * The mbox only provides the ability to perform address matching
- * for the PLD events.
- */
- if (reg_idx == 2) {
- reg2->idx = EXTRA_REG_NHMEX_M_FILTER;
- if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN)
- reg2->config = event->attr.config2;
- else
- reg2->config = ~0ULL;
- if (box->pmu->pmu_idx == 0)
- reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG;
- else
- reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG;
- }
- return 0;
-}
-
-static u64 nhmex_mbox_shared_reg_config(struct intel_uncore_box *box, int idx)
-{
- struct intel_uncore_extra_reg *er;
- unsigned long flags;
- u64 config;
-
- if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
- return box->shared_regs[idx].config;
-
- er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
- raw_spin_lock_irqsave(&er->lock, flags);
- config = er->config;
- raw_spin_unlock_irqrestore(&er->lock, flags);
- return config;
-}
-
-static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
- struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
- int idx;
-
- idx = __BITS_VALUE(reg1->idx, 0, 8);
- if (idx != 0xff)
- wrmsrl(__BITS_VALUE(reg1->reg, 0, 16),
- nhmex_mbox_shared_reg_config(box, idx));
- idx = __BITS_VALUE(reg1->idx, 1, 8);
- if (idx != 0xff)
- wrmsrl(__BITS_VALUE(reg1->reg, 1, 16),
- nhmex_mbox_shared_reg_config(box, idx));
-
- if (reg2->idx != EXTRA_REG_NONE) {
- wrmsrl(reg2->reg, 0);
- if (reg2->config != ~0ULL) {
- wrmsrl(reg2->reg + 1,
- reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK);
- wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK &
- (reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT));
- wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN);
- }
- }
-
- wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
-}
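nhmex_mbox_hw_config() above squeezes up to two extra-register descriptors into a single hw_perf_event_extra: two 8-bit extra-register indices share reg1->idx and two 16-bit MSR numbers share reg1->reg, which is why the enable path unpacks them with __BITS_VALUE(..., i, 8) and __BITS_VALUE(..., i, 16). Minimal pack/unpack helpers mirroring those shifts:

	#include <stdint.h>

	static void pack_slot(uint32_t *idx_word, uint32_t *msr_word,
			      int slot, uint8_t er_idx, uint16_t msr)
	{
		*idx_word &= ~(0xffu << (slot * 8));
		*idx_word |= (uint32_t)er_idx << (slot * 8);
		*msr_word &= ~(0xffffu << (slot * 16));
		*msr_word |= (uint32_t)msr << (slot * 16);
	}

	static uint8_t slot_idx(uint32_t idx_word, int slot)
	{
		return (idx_word >> (slot * 8)) & 0xff;
	}

	static uint16_t slot_msr(uint32_t msr_word, int slot)
	{
		return (msr_word >> (slot * 16)) & 0xffff;
	}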
-
-DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3");
-DEFINE_UNCORE_FORMAT_ATTR(storage_mode, storage_mode, "config:4-5");
-DEFINE_UNCORE_FORMAT_ATTR(wrap_mode, wrap_mode, "config:6");
-DEFINE_UNCORE_FORMAT_ATTR(flag_mode, flag_mode, "config:7");
-DEFINE_UNCORE_FORMAT_ATTR(inc_sel, inc_sel, "config:9-13");
-DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel, set_flag_sel, "config:19-21");
-DEFINE_UNCORE_FORMAT_ATTR(filter_cfg_en, filter_cfg_en, "config2:63");
-DEFINE_UNCORE_FORMAT_ATTR(filter_match, filter_match, "config2:0-33");
-DEFINE_UNCORE_FORMAT_ATTR(filter_mask, filter_mask, "config2:34-61");
-DEFINE_UNCORE_FORMAT_ATTR(dsp, dsp, "config1:0-31");
-DEFINE_UNCORE_FORMAT_ATTR(thr, thr, "config1:0-31");
-DEFINE_UNCORE_FORMAT_ATTR(fvc, fvc, "config1:0-31");
-DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31");
-DEFINE_UNCORE_FORMAT_ATTR(map, map, "config1:0-31");
-DEFINE_UNCORE_FORMAT_ATTR(iss, iss, "config1:0-31");
-DEFINE_UNCORE_FORMAT_ATTR(pld, pld, "config1:32-63");
-
-static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
- &format_attr_count_mode.attr,
- &format_attr_storage_mode.attr,
- &format_attr_wrap_mode.attr,
- &format_attr_flag_mode.attr,
- &format_attr_inc_sel.attr,
- &format_attr_set_flag_sel.attr,
- &format_attr_filter_cfg_en.attr,
- &format_attr_filter_match.attr,
- &format_attr_filter_mask.attr,
- &format_attr_dsp.attr,
- &format_attr_thr.attr,
- &format_attr_fvc.attr,
- &format_attr_pgt.attr,
- &format_attr_map.attr,
- &format_attr_iss.attr,
- &format_attr_pld.attr,
- NULL,
-};
-
-static struct attribute_group nhmex_uncore_mbox_format_group = {
- .name = "format",
- .attrs = nhmex_uncore_mbox_formats_attr,
-};
-
-static struct uncore_event_desc nhmex_uncore_mbox_events[] = {
- INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x2800"),
- INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x2820"),
- { /* end: all zeroes */ },
-};
-
-static struct uncore_event_desc wsmex_uncore_mbox_events[] = {
- INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x5000"),
- INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x5040"),
- { /* end: all zeroes */ },
-};
-
-static struct intel_uncore_ops nhmex_uncore_mbox_ops = {
- NHMEX_UNCORE_OPS_COMMON_INIT(),
- .enable_event = nhmex_mbox_msr_enable_event,
- .hw_config = nhmex_mbox_hw_config,
- .get_constraint = nhmex_mbox_get_constraint,
- .put_constraint = nhmex_mbox_put_constraint,
-};
-
-static struct intel_uncore_type nhmex_uncore_mbox = {
- .name = "mbox",
- .num_counters = 6,
- .num_boxes = 2,
- .perf_ctr_bits = 48,
- .event_ctl = NHMEX_M0_MSR_PMU_CTL0,
- .perf_ctr = NHMEX_M0_MSR_PMU_CNT0,
- .event_mask = NHMEX_M_PMON_RAW_EVENT_MASK,
- .box_ctl = NHMEX_M0_MSR_GLOBAL_CTL,
- .msr_offset = NHMEX_M_MSR_OFFSET,
- .pair_ctr_ctl = 1,
- .num_shared_regs = 8,
- .event_descs = nhmex_uncore_mbox_events,
- .ops = &nhmex_uncore_mbox_ops,
- .format_group = &nhmex_uncore_mbox_format_group,
-};
-
-static void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
-
- /* adjust the main event selector and extra register index */
- if (reg1->idx % 2) {
- reg1->idx--;
- hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
- } else {
- reg1->idx++;
- hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
- }
-
- /* adjust extra register config */
- switch (reg1->idx % 6) {
- case 2:
-		/* shift bits 8~15 down to bits 0~7 */
- reg1->config >>= 8;
- break;
- case 3:
-		/* shift bits 0~7 up to bits 8~15 */
- reg1->config <<= 8;
- break;
- };
-}
-
-/*
- * Each rbox has 4 event sets, which monitor QPI ports 0~3 or 4~7.
- * An event set consists of 6 events; the 3rd and 4th events in
- * an event set use the same extra register, so an event set uses
- * 5 extra registers.
- */
-static struct event_constraint *
-nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
- struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
- struct intel_uncore_extra_reg *er;
- unsigned long flags;
- int idx, er_idx;
- u64 config1;
- bool ok = false;
-
- if (!uncore_box_is_fake(box) && reg1->alloc)
- return NULL;
-
- idx = reg1->idx % 6;
- config1 = reg1->config;
-again:
- er_idx = idx;
- /* the 3rd and 4th events use the same extra register */
- if (er_idx > 2)
- er_idx--;
- er_idx += (reg1->idx / 6) * 5;
-
- er = &box->shared_regs[er_idx];
- raw_spin_lock_irqsave(&er->lock, flags);
- if (idx < 2) {
- if (!atomic_read(&er->ref) || er->config == reg1->config) {
- atomic_inc(&er->ref);
- er->config = reg1->config;
- ok = true;
- }
- } else if (idx == 2 || idx == 3) {
- /*
- * these two events use different fields in an extra register:
- * bits 0~7 and bits 8~15 respectively.
- */
- u64 mask = 0xff << ((idx - 2) * 8);
- if (!__BITS_VALUE(atomic_read(&er->ref), idx - 2, 8) ||
- !((er->config ^ config1) & mask)) {
- atomic_add(1 << ((idx - 2) * 8), &er->ref);
- er->config &= ~mask;
- er->config |= config1 & mask;
- ok = true;
- }
- } else {
- if (!atomic_read(&er->ref) ||
- (er->config == (hwc->config >> 32) &&
- er->config1 == reg1->config &&
- er->config2 == reg2->config)) {
- atomic_inc(&er->ref);
- er->config = (hwc->config >> 32);
- er->config1 = reg1->config;
- er->config2 = reg2->config;
- ok = true;
- }
- }
- raw_spin_unlock_irqrestore(&er->lock, flags);
-
- if (!ok) {
- /*
- * The Rbox events are always in pairs. The paired
- * events are functionally identical, but use different
- * extra registers. If we failed to take an extra
- * register, try the alternative.
- */
- idx ^= 1;
- if (idx != reg1->idx % 6) {
- if (idx == 2)
- config1 >>= 8;
- else if (idx == 3)
- config1 <<= 8;
- goto again;
- }
- } else {
- if (!uncore_box_is_fake(box)) {
- if (idx != reg1->idx % 6)
- nhmex_rbox_alter_er(box, event);
- reg1->alloc = 1;
- }
- return NULL;
- }
- return &constraint_empty;
-}
-
-static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
-{
- struct intel_uncore_extra_reg *er;
- struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
- int idx, er_idx;
-
- if (uncore_box_is_fake(box) || !reg1->alloc)
- return;
-
- idx = reg1->idx % 6;
- er_idx = idx;
- if (er_idx > 2)
- er_idx--;
- er_idx += (reg1->idx / 6) * 5;
-
- er = &box->shared_regs[er_idx];
- if (idx == 2 || idx == 3)
- atomic_sub(1 << ((idx - 2) * 8), &er->ref);
- else
- atomic_dec(&er->ref);
-
- reg1->alloc = 0;
-}
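The R-box functions above and below all decode reg1->idx the same way: which of the 4 event sets (idx / 6) and which slot within the set (idx % 6). Slots 2 and 3 share one extra register, so the shared-register index collapses them to 5 registers per set, and the monitored port is the set number plus 4 per rbox instance. A plain decoding helper restating that arithmetic:

	struct rbox_slot {
		int set;	/* which of the 4 event sets      */
		int slot;	/* position within the set (0..5) */
		int er_idx;	/* shared-register index          */
		int port;	/* port driven by this event set  */
	};

	static struct rbox_slot rbox_decode(int idx, int pmu_idx)
	{
		struct rbox_slot s;

		s.set  = idx / 6;
		s.slot = idx % 6;
		/* slots 2 and 3 share an extra register: 5 registers per set */
		s.er_idx = (s.slot > 2 ? s.slot - 1 : s.slot) + s.set * 5;
		s.port = s.set + pmu_idx * 4;
		return s;
	}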
-
-static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
- struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
- int idx;
-
- idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >>
- NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
- if (idx >= 0x18)
- return -EINVAL;
-
- reg1->idx = idx;
- reg1->config = event->attr.config1;
-
- switch (idx % 6) {
- case 4:
- case 5:
- hwc->config |= event->attr.config & (~0ULL << 32);
- reg2->config = event->attr.config2;
- break;
- };
- return 0;
-}
-
-static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
- struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
- int idx, port;
-
- idx = reg1->idx;
- port = idx / 6 + box->pmu->pmu_idx * 4;
-
- switch (idx % 6) {
- case 0:
- wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config);
- break;
- case 1:
- wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config);
- break;
- case 2:
- case 3:
- wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port),
- uncore_shared_reg_config(box, 2 + (idx / 6) * 5));
- break;
- case 4:
- wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port),
- hwc->config >> 32);
- wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config);
- wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config);
- break;
- case 5:
- wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port),
- hwc->config >> 32);
- wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config);
- wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config);
- break;
- };
-
- wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
- (hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK));
-}
-
-DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config:32-63");
-DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config1:0-63");
-DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63");
-DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15");
-DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31");
-
-static struct attribute *nhmex_uncore_rbox_formats_attr[] = {
- &format_attr_event5.attr,
- &format_attr_xbr_mm_cfg.attr,
- &format_attr_xbr_match.attr,
- &format_attr_xbr_mask.attr,
- &format_attr_qlx_cfg.attr,
- &format_attr_iperf_cfg.attr,
- NULL,
-};
-
-static struct attribute_group nhmex_uncore_rbox_format_group = {
- .name = "format",
- .attrs = nhmex_uncore_rbox_formats_attr,
-};
-
-static struct uncore_event_desc nhmex_uncore_rbox_events[] = {
- INTEL_UNCORE_EVENT_DESC(qpi0_flit_send, "event=0x0,iperf_cfg=0x80000000"),
- INTEL_UNCORE_EVENT_DESC(qpi1_filt_send, "event=0x6,iperf_cfg=0x80000000"),
- INTEL_UNCORE_EVENT_DESC(qpi0_idle_filt, "event=0x0,iperf_cfg=0x40000000"),
- INTEL_UNCORE_EVENT_DESC(qpi1_idle_filt, "event=0x6,iperf_cfg=0x40000000"),
- INTEL_UNCORE_EVENT_DESC(qpi0_date_response, "event=0x0,iperf_cfg=0xc4"),
- INTEL_UNCORE_EVENT_DESC(qpi1_date_response, "event=0x6,iperf_cfg=0xc4"),
- { /* end: all zeroes */ },
-};
-
-static struct intel_uncore_ops nhmex_uncore_rbox_ops = {
- NHMEX_UNCORE_OPS_COMMON_INIT(),
- .enable_event = nhmex_rbox_msr_enable_event,
- .hw_config = nhmex_rbox_hw_config,
- .get_constraint = nhmex_rbox_get_constraint,
- .put_constraint = nhmex_rbox_put_constraint,
-};
-
-static struct intel_uncore_type nhmex_uncore_rbox = {
- .name = "rbox",
- .num_counters = 8,
- .num_boxes = 2,
- .perf_ctr_bits = 48,
- .event_ctl = NHMEX_R_MSR_PMON_CTL0,
- .perf_ctr = NHMEX_R_MSR_PMON_CNT0,
- .event_mask = NHMEX_R_PMON_RAW_EVENT_MASK,
- .box_ctl = NHMEX_R_MSR_GLOBAL_CTL,
- .msr_offset = NHMEX_R_MSR_OFFSET,
- .pair_ctr_ctl = 1,
- .num_shared_regs = 20,
- .event_descs = nhmex_uncore_rbox_events,
- .ops = &nhmex_uncore_rbox_ops,
- .format_group = &nhmex_uncore_rbox_format_group
-};
-
-static struct intel_uncore_type *nhmex_msr_uncores[] = {
- &nhmex_uncore_ubox,
- &nhmex_uncore_cbox,
- &nhmex_uncore_bbox,
- &nhmex_uncore_sbox,
- &nhmex_uncore_mbox,
- &nhmex_uncore_rbox,
- &nhmex_uncore_wbox,
- NULL,
-};
-/* end of Nehalem-EX uncore support */
-
static void uncore_assign_hw_event(struct intel_uncore_box *box, struct perf_event *event, int idx)
{
struct hw_perf_event *hwc = &event->hw;
@@ -3140,7 +170,7 @@ static void uncore_assign_hw_event(struct intel_uncore_box *box, struct perf_eve
hwc->event_base = uncore_perf_ctr(box, hwc->idx);
}
-static void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
+void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
{
u64 prev_count, new_count, delta;
int shift;
@@ -3201,14 +231,14 @@ static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
return HRTIMER_RESTART;
}
-static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
+void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
{
__hrtimer_start_range_ns(&box->hrtimer,
ns_to_ktime(box->hrtimer_duration), 0,
HRTIMER_MODE_REL_PINNED, 0);
}
-static void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
+void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
{
hrtimer_cancel(&box->hrtimer);
}
@@ -3291,7 +321,7 @@ uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *eve
}
if (event->attr.config == UNCORE_FIXED_EVENT)
- return &constraint_fixed;
+ return &uncore_constraint_fixed;
if (type->constraints) {
for_each_event_constraint(c, type->constraints) {
@@ -3496,7 +526,7 @@ static void uncore_pmu_event_del(struct perf_event *event, int flags)
event->hw.last_tag = ~0ULL;
}
-static void uncore_pmu_event_read(struct perf_event *event)
+void uncore_pmu_event_read(struct perf_event *event)
{
struct intel_uncore_box *box = uncore_event_to_box(event);
uncore_perf_event_update(box, event);
@@ -3635,7 +665,7 @@ static struct attribute_group uncore_pmu_attr_group = {
.attrs = uncore_pmu_attrs,
};
-static int __init uncore_pmu_register(struct intel_uncore_pmu *pmu)
+static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
{
int ret;
@@ -3758,9 +788,6 @@ fail:
return ret;
}
-static struct pci_driver *uncore_pci_driver;
-static bool pcidrv_registered;
-
/*
* add a pci uncore device
*/
@@ -3770,18 +797,20 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
struct intel_uncore_box *box;
struct intel_uncore_type *type;
int phys_id;
+ bool first_box = false;
- phys_id = pcibus_to_physid[pdev->bus->number];
+ phys_id = uncore_pcibus_to_physid[pdev->bus->number];
if (phys_id < 0)
return -ENODEV;
if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
- extra_pci_dev[phys_id][UNCORE_PCI_DEV_IDX(id->driver_data)] = pdev;
+ int idx = UNCORE_PCI_DEV_IDX(id->driver_data);
+ uncore_extra_pci_dev[phys_id][idx] = pdev;
pci_set_drvdata(pdev, NULL);
return 0;
}
- type = pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
+ type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
box = uncore_alloc_box(type, NUMA_NO_NODE);
if (!box)
return -ENOMEM;
@@ -3803,9 +832,13 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
pci_set_drvdata(pdev, box);
raw_spin_lock(&uncore_box_lock);
+ if (list_empty(&pmu->box_list))
+ first_box = true;
list_add_tail(&box->list, &pmu->box_list);
raw_spin_unlock(&uncore_box_lock);
+ if (first_box)
+ uncore_pmu_register(pmu);
return 0;
}
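
The first_box handling added here is lazy registration: the uncore PMU is registered with the perf core only when the first PCI box of its type is probed, and the remove path below mirrors it by unregistering when the last box goes away, which lines up with the uncore_pmus_register() hunk further down that drops the pci_uncores loop. A small self-contained sketch of the pattern, using hypothetical demo_* names and a plain counter instead of the box list (locking omitted):

#include <stdbool.h>

struct demo_pmu {
	int  nr_boxes;		/* stands in for pmu->box_list above */
	bool registered;
};

static void demo_register(struct demo_pmu *pmu)   { pmu->registered = true;  }
static void demo_unregister(struct demo_pmu *pmu) { pmu->registered = false; }

/* Register with the core on the first box, unregister on the last one. */
static void demo_box_add(struct demo_pmu *pmu)
{
	if (pmu->nr_boxes++ == 0)
		demo_register(pmu);
}

static void demo_box_del(struct demo_pmu *pmu)
{
	if (--pmu->nr_boxes == 0)
		demo_unregister(pmu);
}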
@@ -3813,13 +846,14 @@ static void uncore_pci_remove(struct pci_dev *pdev)
{
struct intel_uncore_box *box = pci_get_drvdata(pdev);
struct intel_uncore_pmu *pmu;
- int i, cpu, phys_id = pcibus_to_physid[pdev->bus->number];
+ int i, cpu, phys_id = uncore_pcibus_to_physid[pdev->bus->number];
+ bool last_box = false;
box = pci_get_drvdata(pdev);
if (!box) {
for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
- if (extra_pci_dev[phys_id][i] == pdev) {
- extra_pci_dev[phys_id][i] = NULL;
+ if (uncore_extra_pci_dev[phys_id][i] == pdev) {
+ uncore_extra_pci_dev[phys_id][i] = NULL;
break;
}
}
@@ -3835,6 +869,8 @@ static void uncore_pci_remove(struct pci_dev *pdev)
raw_spin_lock(&uncore_box_lock);
list_del(&box->list);
+ if (list_empty(&pmu->box_list))
+ last_box = true;
raw_spin_unlock(&uncore_box_lock);
for_each_possible_cpu(cpu) {
@@ -3846,6 +882,9 @@ static void uncore_pci_remove(struct pci_dev *pdev)
WARN_ON_ONCE(atomic_read(&box->refcnt) != 1);
kfree(box);
+
+ if (last_box)
+ perf_pmu_unregister(&pmu->pmu);
}
static int __init uncore_pci_init(void)
@@ -3854,46 +893,32 @@ static int __init uncore_pci_init(void)
switch (boot_cpu_data.x86_model) {
case 45: /* Sandy Bridge-EP */
- ret = snbep_pci2phy_map_init(0x3ce0);
- if (ret)
- return ret;
- pci_uncores = snbep_pci_uncores;
- uncore_pci_driver = &snbep_uncore_pci_driver;
+ ret = snbep_uncore_pci_init();
break;
- case 62: /* IvyTown */
- ret = snbep_pci2phy_map_init(0x0e1e);
- if (ret)
- return ret;
- pci_uncores = ivt_pci_uncores;
- uncore_pci_driver = &ivt_uncore_pci_driver;
+ case 62: /* Ivy Bridge-EP */
+ ret = ivbep_uncore_pci_init();
+ break;
+ case 63: /* Haswell-EP */
+ ret = hswep_uncore_pci_init();
break;
case 42: /* Sandy Bridge */
- ret = snb_pci2phy_map_init(PCI_DEVICE_ID_INTEL_SNB_IMC);
- if (ret)
- return ret;
- pci_uncores = snb_pci_uncores;
- uncore_pci_driver = &snb_uncore_pci_driver;
+ ret = snb_uncore_pci_init();
break;
case 58: /* Ivy Bridge */
- ret = snb_pci2phy_map_init(PCI_DEVICE_ID_INTEL_IVB_IMC);
- if (ret)
- return ret;
- pci_uncores = snb_pci_uncores;
- uncore_pci_driver = &ivb_uncore_pci_driver;
+ ret = ivb_uncore_pci_init();
break;
case 60: /* Haswell */
case 69: /* Haswell Celeron */
- ret = snb_pci2phy_map_init(PCI_DEVICE_ID_INTEL_HSW_IMC);
- if (ret)
- return ret;
- pci_uncores = snb_pci_uncores;
- uncore_pci_driver = &hsw_uncore_pci_driver;
+ ret = hsw_uncore_pci_init();
break;
default:
return 0;
}
- ret = uncore_types_init(pci_uncores);
+ if (ret)
+ return ret;
+
+ ret = uncore_types_init(uncore_pci_uncores);
if (ret)
return ret;
@@ -3904,7 +929,7 @@ static int __init uncore_pci_init(void)
if (ret == 0)
pcidrv_registered = true;
else
- uncore_types_exit(pci_uncores);
+ uncore_types_exit(uncore_pci_uncores);
return ret;
}
@@ -3914,7 +939,7 @@ static void __init uncore_pci_exit(void)
if (pcidrv_registered) {
pcidrv_registered = false;
pci_unregister_driver(uncore_pci_driver);
- uncore_types_exit(pci_uncores);
+ uncore_types_exit(uncore_pci_uncores);
}
}
@@ -3940,8 +965,8 @@ static void uncore_cpu_dying(int cpu)
struct intel_uncore_box *box;
int i, j;
- for (i = 0; msr_uncores[i]; i++) {
- type = msr_uncores[i];
+ for (i = 0; uncore_msr_uncores[i]; i++) {
+ type = uncore_msr_uncores[i];
for (j = 0; j < type->num_boxes; j++) {
pmu = &type->pmus[j];
box = *per_cpu_ptr(pmu->box, cpu);
@@ -3961,8 +986,8 @@ static int uncore_cpu_starting(int cpu)
phys_id = topology_physical_package_id(cpu);
- for (i = 0; msr_uncores[i]; i++) {
- type = msr_uncores[i];
+ for (i = 0; uncore_msr_uncores[i]; i++) {
+ type = uncore_msr_uncores[i];
for (j = 0; j < type->num_boxes; j++) {
pmu = &type->pmus[j];
box = *per_cpu_ptr(pmu->box, cpu);
@@ -4002,8 +1027,8 @@ static int uncore_cpu_prepare(int cpu, int phys_id)
struct intel_uncore_box *box;
int i, j;
- for (i = 0; msr_uncores[i]; i++) {
- type = msr_uncores[i];
+ for (i = 0; uncore_msr_uncores[i]; i++) {
+ type = uncore_msr_uncores[i];
for (j = 0; j < type->num_boxes; j++) {
pmu = &type->pmus[j];
if (pmu->func_id < 0)
@@ -4083,8 +1108,8 @@ static void uncore_event_exit_cpu(int cpu)
if (target >= 0)
cpumask_set_cpu(target, &uncore_cpu_mask);
- uncore_change_context(msr_uncores, cpu, target);
- uncore_change_context(pci_uncores, cpu, target);
+ uncore_change_context(uncore_msr_uncores, cpu, target);
+ uncore_change_context(uncore_pci_uncores, cpu, target);
}
static void uncore_event_init_cpu(int cpu)
@@ -4099,8 +1124,8 @@ static void uncore_event_init_cpu(int cpu)
cpumask_set_cpu(cpu, &uncore_cpu_mask);
- uncore_change_context(msr_uncores, -1, cpu);
- uncore_change_context(pci_uncores, -1, cpu);
+ uncore_change_context(uncore_msr_uncores, -1, cpu);
+ uncore_change_context(uncore_pci_uncores, -1, cpu);
}
static int uncore_cpu_notifier(struct notifier_block *self,
@@ -4160,47 +1185,37 @@ static void __init uncore_cpu_setup(void *dummy)
static int __init uncore_cpu_init(void)
{
- int ret, max_cores;
+ int ret;
- max_cores = boot_cpu_data.x86_max_cores;
switch (boot_cpu_data.x86_model) {
case 26: /* Nehalem */
case 30:
case 37: /* Westmere */
case 44:
- msr_uncores = nhm_msr_uncores;
+ nhm_uncore_cpu_init();
break;
case 42: /* Sandy Bridge */
case 58: /* Ivy Bridge */
- if (snb_uncore_cbox.num_boxes > max_cores)
- snb_uncore_cbox.num_boxes = max_cores;
- msr_uncores = snb_msr_uncores;
+ snb_uncore_cpu_init();
break;
case 45: /* Sandy Bridge-EP */
- if (snbep_uncore_cbox.num_boxes > max_cores)
- snbep_uncore_cbox.num_boxes = max_cores;
- msr_uncores = snbep_msr_uncores;
+ snbep_uncore_cpu_init();
break;
case 46: /* Nehalem-EX */
- uncore_nhmex = true;
case 47: /* Westmere-EX aka. Xeon E7 */
- if (!uncore_nhmex)
- nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events;
- if (nhmex_uncore_cbox.num_boxes > max_cores)
- nhmex_uncore_cbox.num_boxes = max_cores;
- msr_uncores = nhmex_msr_uncores;
+ nhmex_uncore_cpu_init();
break;
- case 62: /* IvyTown */
- if (ivt_uncore_cbox.num_boxes > max_cores)
- ivt_uncore_cbox.num_boxes = max_cores;
- msr_uncores = ivt_msr_uncores;
+ case 62: /* Ivy Bridge-EP */
+ ivbep_uncore_cpu_init();
+ break;
+ case 63: /* Haswell-EP */
+ hswep_uncore_cpu_init();
break;
-
default:
return 0;
}
- ret = uncore_types_init(msr_uncores);
+ ret = uncore_types_init(uncore_msr_uncores);
if (ret)
return ret;
@@ -4213,16 +1228,8 @@ static int __init uncore_pmus_register(void)
struct intel_uncore_type *type;
int i, j;
- for (i = 0; msr_uncores[i]; i++) {
- type = msr_uncores[i];
- for (j = 0; j < type->num_boxes; j++) {
- pmu = &type->pmus[j];
- uncore_pmu_register(pmu);
- }
- }
-
- for (i = 0; pci_uncores[i]; i++) {
- type = pci_uncores[i];
+ for (i = 0; uncore_msr_uncores[i]; i++) {
+ type = uncore_msr_uncores[i];
for (j = 0; j < type->num_boxes; j++) {
pmu = &type->pmus[j];
uncore_pmu_register(pmu);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
index 90236f0c94a9..18eb78bbdd10 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
@@ -24,395 +24,6 @@
#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)
-/* SNB event control */
-#define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff
-#define SNB_UNC_CTL_UMASK_MASK 0x0000ff00
-#define SNB_UNC_CTL_EDGE_DET (1 << 18)
-#define SNB_UNC_CTL_EN (1 << 22)
-#define SNB_UNC_CTL_INVERT (1 << 23)
-#define SNB_UNC_CTL_CMASK_MASK 0x1f000000
-#define NHM_UNC_CTL_CMASK_MASK 0xff000000
-#define NHM_UNC_FIXED_CTR_CTL_EN (1 << 0)
-
-#define SNB_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \
- SNB_UNC_CTL_UMASK_MASK | \
- SNB_UNC_CTL_EDGE_DET | \
- SNB_UNC_CTL_INVERT | \
- SNB_UNC_CTL_CMASK_MASK)
-
-#define NHM_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \
- SNB_UNC_CTL_UMASK_MASK | \
- SNB_UNC_CTL_EDGE_DET | \
- SNB_UNC_CTL_INVERT | \
- NHM_UNC_CTL_CMASK_MASK)
-
-/* SNB global control register */
-#define SNB_UNC_PERF_GLOBAL_CTL 0x391
-#define SNB_UNC_FIXED_CTR_CTRL 0x394
-#define SNB_UNC_FIXED_CTR 0x395
-
-/* SNB uncore global control */
-#define SNB_UNC_GLOBAL_CTL_CORE_ALL ((1 << 4) - 1)
-#define SNB_UNC_GLOBAL_CTL_EN (1 << 29)
-
-/* SNB Cbo register */
-#define SNB_UNC_CBO_0_PERFEVTSEL0 0x700
-#define SNB_UNC_CBO_0_PER_CTR0 0x706
-#define SNB_UNC_CBO_MSR_OFFSET 0x10
-
-/* NHM global control register */
-#define NHM_UNC_PERF_GLOBAL_CTL 0x391
-#define NHM_UNC_FIXED_CTR 0x394
-#define NHM_UNC_FIXED_CTR_CTRL 0x395
-
-/* NHM uncore global control */
-#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL ((1ULL << 8) - 1)
-#define NHM_UNC_GLOBAL_CTL_EN_FC (1ULL << 32)
-
-/* NHM uncore register */
-#define NHM_UNC_PERFEVTSEL0 0x3c0
-#define NHM_UNC_UNCORE_PMC0 0x3b0
-
-/* SNB-EP Box level control */
-#define SNBEP_PMON_BOX_CTL_RST_CTRL (1 << 0)
-#define SNBEP_PMON_BOX_CTL_RST_CTRS (1 << 1)
-#define SNBEP_PMON_BOX_CTL_FRZ (1 << 8)
-#define SNBEP_PMON_BOX_CTL_FRZ_EN (1 << 16)
-#define SNBEP_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \
- SNBEP_PMON_BOX_CTL_RST_CTRS | \
- SNBEP_PMON_BOX_CTL_FRZ_EN)
-/* SNB-EP event control */
-#define SNBEP_PMON_CTL_EV_SEL_MASK 0x000000ff
-#define SNBEP_PMON_CTL_UMASK_MASK 0x0000ff00
-#define SNBEP_PMON_CTL_RST (1 << 17)
-#define SNBEP_PMON_CTL_EDGE_DET (1 << 18)
-#define SNBEP_PMON_CTL_EV_SEL_EXT (1 << 21)
-#define SNBEP_PMON_CTL_EN (1 << 22)
-#define SNBEP_PMON_CTL_INVERT (1 << 23)
-#define SNBEP_PMON_CTL_TRESH_MASK 0xff000000
-#define SNBEP_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \
- SNBEP_PMON_CTL_UMASK_MASK | \
- SNBEP_PMON_CTL_EDGE_DET | \
- SNBEP_PMON_CTL_INVERT | \
- SNBEP_PMON_CTL_TRESH_MASK)
-
-/* SNB-EP Ubox event control */
-#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK 0x1f000000
-#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK \
- (SNBEP_PMON_CTL_EV_SEL_MASK | \
- SNBEP_PMON_CTL_UMASK_MASK | \
- SNBEP_PMON_CTL_EDGE_DET | \
- SNBEP_PMON_CTL_INVERT | \
- SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
-
-#define SNBEP_CBO_PMON_CTL_TID_EN (1 << 19)
-#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \
- SNBEP_CBO_PMON_CTL_TID_EN)
-
-/* SNB-EP PCU event control */
-#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK 0x0000c000
-#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK 0x1f000000
-#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT (1 << 30)
-#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET (1 << 31)
-#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK \
- (SNBEP_PMON_CTL_EV_SEL_MASK | \
- SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
- SNBEP_PMON_CTL_EDGE_DET | \
- SNBEP_PMON_CTL_EV_SEL_EXT | \
- SNBEP_PMON_CTL_INVERT | \
- SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
- SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
- SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
-
-#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK \
- (SNBEP_PMON_RAW_EVENT_MASK | \
- SNBEP_PMON_CTL_EV_SEL_EXT)
-
-/* SNB-EP pci control register */
-#define SNBEP_PCI_PMON_BOX_CTL 0xf4
-#define SNBEP_PCI_PMON_CTL0 0xd8
-/* SNB-EP pci counter register */
-#define SNBEP_PCI_PMON_CTR0 0xa0
-
-/* SNB-EP home agent register */
-#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0 0x40
-#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1 0x44
-#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH 0x48
-/* SNB-EP memory controller register */
-#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL 0xf0
-#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR 0xd0
-/* SNB-EP QPI register */
-#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0 0x228
-#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1 0x22c
-#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0 0x238
-#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1 0x23c
-
-/* SNB-EP Ubox register */
-#define SNBEP_U_MSR_PMON_CTR0 0xc16
-#define SNBEP_U_MSR_PMON_CTL0 0xc10
-
-#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL 0xc08
-#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR 0xc09
-
-/* SNB-EP Cbo register */
-#define SNBEP_C0_MSR_PMON_CTR0 0xd16
-#define SNBEP_C0_MSR_PMON_CTL0 0xd10
-#define SNBEP_C0_MSR_PMON_BOX_CTL 0xd04
-#define SNBEP_C0_MSR_PMON_BOX_FILTER 0xd14
-#define SNBEP_CBO_MSR_OFFSET 0x20
-
-#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID 0x1f
-#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID 0x3fc00
-#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE 0x7c0000
-#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC 0xff800000
-
-#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) { \
- .event = (e), \
- .msr = SNBEP_C0_MSR_PMON_BOX_FILTER, \
- .config_mask = (m), \
- .idx = (i) \
-}
-
-/* SNB-EP PCU register */
-#define SNBEP_PCU_MSR_PMON_CTR0 0xc36
-#define SNBEP_PCU_MSR_PMON_CTL0 0xc30
-#define SNBEP_PCU_MSR_PMON_BOX_CTL 0xc24
-#define SNBEP_PCU_MSR_PMON_BOX_FILTER 0xc34
-#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK 0xffffffff
-#define SNBEP_PCU_MSR_CORE_C3_CTR 0x3fc
-#define SNBEP_PCU_MSR_CORE_C6_CTR 0x3fd
-
-/* IVT event control */
-#define IVT_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \
- SNBEP_PMON_BOX_CTL_RST_CTRS)
-#define IVT_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \
- SNBEP_PMON_CTL_UMASK_MASK | \
- SNBEP_PMON_CTL_EDGE_DET | \
- SNBEP_PMON_CTL_TRESH_MASK)
-/* IVT Ubox */
-#define IVT_U_MSR_PMON_GLOBAL_CTL 0xc00
-#define IVT_U_PMON_GLOBAL_FRZ_ALL (1 << 31)
-#define IVT_U_PMON_GLOBAL_UNFRZ_ALL (1 << 29)
-
-#define IVT_U_MSR_PMON_RAW_EVENT_MASK \
- (SNBEP_PMON_CTL_EV_SEL_MASK | \
- SNBEP_PMON_CTL_UMASK_MASK | \
- SNBEP_PMON_CTL_EDGE_DET | \
- SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
-/* IVT Cbo */
-#define IVT_CBO_MSR_PMON_RAW_EVENT_MASK (IVT_PMON_RAW_EVENT_MASK | \
- SNBEP_CBO_PMON_CTL_TID_EN)
-
-#define IVT_CB0_MSR_PMON_BOX_FILTER_TID (0x1fULL << 0)
-#define IVT_CB0_MSR_PMON_BOX_FILTER_LINK (0xfULL << 5)
-#define IVT_CB0_MSR_PMON_BOX_FILTER_STATE (0x3fULL << 17)
-#define IVT_CB0_MSR_PMON_BOX_FILTER_NID (0xffffULL << 32)
-#define IVT_CB0_MSR_PMON_BOX_FILTER_OPC (0x1ffULL << 52)
-#define IVT_CB0_MSR_PMON_BOX_FILTER_C6 (0x1ULL << 61)
-#define IVT_CB0_MSR_PMON_BOX_FILTER_NC (0x1ULL << 62)
-#define IVT_CB0_MSR_PMON_BOX_FILTER_IOSC (0x1ULL << 63)
-
-/* IVT home agent */
-#define IVT_HA_PCI_PMON_CTL_Q_OCC_RST (1 << 16)
-#define IVT_HA_PCI_PMON_RAW_EVENT_MASK \
- (IVT_PMON_RAW_EVENT_MASK | \
- IVT_HA_PCI_PMON_CTL_Q_OCC_RST)
-/* IVT PCU */
-#define IVT_PCU_MSR_PMON_RAW_EVENT_MASK \
- (SNBEP_PMON_CTL_EV_SEL_MASK | \
- SNBEP_PMON_CTL_EV_SEL_EXT | \
- SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
- SNBEP_PMON_CTL_EDGE_DET | \
- SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
- SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
- SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
-/* IVT QPI */
-#define IVT_QPI_PCI_PMON_RAW_EVENT_MASK \
- (IVT_PMON_RAW_EVENT_MASK | \
- SNBEP_PMON_CTL_EV_SEL_EXT)
-
-/* NHM-EX event control */
-#define NHMEX_PMON_CTL_EV_SEL_MASK 0x000000ff
-#define NHMEX_PMON_CTL_UMASK_MASK 0x0000ff00
-#define NHMEX_PMON_CTL_EN_BIT0 (1 << 0)
-#define NHMEX_PMON_CTL_EDGE_DET (1 << 18)
-#define NHMEX_PMON_CTL_PMI_EN (1 << 20)
-#define NHMEX_PMON_CTL_EN_BIT22 (1 << 22)
-#define NHMEX_PMON_CTL_INVERT (1 << 23)
-#define NHMEX_PMON_CTL_TRESH_MASK 0xff000000
-#define NHMEX_PMON_RAW_EVENT_MASK (NHMEX_PMON_CTL_EV_SEL_MASK | \
- NHMEX_PMON_CTL_UMASK_MASK | \
- NHMEX_PMON_CTL_EDGE_DET | \
- NHMEX_PMON_CTL_INVERT | \
- NHMEX_PMON_CTL_TRESH_MASK)
-
-/* NHM-EX Ubox */
-#define NHMEX_U_MSR_PMON_GLOBAL_CTL 0xc00
-#define NHMEX_U_MSR_PMON_CTR 0xc11
-#define NHMEX_U_MSR_PMON_EV_SEL 0xc10
-
-#define NHMEX_U_PMON_GLOBAL_EN (1 << 0)
-#define NHMEX_U_PMON_GLOBAL_PMI_CORE_SEL 0x0000001e
-#define NHMEX_U_PMON_GLOBAL_EN_ALL (1 << 28)
-#define NHMEX_U_PMON_GLOBAL_RST_ALL (1 << 29)
-#define NHMEX_U_PMON_GLOBAL_FRZ_ALL (1 << 31)
-
-#define NHMEX_U_PMON_RAW_EVENT_MASK \
- (NHMEX_PMON_CTL_EV_SEL_MASK | \
- NHMEX_PMON_CTL_EDGE_DET)
-
-/* NHM-EX Cbox */
-#define NHMEX_C0_MSR_PMON_GLOBAL_CTL 0xd00
-#define NHMEX_C0_MSR_PMON_CTR0 0xd11
-#define NHMEX_C0_MSR_PMON_EV_SEL0 0xd10
-#define NHMEX_C_MSR_OFFSET 0x20
-
-/* NHM-EX Bbox */
-#define NHMEX_B0_MSR_PMON_GLOBAL_CTL 0xc20
-#define NHMEX_B0_MSR_PMON_CTR0 0xc31
-#define NHMEX_B0_MSR_PMON_CTL0 0xc30
-#define NHMEX_B_MSR_OFFSET 0x40
-#define NHMEX_B0_MSR_MATCH 0xe45
-#define NHMEX_B0_MSR_MASK 0xe46
-#define NHMEX_B1_MSR_MATCH 0xe4d
-#define NHMEX_B1_MSR_MASK 0xe4e
-
-#define NHMEX_B_PMON_CTL_EN (1 << 0)
-#define NHMEX_B_PMON_CTL_EV_SEL_SHIFT 1
-#define NHMEX_B_PMON_CTL_EV_SEL_MASK \
- (0x1f << NHMEX_B_PMON_CTL_EV_SEL_SHIFT)
-#define NHMEX_B_PMON_CTR_SHIFT 6
-#define NHMEX_B_PMON_CTR_MASK \
- (0x3 << NHMEX_B_PMON_CTR_SHIFT)
-#define NHMEX_B_PMON_RAW_EVENT_MASK \
- (NHMEX_B_PMON_CTL_EV_SEL_MASK | \
- NHMEX_B_PMON_CTR_MASK)
-
-/* NHM-EX Sbox */
-#define NHMEX_S0_MSR_PMON_GLOBAL_CTL 0xc40
-#define NHMEX_S0_MSR_PMON_CTR0 0xc51
-#define NHMEX_S0_MSR_PMON_CTL0 0xc50
-#define NHMEX_S_MSR_OFFSET 0x80
-#define NHMEX_S0_MSR_MM_CFG 0xe48
-#define NHMEX_S0_MSR_MATCH 0xe49
-#define NHMEX_S0_MSR_MASK 0xe4a
-#define NHMEX_S1_MSR_MM_CFG 0xe58
-#define NHMEX_S1_MSR_MATCH 0xe59
-#define NHMEX_S1_MSR_MASK 0xe5a
-
-#define NHMEX_S_PMON_MM_CFG_EN (0x1ULL << 63)
-#define NHMEX_S_EVENT_TO_R_PROG_EV 0
-
-/* NHM-EX Mbox */
-#define NHMEX_M0_MSR_GLOBAL_CTL 0xca0
-#define NHMEX_M0_MSR_PMU_DSP 0xca5
-#define NHMEX_M0_MSR_PMU_ISS 0xca6
-#define NHMEX_M0_MSR_PMU_MAP 0xca7
-#define NHMEX_M0_MSR_PMU_MSC_THR 0xca8
-#define NHMEX_M0_MSR_PMU_PGT 0xca9
-#define NHMEX_M0_MSR_PMU_PLD 0xcaa
-#define NHMEX_M0_MSR_PMU_ZDP_CTL_FVC 0xcab
-#define NHMEX_M0_MSR_PMU_CTL0 0xcb0
-#define NHMEX_M0_MSR_PMU_CNT0 0xcb1
-#define NHMEX_M_MSR_OFFSET 0x40
-#define NHMEX_M0_MSR_PMU_MM_CFG 0xe54
-#define NHMEX_M1_MSR_PMU_MM_CFG 0xe5c
-
-#define NHMEX_M_PMON_MM_CFG_EN (1ULL << 63)
-#define NHMEX_M_PMON_ADDR_MATCH_MASK 0x3ffffffffULL
-#define NHMEX_M_PMON_ADDR_MASK_MASK 0x7ffffffULL
-#define NHMEX_M_PMON_ADDR_MASK_SHIFT 34
-
-#define NHMEX_M_PMON_CTL_EN (1 << 0)
-#define NHMEX_M_PMON_CTL_PMI_EN (1 << 1)
-#define NHMEX_M_PMON_CTL_COUNT_MODE_SHIFT 2
-#define NHMEX_M_PMON_CTL_COUNT_MODE_MASK \
- (0x3 << NHMEX_M_PMON_CTL_COUNT_MODE_SHIFT)
-#define NHMEX_M_PMON_CTL_STORAGE_MODE_SHIFT 4
-#define NHMEX_M_PMON_CTL_STORAGE_MODE_MASK \
- (0x3 << NHMEX_M_PMON_CTL_STORAGE_MODE_SHIFT)
-#define NHMEX_M_PMON_CTL_WRAP_MODE (1 << 6)
-#define NHMEX_M_PMON_CTL_FLAG_MODE (1 << 7)
-#define NHMEX_M_PMON_CTL_INC_SEL_SHIFT 9
-#define NHMEX_M_PMON_CTL_INC_SEL_MASK \
- (0x1f << NHMEX_M_PMON_CTL_INC_SEL_SHIFT)
-#define NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT 19
-#define NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK \
- (0x7 << NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT)
-#define NHMEX_M_PMON_RAW_EVENT_MASK \
- (NHMEX_M_PMON_CTL_COUNT_MODE_MASK | \
- NHMEX_M_PMON_CTL_STORAGE_MODE_MASK | \
- NHMEX_M_PMON_CTL_WRAP_MODE | \
- NHMEX_M_PMON_CTL_FLAG_MODE | \
- NHMEX_M_PMON_CTL_INC_SEL_MASK | \
- NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK)
-
-#define NHMEX_M_PMON_ZDP_CTL_FVC_MASK (((1 << 11) - 1) | (1 << 23))
-#define NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n) (0x7ULL << (11 + 3 * (n)))
-
-#define WSMEX_M_PMON_ZDP_CTL_FVC_MASK (((1 << 12) - 1) | (1 << 24))
-#define WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n) (0x7ULL << (12 + 3 * (n)))
-
-/*
- * use the 9~13 bits to select the event if the 7th bit is not set,
- * otherwise use the 19~21 bits to select the event.
- */
-#define MBOX_INC_SEL(x) ((x) << NHMEX_M_PMON_CTL_INC_SEL_SHIFT)
-#define MBOX_SET_FLAG_SEL(x) (((x) << NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT) | \
- NHMEX_M_PMON_CTL_FLAG_MODE)
-#define MBOX_INC_SEL_MASK (NHMEX_M_PMON_CTL_INC_SEL_MASK | \
- NHMEX_M_PMON_CTL_FLAG_MODE)
-#define MBOX_SET_FLAG_SEL_MASK (NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK | \
- NHMEX_M_PMON_CTL_FLAG_MODE)
-#define MBOX_INC_SEL_EXTAR_REG(c, r) \
- EVENT_EXTRA_REG(MBOX_INC_SEL(c), NHMEX_M0_MSR_PMU_##r, \
- MBOX_INC_SEL_MASK, (u64)-1, NHMEX_M_##r)
-#define MBOX_SET_FLAG_SEL_EXTRA_REG(c, r) \
- EVENT_EXTRA_REG(MBOX_SET_FLAG_SEL(c), NHMEX_M0_MSR_PMU_##r, \
- MBOX_SET_FLAG_SEL_MASK, \
- (u64)-1, NHMEX_M_##r)
-
-/* NHM-EX Rbox */
-#define NHMEX_R_MSR_GLOBAL_CTL 0xe00
-#define NHMEX_R_MSR_PMON_CTL0 0xe10
-#define NHMEX_R_MSR_PMON_CNT0 0xe11
-#define NHMEX_R_MSR_OFFSET 0x20
-
-#define NHMEX_R_MSR_PORTN_QLX_CFG(n) \
- ((n) < 4 ? (0xe0c + (n)) : (0xe2c + (n) - 4))
-#define NHMEX_R_MSR_PORTN_IPERF_CFG0(n) (0xe04 + (n))
-#define NHMEX_R_MSR_PORTN_IPERF_CFG1(n) (0xe24 + (n))
-#define NHMEX_R_MSR_PORTN_XBR_OFFSET(n) \
- (((n) < 4 ? 0 : 0x10) + (n) * 4)
-#define NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) \
- (0xe60 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n))
-#define NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(n) \
- (NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 1)
-#define NHMEX_R_MSR_PORTN_XBR_SET1_MASK(n) \
- (NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 2)
-#define NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) \
- (0xe70 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n))
-#define NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(n) \
- (NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 1)
-#define NHMEX_R_MSR_PORTN_XBR_SET2_MASK(n) \
- (NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 2)
-
-#define NHMEX_R_PMON_CTL_EN (1 << 0)
-#define NHMEX_R_PMON_CTL_EV_SEL_SHIFT 1
-#define NHMEX_R_PMON_CTL_EV_SEL_MASK \
- (0x1f << NHMEX_R_PMON_CTL_EV_SEL_SHIFT)
-#define NHMEX_R_PMON_CTL_PMI_EN (1 << 6)
-#define NHMEX_R_PMON_RAW_EVENT_MASK NHMEX_R_PMON_CTL_EV_SEL_MASK
-
-/* NHM-EX Wbox */
-#define NHMEX_W_MSR_GLOBAL_CTL 0xc80
-#define NHMEX_W_MSR_PMON_CNT0 0xc90
-#define NHMEX_W_MSR_PMON_EVT_SEL0 0xc91
-#define NHMEX_W_MSR_PMON_FIXED_CTR 0x394
-#define NHMEX_W_MSR_PMON_FIXED_CTL 0x395
-
-#define NHMEX_W_PMON_GLOBAL_FIXED_EN (1ULL << 31)
-
struct intel_uncore_ops;
struct intel_uncore_pmu;
struct intel_uncore_box;
@@ -505,6 +116,9 @@ struct uncore_event_desc {
const char *config;
};
+ssize_t uncore_event_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf);
+
#define INTEL_UNCORE_EVENT_DESC(_name, _config) \
{ \
.attr = __ATTR(_name, 0444, uncore_event_show, NULL), \
@@ -522,15 +136,6 @@ static ssize_t __uncore_##_var##_show(struct kobject *kobj, \
static struct kobj_attribute format_attr_##_var = \
__ATTR(_name, 0444, __uncore_##_var##_show, NULL)
-
-static ssize_t uncore_event_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- struct uncore_event_desc *event =
- container_of(attr, struct uncore_event_desc, attr);
- return sprintf(buf, "%s", event->config);
-}
-
static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
{
return box->pmu->type->box_ctl;
@@ -694,3 +299,41 @@ static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
{
return (box->phys_id < 0);
}
+
+struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event);
+struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu);
+struct intel_uncore_box *uncore_event_to_box(struct perf_event *event);
+u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event);
+void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
+void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
+void uncore_pmu_event_read(struct perf_event *event);
+void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event);
+struct event_constraint *
+uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event);
+void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event);
+u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx);
+
+extern struct intel_uncore_type **uncore_msr_uncores;
+extern struct intel_uncore_type **uncore_pci_uncores;
+extern struct pci_driver *uncore_pci_driver;
+extern int uncore_pcibus_to_physid[256];
+extern struct pci_dev *uncore_extra_pci_dev[UNCORE_SOCKET_MAX][UNCORE_EXTRA_PCI_DEV_MAX];
+extern struct event_constraint uncore_constraint_empty;
+
+/* perf_event_intel_uncore_snb.c */
+int snb_uncore_pci_init(void);
+int ivb_uncore_pci_init(void);
+int hsw_uncore_pci_init(void);
+void snb_uncore_cpu_init(void);
+void nhm_uncore_cpu_init(void);
+
+/* perf_event_intel_uncore_snbep.c */
+int snbep_uncore_pci_init(void);
+void snbep_uncore_cpu_init(void);
+int ivbep_uncore_pci_init(void);
+void ivbep_uncore_cpu_init(void);
+int hswep_uncore_pci_init(void);
+void hswep_uncore_cpu_init(void);
+
+/* perf_event_intel_uncore_nhmex.c */
+void nhmex_uncore_cpu_init(void);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_nhmex.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_nhmex.c
new file mode 100644
index 000000000000..2749965afed0
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_nhmex.c
@@ -0,0 +1,1221 @@
+/* Nehalem-EX/Westmere-EX uncore support */
+#include "perf_event_intel_uncore.h"
+
+/* NHM-EX event control */
+#define NHMEX_PMON_CTL_EV_SEL_MASK 0x000000ff
+#define NHMEX_PMON_CTL_UMASK_MASK 0x0000ff00
+#define NHMEX_PMON_CTL_EN_BIT0 (1 << 0)
+#define NHMEX_PMON_CTL_EDGE_DET (1 << 18)
+#define NHMEX_PMON_CTL_PMI_EN (1 << 20)
+#define NHMEX_PMON_CTL_EN_BIT22 (1 << 22)
+#define NHMEX_PMON_CTL_INVERT (1 << 23)
+#define NHMEX_PMON_CTL_TRESH_MASK 0xff000000
+#define NHMEX_PMON_RAW_EVENT_MASK (NHMEX_PMON_CTL_EV_SEL_MASK | \
+ NHMEX_PMON_CTL_UMASK_MASK | \
+ NHMEX_PMON_CTL_EDGE_DET | \
+ NHMEX_PMON_CTL_INVERT | \
+ NHMEX_PMON_CTL_TRESH_MASK)
+
+/* NHM-EX Ubox */
+#define NHMEX_U_MSR_PMON_GLOBAL_CTL 0xc00
+#define NHMEX_U_MSR_PMON_CTR 0xc11
+#define NHMEX_U_MSR_PMON_EV_SEL 0xc10
+
+#define NHMEX_U_PMON_GLOBAL_EN (1 << 0)
+#define NHMEX_U_PMON_GLOBAL_PMI_CORE_SEL 0x0000001e
+#define NHMEX_U_PMON_GLOBAL_EN_ALL (1 << 28)
+#define NHMEX_U_PMON_GLOBAL_RST_ALL (1 << 29)
+#define NHMEX_U_PMON_GLOBAL_FRZ_ALL (1 << 31)
+
+#define NHMEX_U_PMON_RAW_EVENT_MASK \
+ (NHMEX_PMON_CTL_EV_SEL_MASK | \
+ NHMEX_PMON_CTL_EDGE_DET)
+
+/* NHM-EX Cbox */
+#define NHMEX_C0_MSR_PMON_GLOBAL_CTL 0xd00
+#define NHMEX_C0_MSR_PMON_CTR0 0xd11
+#define NHMEX_C0_MSR_PMON_EV_SEL0 0xd10
+#define NHMEX_C_MSR_OFFSET 0x20
+
+/* NHM-EX Bbox */
+#define NHMEX_B0_MSR_PMON_GLOBAL_CTL 0xc20
+#define NHMEX_B0_MSR_PMON_CTR0 0xc31
+#define NHMEX_B0_MSR_PMON_CTL0 0xc30
+#define NHMEX_B_MSR_OFFSET 0x40
+#define NHMEX_B0_MSR_MATCH 0xe45
+#define NHMEX_B0_MSR_MASK 0xe46
+#define NHMEX_B1_MSR_MATCH 0xe4d
+#define NHMEX_B1_MSR_MASK 0xe4e
+
+#define NHMEX_B_PMON_CTL_EN (1 << 0)
+#define NHMEX_B_PMON_CTL_EV_SEL_SHIFT 1
+#define NHMEX_B_PMON_CTL_EV_SEL_MASK \
+ (0x1f << NHMEX_B_PMON_CTL_EV_SEL_SHIFT)
+#define NHMEX_B_PMON_CTR_SHIFT 6
+#define NHMEX_B_PMON_CTR_MASK \
+ (0x3 << NHMEX_B_PMON_CTR_SHIFT)
+#define NHMEX_B_PMON_RAW_EVENT_MASK \
+ (NHMEX_B_PMON_CTL_EV_SEL_MASK | \
+ NHMEX_B_PMON_CTR_MASK)
+
+/* NHM-EX Sbox */
+#define NHMEX_S0_MSR_PMON_GLOBAL_CTL 0xc40
+#define NHMEX_S0_MSR_PMON_CTR0 0xc51
+#define NHMEX_S0_MSR_PMON_CTL0 0xc50
+#define NHMEX_S_MSR_OFFSET 0x80
+#define NHMEX_S0_MSR_MM_CFG 0xe48
+#define NHMEX_S0_MSR_MATCH 0xe49
+#define NHMEX_S0_MSR_MASK 0xe4a
+#define NHMEX_S1_MSR_MM_CFG 0xe58
+#define NHMEX_S1_MSR_MATCH 0xe59
+#define NHMEX_S1_MSR_MASK 0xe5a
+
+#define NHMEX_S_PMON_MM_CFG_EN (0x1ULL << 63)
+#define NHMEX_S_EVENT_TO_R_PROG_EV 0
+
+/* NHM-EX Mbox */
+#define NHMEX_M0_MSR_GLOBAL_CTL 0xca0
+#define NHMEX_M0_MSR_PMU_DSP 0xca5
+#define NHMEX_M0_MSR_PMU_ISS 0xca6
+#define NHMEX_M0_MSR_PMU_MAP 0xca7
+#define NHMEX_M0_MSR_PMU_MSC_THR 0xca8
+#define NHMEX_M0_MSR_PMU_PGT 0xca9
+#define NHMEX_M0_MSR_PMU_PLD 0xcaa
+#define NHMEX_M0_MSR_PMU_ZDP_CTL_FVC 0xcab
+#define NHMEX_M0_MSR_PMU_CTL0 0xcb0
+#define NHMEX_M0_MSR_PMU_CNT0 0xcb1
+#define NHMEX_M_MSR_OFFSET 0x40
+#define NHMEX_M0_MSR_PMU_MM_CFG 0xe54
+#define NHMEX_M1_MSR_PMU_MM_CFG 0xe5c
+
+#define NHMEX_M_PMON_MM_CFG_EN (1ULL << 63)
+#define NHMEX_M_PMON_ADDR_MATCH_MASK 0x3ffffffffULL
+#define NHMEX_M_PMON_ADDR_MASK_MASK 0x7ffffffULL
+#define NHMEX_M_PMON_ADDR_MASK_SHIFT 34
+
+#define NHMEX_M_PMON_CTL_EN (1 << 0)
+#define NHMEX_M_PMON_CTL_PMI_EN (1 << 1)
+#define NHMEX_M_PMON_CTL_COUNT_MODE_SHIFT 2
+#define NHMEX_M_PMON_CTL_COUNT_MODE_MASK \
+ (0x3 << NHMEX_M_PMON_CTL_COUNT_MODE_SHIFT)
+#define NHMEX_M_PMON_CTL_STORAGE_MODE_SHIFT 4
+#define NHMEX_M_PMON_CTL_STORAGE_MODE_MASK \
+ (0x3 << NHMEX_M_PMON_CTL_STORAGE_MODE_SHIFT)
+#define NHMEX_M_PMON_CTL_WRAP_MODE (1 << 6)
+#define NHMEX_M_PMON_CTL_FLAG_MODE (1 << 7)
+#define NHMEX_M_PMON_CTL_INC_SEL_SHIFT 9
+#define NHMEX_M_PMON_CTL_INC_SEL_MASK \
+ (0x1f << NHMEX_M_PMON_CTL_INC_SEL_SHIFT)
+#define NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT 19
+#define NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK \
+ (0x7 << NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT)
+#define NHMEX_M_PMON_RAW_EVENT_MASK \
+ (NHMEX_M_PMON_CTL_COUNT_MODE_MASK | \
+ NHMEX_M_PMON_CTL_STORAGE_MODE_MASK | \
+ NHMEX_M_PMON_CTL_WRAP_MODE | \
+ NHMEX_M_PMON_CTL_FLAG_MODE | \
+ NHMEX_M_PMON_CTL_INC_SEL_MASK | \
+ NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK)
+
+#define NHMEX_M_PMON_ZDP_CTL_FVC_MASK (((1 << 11) - 1) | (1 << 23))
+#define NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n) (0x7ULL << (11 + 3 * (n)))
+
+#define WSMEX_M_PMON_ZDP_CTL_FVC_MASK (((1 << 12) - 1) | (1 << 24))
+#define WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n) (0x7ULL << (12 + 3 * (n)))
+
+/*
+ * use the 9~13 bits to select the event if the 7th bit is not set,
+ * otherwise use the 19~21 bits to select the event.
+ */
+#define MBOX_INC_SEL(x) ((x) << NHMEX_M_PMON_CTL_INC_SEL_SHIFT)
+#define MBOX_SET_FLAG_SEL(x) (((x) << NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT) | \
+ NHMEX_M_PMON_CTL_FLAG_MODE)
+#define MBOX_INC_SEL_MASK (NHMEX_M_PMON_CTL_INC_SEL_MASK | \
+ NHMEX_M_PMON_CTL_FLAG_MODE)
+#define MBOX_SET_FLAG_SEL_MASK (NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK | \
+ NHMEX_M_PMON_CTL_FLAG_MODE)
+#define MBOX_INC_SEL_EXTAR_REG(c, r) \
+ EVENT_EXTRA_REG(MBOX_INC_SEL(c), NHMEX_M0_MSR_PMU_##r, \
+ MBOX_INC_SEL_MASK, (u64)-1, NHMEX_M_##r)
+#define MBOX_SET_FLAG_SEL_EXTRA_REG(c, r) \
+ EVENT_EXTRA_REG(MBOX_SET_FLAG_SEL(c), NHMEX_M0_MSR_PMU_##r, \
+ MBOX_SET_FLAG_SEL_MASK, \
+ (u64)-1, NHMEX_M_##r)
+
+/* NHM-EX Rbox */
+#define NHMEX_R_MSR_GLOBAL_CTL 0xe00
+#define NHMEX_R_MSR_PMON_CTL0 0xe10
+#define NHMEX_R_MSR_PMON_CNT0 0xe11
+#define NHMEX_R_MSR_OFFSET 0x20
+
+#define NHMEX_R_MSR_PORTN_QLX_CFG(n) \
+ ((n) < 4 ? (0xe0c + (n)) : (0xe2c + (n) - 4))
+#define NHMEX_R_MSR_PORTN_IPERF_CFG0(n) (0xe04 + (n))
+#define NHMEX_R_MSR_PORTN_IPERF_CFG1(n) (0xe24 + (n))
+#define NHMEX_R_MSR_PORTN_XBR_OFFSET(n) \
+ (((n) < 4 ? 0 : 0x10) + (n) * 4)
+#define NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) \
+ (0xe60 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n))
+#define NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(n) \
+ (NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 1)
+#define NHMEX_R_MSR_PORTN_XBR_SET1_MASK(n) \
+ (NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 2)
+#define NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) \
+ (0xe70 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n))
+#define NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(n) \
+ (NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 1)
+#define NHMEX_R_MSR_PORTN_XBR_SET2_MASK(n) \
+ (NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 2)
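
The port-to-MSR layout encoded by these macros is easier to see with a couple of worked addresses. The DEMO_* copies below are illustrative only; the values are computed by hand from the definitions above and verified at compile time.

#define DEMO_R_QLX_CFG(n)	((n) < 4 ? (0xe0c + (n)) : (0xe2c + (n) - 4))
#define DEMO_R_XBR_OFFSET(n)	(((n) < 4 ? 0 : 0x10) + (n) * 4)

_Static_assert(DEMO_R_QLX_CFG(0) == 0xe0c, "QLX cfg, low port 0");
_Static_assert(DEMO_R_QLX_CFG(5) == 0xe2d, "QLX cfg, high port 5");
_Static_assert(0xe60 + DEMO_R_XBR_OFFSET(0) == 0xe60, "XBR set 1 MM_CFG, port 0");
_Static_assert(0xe60 + DEMO_R_XBR_OFFSET(5) == 0xe84, "XBR set 1 MM_CFG, port 5");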
+
+#define NHMEX_R_PMON_CTL_EN (1 << 0)
+#define NHMEX_R_PMON_CTL_EV_SEL_SHIFT 1
+#define NHMEX_R_PMON_CTL_EV_SEL_MASK \
+ (0x1f << NHMEX_R_PMON_CTL_EV_SEL_SHIFT)
+#define NHMEX_R_PMON_CTL_PMI_EN (1 << 6)
+#define NHMEX_R_PMON_RAW_EVENT_MASK NHMEX_R_PMON_CTL_EV_SEL_MASK
+
+/* NHM-EX Wbox */
+#define NHMEX_W_MSR_GLOBAL_CTL 0xc80
+#define NHMEX_W_MSR_PMON_CNT0 0xc90
+#define NHMEX_W_MSR_PMON_EVT_SEL0 0xc91
+#define NHMEX_W_MSR_PMON_FIXED_CTR 0x394
+#define NHMEX_W_MSR_PMON_FIXED_CTL 0x395
+
+#define NHMEX_W_PMON_GLOBAL_FIXED_EN (1ULL << 31)
+
+#define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \
+ ((1ULL << (n)) - 1)))
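
__BITS_VALUE() is the workhorse for the packed fields used throughout this file: it returns the i-th n-bit slice of x. A self-contained illustration with a simplified copy (the typeof() cast is dropped); the DEMO_* and bits_value_demo names are not part of the patch.

#include <assert.h>
#include <stdint.h>

#define DEMO_BITS_VALUE(x, i, n)  (((x) >> ((i) * (n))) & ((1ULL << (n)) - 1))

static void bits_value_demo(void)
{
	/* two 8-bit extra-register indices packed the way the M-box code
	 * below packs reg1->idx: slot 0 holds 0x07, slot 1 holds 0x0a */
	uint64_t packed = (0x0aULL << 8) | 0x07;

	assert(DEMO_BITS_VALUE(packed, 0, 8) == 0x07);
	assert(DEMO_BITS_VALUE(packed, 1, 8) == 0x0a);
}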
+
+DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
+DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
+DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
+DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
+DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
+DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
+DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
+DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");
+DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63");
+
+static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box)
+{
+ wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL);
+}
+
+static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box)
+{
+ unsigned msr = uncore_msr_box_ctl(box);
+ u64 config;
+
+ if (msr) {
+ rdmsrl(msr, config);
+ config &= ~((1ULL << uncore_num_counters(box)) - 1);
+ /* WBox has a fixed counter */
+ if (uncore_msr_fixed_ctl(box))
+ config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN;
+ wrmsrl(msr, config);
+ }
+}
+
+static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box)
+{
+ unsigned msr = uncore_msr_box_ctl(box);
+ u64 config;
+
+ if (msr) {
+ rdmsrl(msr, config);
+ config |= (1ULL << uncore_num_counters(box)) - 1;
+ /* WBox has a fixed counter */
+ if (uncore_msr_fixed_ctl(box))
+ config |= NHMEX_W_PMON_GLOBAL_FIXED_EN;
+ wrmsrl(msr, config);
+ }
+}
+
+static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
+{
+ wrmsrl(event->hw.config_base, 0);
+}
+
+static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (hwc->idx >= UNCORE_PMC_IDX_FIXED)
+ wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
+ else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
+ wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
+ else
+ wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
+}
+
+#define NHMEX_UNCORE_OPS_COMMON_INIT() \
+ .init_box = nhmex_uncore_msr_init_box, \
+ .disable_box = nhmex_uncore_msr_disable_box, \
+ .enable_box = nhmex_uncore_msr_enable_box, \
+ .disable_event = nhmex_uncore_msr_disable_event, \
+ .read_counter = uncore_msr_read_counter
+
+static struct intel_uncore_ops nhmex_uncore_ops = {
+ NHMEX_UNCORE_OPS_COMMON_INIT(),
+ .enable_event = nhmex_uncore_msr_enable_event,
+};
+
+static struct attribute *nhmex_uncore_ubox_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_edge.attr,
+ NULL,
+};
+
+static struct attribute_group nhmex_uncore_ubox_format_group = {
+ .name = "format",
+ .attrs = nhmex_uncore_ubox_formats_attr,
+};
+
+static struct intel_uncore_type nhmex_uncore_ubox = {
+ .name = "ubox",
+ .num_counters = 1,
+ .num_boxes = 1,
+ .perf_ctr_bits = 48,
+ .event_ctl = NHMEX_U_MSR_PMON_EV_SEL,
+ .perf_ctr = NHMEX_U_MSR_PMON_CTR,
+ .event_mask = NHMEX_U_PMON_RAW_EVENT_MASK,
+ .box_ctl = NHMEX_U_MSR_PMON_GLOBAL_CTL,
+ .ops = &nhmex_uncore_ops,
+ .format_group = &nhmex_uncore_ubox_format_group
+};
+
+static struct attribute *nhmex_uncore_cbox_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_umask.attr,
+ &format_attr_edge.attr,
+ &format_attr_inv.attr,
+ &format_attr_thresh8.attr,
+ NULL,
+};
+
+static struct attribute_group nhmex_uncore_cbox_format_group = {
+ .name = "format",
+ .attrs = nhmex_uncore_cbox_formats_attr,
+};
+
+/* msr offset for each instance of cbox */
+static unsigned nhmex_cbox_msr_offsets[] = {
+ 0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0,
+};
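
The C-box instances are not laid out at a uniform stride, which is presumably why this type carries a per-box offset table instead of the usual msr_offset stride. Assuming the common code adds msr_offsets[pmu_idx] to the base registers (as the single-stride case suggests), box 8's event-select MSR would land at 0xd10 + 0x240 = 0xf50. Illustrative sketch only; demo_cbox_ev_sel is not part of the patch.

static unsigned demo_cbox_ev_sel(unsigned box_idx)
{
	static const unsigned offsets[] = {
		0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0,
	};

	return 0xd10 + offsets[box_idx];	/* NHMEX_C0_MSR_PMON_EV_SEL0 + offset */
}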
+
+static struct intel_uncore_type nhmex_uncore_cbox = {
+ .name = "cbox",
+ .num_counters = 6,
+ .num_boxes = 10,
+ .perf_ctr_bits = 48,
+ .event_ctl = NHMEX_C0_MSR_PMON_EV_SEL0,
+ .perf_ctr = NHMEX_C0_MSR_PMON_CTR0,
+ .event_mask = NHMEX_PMON_RAW_EVENT_MASK,
+ .box_ctl = NHMEX_C0_MSR_PMON_GLOBAL_CTL,
+ .msr_offsets = nhmex_cbox_msr_offsets,
+ .pair_ctr_ctl = 1,
+ .ops = &nhmex_uncore_ops,
+ .format_group = &nhmex_uncore_cbox_format_group
+};
+
+static struct uncore_event_desc nhmex_uncore_wbox_events[] = {
+ INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0"),
+ { /* end: all zeroes */ },
+};
+
+static struct intel_uncore_type nhmex_uncore_wbox = {
+ .name = "wbox",
+ .num_counters = 4,
+ .num_boxes = 1,
+ .perf_ctr_bits = 48,
+ .event_ctl = NHMEX_W_MSR_PMON_CNT0,
+ .perf_ctr = NHMEX_W_MSR_PMON_EVT_SEL0,
+ .fixed_ctr = NHMEX_W_MSR_PMON_FIXED_CTR,
+ .fixed_ctl = NHMEX_W_MSR_PMON_FIXED_CTL,
+ .event_mask = NHMEX_PMON_RAW_EVENT_MASK,
+ .box_ctl = NHMEX_W_MSR_GLOBAL_CTL,
+ .pair_ctr_ctl = 1,
+ .event_descs = nhmex_uncore_wbox_events,
+ .ops = &nhmex_uncore_ops,
+ .format_group = &nhmex_uncore_cbox_format_group
+};
+
+static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+ struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
+ int ctr, ev_sel;
+
+ ctr = (hwc->config & NHMEX_B_PMON_CTR_MASK) >>
+ NHMEX_B_PMON_CTR_SHIFT;
+ ev_sel = (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK) >>
+ NHMEX_B_PMON_CTL_EV_SEL_SHIFT;
+
+ /* events that do not use the match/mask registers */
+ if ((ctr == 0 && ev_sel > 0x3) || (ctr == 1 && ev_sel > 0x6) ||
+ (ctr == 2 && ev_sel != 0x4) || ctr == 3)
+ return 0;
+
+ if (box->pmu->pmu_idx == 0)
+ reg1->reg = NHMEX_B0_MSR_MATCH;
+ else
+ reg1->reg = NHMEX_B1_MSR_MATCH;
+ reg1->idx = 0;
+ reg1->config = event->attr.config1;
+ reg2->config = event->attr.config2;
+ return 0;
+}
+
+static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+ struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
+
+ if (reg1->idx != EXTRA_REG_NONE) {
+ wrmsrl(reg1->reg, reg1->config);
+ wrmsrl(reg1->reg + 1, reg2->config);
+ }
+ wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
+ (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK));
+}
+
+/*
+ * The Bbox has 4 counters, but each counter monitors different events.
+ * Use bits 6-7 in the event config to select the counter.
+ */
+static struct event_constraint nhmex_uncore_bbox_constraints[] = {
+ EVENT_CONSTRAINT(0 , 1, 0xc0),
+ EVENT_CONSTRAINT(0x40, 2, 0xc0),
+ EVENT_CONSTRAINT(0x80, 4, 0xc0),
+ EVENT_CONSTRAINT(0xc0, 8, 0xc0),
+ EVENT_CONSTRAINT_END,
+};
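
The table above pins each value of the two counter-select bits (config bits 6-7) to exactly one of the four B-box counters: cmask 0xc0 distinguishes the codes 0x00/0x40/0x80/0xc0, and the counter masks 0x1/0x2/0x4/0x8 are one-hot. A tiny illustrative helper (not part of the patch) expressing the same mapping:

#include <stdint.h>

static unsigned demo_bbox_counter_mask(uint64_t config)
{
	/* 0x00 -> 0x1, 0x40 -> 0x2, 0x80 -> 0x4, 0xc0 -> 0x8 */
	return 1u << ((config >> 6) & 0x3);
}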
+
+static struct attribute *nhmex_uncore_bbox_formats_attr[] = {
+ &format_attr_event5.attr,
+ &format_attr_counter.attr,
+ &format_attr_match.attr,
+ &format_attr_mask.attr,
+ NULL,
+};
+
+static struct attribute_group nhmex_uncore_bbox_format_group = {
+ .name = "format",
+ .attrs = nhmex_uncore_bbox_formats_attr,
+};
+
+static struct intel_uncore_ops nhmex_uncore_bbox_ops = {
+ NHMEX_UNCORE_OPS_COMMON_INIT(),
+ .enable_event = nhmex_bbox_msr_enable_event,
+ .hw_config = nhmex_bbox_hw_config,
+ .get_constraint = uncore_get_constraint,
+ .put_constraint = uncore_put_constraint,
+};
+
+static struct intel_uncore_type nhmex_uncore_bbox = {
+ .name = "bbox",
+ .num_counters = 4,
+ .num_boxes = 2,
+ .perf_ctr_bits = 48,
+ .event_ctl = NHMEX_B0_MSR_PMON_CTL0,
+ .perf_ctr = NHMEX_B0_MSR_PMON_CTR0,
+ .event_mask = NHMEX_B_PMON_RAW_EVENT_MASK,
+ .box_ctl = NHMEX_B0_MSR_PMON_GLOBAL_CTL,
+ .msr_offset = NHMEX_B_MSR_OFFSET,
+ .pair_ctr_ctl = 1,
+ .num_shared_regs = 1,
+ .constraints = nhmex_uncore_bbox_constraints,
+ .ops = &nhmex_uncore_bbox_ops,
+ .format_group = &nhmex_uncore_bbox_format_group
+};
+
+static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+ struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
+
+ /* only TO_R_PROG_EV event uses the match/mask register */
+ if ((hwc->config & NHMEX_PMON_CTL_EV_SEL_MASK) !=
+ NHMEX_S_EVENT_TO_R_PROG_EV)
+ return 0;
+
+ if (box->pmu->pmu_idx == 0)
+ reg1->reg = NHMEX_S0_MSR_MM_CFG;
+ else
+ reg1->reg = NHMEX_S1_MSR_MM_CFG;
+ reg1->idx = 0;
+ reg1->config = event->attr.config1;
+ reg2->config = event->attr.config2;
+ return 0;
+}
+
+static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+ struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
+
+ if (reg1->idx != EXTRA_REG_NONE) {
+ wrmsrl(reg1->reg, 0);
+ wrmsrl(reg1->reg + 1, reg1->config);
+ wrmsrl(reg1->reg + 2, reg2->config);
+ wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN);
+ }
+ wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
+}
+
+static struct attribute *nhmex_uncore_sbox_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_umask.attr,
+ &format_attr_edge.attr,
+ &format_attr_inv.attr,
+ &format_attr_thresh8.attr,
+ &format_attr_match.attr,
+ &format_attr_mask.attr,
+ NULL,
+};
+
+static struct attribute_group nhmex_uncore_sbox_format_group = {
+ .name = "format",
+ .attrs = nhmex_uncore_sbox_formats_attr,
+};
+
+static struct intel_uncore_ops nhmex_uncore_sbox_ops = {
+ NHMEX_UNCORE_OPS_COMMON_INIT(),
+ .enable_event = nhmex_sbox_msr_enable_event,
+ .hw_config = nhmex_sbox_hw_config,
+ .get_constraint = uncore_get_constraint,
+ .put_constraint = uncore_put_constraint,
+};
+
+static struct intel_uncore_type nhmex_uncore_sbox = {
+ .name = "sbox",
+ .num_counters = 4,
+ .num_boxes = 2,
+ .perf_ctr_bits = 48,
+ .event_ctl = NHMEX_S0_MSR_PMON_CTL0,
+ .perf_ctr = NHMEX_S0_MSR_PMON_CTR0,
+ .event_mask = NHMEX_PMON_RAW_EVENT_MASK,
+ .box_ctl = NHMEX_S0_MSR_PMON_GLOBAL_CTL,
+ .msr_offset = NHMEX_S_MSR_OFFSET,
+ .pair_ctr_ctl = 1,
+ .num_shared_regs = 1,
+ .ops = &nhmex_uncore_sbox_ops,
+ .format_group = &nhmex_uncore_sbox_format_group
+};
+
+enum {
+ EXTRA_REG_NHMEX_M_FILTER,
+ EXTRA_REG_NHMEX_M_DSP,
+ EXTRA_REG_NHMEX_M_ISS,
+ EXTRA_REG_NHMEX_M_MAP,
+ EXTRA_REG_NHMEX_M_MSC_THR,
+ EXTRA_REG_NHMEX_M_PGT,
+ EXTRA_REG_NHMEX_M_PLD,
+ EXTRA_REG_NHMEX_M_ZDP_CTL_FVC,
+};
+
+static struct extra_reg nhmex_uncore_mbox_extra_regs[] = {
+ MBOX_INC_SEL_EXTAR_REG(0x0, DSP),
+ MBOX_INC_SEL_EXTAR_REG(0x4, MSC_THR),
+ MBOX_INC_SEL_EXTAR_REG(0x5, MSC_THR),
+ MBOX_INC_SEL_EXTAR_REG(0x9, ISS),
+ /* event 0xa uses two extra registers */
+ MBOX_INC_SEL_EXTAR_REG(0xa, ISS),
+ MBOX_INC_SEL_EXTAR_REG(0xa, PLD),
+ MBOX_INC_SEL_EXTAR_REG(0xb, PLD),
+ /* events 0xd ~ 0x10 use the same extra register */
+ MBOX_INC_SEL_EXTAR_REG(0xd, ZDP_CTL_FVC),
+ MBOX_INC_SEL_EXTAR_REG(0xe, ZDP_CTL_FVC),
+ MBOX_INC_SEL_EXTAR_REG(0xf, ZDP_CTL_FVC),
+ MBOX_INC_SEL_EXTAR_REG(0x10, ZDP_CTL_FVC),
+ MBOX_INC_SEL_EXTAR_REG(0x16, PGT),
+ MBOX_SET_FLAG_SEL_EXTRA_REG(0x0, DSP),
+ MBOX_SET_FLAG_SEL_EXTRA_REG(0x1, ISS),
+ MBOX_SET_FLAG_SEL_EXTRA_REG(0x5, PGT),
+ MBOX_SET_FLAG_SEL_EXTRA_REG(0x6, MAP),
+ EVENT_EXTRA_END
+};
+
+/* Nehalem-EX or Westmere-EX ? */
+static bool uncore_nhmex;
+
+static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)
+{
+ struct intel_uncore_extra_reg *er;
+ unsigned long flags;
+ bool ret = false;
+ u64 mask;
+
+ if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
+ er = &box->shared_regs[idx];
+ raw_spin_lock_irqsave(&er->lock, flags);
+ if (!atomic_read(&er->ref) || er->config == config) {
+ atomic_inc(&er->ref);
+ er->config = config;
+ ret = true;
+ }
+ raw_spin_unlock_irqrestore(&er->lock, flags);
+
+ return ret;
+ }
+ /*
+ * The ZDP_CTL_FVC MSR has 4 fields which are used to control
+ * events 0xd ~ 0x10. Besides these 4 fields, there are additional
+ * fields which are shared.
+ */
+ idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
+ if (WARN_ON_ONCE(idx >= 4))
+ return false;
+
+ /* mask of the shared fields */
+ if (uncore_nhmex)
+ mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK;
+ else
+ mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK;
+ er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
+
+ raw_spin_lock_irqsave(&er->lock, flags);
+ /* add mask of the non-shared field if it's in use */
+ if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) {
+ if (uncore_nhmex)
+ mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
+ else
+ mask |= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
+ }
+
+ if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) {
+ atomic_add(1 << (idx * 8), &er->ref);
+ if (uncore_nhmex)
+ mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK |
+ NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
+ else
+ mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK |
+ WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
+ er->config &= ~mask;
+ er->config |= (config & mask);
+ ret = true;
+ }
+ raw_spin_unlock_irqrestore(&er->lock, flags);
+
+ return ret;
+}
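
For the ZDP_CTL_FVC register, er->ref is treated as four packed 8-bit reference counts, one per sub-field: atomic_add(1 << (idx * 8)) bumps a single byte and __BITS_VALUE(ref, idx, 8) reads it back. A self-contained sketch of that bookkeeping with hypothetical demo_* names:

#include <assert.h>

static int demo_zdp_ref_get(int ref, int subfield)
{
	return (ref >> (subfield * 8)) & 0xff;
}

static void demo_zdp_ref(void)
{
	int ref = 0;

	ref += 1 << (2 * 8);		/* take sub-field 2 ... */
	ref += 1 << (2 * 8);		/* ... twice */
	assert(demo_zdp_ref_get(ref, 2) == 2);
	assert(demo_zdp_ref_get(ref, 0) == 0);
}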
+
+static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx)
+{
+ struct intel_uncore_extra_reg *er;
+
+ if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
+ er = &box->shared_regs[idx];
+ atomic_dec(&er->ref);
+ return;
+ }
+
+ idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
+ er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
+ atomic_sub(1 << (idx * 8), &er->ref);
+}
+
+static u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+ u64 idx, orig_idx = __BITS_VALUE(reg1->idx, 0, 8);
+ u64 config = reg1->config;
+
+ /* get the non-shared control bits and shift them */
+ idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
+ if (uncore_nhmex)
+ config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
+ else
+ config &= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
+ if (new_idx > orig_idx) {
+ idx = new_idx - orig_idx;
+ config <<= 3 * idx;
+ } else {
+ idx = orig_idx - new_idx;
+ config >>= 3 * idx;
+ }
+
+ /* add the shared control bits back */
+ if (uncore_nhmex)
+ config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
+ else
+ config |= WSMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
+ config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
+ if (modify) {
+ /* adjust the main event selector */
+ if (new_idx > orig_idx)
+ hwc->config += idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
+ else
+ hwc->config -= idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
+ reg1->config = config;
+ reg1->idx = ~0xff | new_idx;
+ }
+ return config;
+}
+
+static struct event_constraint *
+nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+ struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
+ int i, idx[2], alloc = 0;
+ u64 config1 = reg1->config;
+
+ idx[0] = __BITS_VALUE(reg1->idx, 0, 8);
+ idx[1] = __BITS_VALUE(reg1->idx, 1, 8);
+again:
+ for (i = 0; i < 2; i++) {
+ if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
+ idx[i] = 0xff;
+
+ if (idx[i] == 0xff)
+ continue;
+
+ if (!nhmex_mbox_get_shared_reg(box, idx[i],
+ __BITS_VALUE(config1, i, 32)))
+ goto fail;
+ alloc |= (0x1 << i);
+ }
+
+ /* for the match/mask registers */
+ if (reg2->idx != EXTRA_REG_NONE &&
+ (uncore_box_is_fake(box) || !reg2->alloc) &&
+ !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config))
+ goto fail;
+
+ /*
+ * If it's a fake box -- as per validate_{group,event}() -- we
+ * shouldn't touch the event state. Since both will only call
+ * get_event_constraints() once on each event, we can avoid the
+ * need for reg->alloc here.
+ */
+ if (!uncore_box_is_fake(box)) {
+ if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8))
+ nhmex_mbox_alter_er(event, idx[0], true);
+ reg1->alloc |= alloc;
+ if (reg2->idx != EXTRA_REG_NONE)
+ reg2->alloc = 1;
+ }
+ return NULL;
+fail:
+ if (idx[0] != 0xff && !(alloc & 0x1) &&
+ idx[0] >= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
+ /*
+ * events 0xd ~ 0x10 are functionally identical, but are
+ * controlled by different fields in the ZDP_CTL_FVC
+ * register. If we failed to take one field, try the
+ * remaining 3 choices.
+ */
+ BUG_ON(__BITS_VALUE(reg1->idx, 1, 8) != 0xff);
+ idx[0] -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
+ idx[0] = (idx[0] + 1) % 4;
+ idx[0] += EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
+ if (idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) {
+ config1 = nhmex_mbox_alter_er(event, idx[0], false);
+ goto again;
+ }
+ }
+
+ if (alloc & 0x1)
+ nhmex_mbox_put_shared_reg(box, idx[0]);
+ if (alloc & 0x2)
+ nhmex_mbox_put_shared_reg(box, idx[1]);
+ return &uncore_constraint_empty;
+}
+
+static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+ struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
+
+ if (uncore_box_is_fake(box))
+ return;
+
+ if (reg1->alloc & 0x1)
+ nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 0, 8));
+ if (reg1->alloc & 0x2)
+ nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 1, 8));
+ reg1->alloc = 0;
+
+ if (reg2->alloc) {
+ nhmex_mbox_put_shared_reg(box, reg2->idx);
+ reg2->alloc = 0;
+ }
+}
+
+static int nhmex_mbox_extra_reg_idx(struct extra_reg *er)
+{
+ if (er->idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
+ return er->idx;
+ return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd;
+}
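
A worked example of the index math above, derived from the enum and the MBOX_INC_SEL_EXTAR_REG table: the ZDP_CTL_FVC entry for inc_sel 0xe returns EXTRA_REG_NHMEX_M_ZDP_CTL_FVC + (0xe - 0xd) = 8, a virtual index. nhmex_mbox_get/put_shared_reg() fold the virtual indices 7..10 back onto shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC], using the offset as the sub-field number. Compile-time check only; DEMO_ZDP_CTL_FVC is an illustrative copy of the enum value.

enum { DEMO_ZDP_CTL_FVC = 7 };	/* EXTRA_REG_NHMEX_M_ZDP_CTL_FVC above */

_Static_assert(DEMO_ZDP_CTL_FVC + (0xe - 0xd) == 8,
	       "inc_sel 0xe maps to virtual extra-reg index 8");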
+
+static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct intel_uncore_type *type = box->pmu->type;
+ struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+ struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
+ struct extra_reg *er;
+ unsigned msr;
+ int reg_idx = 0;
+ /*
+ * The mbox events may require at most 2 extra MSRs. But only
+ * the lower 32 bits in these MSRs are significant, so we can use
+ * config1 to pass two MSRs' config.
+ */
+ for (er = nhmex_uncore_mbox_extra_regs; er->msr; er++) {
+ if (er->event != (event->hw.config & er->config_mask))
+ continue;
+ if (event->attr.config1 & ~er->valid_mask)
+ return -EINVAL;
+
+ msr = er->msr + type->msr_offset * box->pmu->pmu_idx;
+ if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff))
+ return -EINVAL;
+
+ /* always use the 32~63 bits to pass the PLD config */
+ if (er->idx == EXTRA_REG_NHMEX_M_PLD)
+ reg_idx = 1;
+ else if (WARN_ON_ONCE(reg_idx > 0))
+ return -EINVAL;
+
+ reg1->idx &= ~(0xff << (reg_idx * 8));
+ reg1->reg &= ~(0xffff << (reg_idx * 16));
+ reg1->idx |= nhmex_mbox_extra_reg_idx(er) << (reg_idx * 8);
+ reg1->reg |= msr << (reg_idx * 16);
+ reg1->config = event->attr.config1;
+ reg_idx++;
+ }
+ /*
+ * The mbox only provides the ability to perform address matching
+ * for the PLD events.
+ */
+ if (reg_idx == 2) {
+ reg2->idx = EXTRA_REG_NHMEX_M_FILTER;
+ if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN)
+ reg2->config = event->attr.config2;
+ else
+ reg2->config = ~0ULL;
+ if (box->pmu->pmu_idx == 0)
+ reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG;
+ else
+ reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG;
+ }
+ return 0;
+}
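
The hw_config routine above packs up to two extra-register assignments into a single hw_perf_event_extra: 8-bit extra-register indices in reg1->idx and the matching 16-bit MSR addresses in reg1->reg, with slot 1 reserved for the PLD config (carried in the upper half of config1). A simplified sketch with a hypothetical demo_pack_slot helper; the real fields are narrower than the u64s used here.

#include <stdint.h>

static void demo_pack_slot(uint64_t *idx, uint64_t *reg,
			   int slot, unsigned er_idx, unsigned msr)
{
	*idx &= ~(0xffULL   << (slot * 8));
	*reg &= ~(0xffffULL << (slot * 16));
	*idx |= (uint64_t)er_idx << (slot * 8);
	*reg |= (uint64_t)msr    << (slot * 16);
}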
+
+static u64 nhmex_mbox_shared_reg_config(struct intel_uncore_box *box, int idx)
+{
+ struct intel_uncore_extra_reg *er;
+ unsigned long flags;
+ u64 config;
+
+ if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
+ return box->shared_regs[idx].config;
+
+ er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
+ raw_spin_lock_irqsave(&er->lock, flags);
+ config = er->config;
+ raw_spin_unlock_irqrestore(&er->lock, flags);
+ return config;
+}
+
+static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+ struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
+ int idx;
+
+ idx = __BITS_VALUE(reg1->idx, 0, 8);
+ if (idx != 0xff)
+ wrmsrl(__BITS_VALUE(reg1->reg, 0, 16),
+ nhmex_mbox_shared_reg_config(box, idx));
+ idx = __BITS_VALUE(reg1->idx, 1, 8);
+ if (idx != 0xff)
+ wrmsrl(__BITS_VALUE(reg1->reg, 1, 16),
+ nhmex_mbox_shared_reg_config(box, idx));
+
+ if (reg2->idx != EXTRA_REG_NONE) {
+ wrmsrl(reg2->reg, 0);
+ if (reg2->config != ~0ULL) {
+ wrmsrl(reg2->reg + 1,
+ reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK);
+ wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK &
+ (reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT));
+ wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN);
+ }
+ }
+
+ wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
+}
+
+DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3");
+DEFINE_UNCORE_FORMAT_ATTR(storage_mode, storage_mode, "config:4-5");
+DEFINE_UNCORE_FORMAT_ATTR(wrap_mode, wrap_mode, "config:6");
+DEFINE_UNCORE_FORMAT_ATTR(flag_mode, flag_mode, "config:7");
+DEFINE_UNCORE_FORMAT_ATTR(inc_sel, inc_sel, "config:9-13");
+DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel, set_flag_sel, "config:19-21");
+DEFINE_UNCORE_FORMAT_ATTR(filter_cfg_en, filter_cfg_en, "config2:63");
+DEFINE_UNCORE_FORMAT_ATTR(filter_match, filter_match, "config2:0-33");
+DEFINE_UNCORE_FORMAT_ATTR(filter_mask, filter_mask, "config2:34-61");
+DEFINE_UNCORE_FORMAT_ATTR(dsp, dsp, "config1:0-31");
+DEFINE_UNCORE_FORMAT_ATTR(thr, thr, "config1:0-31");
+DEFINE_UNCORE_FORMAT_ATTR(fvc, fvc, "config1:0-31");
+DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31");
+DEFINE_UNCORE_FORMAT_ATTR(map, map, "config1:0-31");
+DEFINE_UNCORE_FORMAT_ATTR(iss, iss, "config1:0-31");
+DEFINE_UNCORE_FORMAT_ATTR(pld, pld, "config1:32-63");
+
+static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
+ &format_attr_count_mode.attr,
+ &format_attr_storage_mode.attr,
+ &format_attr_wrap_mode.attr,
+ &format_attr_flag_mode.attr,
+ &format_attr_inc_sel.attr,
+ &format_attr_set_flag_sel.attr,
+ &format_attr_filter_cfg_en.attr,
+ &format_attr_filter_match.attr,
+ &format_attr_filter_mask.attr,
+ &format_attr_dsp.attr,
+ &format_attr_thr.attr,
+ &format_attr_fvc.attr,
+ &format_attr_pgt.attr,
+ &format_attr_map.attr,
+ &format_attr_iss.attr,
+ &format_attr_pld.attr,
+ NULL,
+};
+
+static struct attribute_group nhmex_uncore_mbox_format_group = {
+ .name = "format",
+ .attrs = nhmex_uncore_mbox_formats_attr,
+};
+
+static struct uncore_event_desc nhmex_uncore_mbox_events[] = {
+ INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x2800"),
+ INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x2820"),
+ { /* end: all zeroes */ },
+};
+
+static struct uncore_event_desc wsmex_uncore_mbox_events[] = {
+ INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x5000"),
+ INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x5040"),
+ { /* end: all zeroes */ },
+};
+
+static struct intel_uncore_ops nhmex_uncore_mbox_ops = {
+ NHMEX_UNCORE_OPS_COMMON_INIT(),
+ .enable_event = nhmex_mbox_msr_enable_event,
+ .hw_config = nhmex_mbox_hw_config,
+ .get_constraint = nhmex_mbox_get_constraint,
+ .put_constraint = nhmex_mbox_put_constraint,
+};
+
+static struct intel_uncore_type nhmex_uncore_mbox = {
+ .name = "mbox",
+ .num_counters = 6,
+ .num_boxes = 2,
+ .perf_ctr_bits = 48,
+ .event_ctl = NHMEX_M0_MSR_PMU_CTL0,
+ .perf_ctr = NHMEX_M0_MSR_PMU_CNT0,
+ .event_mask = NHMEX_M_PMON_RAW_EVENT_MASK,
+ .box_ctl = NHMEX_M0_MSR_GLOBAL_CTL,
+ .msr_offset = NHMEX_M_MSR_OFFSET,
+ .pair_ctr_ctl = 1,
+ .num_shared_regs = 8,
+ .event_descs = nhmex_uncore_mbox_events,
+ .ops = &nhmex_uncore_mbox_ops,
+ .format_group = &nhmex_uncore_mbox_format_group,
+};
+
+static void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+
+ /* adjust the main event selector and extra register index */
+ if (reg1->idx % 2) {
+ reg1->idx--;
+ hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
+ } else {
+ reg1->idx++;
+ hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
+ }
+
+ /* adjust extra register config */
+ switch (reg1->idx % 6) {
+ case 2:
+ /* shift the 8~15 bits to the 0~7 bits */
+ reg1->config >>= 8;
+ break;
+ case 3:
+ /* shift the 0~7 bits to the 8~15 bits */
+ reg1->config <<= 8;
+ break;
+ }
+}
+
+/*
+ * Each rbox has 4 event sets which monitor QPI ports 0~3 or 4~7.
+ * An event set consists of 6 events; the 3rd and 4th events in
+ * an event set use the same extra register, so an event set uses
+ * 5 extra registers.
+ */
+static struct event_constraint *
+nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+ struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
+ struct intel_uncore_extra_reg *er;
+ unsigned long flags;
+ int idx, er_idx;
+ u64 config1;
+ bool ok = false;
+
+ if (!uncore_box_is_fake(box) && reg1->alloc)
+ return NULL;
+
+ idx = reg1->idx % 6;
+ config1 = reg1->config;
+again:
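+ /*
+ * map the event's index within a set to its shared register:
+ * indices 0..5 use shared regs 0,1,2,2,3,4, plus 5 per event set.
+ */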
+ er_idx = idx;
+ /* the 3rd and 4th events use the same extra register */
+ if (er_idx > 2)
+ er_idx--;
+ er_idx += (reg1->idx / 6) * 5;
+
+ er = &box->shared_regs[er_idx];
+ raw_spin_lock_irqsave(&er->lock, flags);
+ if (idx < 2) {
+ if (!atomic_read(&er->ref) || er->config == reg1->config) {
+ atomic_inc(&er->ref);
+ er->config = reg1->config;
+ ok = true;
+ }
+ } else if (idx == 2 || idx == 3) {
+ /*
+ * these two events use different fields in an extra register,
+ * the 0~7 bits and the 8~15 bits respectively.
+ */
+ u64 mask = 0xff << ((idx - 2) * 8);
+ if (!__BITS_VALUE(atomic_read(&er->ref), idx - 2, 8) ||
+ !((er->config ^ config1) & mask)) {
+ atomic_add(1 << ((idx - 2) * 8), &er->ref);
+ er->config &= ~mask;
+ er->config |= config1 & mask;
+ ok = true;
+ }
+ } else {
+ if (!atomic_read(&er->ref) ||
+ (er->config == (hwc->config >> 32) &&
+ er->config1 == reg1->config &&
+ er->config2 == reg2->config)) {
+ atomic_inc(&er->ref);
+ er->config = (hwc->config >> 32);
+ er->config1 = reg1->config;
+ er->config2 = reg2->config;
+ ok = true;
+ }
+ }
+ raw_spin_unlock_irqrestore(&er->lock, flags);
+
+ if (!ok) {
+ /*
+ * The Rbox events are always in pairs. The paired
+ * events are functionally identical, but use different
+ * extra registers. If we failed to take an extra
+ * register, try the alternative.
+ */
+ idx ^= 1;
+ if (idx != reg1->idx % 6) {
+ if (idx == 2)
+ config1 >>= 8;
+ else if (idx == 3)
+ config1 <<= 8;
+ goto again;
+ }
+ } else {
+ if (!uncore_box_is_fake(box)) {
+ if (idx != reg1->idx % 6)
+ nhmex_rbox_alter_er(box, event);
+ reg1->alloc = 1;
+ }
+ return NULL;
+ }
+ return &uncore_constraint_empty;
+}
+
+static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct intel_uncore_extra_reg *er;
+ struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+ int idx, er_idx;
+
+ if (uncore_box_is_fake(box) || !reg1->alloc)
+ return;
+
+ idx = reg1->idx % 6;
+ er_idx = idx;
+ if (er_idx > 2)
+ er_idx--;
+ er_idx += (reg1->idx / 6) * 5;
+
+ er = &box->shared_regs[er_idx];
+ if (idx == 2 || idx == 3)
+ atomic_sub(1 << ((idx - 2) * 8), &er->ref);
+ else
+ atomic_dec(&er->ref);
+
+ reg1->alloc = 0;
+}
+
+static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+ struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
+ int idx;
+
+ idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >>
+ NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
+ if (idx >= 0x18)
+ return -EINVAL;
+
+ reg1->idx = idx;
+ reg1->config = event->attr.config1;
+
+ switch (idx % 6) {
+ case 4:
+ case 5:
+ hwc->config |= event->attr.config & (~0ULL << 32);
+ reg2->config = event->attr.config2;
+ break;
+ }
+ return 0;
+}
+
+static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+ struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
+ int idx, port;
+
+ idx = reg1->idx;
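+ /* each rbox covers 4 ports; idx/6 selects the event set (= port) within it */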
+ port = idx / 6 + box->pmu->pmu_idx * 4;
+
+ switch (idx % 6) {
+ case 0:
+ wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config);
+ break;
+ case 1:
+ wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config);
+ break;
+ case 2:
+ case 3:
+ wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port),
+ uncore_shared_reg_config(box, 2 + (idx / 6) * 5));
+ break;
+ case 4:
+ wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port),
+ hwc->config >> 32);
+ wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config);
+ wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config);
+ break;
+ case 5:
+ wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port),
+ hwc->config >> 32);
+ wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config);
+ wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config);
+ break;
+ }
+
+ wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
+ (hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK));
+}
+
+DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config:32-63");
+DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config1:0-63");
+DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63");
+DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15");
+DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31");
+
+static struct attribute *nhmex_uncore_rbox_formats_attr[] = {
+ &format_attr_event5.attr,
+ &format_attr_xbr_mm_cfg.attr,
+ &format_attr_xbr_match.attr,
+ &format_attr_xbr_mask.attr,
+ &format_attr_qlx_cfg.attr,
+ &format_attr_iperf_cfg.attr,
+ NULL,
+};
+
+static struct attribute_group nhmex_uncore_rbox_format_group = {
+ .name = "format",
+ .attrs = nhmex_uncore_rbox_formats_attr,
+};
+
+static struct uncore_event_desc nhmex_uncore_rbox_events[] = {
+ INTEL_UNCORE_EVENT_DESC(qpi0_flit_send, "event=0x0,iperf_cfg=0x80000000"),
+ INTEL_UNCORE_EVENT_DESC(qpi1_filt_send, "event=0x6,iperf_cfg=0x80000000"),
+ INTEL_UNCORE_EVENT_DESC(qpi0_idle_filt, "event=0x0,iperf_cfg=0x40000000"),
+ INTEL_UNCORE_EVENT_DESC(qpi1_idle_filt, "event=0x6,iperf_cfg=0x40000000"),
+ INTEL_UNCORE_EVENT_DESC(qpi0_date_response, "event=0x0,iperf_cfg=0xc4"),
+ INTEL_UNCORE_EVENT_DESC(qpi1_date_response, "event=0x6,iperf_cfg=0xc4"),
+ { /* end: all zeroes */ },
+};
+
+static struct intel_uncore_ops nhmex_uncore_rbox_ops = {
+ NHMEX_UNCORE_OPS_COMMON_INIT(),
+ .enable_event = nhmex_rbox_msr_enable_event,
+ .hw_config = nhmex_rbox_hw_config,
+ .get_constraint = nhmex_rbox_get_constraint,
+ .put_constraint = nhmex_rbox_put_constraint,
+};
+
+static struct intel_uncore_type nhmex_uncore_rbox = {
+ .name = "rbox",
+ .num_counters = 8,
+ .num_boxes = 2,
+ .perf_ctr_bits = 48,
+ .event_ctl = NHMEX_R_MSR_PMON_CTL0,
+ .perf_ctr = NHMEX_R_MSR_PMON_CNT0,
+ .event_mask = NHMEX_R_PMON_RAW_EVENT_MASK,
+ .box_ctl = NHMEX_R_MSR_GLOBAL_CTL,
+ .msr_offset = NHMEX_R_MSR_OFFSET,
+ .pair_ctr_ctl = 1,
+ .num_shared_regs = 20,
+ .event_descs = nhmex_uncore_rbox_events,
+ .ops = &nhmex_uncore_rbox_ops,
+ .format_group = &nhmex_uncore_rbox_format_group
+};
+
+static struct intel_uncore_type *nhmex_msr_uncores[] = {
+ &nhmex_uncore_ubox,
+ &nhmex_uncore_cbox,
+ &nhmex_uncore_bbox,
+ &nhmex_uncore_sbox,
+ &nhmex_uncore_mbox,
+ &nhmex_uncore_rbox,
+ &nhmex_uncore_wbox,
+ NULL,
+};
+
+void nhmex_uncore_cpu_init(void)
+{
+ if (boot_cpu_data.x86_model == 46)
+ uncore_nhmex = true;
+ else
+ nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events;
+ if (nhmex_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
+ nhmex_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
+ uncore_msr_uncores = nhmex_msr_uncores;
+}
+/* end of Nehalem-EX uncore support */
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c
new file mode 100644
index 000000000000..3001015b755c
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c
@@ -0,0 +1,636 @@
+/* Nehalem/Sandy Bridge/Haswell uncore support */
+#include "perf_event_intel_uncore.h"
+
+/* SNB event control */
+#define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff
+#define SNB_UNC_CTL_UMASK_MASK 0x0000ff00
+#define SNB_UNC_CTL_EDGE_DET (1 << 18)
+#define SNB_UNC_CTL_EN (1 << 22)
+#define SNB_UNC_CTL_INVERT (1 << 23)
+#define SNB_UNC_CTL_CMASK_MASK 0x1f000000
+#define NHM_UNC_CTL_CMASK_MASK 0xff000000
+#define NHM_UNC_FIXED_CTR_CTL_EN (1 << 0)
+
+#define SNB_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \
+ SNB_UNC_CTL_UMASK_MASK | \
+ SNB_UNC_CTL_EDGE_DET | \
+ SNB_UNC_CTL_INVERT | \
+ SNB_UNC_CTL_CMASK_MASK)
+
+#define NHM_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \
+ SNB_UNC_CTL_UMASK_MASK | \
+ SNB_UNC_CTL_EDGE_DET | \
+ SNB_UNC_CTL_INVERT | \
+ NHM_UNC_CTL_CMASK_MASK)
+
+/* SNB global control register */
+#define SNB_UNC_PERF_GLOBAL_CTL 0x391
+#define SNB_UNC_FIXED_CTR_CTRL 0x394
+#define SNB_UNC_FIXED_CTR 0x395
+
+/* SNB uncore global control */
+#define SNB_UNC_GLOBAL_CTL_CORE_ALL ((1 << 4) - 1)
+#define SNB_UNC_GLOBAL_CTL_EN (1 << 29)
+
+/* SNB Cbo register */
+#define SNB_UNC_CBO_0_PERFEVTSEL0 0x700
+#define SNB_UNC_CBO_0_PER_CTR0 0x706
+#define SNB_UNC_CBO_MSR_OFFSET 0x10
+
+/* NHM global control register */
+#define NHM_UNC_PERF_GLOBAL_CTL 0x391
+#define NHM_UNC_FIXED_CTR 0x394
+#define NHM_UNC_FIXED_CTR_CTRL 0x395
+
+/* NHM uncore global control */
+#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL ((1ULL << 8) - 1)
+#define NHM_UNC_GLOBAL_CTL_EN_FC (1ULL << 32)
+
+/* NHM uncore register */
+#define NHM_UNC_PERFEVTSEL0 0x3c0
+#define NHM_UNC_UNCORE_PMC0 0x3b0
+
+DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
+DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
+DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
+DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
+DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
+DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
+
+/* Sandy Bridge uncore support */
+static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (hwc->idx < UNCORE_PMC_IDX_FIXED)
+ wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
+ else
+ wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
+}
+
+static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
+{
+ wrmsrl(event->hw.config_base, 0);
+}
+
+static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
+{
+ if (box->pmu->pmu_idx == 0) {
+ wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
+ SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
+ }
+}
+
+static struct uncore_event_desc snb_uncore_events[] = {
+ INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
+ { /* end: all zeroes */ },
+};
+
+static struct attribute *snb_uncore_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_umask.attr,
+ &format_attr_edge.attr,
+ &format_attr_inv.attr,
+ &format_attr_cmask5.attr,
+ NULL,
+};
+
+static struct attribute_group snb_uncore_format_group = {
+ .name = "format",
+ .attrs = snb_uncore_formats_attr,
+};
+
+static struct intel_uncore_ops snb_uncore_msr_ops = {
+ .init_box = snb_uncore_msr_init_box,
+ .disable_event = snb_uncore_msr_disable_event,
+ .enable_event = snb_uncore_msr_enable_event,
+ .read_counter = uncore_msr_read_counter,
+};
+
+static struct event_constraint snb_uncore_cbox_constraints[] = {
+ UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
+ UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
+ EVENT_CONSTRAINT_END
+};
+
+static struct intel_uncore_type snb_uncore_cbox = {
+ .name = "cbox",
+ .num_counters = 2,
+ .num_boxes = 4,
+ .perf_ctr_bits = 44,
+ .fixed_ctr_bits = 48,
+ .perf_ctr = SNB_UNC_CBO_0_PER_CTR0,
+ .event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0,
+ .fixed_ctr = SNB_UNC_FIXED_CTR,
+ .fixed_ctl = SNB_UNC_FIXED_CTR_CTRL,
+ .single_fixed = 1,
+ .event_mask = SNB_UNC_RAW_EVENT_MASK,
+ .msr_offset = SNB_UNC_CBO_MSR_OFFSET,
+ .constraints = snb_uncore_cbox_constraints,
+ .ops = &snb_uncore_msr_ops,
+ .format_group = &snb_uncore_format_group,
+ .event_descs = snb_uncore_events,
+};
+
+static struct intel_uncore_type *snb_msr_uncores[] = {
+ &snb_uncore_cbox,
+ NULL,
+};
+
+void snb_uncore_cpu_init(void)
+{
+ uncore_msr_uncores = snb_msr_uncores;
+ if (snb_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
+ snb_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
+}
+
+enum {
+ SNB_PCI_UNCORE_IMC,
+};
+
+static struct uncore_event_desc snb_uncore_imc_events[] = {
+ INTEL_UNCORE_EVENT_DESC(data_reads, "event=0x01"),
+ INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
+ INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),
+
+ INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
+ INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
+ INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),
+
+ { /* end: all zeroes */ },
+};
+
+#define SNB_UNCORE_PCI_IMC_EVENT_MASK 0xff
+#define SNB_UNCORE_PCI_IMC_BAR_OFFSET 0x48
+
+/* page size multiple covering all config regs */
+#define SNB_UNCORE_PCI_IMC_MAP_SIZE 0x6000
+
+#define SNB_UNCORE_PCI_IMC_DATA_READS 0x1
+#define SNB_UNCORE_PCI_IMC_DATA_READS_BASE 0x5050
+#define SNB_UNCORE_PCI_IMC_DATA_WRITES 0x2
+#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE 0x5054
+#define SNB_UNCORE_PCI_IMC_CTR_BASE SNB_UNCORE_PCI_IMC_DATA_READS_BASE
+
+static struct attribute *snb_uncore_imc_formats_attr[] = {
+ &format_attr_event.attr,
+ NULL,
+};
+
+static struct attribute_group snb_uncore_imc_format_group = {
+ .name = "format",
+ .attrs = snb_uncore_imc_formats_attr,
+};
+
+static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
+{
+ struct pci_dev *pdev = box->pci_dev;
+ int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
+ resource_size_t addr;
+ u32 pci_dword;
+
+ pci_read_config_dword(pdev, where, &pci_dword);
+ addr = pci_dword;
+
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ pci_read_config_dword(pdev, where + 4, &pci_dword);
+ addr |= ((resource_size_t)pci_dword << 32);
+#endif
+
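+ /* the BAR's low bits hold flags; mask them off to get the page-aligned base */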
+ addr &= ~(PAGE_SIZE - 1);
+
+ box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE);
+ box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
+}
+
+static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
+{}
+
+static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
+{}
+
+static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
+{}
+
+static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
+{}
+
+static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
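+ /* the free-running IMC counters are memory-mapped; read the 32-bit value directly */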
+ return (u64)*(unsigned int *)(box->io_addr + hwc->event_base);
+}
+
+/*
+ * Custom event_init() function: the IMC has its own fixed, free-running
+ * counters, so we do not want to conflict with the generic uncore logic.
+ * This also simplifies processing.
+ */
+static int snb_uncore_imc_event_init(struct perf_event *event)
+{
+ struct intel_uncore_pmu *pmu;
+ struct intel_uncore_box *box;
+ struct hw_perf_event *hwc = &event->hw;
+ u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
+ int idx, base;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ pmu = uncore_event_to_pmu(event);
+ /* no device found for this pmu */
+ if (pmu->func_id < 0)
+ return -ENOENT;
+
+ /* Sampling not supported yet */
+ if (hwc->sample_period)
+ return -EINVAL;
+
+ /* unsupported modes and filters */
+ if (event->attr.exclude_user ||
+ event->attr.exclude_kernel ||
+ event->attr.exclude_hv ||
+ event->attr.exclude_idle ||
+ event->attr.exclude_host ||
+ event->attr.exclude_guest ||
+ event->attr.sample_period) /* no sampling */
+ return -EINVAL;
+
+ /*
+ * Place all uncore events for a particular physical package
+ * onto a single cpu
+ */
+ if (event->cpu < 0)
+ return -EINVAL;
+
+ /* check only supported bits are set */
+ if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
+ return -EINVAL;
+
+ box = uncore_pmu_to_box(pmu, event->cpu);
+ if (!box || box->cpu < 0)
+ return -EINVAL;
+
+ event->cpu = box->cpu;
+
+ event->hw.idx = -1;
+ event->hw.last_tag = ~0ULL;
+ event->hw.extra_reg.idx = EXTRA_REG_NONE;
+ event->hw.branch_reg.idx = EXTRA_REG_NONE;
+ /*
+ * check event is known (whitelist, determines counter)
+ */
+ switch (cfg) {
+ case SNB_UNCORE_PCI_IMC_DATA_READS:
+ base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
+ idx = UNCORE_PMC_IDX_FIXED;
+ break;
+ case SNB_UNCORE_PCI_IMC_DATA_WRITES:
+ base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
+ idx = UNCORE_PMC_IDX_FIXED + 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* must be done before validate_group */
+ event->hw.event_base = base;
+ event->hw.config = cfg;
+ event->hw.idx = idx;
+
+ /* no group validation needed, we have free running counters */
+
+ return 0;
+}
+
+static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
+{
+ return 0;
+}
+
+static void snb_uncore_imc_event_start(struct perf_event *event, int flags)
+{
+ struct intel_uncore_box *box = uncore_event_to_box(event);
+ u64 count;
+
+ if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
+ return;
+
+ event->hw.state = 0;
+ box->n_active++;
+
+ list_add_tail(&event->active_entry, &box->active_list);
+
+ count = snb_uncore_imc_read_counter(box, event);
+ local64_set(&event->hw.prev_count, count);
+
+ if (box->n_active == 1)
+ uncore_pmu_start_hrtimer(box);
+}
+
+static void snb_uncore_imc_event_stop(struct perf_event *event, int flags)
+{
+ struct intel_uncore_box *box = uncore_event_to_box(event);
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (!(hwc->state & PERF_HES_STOPPED)) {
+ box->n_active--;
+
+ WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
+ hwc->state |= PERF_HES_STOPPED;
+
+ list_del(&event->active_entry);
+
+ if (box->n_active == 0)
+ uncore_pmu_cancel_hrtimer(box);
+ }
+
+ if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
+ /*
+ * Drain the remaining delta count out of an event
+ * that we are disabling:
+ */
+ uncore_perf_event_update(box, event);
+ hwc->state |= PERF_HES_UPTODATE;
+ }
+}
+
+static int snb_uncore_imc_event_add(struct perf_event *event, int flags)
+{
+ struct intel_uncore_box *box = uncore_event_to_box(event);
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (!box)
+ return -ENODEV;
+
+ hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+ if (!(flags & PERF_EF_START))
+ hwc->state |= PERF_HES_ARCH;
+
+ snb_uncore_imc_event_start(event, 0);
+
+ box->n_events++;
+
+ return 0;
+}
+
+static void snb_uncore_imc_event_del(struct perf_event *event, int flags)
+{
+ struct intel_uncore_box *box = uncore_event_to_box(event);
+ int i;
+
+ snb_uncore_imc_event_stop(event, PERF_EF_UPDATE);
+
+ for (i = 0; i < box->n_events; i++) {
+ if (event == box->event_list[i]) {
+ --box->n_events;
+ break;
+ }
+ }
+}
+
+static int snb_pci2phy_map_init(int devid)
+{
+ struct pci_dev *dev = NULL;
+ int bus;
+
+ dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
+ if (!dev)
+ return -ENOTTY;
+
+ bus = dev->bus->number;
+
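+ /* map the IMC device's bus number to physical package id 0 (single package) */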
+ uncore_pcibus_to_physid[bus] = 0;
+
+ pci_dev_put(dev);
+
+ return 0;
+}
+
+static struct pmu snb_uncore_imc_pmu = {
+ .task_ctx_nr = perf_invalid_context,
+ .event_init = snb_uncore_imc_event_init,
+ .add = snb_uncore_imc_event_add,
+ .del = snb_uncore_imc_event_del,
+ .start = snb_uncore_imc_event_start,
+ .stop = snb_uncore_imc_event_stop,
+ .read = uncore_pmu_event_read,
+};
+
+static struct intel_uncore_ops snb_uncore_imc_ops = {
+ .init_box = snb_uncore_imc_init_box,
+ .enable_box = snb_uncore_imc_enable_box,
+ .disable_box = snb_uncore_imc_disable_box,
+ .disable_event = snb_uncore_imc_disable_event,
+ .enable_event = snb_uncore_imc_enable_event,
+ .hw_config = snb_uncore_imc_hw_config,
+ .read_counter = snb_uncore_imc_read_counter,
+};
+
+static struct intel_uncore_type snb_uncore_imc = {
+ .name = "imc",
+ .num_counters = 2,
+ .num_boxes = 1,
+ .fixed_ctr_bits = 32,
+ .fixed_ctr = SNB_UNCORE_PCI_IMC_CTR_BASE,
+ .event_descs = snb_uncore_imc_events,
+ .format_group = &snb_uncore_imc_format_group,
+ .perf_ctr = SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
+ .event_mask = SNB_UNCORE_PCI_IMC_EVENT_MASK,
+ .ops = &snb_uncore_imc_ops,
+ .pmu = &snb_uncore_imc_pmu,
+};
+
+static struct intel_uncore_type *snb_pci_uncores[] = {
+ [SNB_PCI_UNCORE_IMC] = &snb_uncore_imc,
+ NULL,
+};
+
+static const struct pci_device_id snb_uncore_pci_ids[] = {
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* end: all zeroes */ },
+};
+
+static const struct pci_device_id ivb_uncore_pci_ids[] = {
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_E3_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* end: all zeroes */ },
+};
+
+static const struct pci_device_id hsw_uncore_pci_ids[] = {
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* end: all zeroes */ },
+};
+
+static struct pci_driver snb_uncore_pci_driver = {
+ .name = "snb_uncore",
+ .id_table = snb_uncore_pci_ids,
+};
+
+static struct pci_driver ivb_uncore_pci_driver = {
+ .name = "ivb_uncore",
+ .id_table = ivb_uncore_pci_ids,
+};
+
+static struct pci_driver hsw_uncore_pci_driver = {
+ .name = "hsw_uncore",
+ .id_table = hsw_uncore_pci_ids,
+};
+
+struct imc_uncore_pci_dev {
+ __u32 pci_id;
+ struct pci_driver *driver;
+};
+#define IMC_DEV(a, d) \
+ { .pci_id = PCI_DEVICE_ID_INTEL_##a, .driver = (d) }
+
+static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
+ IMC_DEV(SNB_IMC, &snb_uncore_pci_driver),
+ IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver), /* 3rd Gen Core processor */
+ IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */
+ IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core Processor */
+ { /* end marker */ }
+};
+
+
+#define for_each_imc_pci_id(x, t) \
+ for (x = (t); (x)->pci_id; x++)
+
+static struct pci_driver *imc_uncore_find_dev(void)
+{
+ const struct imc_uncore_pci_dev *p;
+ int ret;
+
+ for_each_imc_pci_id(p, desktop_imc_pci_ids) {
+ ret = snb_pci2phy_map_init(p->pci_id);
+ if (ret == 0)
+ return p->driver;
+ }
+ return NULL;
+}
+
+static int imc_uncore_pci_init(void)
+{
+ struct pci_driver *imc_drv = imc_uncore_find_dev();
+
+ if (!imc_drv)
+ return -ENODEV;
+
+ uncore_pci_uncores = snb_pci_uncores;
+ uncore_pci_driver = imc_drv;
+
+ return 0;
+}
+
+int snb_uncore_pci_init(void)
+{
+ return imc_uncore_pci_init();
+}
+
+int ivb_uncore_pci_init(void)
+{
+ return imc_uncore_pci_init();
+}
+int hsw_uncore_pci_init(void)
+{
+ return imc_uncore_pci_init();
+}
+
+/* end of Sandy Bridge uncore support */
+
+/* Nehalem uncore support */
+static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
+{
+ wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
+}
+
+static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
+{
+ wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
+}
+
+static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (hwc->idx < UNCORE_PMC_IDX_FIXED)
+ wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
+ else
+ wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
+}
+
+static struct attribute *nhm_uncore_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_umask.attr,
+ &format_attr_edge.attr,
+ &format_attr_inv.attr,
+ &format_attr_cmask8.attr,
+ NULL,
+};
+
+static struct attribute_group nhm_uncore_format_group = {
+ .name = "format",
+ .attrs = nhm_uncore_formats_attr,
+};
+
+static struct uncore_event_desc nhm_uncore_events[] = {
+ INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
+ INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any, "event=0x2f,umask=0x0f"),
+ INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any, "event=0x2c,umask=0x0f"),
+ INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads, "event=0x20,umask=0x01"),
+ INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes, "event=0x20,umask=0x02"),
+ INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads, "event=0x20,umask=0x04"),
+ INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
+ INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads, "event=0x20,umask=0x10"),
+ INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes, "event=0x20,umask=0x20"),
+ { /* end: all zeroes */ },
+};
+
+static struct intel_uncore_ops nhm_uncore_msr_ops = {
+ .disable_box = nhm_uncore_msr_disable_box,
+ .enable_box = nhm_uncore_msr_enable_box,
+ .disable_event = snb_uncore_msr_disable_event,
+ .enable_event = nhm_uncore_msr_enable_event,
+ .read_counter = uncore_msr_read_counter,
+};
+
+static struct intel_uncore_type nhm_uncore = {
+ .name = "",
+ .num_counters = 8,
+ .num_boxes = 1,
+ .perf_ctr_bits = 48,
+ .fixed_ctr_bits = 48,
+ .event_ctl = NHM_UNC_PERFEVTSEL0,
+ .perf_ctr = NHM_UNC_UNCORE_PMC0,
+ .fixed_ctr = NHM_UNC_FIXED_CTR,
+ .fixed_ctl = NHM_UNC_FIXED_CTR_CTRL,
+ .event_mask = NHM_UNC_RAW_EVENT_MASK,
+ .event_descs = nhm_uncore_events,
+ .ops = &nhm_uncore_msr_ops,
+ .format_group = &nhm_uncore_format_group,
+};
+
+static struct intel_uncore_type *nhm_msr_uncores[] = {
+ &nhm_uncore,
+ NULL,
+};
+
+void nhm_uncore_cpu_init(void)
+{
+ uncore_msr_uncores = nhm_msr_uncores;
+}
+
+/* end of Nehalem uncore support */
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
new file mode 100644
index 000000000000..adf138eac85c
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
@@ -0,0 +1,2258 @@
+/* Sandy Bridge-EP/IvyTown uncore support */
+#include "perf_event_intel_uncore.h"
+
+
+/* SNB-EP Box level control */
+#define SNBEP_PMON_BOX_CTL_RST_CTRL (1 << 0)
+#define SNBEP_PMON_BOX_CTL_RST_CTRS (1 << 1)
+#define SNBEP_PMON_BOX_CTL_FRZ (1 << 8)
+#define SNBEP_PMON_BOX_CTL_FRZ_EN (1 << 16)
+#define SNBEP_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \
+ SNBEP_PMON_BOX_CTL_RST_CTRS | \
+ SNBEP_PMON_BOX_CTL_FRZ_EN)
+/* SNB-EP event control */
+#define SNBEP_PMON_CTL_EV_SEL_MASK 0x000000ff
+#define SNBEP_PMON_CTL_UMASK_MASK 0x0000ff00
+#define SNBEP_PMON_CTL_RST (1 << 17)
+#define SNBEP_PMON_CTL_EDGE_DET (1 << 18)
+#define SNBEP_PMON_CTL_EV_SEL_EXT (1 << 21)
+#define SNBEP_PMON_CTL_EN (1 << 22)
+#define SNBEP_PMON_CTL_INVERT (1 << 23)
+#define SNBEP_PMON_CTL_TRESH_MASK 0xff000000
+#define SNBEP_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \
+ SNBEP_PMON_CTL_UMASK_MASK | \
+ SNBEP_PMON_CTL_EDGE_DET | \
+ SNBEP_PMON_CTL_INVERT | \
+ SNBEP_PMON_CTL_TRESH_MASK)
+
+/* SNB-EP Ubox event control */
+#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK 0x1f000000
+#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK \
+ (SNBEP_PMON_CTL_EV_SEL_MASK | \
+ SNBEP_PMON_CTL_UMASK_MASK | \
+ SNBEP_PMON_CTL_EDGE_DET | \
+ SNBEP_PMON_CTL_INVERT | \
+ SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
+
+#define SNBEP_CBO_PMON_CTL_TID_EN (1 << 19)
+#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \
+ SNBEP_CBO_PMON_CTL_TID_EN)
+
+/* SNB-EP PCU event control */
+#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK 0x0000c000
+#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK 0x1f000000
+#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT (1 << 30)
+#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET (1 << 31)
+#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK \
+ (SNBEP_PMON_CTL_EV_SEL_MASK | \
+ SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
+ SNBEP_PMON_CTL_EDGE_DET | \
+ SNBEP_PMON_CTL_EV_SEL_EXT | \
+ SNBEP_PMON_CTL_INVERT | \
+ SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
+ SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
+ SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
+
+#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK \
+ (SNBEP_PMON_RAW_EVENT_MASK | \
+ SNBEP_PMON_CTL_EV_SEL_EXT)
+
+/* SNB-EP pci control register */
+#define SNBEP_PCI_PMON_BOX_CTL 0xf4
+#define SNBEP_PCI_PMON_CTL0 0xd8
+/* SNB-EP pci counter register */
+#define SNBEP_PCI_PMON_CTR0 0xa0
+
+/* SNB-EP home agent register */
+#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0 0x40
+#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1 0x44
+#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH 0x48
+/* SNB-EP memory controller register */
+#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL 0xf0
+#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR 0xd0
+/* SNB-EP QPI register */
+#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0 0x228
+#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1 0x22c
+#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0 0x238
+#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1 0x23c
+
+/* SNB-EP Ubox register */
+#define SNBEP_U_MSR_PMON_CTR0 0xc16
+#define SNBEP_U_MSR_PMON_CTL0 0xc10
+
+#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL 0xc08
+#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR 0xc09
+
+/* SNB-EP Cbo register */
+#define SNBEP_C0_MSR_PMON_CTR0 0xd16
+#define SNBEP_C0_MSR_PMON_CTL0 0xd10
+#define SNBEP_C0_MSR_PMON_BOX_CTL 0xd04
+#define SNBEP_C0_MSR_PMON_BOX_FILTER 0xd14
+#define SNBEP_CBO_MSR_OFFSET 0x20
+
+#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID 0x1f
+#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID 0x3fc00
+#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE 0x7c0000
+#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC 0xff800000
+
+#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) { \
+ .event = (e), \
+ .msr = SNBEP_C0_MSR_PMON_BOX_FILTER, \
+ .config_mask = (m), \
+ .idx = (i) \
+}
+
+/* SNB-EP PCU register */
+#define SNBEP_PCU_MSR_PMON_CTR0 0xc36
+#define SNBEP_PCU_MSR_PMON_CTL0 0xc30
+#define SNBEP_PCU_MSR_PMON_BOX_CTL 0xc24
+#define SNBEP_PCU_MSR_PMON_BOX_FILTER 0xc34
+#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK 0xffffffff
+#define SNBEP_PCU_MSR_CORE_C3_CTR 0x3fc
+#define SNBEP_PCU_MSR_CORE_C6_CTR 0x3fd
+
+/* IVBEP event control */
+#define IVBEP_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \
+ SNBEP_PMON_BOX_CTL_RST_CTRS)
+#define IVBEP_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \
+ SNBEP_PMON_CTL_UMASK_MASK | \
+ SNBEP_PMON_CTL_EDGE_DET | \
+ SNBEP_PMON_CTL_TRESH_MASK)
+/* IVBEP Ubox */
+#define IVBEP_U_MSR_PMON_GLOBAL_CTL 0xc00
+#define IVBEP_U_PMON_GLOBAL_FRZ_ALL (1 << 31)
+#define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL (1 << 29)
+
+#define IVBEP_U_MSR_PMON_RAW_EVENT_MASK \
+ (SNBEP_PMON_CTL_EV_SEL_MASK | \
+ SNBEP_PMON_CTL_UMASK_MASK | \
+ SNBEP_PMON_CTL_EDGE_DET | \
+ SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
+/* IVBEP Cbo */
+#define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK (IVBEP_PMON_RAW_EVENT_MASK | \
+ SNBEP_CBO_PMON_CTL_TID_EN)
+
+#define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID (0x1fULL << 0)
+#define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK (0xfULL << 5)
+#define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE (0x3fULL << 17)
+#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID (0xffffULL << 32)
+#define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC (0x1ffULL << 52)
+#define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6 (0x1ULL << 61)
+#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC (0x1ULL << 62)
+#define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC (0x1ULL << 63)
+
+/* IVBEP home agent */
+#define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST (1 << 16)
+#define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK \
+ (IVBEP_PMON_RAW_EVENT_MASK | \
+ IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
+/* IVBEP PCU */
+#define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK \
+ (SNBEP_PMON_CTL_EV_SEL_MASK | \
+ SNBEP_PMON_CTL_EV_SEL_EXT | \
+ SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
+ SNBEP_PMON_CTL_EDGE_DET | \
+ SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
+ SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
+ SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
+/* IVBEP QPI */
+#define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK \
+ (IVBEP_PMON_RAW_EVENT_MASK | \
+ SNBEP_PMON_CTL_EV_SEL_EXT)
+
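+/* extract the i-th n-bit wide field of x */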
+#define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \
+ ((1ULL << (n)) - 1)))
+
+/* Haswell-EP Ubox */
+#define HSWEP_U_MSR_PMON_CTR0 0x705
+#define HSWEP_U_MSR_PMON_CTL0 0x709
+#define HSWEP_U_MSR_PMON_FILTER 0x707
+
+#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL 0x703
+#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR 0x704
+
+#define HSWEP_U_MSR_PMON_BOX_FILTER_TID (0x1 << 0)
+#define HSWEP_U_MSR_PMON_BOX_FILTER_CID (0x1fULL << 1)
+#define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
+ (HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
+ HSWEP_U_MSR_PMON_BOX_FILTER_CID)
+
+/* Haswell-EP CBo */
+#define HSWEP_C0_MSR_PMON_CTR0 0xe08
+#define HSWEP_C0_MSR_PMON_CTL0 0xe01
+#define HSWEP_C0_MSR_PMON_BOX_CTL 0xe00
+#define HSWEP_C0_MSR_PMON_BOX_FILTER0 0xe05
+#define HSWEP_CBO_MSR_OFFSET 0x10
+
+
+#define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID (0x3fULL << 0)
+#define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK (0xfULL << 6)
+#define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE (0x7fULL << 17)
+#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID (0xffffULL << 32)
+#define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC (0x1ffULL << 52)
+#define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6 (0x1ULL << 61)
+#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC (0x1ULL << 62)
+#define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC (0x1ULL << 63)
+
+
+/* Haswell-EP Sbox */
+#define HSWEP_S0_MSR_PMON_CTR0 0x726
+#define HSWEP_S0_MSR_PMON_CTL0 0x721
+#define HSWEP_S0_MSR_PMON_BOX_CTL 0x720
+#define HSWEP_SBOX_MSR_OFFSET 0xa
+#define HSWEP_S_MSR_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \
+ SNBEP_CBO_PMON_CTL_TID_EN)
+
+/* Haswell-EP PCU */
+#define HSWEP_PCU_MSR_PMON_CTR0 0x717
+#define HSWEP_PCU_MSR_PMON_CTL0 0x711
+#define HSWEP_PCU_MSR_PMON_BOX_CTL 0x710
+#define HSWEP_PCU_MSR_PMON_BOX_FILTER 0x715
+
+
+DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
+DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
+DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
+DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
+DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
+DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
+DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
+DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
+DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
+DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
+DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
+DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
+DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
+DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
+DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
+DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
+DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
+DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
+DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
+DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
+DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
+DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
+DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
+DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
+DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
+DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
+DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
+DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
+DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
+DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
+DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
+DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
+DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
+DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
+DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
+DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
+DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
+DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
+DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
+DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
+DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
+DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
+DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
+DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
+DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
+DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
+DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
+DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
+DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
+
+static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
+{
+ struct pci_dev *pdev = box->pci_dev;
+ int box_ctl = uncore_pci_box_ctl(box);
+ u32 config = 0;
+
+ if (!pci_read_config_dword(pdev, box_ctl, &config)) {
+ config |= SNBEP_PMON_BOX_CTL_FRZ;
+ pci_write_config_dword(pdev, box_ctl, config);
+ }
+}
+
+static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
+{
+ struct pci_dev *pdev = box->pci_dev;
+ int box_ctl = uncore_pci_box_ctl(box);
+ u32 config = 0;
+
+ if (!pci_read_config_dword(pdev, box_ctl, &config)) {
+ config &= ~SNBEP_PMON_BOX_CTL_FRZ;
+ pci_write_config_dword(pdev, box_ctl, config);
+ }
+}
+
+static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct pci_dev *pdev = box->pci_dev;
+ struct hw_perf_event *hwc = &event->hw;
+
+ pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
+}
+
+static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct pci_dev *pdev = box->pci_dev;
+ struct hw_perf_event *hwc = &event->hw;
+
+ pci_write_config_dword(pdev, hwc->config_base, hwc->config);
+}
+
+static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct pci_dev *pdev = box->pci_dev;
+ struct hw_perf_event *hwc = &event->hw;
+ u64 count = 0;
+
+ pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
+ pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
+
+ return count;
+}
+
+static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
+{
+ struct pci_dev *pdev = box->pci_dev;
+
+ pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, SNBEP_PMON_BOX_CTL_INT);
+}
+
+static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
+{
+ u64 config;
+ unsigned msr;
+
+ msr = uncore_msr_box_ctl(box);
+ if (msr) {
+ rdmsrl(msr, config);
+ config |= SNBEP_PMON_BOX_CTL_FRZ;
+ wrmsrl(msr, config);
+ }
+}
+
+static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
+{
+ u64 config;
+ unsigned msr;
+
+ msr = uncore_msr_box_ctl(box);
+ if (msr) {
+ rdmsrl(msr, config);
+ config &= ~SNBEP_PMON_BOX_CTL_FRZ;
+ wrmsrl(msr, config);
+ }
+}
+
+static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+
+ if (reg1->idx != EXTRA_REG_NONE)
+ wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));
+
+ wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
+}
+
+static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ wrmsrl(hwc->config_base, hwc->config);
+}
+
+static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
+{
+ unsigned msr = uncore_msr_box_ctl(box);
+
+ if (msr)
+ wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
+}
+
+static struct attribute *snbep_uncore_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_umask.attr,
+ &format_attr_edge.attr,
+ &format_attr_inv.attr,
+ &format_attr_thresh8.attr,
+ NULL,
+};
+
+static struct attribute *snbep_uncore_ubox_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_umask.attr,
+ &format_attr_edge.attr,
+ &format_attr_inv.attr,
+ &format_attr_thresh5.attr,
+ NULL,
+};
+
+static struct attribute *snbep_uncore_cbox_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_umask.attr,
+ &format_attr_edge.attr,
+ &format_attr_tid_en.attr,
+ &format_attr_inv.attr,
+ &format_attr_thresh8.attr,
+ &format_attr_filter_tid.attr,
+ &format_attr_filter_nid.attr,
+ &format_attr_filter_state.attr,
+ &format_attr_filter_opc.attr,
+ NULL,
+};
+
+static struct attribute *snbep_uncore_pcu_formats_attr[] = {
+ &format_attr_event_ext.attr,
+ &format_attr_occ_sel.attr,
+ &format_attr_edge.attr,
+ &format_attr_inv.attr,
+ &format_attr_thresh5.attr,
+ &format_attr_occ_invert.attr,
+ &format_attr_occ_edge.attr,
+ &format_attr_filter_band0.attr,
+ &format_attr_filter_band1.attr,
+ &format_attr_filter_band2.attr,
+ &format_attr_filter_band3.attr,
+ NULL,
+};
+
+static struct attribute *snbep_uncore_qpi_formats_attr[] = {
+ &format_attr_event_ext.attr,
+ &format_attr_umask.attr,
+ &format_attr_edge.attr,
+ &format_attr_inv.attr,
+ &format_attr_thresh8.attr,
+ &format_attr_match_rds.attr,
+ &format_attr_match_rnid30.attr,
+ &format_attr_match_rnid4.attr,
+ &format_attr_match_dnid.attr,
+ &format_attr_match_mc.attr,
+ &format_attr_match_opc.attr,
+ &format_attr_match_vnw.attr,
+ &format_attr_match0.attr,
+ &format_attr_match1.attr,
+ &format_attr_mask_rds.attr,
+ &format_attr_mask_rnid30.attr,
+ &format_attr_mask_rnid4.attr,
+ &format_attr_mask_dnid.attr,
+ &format_attr_mask_mc.attr,
+ &format_attr_mask_opc.attr,
+ &format_attr_mask_vnw.attr,
+ &format_attr_mask0.attr,
+ &format_attr_mask1.attr,
+ NULL,
+};
+
+static struct uncore_event_desc snbep_uncore_imc_events[] = {
+ INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
+ INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"),
+ INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
+ { /* end: all zeroes */ },
+};
+
+static struct uncore_event_desc snbep_uncore_qpi_events[] = {
+ INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"),
+ INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
+ INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x102,umask=0x08"),
+ INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x103,umask=0x04"),
+ { /* end: all zeroes */ },
+};
+
+static struct attribute_group snbep_uncore_format_group = {
+ .name = "format",
+ .attrs = snbep_uncore_formats_attr,
+};
+
+static struct attribute_group snbep_uncore_ubox_format_group = {
+ .name = "format",
+ .attrs = snbep_uncore_ubox_formats_attr,
+};
+
+static struct attribute_group snbep_uncore_cbox_format_group = {
+ .name = "format",
+ .attrs = snbep_uncore_cbox_formats_attr,
+};
+
+static struct attribute_group snbep_uncore_pcu_format_group = {
+ .name = "format",
+ .attrs = snbep_uncore_pcu_formats_attr,
+};
+
+static struct attribute_group snbep_uncore_qpi_format_group = {
+ .name = "format",
+ .attrs = snbep_uncore_qpi_formats_attr,
+};
+
+#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \
+ .init_box = snbep_uncore_msr_init_box, \
+ .disable_box = snbep_uncore_msr_disable_box, \
+ .enable_box = snbep_uncore_msr_enable_box, \
+ .disable_event = snbep_uncore_msr_disable_event, \
+ .enable_event = snbep_uncore_msr_enable_event, \
+ .read_counter = uncore_msr_read_counter
+
+static struct intel_uncore_ops snbep_uncore_msr_ops = {
+ SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
+};
+
+#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT() \
+ .init_box = snbep_uncore_pci_init_box, \
+ .disable_box = snbep_uncore_pci_disable_box, \
+ .enable_box = snbep_uncore_pci_enable_box, \
+ .disable_event = snbep_uncore_pci_disable_event, \
+ .read_counter = snbep_uncore_pci_read_counter
+
+static struct intel_uncore_ops snbep_uncore_pci_ops = {
+ SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
+ .enable_event = snbep_uncore_pci_enable_event,
+};
+
+static struct event_constraint snbep_uncore_cbox_constraints[] = {
+ UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
+ UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
+ UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
+ UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
+ UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
+ UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
+ EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff),
+ UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
+ UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
+ EVENT_CONSTRAINT_END
+};
+
+static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
+ UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
+ UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
+ EVENT_CONSTRAINT_END
+};
+
+static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
+ UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
+ UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
+ EVENT_CONSTRAINT_END
+};
+
+static struct intel_uncore_type snbep_uncore_ubox = {
+ .name = "ubox",
+ .num_counters = 2,
+ .num_boxes = 1,
+ .perf_ctr_bits = 44,
+ .fixed_ctr_bits = 48,
+ .perf_ctr = SNBEP_U_MSR_PMON_CTR0,
+ .event_ctl = SNBEP_U_MSR_PMON_CTL0,
+ .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
+ .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
+ .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
+ .ops = &snbep_uncore_msr_ops,
+ .format_group = &snbep_uncore_ubox_format_group,
+};
+
+static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
+ SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
+ SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
+ EVENT_EXTRA_END
+};
+
+static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+ struct intel_uncore_extra_reg *er = &box->shared_regs[0];
+ int i;
+
+ if (uncore_box_is_fake(box))
+ return;
+
+ for (i = 0; i < 5; i++) {
+ if (reg1->alloc & (0x1 << i))
+ atomic_sub(1 << (i * 6), &er->ref);
+ }
+ reg1->alloc = 0;
+}
+
+static struct event_constraint *
+__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
+ u64 (*cbox_filter_mask)(int fields))
+{
+ struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+ struct intel_uncore_extra_reg *er = &box->shared_regs[0];
+ int i, alloc = 0;
+ unsigned long flags;
+ u64 mask;
+
+ if (reg1->idx == EXTRA_REG_NONE)
+ return NULL;
+
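+ /*
+ * the box filter register is shared; er->ref packs a 6-bit use count
+ * per filter field, and er->config caches the programmed filter value.
+ */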
+ raw_spin_lock_irqsave(&er->lock, flags);
+ for (i = 0; i < 5; i++) {
+ if (!(reg1->idx & (0x1 << i)))
+ continue;
+ if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
+ continue;
+
+ mask = cbox_filter_mask(0x1 << i);
+ if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
+ !((reg1->config ^ er->config) & mask)) {
+ atomic_add(1 << (i * 6), &er->ref);
+ er->config &= ~mask;
+ er->config |= reg1->config & mask;
+ alloc |= (0x1 << i);
+ } else {
+ break;
+ }
+ }
+ raw_spin_unlock_irqrestore(&er->lock, flags);
+ if (i < 5)
+ goto fail;
+
+ if (!uncore_box_is_fake(box))
+ reg1->alloc |= alloc;
+
+ return NULL;
+fail:
+ for (; i >= 0; i--) {
+ if (alloc & (0x1 << i))
+ atomic_sub(1 << (i * 6), &er->ref);
+ }
+ return &uncore_constraint_empty;
+}
+
+static u64 snbep_cbox_filter_mask(int fields)
+{
+ u64 mask = 0;
+
+ if (fields & 0x1)
+ mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
+ if (fields & 0x2)
+ mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
+ if (fields & 0x4)
+ mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
+ if (fields & 0x8)
+ mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
+
+ return mask;
+}
+
+static struct event_constraint *
+snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
+{
+ return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
+}
+
+static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+ struct extra_reg *er;
+ int idx = 0;
+
+ for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
+ if (er->event != (event->hw.config & er->config_mask))
+ continue;
+ idx |= er->idx;
+ }
+
+ if (idx) {
+ reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
+ SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
+ reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
+ reg1->idx = idx;
+ }
+ return 0;
+}
+
+static struct intel_uncore_ops snbep_uncore_cbox_ops = {
+ SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
+ .hw_config = snbep_cbox_hw_config,
+ .get_constraint = snbep_cbox_get_constraint,
+ .put_constraint = snbep_cbox_put_constraint,
+};
+
+static struct intel_uncore_type snbep_uncore_cbox = {
+ .name = "cbox",
+ .num_counters = 4,
+ .num_boxes = 8,
+ .perf_ctr_bits = 44,
+ .event_ctl = SNBEP_C0_MSR_PMON_CTL0,
+ .perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
+ .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
+ .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
+ .msr_offset = SNBEP_CBO_MSR_OFFSET,
+ .num_shared_regs = 1,
+ .constraints = snbep_uncore_cbox_constraints,
+ .ops = &snbep_uncore_cbox_ops,
+ .format_group = &snbep_uncore_cbox_format_group,
+};
+
+static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+ u64 config = reg1->config;
+
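+ /* each PCU filter band occupies one byte of config1; move it to the new slot */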
+ if (new_idx > reg1->idx)
+ config <<= 8 * (new_idx - reg1->idx);
+ else
+ config >>= 8 * (reg1->idx - new_idx);
+
+ if (modify) {
+ hwc->config += new_idx - reg1->idx;
+ reg1->config = config;
+ reg1->idx = new_idx;
+ }
+ return config;
+}
+
+static struct event_constraint *
+snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+ struct intel_uncore_extra_reg *er = &box->shared_regs[0];
+ unsigned long flags;
+ int idx = reg1->idx;
+ u64 mask, config1 = reg1->config;
+ bool ok = false;
+
+ if (reg1->idx == EXTRA_REG_NONE ||
+ (!uncore_box_is_fake(box) && reg1->alloc))
+ return NULL;
+again:
+ mask = 0xffULL << (idx * 8);
+ raw_spin_lock_irqsave(&er->lock, flags);
+ if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
+ !((config1 ^ er->config) & mask)) {
+ atomic_add(1 << (idx * 8), &er->ref);
+ er->config &= ~mask;
+ er->config |= config1 & mask;
+ ok = true;
+ }
+ raw_spin_unlock_irqrestore(&er->lock, flags);
+
+ if (!ok) {
+ idx = (idx + 1) % 4;
+ if (idx != reg1->idx) {
+ config1 = snbep_pcu_alter_er(event, idx, false);
+ goto again;
+ }
+ return &uncore_constraint_empty;
+ }
+
+ if (!uncore_box_is_fake(box)) {
+ if (idx != reg1->idx)
+ snbep_pcu_alter_er(event, idx, true);
+ reg1->alloc = 1;
+ }
+ return NULL;
+}
+
+static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+ struct intel_uncore_extra_reg *er = &box->shared_regs[0];
+
+ if (uncore_box_is_fake(box) || !reg1->alloc)
+ return;
+
+ atomic_sub(1 << (reg1->idx * 8), &er->ref);
+ reg1->alloc = 0;
+}
+
+static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+ int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
+
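+ /* events 0xb-0xe use the freq band filter; each selects one byte of config1 */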
+ if (ev_sel >= 0xb && ev_sel <= 0xe) {
+ reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
+ reg1->idx = ev_sel - 0xb;
+ reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
+ }
+ return 0;
+}
+
+static struct intel_uncore_ops snbep_uncore_pcu_ops = {
+ SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
+ .hw_config = snbep_pcu_hw_config,
+ .get_constraint = snbep_pcu_get_constraint,
+ .put_constraint = snbep_pcu_put_constraint,
+};
+
+static struct intel_uncore_type snbep_uncore_pcu = {
+ .name = "pcu",
+ .num_counters = 4,
+ .num_boxes = 1,
+ .perf_ctr_bits = 48,
+ .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
+ .event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
+ .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
+ .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
+ .num_shared_regs = 1,
+ .ops = &snbep_uncore_pcu_ops,
+ .format_group = &snbep_uncore_pcu_format_group,
+};
+
+static struct intel_uncore_type *snbep_msr_uncores[] = {
+ &snbep_uncore_ubox,
+ &snbep_uncore_cbox,
+ &snbep_uncore_pcu,
+ NULL,
+};
+
+void snbep_uncore_cpu_init(void)
+{
+ if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
+ snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
+ uncore_msr_uncores = snbep_msr_uncores;
+}
+
+enum {
+ SNBEP_PCI_QPI_PORT0_FILTER,
+ SNBEP_PCI_QPI_PORT1_FILTER,
+};
+
+static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+ struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
+
+ if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
+ reg1->idx = 0;
+ reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
+ reg1->config = event->attr.config1;
+ reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
+ reg2->config = event->attr.config2;
+ }
+ return 0;
+}
+
+static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct pci_dev *pdev = box->pci_dev;
+ struct hw_perf_event *hwc = &event->hw;
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+ struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
+
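+ /*
+ * the QPI match/mask filter registers live in a separate PCI device;
+ * program them through that device if it was discovered.
+ */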
+ if (reg1->idx != EXTRA_REG_NONE) {
+ int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
+ struct pci_dev *filter_pdev = uncore_extra_pci_dev[box->phys_id][idx];
+ if (filter_pdev) {
+ pci_write_config_dword(filter_pdev, reg1->reg,
+ (u32)reg1->config);
+ pci_write_config_dword(filter_pdev, reg1->reg + 4,
+ (u32)(reg1->config >> 32));
+ pci_write_config_dword(filter_pdev, reg2->reg,
+ (u32)reg2->config);
+ pci_write_config_dword(filter_pdev, reg2->reg + 4,
+ (u32)(reg2->config >> 32));
+ }
+ }
+
+ pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
+}
+
+static struct intel_uncore_ops snbep_uncore_qpi_ops = {
+ SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
+ .enable_event = snbep_qpi_enable_event,
+ .hw_config = snbep_qpi_hw_config,
+ .get_constraint = uncore_get_constraint,
+ .put_constraint = uncore_put_constraint,
+};
+
+#define SNBEP_UNCORE_PCI_COMMON_INIT() \
+ .perf_ctr = SNBEP_PCI_PMON_CTR0, \
+ .event_ctl = SNBEP_PCI_PMON_CTL0, \
+ .event_mask = SNBEP_PMON_RAW_EVENT_MASK, \
+ .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \
+ .ops = &snbep_uncore_pci_ops, \
+ .format_group = &snbep_uncore_format_group
+
+static struct intel_uncore_type snbep_uncore_ha = {
+ .name = "ha",
+ .num_counters = 4,
+ .num_boxes = 1,
+ .perf_ctr_bits = 48,
+ SNBEP_UNCORE_PCI_COMMON_INIT(),
+};
+
+static struct intel_uncore_type snbep_uncore_imc = {
+ .name = "imc",
+ .num_counters = 4,
+ .num_boxes = 4,
+ .perf_ctr_bits = 48,
+ .fixed_ctr_bits = 48,
+ .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
+ .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
+ .event_descs = snbep_uncore_imc_events,
+ SNBEP_UNCORE_PCI_COMMON_INIT(),
+};
+
+static struct intel_uncore_type snbep_uncore_qpi = {
+ .name = "qpi",
+ .num_counters = 4,
+ .num_boxes = 2,
+ .perf_ctr_bits = 48,
+ .perf_ctr = SNBEP_PCI_PMON_CTR0,
+ .event_ctl = SNBEP_PCI_PMON_CTL0,
+ .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
+ .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
+ .num_shared_regs = 1,
+ .ops = &snbep_uncore_qpi_ops,
+ .event_descs = snbep_uncore_qpi_events,
+ .format_group = &snbep_uncore_qpi_format_group,
+};
+
+
+static struct intel_uncore_type snbep_uncore_r2pcie = {
+ .name = "r2pcie",
+ .num_counters = 4,
+ .num_boxes = 1,
+ .perf_ctr_bits = 44,
+ .constraints = snbep_uncore_r2pcie_constraints,
+ SNBEP_UNCORE_PCI_COMMON_INIT(),
+};
+
+static struct intel_uncore_type snbep_uncore_r3qpi = {
+ .name = "r3qpi",
+ .num_counters = 3,
+ .num_boxes = 2,
+ .perf_ctr_bits = 44,
+ .constraints = snbep_uncore_r3qpi_constraints,
+ SNBEP_UNCORE_PCI_COMMON_INIT(),
+};
+
+enum {
+ SNBEP_PCI_UNCORE_HA,
+ SNBEP_PCI_UNCORE_IMC,
+ SNBEP_PCI_UNCORE_QPI,
+ SNBEP_PCI_UNCORE_R2PCIE,
+ SNBEP_PCI_UNCORE_R3QPI,
+};
+
+static struct intel_uncore_type *snbep_pci_uncores[] = {
+ [SNBEP_PCI_UNCORE_HA] = &snbep_uncore_ha,
+ [SNBEP_PCI_UNCORE_IMC] = &snbep_uncore_imc,
+ [SNBEP_PCI_UNCORE_QPI] = &snbep_uncore_qpi,
+ [SNBEP_PCI_UNCORE_R2PCIE] = &snbep_uncore_r2pcie,
+ [SNBEP_PCI_UNCORE_R3QPI] = &snbep_uncore_r3qpi,
+ NULL,
+};
+
+static const struct pci_device_id snbep_uncore_pci_ids[] = {
+ { /* Home Agent */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
+ },
+ { /* MC Channel 0 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
+ },
+ { /* MC Channel 1 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
+ },
+ { /* MC Channel 2 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
+ },
+ { /* MC Channel 3 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
+ },
+ { /* QPI Port 0 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
+ },
+ { /* QPI Port 1 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
+ },
+ { /* R2PCIe */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
+ },
+ { /* R3QPI Link 0 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
+ },
+ { /* R3QPI Link 1 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
+ },
+ { /* QPI Port 0 filter */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
+ .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+ SNBEP_PCI_QPI_PORT0_FILTER),
+ },
+ { /* QPI Port 1 filter */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
+ .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+ SNBEP_PCI_QPI_PORT1_FILTER),
+ },
+ { /* end: all zeroes */ }
+};
+
+static struct pci_driver snbep_uncore_pci_driver = {
+ .name = "snbep_uncore",
+ .id_table = snbep_uncore_pci_ids,
+};
+
+/*
+ * build pci bus to socket mapping
+ */
+static int snbep_pci2phy_map_init(int devid)
+{
+ struct pci_dev *ubox_dev = NULL;
+ int i, bus, nodeid;
+ int err = 0;
+ u32 config = 0;
+
+ while (1) {
+ /* find the UBOX device */
+ ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
+ if (!ubox_dev)
+ break;
+ bus = ubox_dev->bus->number;
+ /* get the Node ID of the local register */
+ err = pci_read_config_dword(ubox_dev, 0x40, &config);
+ if (err)
+ break;
+ nodeid = config;
+ /* get the Node ID mapping */
+ err = pci_read_config_dword(ubox_dev, 0x54, &config);
+ if (err)
+ break;
+ /*
+ * each three-bit field in the Node ID mapping register maps
+ * to a particular node.
+ */
+ for (i = 0; i < 8; i++) {
+ if (nodeid == ((config >> (3 * i)) & 0x7)) {
+ uncore_pcibus_to_physid[bus] = i;
+ break;
+ }
+ }
+ }
+
+ if (!err) {
+ /*
+ * For a PCI bus with no UBOX device, find the next bus
+ * that has a UBOX device and use its mapping.
+ */
+ i = -1;
+ for (bus = 255; bus >= 0; bus--) {
+ if (uncore_pcibus_to_physid[bus] >= 0)
+ i = uncore_pcibus_to_physid[bus];
+ else
+ uncore_pcibus_to_physid[bus] = i;
+ }
+ }
+
+ if (ubox_dev)
+ pci_dev_put(ubox_dev);
+
+ return err ? pcibios_err_to_errno(err) : 0;
+}
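+
+/*
+ * Backfill example: with UBOX devices on buses 0x3f (node 0) and 0x7f
+ * (node 1), the loop above walks buses 255..0 and gives 0x40-0x7f
+ * node 1 and 0x00-0x3f node 0 -- a bus without a UBOX inherits the node
+ * of the nearest UBOX bus above it, and buses above the highest UBOX
+ * bus keep their default (unmapped) value.
+ */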
+
+int snbep_uncore_pci_init(void)
+{
+ int ret = snbep_pci2phy_map_init(0x3ce0);
+ if (ret)
+ return ret;
+ uncore_pci_uncores = snbep_pci_uncores;
+ uncore_pci_driver = &snbep_uncore_pci_driver;
+ return 0;
+}
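+
+/*
+ * 0x3ce0 is the device id of the per-socket UBOX PCI function that
+ * snbep_pci2phy_map_init() probes for the node-id registers; the
+ * IvyTown and Haswell-EP init functions below pass their own UBOX ids
+ * (0x0e1e and 0x2f1e) to the same helper.
+ */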
+/* end of Sandy Bridge-EP uncore support */
+
+/* IvyTown uncore support */
+static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
+{
+ unsigned msr = uncore_msr_box_ctl(box);
+ if (msr)
+ wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
+}
+
+static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
+{
+ struct pci_dev *pdev = box->pci_dev;
+
+ pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
+}
+
+#define IVBEP_UNCORE_MSR_OPS_COMMON_INIT() \
+ .init_box = ivbep_uncore_msr_init_box, \
+ .disable_box = snbep_uncore_msr_disable_box, \
+ .enable_box = snbep_uncore_msr_enable_box, \
+ .disable_event = snbep_uncore_msr_disable_event, \
+ .enable_event = snbep_uncore_msr_enable_event, \
+ .read_counter = uncore_msr_read_counter
+
+static struct intel_uncore_ops ivbep_uncore_msr_ops = {
+ IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
+};
+
+static struct intel_uncore_ops ivbep_uncore_pci_ops = {
+ .init_box = ivbep_uncore_pci_init_box,
+ .disable_box = snbep_uncore_pci_disable_box,
+ .enable_box = snbep_uncore_pci_enable_box,
+ .disable_event = snbep_uncore_pci_disable_event,
+ .enable_event = snbep_uncore_pci_enable_event,
+ .read_counter = snbep_uncore_pci_read_counter,
+};
+
+#define IVBEP_UNCORE_PCI_COMMON_INIT() \
+ .perf_ctr = SNBEP_PCI_PMON_CTR0, \
+ .event_ctl = SNBEP_PCI_PMON_CTL0, \
+ .event_mask = IVBEP_PMON_RAW_EVENT_MASK, \
+ .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \
+ .ops = &ivbep_uncore_pci_ops, \
+ .format_group = &ivbep_uncore_format_group
+
+static struct attribute *ivbep_uncore_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_umask.attr,
+ &format_attr_edge.attr,
+ &format_attr_inv.attr,
+ &format_attr_thresh8.attr,
+ NULL,
+};
+
+static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_umask.attr,
+ &format_attr_edge.attr,
+ &format_attr_inv.attr,
+ &format_attr_thresh5.attr,
+ NULL,
+};
+
+static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_umask.attr,
+ &format_attr_edge.attr,
+ &format_attr_tid_en.attr,
+ &format_attr_thresh8.attr,
+ &format_attr_filter_tid.attr,
+ &format_attr_filter_link.attr,
+ &format_attr_filter_state2.attr,
+ &format_attr_filter_nid2.attr,
+ &format_attr_filter_opc2.attr,
+ &format_attr_filter_nc.attr,
+ &format_attr_filter_c6.attr,
+ &format_attr_filter_isoc.attr,
+ NULL,
+};
+
+static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
+ &format_attr_event_ext.attr,
+ &format_attr_occ_sel.attr,
+ &format_attr_edge.attr,
+ &format_attr_thresh5.attr,
+ &format_attr_occ_invert.attr,
+ &format_attr_occ_edge.attr,
+ &format_attr_filter_band0.attr,
+ &format_attr_filter_band1.attr,
+ &format_attr_filter_band2.attr,
+ &format_attr_filter_band3.attr,
+ NULL,
+};
+
+static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
+ &format_attr_event_ext.attr,
+ &format_attr_umask.attr,
+ &format_attr_edge.attr,
+ &format_attr_thresh8.attr,
+ &format_attr_match_rds.attr,
+ &format_attr_match_rnid30.attr,
+ &format_attr_match_rnid4.attr,
+ &format_attr_match_dnid.attr,
+ &format_attr_match_mc.attr,
+ &format_attr_match_opc.attr,
+ &format_attr_match_vnw.attr,
+ &format_attr_match0.attr,
+ &format_attr_match1.attr,
+ &format_attr_mask_rds.attr,
+ &format_attr_mask_rnid30.attr,
+ &format_attr_mask_rnid4.attr,
+ &format_attr_mask_dnid.attr,
+ &format_attr_mask_mc.attr,
+ &format_attr_mask_opc.attr,
+ &format_attr_mask_vnw.attr,
+ &format_attr_mask0.attr,
+ &format_attr_mask1.attr,
+ NULL,
+};
+
+static struct attribute_group ivbep_uncore_format_group = {
+ .name = "format",
+ .attrs = ivbep_uncore_formats_attr,
+};
+
+static struct attribute_group ivbep_uncore_ubox_format_group = {
+ .name = "format",
+ .attrs = ivbep_uncore_ubox_formats_attr,
+};
+
+static struct attribute_group ivbep_uncore_cbox_format_group = {
+ .name = "format",
+ .attrs = ivbep_uncore_cbox_formats_attr,
+};
+
+static struct attribute_group ivbep_uncore_pcu_format_group = {
+ .name = "format",
+ .attrs = ivbep_uncore_pcu_formats_attr,
+};
+
+static struct attribute_group ivbep_uncore_qpi_format_group = {
+ .name = "format",
+ .attrs = ivbep_uncore_qpi_formats_attr,
+};
+
+static struct intel_uncore_type ivbep_uncore_ubox = {
+ .name = "ubox",
+ .num_counters = 2,
+ .num_boxes = 1,
+ .perf_ctr_bits = 44,
+ .fixed_ctr_bits = 48,
+ .perf_ctr = SNBEP_U_MSR_PMON_CTR0,
+ .event_ctl = SNBEP_U_MSR_PMON_CTL0,
+ .event_mask = IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
+ .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
+ .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
+ .ops = &ivbep_uncore_msr_ops,
+ .format_group = &ivbep_uncore_ubox_format_group,
+};
+
+static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
+ SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
+ SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
+ EVENT_EXTRA_END
+};
+
+static u64 ivbep_cbox_filter_mask(int fields)
+{
+ u64 mask = 0;
+
+ if (fields & 0x1)
+ mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
+ if (fields & 0x2)
+ mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
+ if (fields & 0x4)
+ mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
+ if (fields & 0x8)
+ mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
+ if (fields & 0x10) {
+ mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
+ mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
+ mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
+ mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
+ }
+
+ return mask;
+}
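+
+/*
+ * 'fields' is the bit-or of the idx values from
+ * ivbep_uncore_cbox_extra_regs[]: 0x1 = TID, 0x2 = LINK, 0x4 = STATE,
+ * 0x8 = NID and 0x10 = the opcode-related fields (OPC/NC/C6/ISOC).  The
+ * returned mask limits which bits of attr.config1 an event may program
+ * into the C-box filter register.
+ */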
+
+static struct event_constraint *
+ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
+{
+ return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
+}
+
+static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+ struct extra_reg *er;
+ int idx = 0;
+
+ for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
+ if (er->event != (event->hw.config & er->config_mask))
+ continue;
+ idx |= er->idx;
+ }
+
+ if (idx) {
+ reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
+ SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
+ reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
+ reg1->idx = idx;
+ }
+ return 0;
+}
+
+static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+
+ if (reg1->idx != EXTRA_REG_NONE) {
+ u64 filter = uncore_shared_reg_config(box, 0);
+ wrmsrl(reg1->reg, filter & 0xffffffff);
+ wrmsrl(reg1->reg + 6, filter >> 32);
+ }
+
+ wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
+}
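+
+/*
+ * The 64-bit filter value is split across two MSRs on IvyTown: the low
+ * half goes into the filter register itself, the high half into a
+ * second filter register six MSRs above it (the "reg1->reg + 6").
+ * Haswell-EP keeps its two filter registers adjacent, see
+ * hswep_cbox_enable_event().
+ */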
+
+static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
+ .init_box = ivbep_uncore_msr_init_box,
+ .disable_box = snbep_uncore_msr_disable_box,
+ .enable_box = snbep_uncore_msr_enable_box,
+ .disable_event = snbep_uncore_msr_disable_event,
+ .enable_event = ivbep_cbox_enable_event,
+ .read_counter = uncore_msr_read_counter,
+ .hw_config = ivbep_cbox_hw_config,
+ .get_constraint = ivbep_cbox_get_constraint,
+ .put_constraint = snbep_cbox_put_constraint,
+};
+
+static struct intel_uncore_type ivbep_uncore_cbox = {
+ .name = "cbox",
+ .num_counters = 4,
+ .num_boxes = 15,
+ .perf_ctr_bits = 44,
+ .event_ctl = SNBEP_C0_MSR_PMON_CTL0,
+ .perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
+ .event_mask = IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
+ .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
+ .msr_offset = SNBEP_CBO_MSR_OFFSET,
+ .num_shared_regs = 1,
+ .constraints = snbep_uncore_cbox_constraints,
+ .ops = &ivbep_uncore_cbox_ops,
+ .format_group = &ivbep_uncore_cbox_format_group,
+};
+
+static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
+ IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
+ .hw_config = snbep_pcu_hw_config,
+ .get_constraint = snbep_pcu_get_constraint,
+ .put_constraint = snbep_pcu_put_constraint,
+};
+
+static struct intel_uncore_type ivbep_uncore_pcu = {
+ .name = "pcu",
+ .num_counters = 4,
+ .num_boxes = 1,
+ .perf_ctr_bits = 48,
+ .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
+ .event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
+ .event_mask = IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
+ .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
+ .num_shared_regs = 1,
+ .ops = &ivbep_uncore_pcu_ops,
+ .format_group = &ivbep_uncore_pcu_format_group,
+};
+
+static struct intel_uncore_type *ivbep_msr_uncores[] = {
+ &ivbep_uncore_ubox,
+ &ivbep_uncore_cbox,
+ &ivbep_uncore_pcu,
+ NULL,
+};
+
+void ivbep_uncore_cpu_init(void)
+{
+ if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
+ ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
+ uncore_msr_uncores = ivbep_msr_uncores;
+}
+
+static struct intel_uncore_type ivbep_uncore_ha = {
+ .name = "ha",
+ .num_counters = 4,
+ .num_boxes = 2,
+ .perf_ctr_bits = 48,
+ IVBEP_UNCORE_PCI_COMMON_INIT(),
+};
+
+static struct intel_uncore_type ivbep_uncore_imc = {
+ .name = "imc",
+ .num_counters = 4,
+ .num_boxes = 8,
+ .perf_ctr_bits = 48,
+ .fixed_ctr_bits = 48,
+ .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
+ .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
+ .event_descs = snbep_uncore_imc_events,
+ IVBEP_UNCORE_PCI_COMMON_INIT(),
+};
+
+/* registers in IRP boxes are not properly aligned */
+static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
+static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
+
+static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct pci_dev *pdev = box->pci_dev;
+ struct hw_perf_event *hwc = &event->hw;
+
+ pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
+ hwc->config | SNBEP_PMON_CTL_EN);
+}
+
+static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct pci_dev *pdev = box->pci_dev;
+ struct hw_perf_event *hwc = &event->hw;
+
+ pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
+}
+
+static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct pci_dev *pdev = box->pci_dev;
+ struct hw_perf_event *hwc = &event->hw;
+ u64 count = 0;
+
+ pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
+ pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
+
+ return count;
+}
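+
+/*
+ * IRP counters are not laid out like the other PCI PMON blocks, so the
+ * 64-bit count is assembled from two 32-bit config-space reads: the low
+ * dword at ivbep_uncore_irp_ctrs[idx] and the high dword four bytes
+ * above it.  Filling the two halves of 'count' through a u32 pointer
+ * relies on x86 being little-endian.
+ */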
+
+static struct intel_uncore_ops ivbep_uncore_irp_ops = {
+ .init_box = ivbep_uncore_pci_init_box,
+ .disable_box = snbep_uncore_pci_disable_box,
+ .enable_box = snbep_uncore_pci_enable_box,
+ .disable_event = ivbep_uncore_irp_disable_event,
+ .enable_event = ivbep_uncore_irp_enable_event,
+ .read_counter = ivbep_uncore_irp_read_counter,
+};
+
+static struct intel_uncore_type ivbep_uncore_irp = {
+ .name = "irp",
+ .num_counters = 4,
+ .num_boxes = 1,
+ .perf_ctr_bits = 48,
+ .event_mask = IVBEP_PMON_RAW_EVENT_MASK,
+ .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
+ .ops = &ivbep_uncore_irp_ops,
+ .format_group = &ivbep_uncore_format_group,
+};
+
+static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
+ .init_box = ivbep_uncore_pci_init_box,
+ .disable_box = snbep_uncore_pci_disable_box,
+ .enable_box = snbep_uncore_pci_enable_box,
+ .disable_event = snbep_uncore_pci_disable_event,
+ .enable_event = snbep_qpi_enable_event,
+ .read_counter = snbep_uncore_pci_read_counter,
+ .hw_config = snbep_qpi_hw_config,
+ .get_constraint = uncore_get_constraint,
+ .put_constraint = uncore_put_constraint,
+};
+
+static struct intel_uncore_type ivbep_uncore_qpi = {
+ .name = "qpi",
+ .num_counters = 4,
+ .num_boxes = 3,
+ .perf_ctr_bits = 48,
+ .perf_ctr = SNBEP_PCI_PMON_CTR0,
+ .event_ctl = SNBEP_PCI_PMON_CTL0,
+ .event_mask = IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
+ .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
+ .num_shared_regs = 1,
+ .ops = &ivbep_uncore_qpi_ops,
+ .format_group = &ivbep_uncore_qpi_format_group,
+};
+
+static struct intel_uncore_type ivbep_uncore_r2pcie = {
+ .name = "r2pcie",
+ .num_counters = 4,
+ .num_boxes = 1,
+ .perf_ctr_bits = 44,
+ .constraints = snbep_uncore_r2pcie_constraints,
+ IVBEP_UNCORE_PCI_COMMON_INIT(),
+};
+
+static struct intel_uncore_type ivbep_uncore_r3qpi = {
+ .name = "r3qpi",
+ .num_counters = 3,
+ .num_boxes = 2,
+ .perf_ctr_bits = 44,
+ .constraints = snbep_uncore_r3qpi_constraints,
+ IVBEP_UNCORE_PCI_COMMON_INIT(),
+};
+
+enum {
+ IVBEP_PCI_UNCORE_HA,
+ IVBEP_PCI_UNCORE_IMC,
+ IVBEP_PCI_UNCORE_IRP,
+ IVBEP_PCI_UNCORE_QPI,
+ IVBEP_PCI_UNCORE_R2PCIE,
+ IVBEP_PCI_UNCORE_R3QPI,
+};
+
+static struct intel_uncore_type *ivbep_pci_uncores[] = {
+ [IVBEP_PCI_UNCORE_HA] = &ivbep_uncore_ha,
+ [IVBEP_PCI_UNCORE_IMC] = &ivbep_uncore_imc,
+ [IVBEP_PCI_UNCORE_IRP] = &ivbep_uncore_irp,
+ [IVBEP_PCI_UNCORE_QPI] = &ivbep_uncore_qpi,
+ [IVBEP_PCI_UNCORE_R2PCIE] = &ivbep_uncore_r2pcie,
+ [IVBEP_PCI_UNCORE_R3QPI] = &ivbep_uncore_r3qpi,
+ NULL,
+};
+
+static const struct pci_device_id ivbep_uncore_pci_ids[] = {
+ { /* Home Agent 0 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
+ .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
+ },
+ { /* Home Agent 1 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
+ .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
+ },
+ { /* MC0 Channel 0 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
+ .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
+ },
+ { /* MC0 Channel 1 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
+ .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
+ },
+ { /* MC0 Channel 3 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
+ .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
+ },
+ { /* MC0 Channel 4 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
+ .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
+ },
+ { /* MC1 Channel 0 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
+ .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
+ },
+ { /* MC1 Channel 1 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
+ .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
+ },
+ { /* MC1 Channel 3 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
+ .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
+ },
+ { /* MC1 Channel 4 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
+ .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
+ },
+ { /* IRP */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
+ .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
+ },
+ { /* QPI0 Port 0 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
+ .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
+ },
+ { /* QPI0 Port 1 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
+ .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
+ },
+ { /* QPI1 Port 2 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
+ .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
+ },
+ { /* R2PCIe */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
+ .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
+ },
+ { /* R3QPI0 Link 0 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
+ .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
+ },
+ { /* R3QPI0 Link 1 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
+ .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
+ },
+ { /* R3QPI1 Link 2 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
+ .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
+ },
+ { /* QPI Port 0 filter */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
+ .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+ SNBEP_PCI_QPI_PORT0_FILTER),
+ },
+ { /* QPI Port 1 filter */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
+ .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+ SNBEP_PCI_QPI_PORT1_FILTER),
+ },
+ { /* end: all zeroes */ }
+};
+
+static struct pci_driver ivbep_uncore_pci_driver = {
+ .name = "ivbep_uncore",
+ .id_table = ivbep_uncore_pci_ids,
+};
+
+int ivbep_uncore_pci_init(void)
+{
+ int ret = snbep_pci2phy_map_init(0x0e1e);
+ if (ret)
+ return ret;
+ uncore_pci_uncores = ivbep_pci_uncores;
+ uncore_pci_driver = &ivbep_uncore_pci_driver;
+ return 0;
+}
+/* end of IvyTown uncore support */
+
+/* Haswell-EP uncore support */
+static struct attribute *hswep_uncore_ubox_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_umask.attr,
+ &format_attr_edge.attr,
+ &format_attr_inv.attr,
+ &format_attr_thresh5.attr,
+ &format_attr_filter_tid2.attr,
+ &format_attr_filter_cid.attr,
+ NULL,
+};
+
+static struct attribute_group hswep_uncore_ubox_format_group = {
+ .name = "format",
+ .attrs = hswep_uncore_ubox_formats_attr,
+};
+
+static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+ reg1->reg = HSWEP_U_MSR_PMON_FILTER;
+ reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
+ reg1->idx = 0;
+ return 0;
+}
+
+static struct intel_uncore_ops hswep_uncore_ubox_ops = {
+ SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
+ .hw_config = hswep_ubox_hw_config,
+ .get_constraint = uncore_get_constraint,
+ .put_constraint = uncore_put_constraint,
+};
+
+static struct intel_uncore_type hswep_uncore_ubox = {
+ .name = "ubox",
+ .num_counters = 2,
+ .num_boxes = 1,
+ .perf_ctr_bits = 44,
+ .fixed_ctr_bits = 48,
+ .perf_ctr = HSWEP_U_MSR_PMON_CTR0,
+ .event_ctl = HSWEP_U_MSR_PMON_CTL0,
+ .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
+ .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
+ .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
+ .num_shared_regs = 1,
+ .ops = &hswep_uncore_ubox_ops,
+ .format_group = &hswep_uncore_ubox_format_group,
+};
+
+static struct attribute *hswep_uncore_cbox_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_umask.attr,
+ &format_attr_edge.attr,
+ &format_attr_tid_en.attr,
+ &format_attr_thresh8.attr,
+ &format_attr_filter_tid3.attr,
+ &format_attr_filter_link2.attr,
+ &format_attr_filter_state3.attr,
+ &format_attr_filter_nid2.attr,
+ &format_attr_filter_opc2.attr,
+ &format_attr_filter_nc.attr,
+ &format_attr_filter_c6.attr,
+ &format_attr_filter_isoc.attr,
+ NULL,
+};
+
+static struct attribute_group hswep_uncore_cbox_format_group = {
+ .name = "format",
+ .attrs = hswep_uncore_cbox_formats_attr,
+};
+
+static struct event_constraint hswep_uncore_cbox_constraints[] = {
+ UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
+ UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
+ UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
+ UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
+ UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
+ UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
+ EVENT_CONSTRAINT_END
+};
+
+static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
+ SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
+ SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x402a, 0x40ff, 0x8),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
+ EVENT_EXTRA_END
+};
+
+static u64 hswep_cbox_filter_mask(int fields)
+{
+ u64 mask = 0;
+ if (fields & 0x1)
+ mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
+ if (fields & 0x2)
+ mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
+ if (fields & 0x4)
+ mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
+ if (fields & 0x8)
+ mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
+ if (fields & 0x10) {
+ mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
+ mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
+ mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
+ mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
+ }
+ return mask;
+}
+
+static struct event_constraint *
+hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
+{
+ return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
+}
+
+static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+ struct extra_reg *er;
+ int idx = 0;
+
+ for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
+ if (er->event != (event->hw.config & er->config_mask))
+ continue;
+ idx |= er->idx;
+ }
+
+ if (idx) {
+ reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
+ HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
+ reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
+ reg1->idx = idx;
+ }
+ return 0;
+}
+
+static void hswep_cbox_enable_event(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+
+ if (reg1->idx != EXTRA_REG_NONE) {
+ u64 filter = uncore_shared_reg_config(box, 0);
+ wrmsrl(reg1->reg, filter & 0xffffffff);
+ wrmsrl(reg1->reg + 1, filter >> 32);
+ }
+
+ wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
+}
+
+static struct intel_uncore_ops hswep_uncore_cbox_ops = {
+ .init_box = snbep_uncore_msr_init_box,
+ .disable_box = snbep_uncore_msr_disable_box,
+ .enable_box = snbep_uncore_msr_enable_box,
+ .disable_event = snbep_uncore_msr_disable_event,
+ .enable_event = hswep_cbox_enable_event,
+ .read_counter = uncore_msr_read_counter,
+ .hw_config = hswep_cbox_hw_config,
+ .get_constraint = hswep_cbox_get_constraint,
+ .put_constraint = snbep_cbox_put_constraint,
+};
+
+static struct intel_uncore_type hswep_uncore_cbox = {
+ .name = "cbox",
+ .num_counters = 4,
+ .num_boxes = 18,
+ .perf_ctr_bits = 44,
+ .event_ctl = HSWEP_C0_MSR_PMON_CTL0,
+ .perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
+ .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
+ .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
+ .msr_offset = HSWEP_CBO_MSR_OFFSET,
+ .num_shared_regs = 1,
+ .constraints = hswep_uncore_cbox_constraints,
+ .ops = &hswep_uncore_cbox_ops,
+ .format_group = &hswep_uncore_cbox_format_group,
+};
+
+static struct attribute *hswep_uncore_sbox_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_umask.attr,
+ &format_attr_edge.attr,
+ &format_attr_tid_en.attr,
+ &format_attr_inv.attr,
+ &format_attr_thresh8.attr,
+ NULL,
+};
+
+static struct attribute_group hswep_uncore_sbox_format_group = {
+ .name = "format",
+ .attrs = hswep_uncore_sbox_formats_attr,
+};
+
+static struct intel_uncore_type hswep_uncore_sbox = {
+ .name = "sbox",
+ .num_counters = 4,
+ .num_boxes = 4,
+ .perf_ctr_bits = 44,
+ .event_ctl = HSWEP_S0_MSR_PMON_CTL0,
+ .perf_ctr = HSWEP_S0_MSR_PMON_CTR0,
+ .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
+ .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL,
+ .msr_offset = HSWEP_SBOX_MSR_OFFSET,
+ .ops = &snbep_uncore_msr_ops,
+ .format_group = &hswep_uncore_sbox_format_group,
+};
+
+static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+ int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
+
+ if (ev_sel >= 0xb && ev_sel <= 0xe) {
+ reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
+ reg1->idx = ev_sel - 0xb;
+ reg1->config = event->attr.config1 & (0xff << reg1->idx);
+ }
+ return 0;
+}
+
+static struct intel_uncore_ops hswep_uncore_pcu_ops = {
+ SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
+ .hw_config = hswep_pcu_hw_config,
+ .get_constraint = snbep_pcu_get_constraint,
+ .put_constraint = snbep_pcu_put_constraint,
+};
+
+static struct intel_uncore_type hswep_uncore_pcu = {
+ .name = "pcu",
+ .num_counters = 4,
+ .num_boxes = 1,
+ .perf_ctr_bits = 48,
+ .perf_ctr = HSWEP_PCU_MSR_PMON_CTR0,
+ .event_ctl = HSWEP_PCU_MSR_PMON_CTL0,
+ .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
+ .box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL,
+ .num_shared_regs = 1,
+ .ops = &hswep_uncore_pcu_ops,
+ .format_group = &snbep_uncore_pcu_format_group,
+};
+
+static struct intel_uncore_type *hswep_msr_uncores[] = {
+ &hswep_uncore_ubox,
+ &hswep_uncore_cbox,
+ &hswep_uncore_sbox,
+ &hswep_uncore_pcu,
+ NULL,
+};
+
+void hswep_uncore_cpu_init(void)
+{
+ if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
+ hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
+ uncore_msr_uncores = hswep_msr_uncores;
+}
+
+static struct intel_uncore_type hswep_uncore_ha = {
+ .name = "ha",
+ .num_counters = 5,
+ .num_boxes = 2,
+ .perf_ctr_bits = 48,
+ SNBEP_UNCORE_PCI_COMMON_INIT(),
+};
+
+static struct uncore_event_desc hswep_uncore_imc_events[] = {
+ INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x00,umask=0x00"),
+ INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"),
+ INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
+ { /* end: all zeroes */ },
+};
+
+static struct intel_uncore_type hswep_uncore_imc = {
+ .name = "imc",
+ .num_counters = 5,
+ .num_boxes = 8,
+ .perf_ctr_bits = 48,
+ .fixed_ctr_bits = 48,
+ .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
+ .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
+ .event_descs = hswep_uncore_imc_events,
+ SNBEP_UNCORE_PCI_COMMON_INIT(),
+};
+
+static struct intel_uncore_ops hswep_uncore_irp_ops = {
+ .init_box = snbep_uncore_pci_init_box,
+ .disable_box = snbep_uncore_pci_disable_box,
+ .enable_box = snbep_uncore_pci_enable_box,
+ .disable_event = ivbep_uncore_irp_disable_event,
+ .enable_event = ivbep_uncore_irp_enable_event,
+ .read_counter = ivbep_uncore_irp_read_counter,
+};
+
+static struct intel_uncore_type hswep_uncore_irp = {
+ .name = "irp",
+ .num_counters = 4,
+ .num_boxes = 1,
+ .perf_ctr_bits = 48,
+ .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
+ .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
+ .ops = &hswep_uncore_irp_ops,
+ .format_group = &snbep_uncore_format_group,
+};
+
+static struct intel_uncore_type hswep_uncore_qpi = {
+ .name = "qpi",
+ .num_counters = 5,
+ .num_boxes = 3,
+ .perf_ctr_bits = 48,
+ .perf_ctr = SNBEP_PCI_PMON_CTR0,
+ .event_ctl = SNBEP_PCI_PMON_CTL0,
+ .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
+ .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
+ .num_shared_regs = 1,
+ .ops = &snbep_uncore_qpi_ops,
+ .format_group = &snbep_uncore_qpi_format_group,
+};
+
+static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
+ UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
+ UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
+ UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
+ UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
+ UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
+ UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
+ UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
+ EVENT_CONSTRAINT_END
+};
+
+static struct intel_uncore_type hswep_uncore_r2pcie = {
+ .name = "r2pcie",
+ .num_counters = 4,
+ .num_boxes = 1,
+ .perf_ctr_bits = 48,
+ .constraints = hswep_uncore_r2pcie_constraints,
+ SNBEP_UNCORE_PCI_COMMON_INIT(),
+};
+
+static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
+ UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
+ UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
+ UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
+ UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
+ UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
+ UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
+ UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
+ EVENT_CONSTRAINT_END
+};
+
+static struct intel_uncore_type hswep_uncore_r3qpi = {
+ .name = "r3qpi",
+ .num_counters = 4,
+ .num_boxes = 3,
+ .perf_ctr_bits = 44,
+ .constraints = hswep_uncore_r3qpi_constraints,
+ SNBEP_UNCORE_PCI_COMMON_INIT(),
+};
+
+enum {
+ HSWEP_PCI_UNCORE_HA,
+ HSWEP_PCI_UNCORE_IMC,
+ HSWEP_PCI_UNCORE_IRP,
+ HSWEP_PCI_UNCORE_QPI,
+ HSWEP_PCI_UNCORE_R2PCIE,
+ HSWEP_PCI_UNCORE_R3QPI,
+};
+
+static struct intel_uncore_type *hswep_pci_uncores[] = {
+ [HSWEP_PCI_UNCORE_HA] = &hswep_uncore_ha,
+ [HSWEP_PCI_UNCORE_IMC] = &hswep_uncore_imc,
+ [HSWEP_PCI_UNCORE_IRP] = &hswep_uncore_irp,
+ [HSWEP_PCI_UNCORE_QPI] = &hswep_uncore_qpi,
+ [HSWEP_PCI_UNCORE_R2PCIE] = &hswep_uncore_r2pcie,
+ [HSWEP_PCI_UNCORE_R3QPI] = &hswep_uncore_r3qpi,
+ NULL,
+};
+
+static const struct pci_device_id hswep_uncore_pci_ids[] = {
+ { /* Home Agent 0 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
+ .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
+ },
+ { /* Home Agent 1 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
+ .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
+ },
+ { /* MC0 Channel 0 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
+ .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
+ },
+ { /* MC0 Channel 1 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
+ .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
+ },
+ { /* MC0 Channel 2 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
+ .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
+ },
+ { /* MC0 Channel 3 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
+ .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
+ },
+ { /* MC1 Channel 0 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
+ .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
+ },
+ { /* MC1 Channel 1 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
+ .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
+ },
+ { /* MC1 Channel 2 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
+ .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
+ },
+ { /* MC1 Channel 3 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
+ .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
+ },
+ { /* IRP */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
+ .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
+ },
+ { /* QPI0 Port 0 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
+ .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
+ },
+ { /* QPI0 Port 1 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
+ .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
+ },
+ { /* QPI1 Port 2 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
+ .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
+ },
+ { /* R2PCIe */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
+ .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
+ },
+ { /* R3QPI0 Link 0 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
+ .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
+ },
+ { /* R3QPI0 Link 1 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
+ .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
+ },
+ { /* R3QPI1 Link 2 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
+ .driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
+ },
+ { /* QPI Port 0 filter */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
+ .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+ SNBEP_PCI_QPI_PORT0_FILTER),
+ },
+ { /* QPI Port 1 filter */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
+ .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+ SNBEP_PCI_QPI_PORT1_FILTER),
+ },
+ { /* end: all zeroes */ }
+};
+
+static struct pci_driver hswep_uncore_pci_driver = {
+ .name = "hswep_uncore",
+ .id_table = hswep_uncore_pci_ids,
+};
+
+int hswep_uncore_pci_init(void)
+{
+ int ret = snbep_pci2phy_map_init(0x2f1e);
+ if (ret)
+ return ret;
+ uncore_pci_uncores = hswep_pci_uncores;
+ uncore_pci_driver = &hswep_uncore_pci_driver;
+ return 0;
+}
+/* end of Haswell-EP uncore support */
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 988c00a1f60d..49f886481615 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -682,15 +682,14 @@ void __init parse_e820_ext(u64 phys_addr, u32 data_len)
* hibernation (32 bit) or software suspend and suspend to RAM (64 bit).
*
* This function requires the e820 map to be sorted and without any
- * overlapping entries and assumes the first e820 area to be RAM.
+ * overlapping entries.
*/
void __init e820_mark_nosave_regions(unsigned long limit_pfn)
{
int i;
- unsigned long pfn;
+ unsigned long pfn = 0;
- pfn = PFN_DOWN(e820.map[0].addr + e820.map[0].size);
- for (i = 1; i < e820.nr_map; i++) {
+ for (i = 0; i < e820.nr_map; i++) {
struct e820entry *ei = &e820.map[i];
if (pfn < PFN_UP(ei->addr))
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 2fac1343a90b..df088bb03fb3 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -404,8 +404,8 @@ GLOBAL(system_call_after_swapgs)
* and short:
*/
ENABLE_INTERRUPTS(CLBR_NONE)
- SAVE_ARGS 8,0
- movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
+ SAVE_ARGS 8, 0, rax_enosys=1
+ movq_cfi rax,(ORIG_RAX-ARGOFFSET)
movq %rcx,RIP-ARGOFFSET(%rsp)
CFI_REL_OFFSET rip,RIP-ARGOFFSET
testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
@@ -417,7 +417,7 @@ system_call_fastpath:
andl $__SYSCALL_MASK,%eax
cmpl $__NR_syscall_max,%eax
#endif
- ja badsys
+ ja ret_from_sys_call /* and return regs->ax */
movq %r10,%rcx
call *sys_call_table(,%rax,8) # XXX: rip relative
movq %rax,RAX-ARGOFFSET(%rsp)
@@ -476,28 +476,8 @@ sysret_signal:
FIXUP_TOP_OF_STACK %r11, -ARGOFFSET
jmp int_check_syscall_exit_work
-badsys:
- movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
- jmp ret_from_sys_call
-
#ifdef CONFIG_AUDITSYSCALL
/*
- * Fast path for syscall audit without full syscall trace.
- * We just call __audit_syscall_entry() directly, and then
- * jump back to the normal fast path.
- */
-auditsys:
- movq %r10,%r9 /* 6th arg: 4th syscall arg */
- movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
- movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
- movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
- movq %rax,%rsi /* 2nd arg: syscall number */
- movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
- call __audit_syscall_entry
- LOAD_ARGS 0 /* reload call-clobbered registers */
- jmp system_call_fastpath
-
- /*
* Return fast path for syscall audit. Call __audit_syscall_exit()
* directly and then jump back to the fast path with TIF_SYSCALL_AUDIT
* masked off.
@@ -514,18 +494,25 @@ sysret_audit:
/* Do syscall tracing */
tracesys:
-#ifdef CONFIG_AUDITSYSCALL
- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
- jz auditsys
-#endif
+ leaq -REST_SKIP(%rsp), %rdi
+ movq $AUDIT_ARCH_X86_64, %rsi
+ call syscall_trace_enter_phase1
+ test %rax, %rax
+ jnz tracesys_phase2 /* if needed, run the slow path */
+ LOAD_ARGS 0 /* else restore clobbered regs */
+ jmp system_call_fastpath /* and return to the fast path */
+
+tracesys_phase2:
SAVE_REST
- movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
FIXUP_TOP_OF_STACK %rdi
- movq %rsp,%rdi
- call syscall_trace_enter
+ movq %rsp, %rdi
+ movq $AUDIT_ARCH_X86_64, %rsi
+ movq %rax,%rdx
+ call syscall_trace_enter_phase2
+
/*
* Reload arg registers from stack in case ptrace changed them.
- * We don't reload %rax because syscall_trace_enter() returned
+ * We don't reload %rax because syscall_trace_enter_phase2() returned
* the value it wants us to use in the table lookup.
*/
LOAD_ARGS ARGOFFSET, 1
@@ -536,7 +523,7 @@ tracesys:
andl $__SYSCALL_MASK,%eax
cmpl $__NR_syscall_max,%eax
#endif
- ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
+ ja int_ret_from_sys_call /* RAX(%rsp) is already set */
movq %r10,%rcx /* fixup for C */
call *sys_call_table(,%rax,8)
movq %rax,RAX-ARGOFFSET(%rsp)
diff --git a/arch/x86/kernel/iosf_mbi.c b/arch/x86/kernel/iosf_mbi.c
index 9030e83db6ee..82f8d02f0df2 100644
--- a/arch/x86/kernel/iosf_mbi.c
+++ b/arch/x86/kernel/iosf_mbi.c
@@ -22,10 +22,13 @@
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
+#include <linux/debugfs.h>
+#include <linux/capability.h>
#include <asm/iosf_mbi.h>
#define PCI_DEVICE_ID_BAYTRAIL 0x0F00
+#define PCI_DEVICE_ID_BRASWELL 0x2280
#define PCI_DEVICE_ID_QUARK_X1000 0x0958
static DEFINE_SPINLOCK(iosf_mbi_lock);
@@ -187,6 +190,89 @@ bool iosf_mbi_available(void)
}
EXPORT_SYMBOL(iosf_mbi_available);
+#ifdef CONFIG_IOSF_MBI_DEBUG
+static u32 dbg_mdr;
+static u32 dbg_mcr;
+static u32 dbg_mcrx;
+
+static int mcr_get(void *data, u64 *val)
+{
+ *val = *(u32 *)data;
+ return 0;
+}
+
+static int mcr_set(void *data, u64 val)
+{
+ u8 command = ((u32)val & 0xFF000000) >> 24,
+ port = ((u32)val & 0x00FF0000) >> 16,
+ offset = ((u32)val & 0x0000FF00) >> 8;
+ int err;
+
+ *(u32 *)data = val;
+
+ if (!capable(CAP_SYS_RAWIO))
+ return -EACCES;
+
+ if (command & 1u)
+ err = iosf_mbi_write(port,
+ command,
+ dbg_mcrx | offset,
+ dbg_mdr);
+ else
+ err = iosf_mbi_read(port,
+ command,
+ dbg_mcrx | offset,
+ &dbg_mdr);
+
+ return err;
+}
+DEFINE_SIMPLE_ATTRIBUTE(iosf_mcr_fops, mcr_get, mcr_set, "%llx\n");
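+
+/*
+ * Rough usage of the debugfs interface (values are illustrative only):
+ * a value written to "mcr" is decoded as opcode in bits 31:24, port in
+ * bits 23:16 and register offset in bits 15:8, with any upper offset
+ * bits taken from "mcrx".  An opcode with bit 0 set writes the current
+ * "mdr" value; otherwise the read result lands in "mdr", e.g.:
+ *
+ *   echo 0x100604a0 > /sys/kernel/debug/iosf_sb/mcr   # opcode 0x10, port 0x06, offset 0xa0
+ *   cat /sys/kernel/debug/iosf_sb/mdr                 # result of the read
+ */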
+
+static struct dentry *iosf_dbg;
+
+static void iosf_sideband_debug_init(void)
+{
+ struct dentry *d;
+
+ iosf_dbg = debugfs_create_dir("iosf_sb", NULL);
+ if (IS_ERR_OR_NULL(iosf_dbg))
+ return;
+
+ /* mdr */
+ d = debugfs_create_x32("mdr", 0660, iosf_dbg, &dbg_mdr);
+ if (IS_ERR_OR_NULL(d))
+ goto cleanup;
+
+ /* mcrx */
+ d = debugfs_create_x32("mcrx", 0660, iosf_dbg, &dbg_mcrx);
+ if (IS_ERR_OR_NULL(d))
+ goto cleanup;
+
+ /* mcr - initiates mailbox transaction */
+ d = debugfs_create_file("mcr", 0660, iosf_dbg, &dbg_mcr, &iosf_mcr_fops);
+ if (IS_ERR_OR_NULL(d))
+ goto cleanup;
+
+ return;
+
+cleanup:
+ debugfs_remove_recursive(d);
+}
+
+static void iosf_debugfs_init(void)
+{
+ iosf_sideband_debug_init();
+}
+
+static void iosf_debugfs_remove(void)
+{
+ debugfs_remove_recursive(iosf_dbg);
+}
+#else
+static inline void iosf_debugfs_init(void) { }
+static inline void iosf_debugfs_remove(void) { }
+#endif /* CONFIG_IOSF_MBI_DEBUG */
+
static int iosf_mbi_probe(struct pci_dev *pdev,
const struct pci_device_id *unused)
{
@@ -204,6 +290,7 @@ static int iosf_mbi_probe(struct pci_dev *pdev,
static const struct pci_device_id iosf_mbi_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BAYTRAIL) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRASWELL) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_QUARK_X1000) },
{ 0, },
};
@@ -217,11 +304,15 @@ static struct pci_driver iosf_mbi_pci_driver = {
static int __init iosf_mbi_init(void)
{
+ iosf_debugfs_init();
+
return pci_register_driver(&iosf_mbi_pci_driver);
}
static void __exit iosf_mbi_exit(void)
{
+ iosf_debugfs_remove();
+
pci_unregister_driver(&iosf_mbi_pci_driver);
if (mbi_pdev) {
pci_dev_put(mbi_pdev);
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index 1667b1de8d5d..72e8e310258d 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -247,7 +247,8 @@ void machine_kexec(struct kimage *image)
/* now call it */
image->start = relocate_kernel_ptr((unsigned long)image->head,
(unsigned long)page_list,
- image->start, cpu_has_pae,
+ image->start,
+ boot_cpu_has(X86_FEATURE_PAE),
image->preserve_context);
#ifdef CONFIG_KEXEC_JUMP
diff --git a/arch/x86/kernel/pmc_atom.c b/arch/x86/kernel/pmc_atom.c
index 0c424a67985d..0ee5025e0fa4 100644
--- a/arch/x86/kernel/pmc_atom.c
+++ b/arch/x86/kernel/pmc_atom.c
@@ -235,6 +235,11 @@ err:
pmc_dbgfs_unregister(pmc);
return -ENODEV;
}
+#else
+static int pmc_dbgfs_register(struct pmc_dev *pmc, struct pci_dev *pdev)
+{
+ return 0;
+}
#endif /* CONFIG_DEBUG_FS */
static int pmc_setup_dev(struct pci_dev *pdev)
@@ -262,14 +267,12 @@ static int pmc_setup_dev(struct pci_dev *pdev)
/* PMC hardware registers setup */
pmc_hw_reg_setup(pmc);
-#ifdef CONFIG_DEBUG_FS
ret = pmc_dbgfs_register(pmc, pdev);
if (ret) {
iounmap(pmc->regmap);
- return ret;
}
-#endif /* CONFIG_DEBUG_FS */
- return 0;
+
+ return ret;
}
/*
diff --git a/arch/x86/kernel/preempt.S b/arch/x86/kernel/preempt.S
deleted file mode 100644
index ca7f0d58a87d..000000000000
--- a/arch/x86/kernel/preempt.S
+++ /dev/null
@@ -1,25 +0,0 @@
-
-#include <linux/linkage.h>
-#include <asm/dwarf2.h>
-#include <asm/asm.h>
-#include <asm/calling.h>
-
-ENTRY(___preempt_schedule)
- CFI_STARTPROC
- SAVE_ALL
- call preempt_schedule
- RESTORE_ALL
- ret
- CFI_ENDPROC
-
-#ifdef CONFIG_CONTEXT_TRACKING
-
-ENTRY(___preempt_schedule_context)
- CFI_STARTPROC
- SAVE_ALL
- call preempt_schedule_context
- RESTORE_ALL
- ret
- CFI_ENDPROC
-
-#endif
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index f804dc935d2a..e127ddaa2d5a 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -64,14 +64,16 @@ EXPORT_SYMBOL_GPL(task_xstate_cachep);
*/
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
- int ret;
-
*dst = *src;
- if (fpu_allocated(&src->thread.fpu)) {
- memset(&dst->thread.fpu, 0, sizeof(dst->thread.fpu));
- ret = fpu_alloc(&dst->thread.fpu);
- if (ret)
- return ret;
+
+ dst->thread.fpu_counter = 0;
+ dst->thread.fpu.has_fpu = 0;
+ dst->thread.fpu.last_cpu = ~0;
+ dst->thread.fpu.state = NULL;
+ if (tsk_used_math(src)) {
+ int err = fpu_alloc(&dst->thread.fpu);
+ if (err)
+ return err;
fpu_copy(dst, src);
}
return 0;
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 7bc86bbe7485..8f3ebfe710d0 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -138,6 +138,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
p->thread.sp = (unsigned long) childregs;
p->thread.sp0 = (unsigned long) (childregs+1);
+ memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
if (unlikely(p->flags & PF_KTHREAD)) {
/* kernel thread */
@@ -152,9 +153,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
childregs->orig_ax = -1;
childregs->cs = __KERNEL_CS | get_kernel_rpl();
childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_FIXED;
- p->thread.fpu_counter = 0;
p->thread.io_bitmap_ptr = NULL;
- memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
return 0;
}
*childregs = *current_pt_regs();
@@ -165,13 +164,10 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
p->thread.ip = (unsigned long) ret_from_fork;
task_user_gs(p) = get_user_gs(current_pt_regs());
- p->thread.fpu_counter = 0;
p->thread.io_bitmap_ptr = NULL;
tsk = current;
err = -ENOMEM;
- memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
-
if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
IO_BITMAP_BYTES, GFP_KERNEL);
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index ca5b02d405c3..3ed4a68d4013 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -163,7 +163,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
p->thread.sp = (unsigned long) childregs;
p->thread.usersp = me->thread.usersp;
set_tsk_thread_flag(p, TIF_FORK);
- p->thread.fpu_counter = 0;
p->thread.io_bitmap_ptr = NULL;
savesegment(gs, p->thread.gsindex);
@@ -193,8 +192,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
childregs->sp = sp;
err = -ENOMEM;
- memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
-
if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
IO_BITMAP_BYTES, GFP_KERNEL);
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 678c0ada3b3c..29576c244699 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -1441,24 +1441,126 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
force_sig_info(SIGTRAP, &info, tsk);
}
-
-#ifdef CONFIG_X86_32
-# define IS_IA32 1
-#elif defined CONFIG_IA32_EMULATION
-# define IS_IA32 is_compat_task()
-#else
-# define IS_IA32 0
+static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
+{
+#ifdef CONFIG_X86_64
+ if (arch == AUDIT_ARCH_X86_64) {
+ audit_syscall_entry(arch, regs->orig_ax, regs->di,
+ regs->si, regs->dx, regs->r10);
+ } else
#endif
+ {
+ audit_syscall_entry(arch, regs->orig_ax, regs->bx,
+ regs->cx, regs->dx, regs->si);
+ }
+}
/*
- * We must return the syscall number to actually look up in the table.
- * This can be -1L to skip running any syscall at all.
+ * We can return 0 to resume the syscall or anything else to go to phase
+ * 2. If we resume the syscall, we need to put something appropriate in
+ * regs->orig_ax.
+ *
+ * NB: We don't have full pt_regs here, but regs->orig_ax and regs->ax
+ * are fully functional.
+ *
+ * For phase 2's benefit, our return value is:
+ * 0: resume the syscall
+ * 1: go to phase 2; no seccomp phase 2 needed
+ * anything else: go to phase 2; pass return value to seccomp
*/
-long syscall_trace_enter(struct pt_regs *regs)
+unsigned long syscall_trace_enter_phase1(struct pt_regs *regs, u32 arch)
+{
+ unsigned long ret = 0;
+ u32 work;
+
+ BUG_ON(regs != task_pt_regs(current));
+
+ work = ACCESS_ONCE(current_thread_info()->flags) &
+ _TIF_WORK_SYSCALL_ENTRY;
+
+ /*
+ * If TIF_NOHZ is set, we are required to call user_exit() before
+ * doing anything that could touch RCU.
+ */
+ if (work & _TIF_NOHZ) {
+ user_exit();
+ work &= ~_TIF_NOHZ;
+ }
+
+#ifdef CONFIG_SECCOMP
+ /*
+ * Do seccomp first -- it should minimize exposure of other
+ * code, and keeping seccomp fast is probably more valuable
+ * than the rest of this.
+ */
+ if (work & _TIF_SECCOMP) {
+ struct seccomp_data sd;
+
+ sd.arch = arch;
+ sd.nr = regs->orig_ax;
+ sd.instruction_pointer = regs->ip;
+#ifdef CONFIG_X86_64
+ if (arch == AUDIT_ARCH_X86_64) {
+ sd.args[0] = regs->di;
+ sd.args[1] = regs->si;
+ sd.args[2] = regs->dx;
+ sd.args[3] = regs->r10;
+ sd.args[4] = regs->r8;
+ sd.args[5] = regs->r9;
+ } else
+#endif
+ {
+ sd.args[0] = regs->bx;
+ sd.args[1] = regs->cx;
+ sd.args[2] = regs->dx;
+ sd.args[3] = regs->si;
+ sd.args[4] = regs->di;
+ sd.args[5] = regs->bp;
+ }
+
+ BUILD_BUG_ON(SECCOMP_PHASE1_OK != 0);
+ BUILD_BUG_ON(SECCOMP_PHASE1_SKIP != 1);
+
+ ret = seccomp_phase1(&sd);
+ if (ret == SECCOMP_PHASE1_SKIP) {
+ regs->orig_ax = -1;
+ ret = 0;
+ } else if (ret != SECCOMP_PHASE1_OK) {
+ return ret; /* Go directly to phase 2 */
+ }
+
+ work &= ~_TIF_SECCOMP;
+ }
+#endif
+
+ /* Do our best to finish without phase 2. */
+ if (work == 0)
+ return ret; /* seccomp and/or nohz only (ret == 0 here) */
+
+#ifdef CONFIG_AUDITSYSCALL
+ if (work == _TIF_SYSCALL_AUDIT) {
+ /*
+ * If there is no more work to be done except auditing,
+ * then audit in phase 1. Phase 2 always audits, so, if
+ * we audit here, then we can't go on to phase 2.
+ */
+ do_audit_syscall_entry(regs, arch);
+ return 0;
+ }
+#endif
+
+ return 1; /* Something is enabled that we can't handle in phase 1 */
+}
+
+/* Returns the syscall nr to run (which should match regs->orig_ax). */
+long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch,
+ unsigned long phase1_result)
{
long ret = 0;
+ u32 work = ACCESS_ONCE(current_thread_info()->flags) &
+ _TIF_WORK_SYSCALL_ENTRY;
- user_exit();
+ BUG_ON(regs != task_pt_regs(current));
/*
* If we stepped into a sysenter/syscall insn, it trapped in
@@ -1467,17 +1569,21 @@ long syscall_trace_enter(struct pt_regs *regs)
* do_debug() and we need to set it again to restore the user
* state. If we entered on the slow path, TF was already set.
*/
- if (test_thread_flag(TIF_SINGLESTEP))
+ if (work & _TIF_SINGLESTEP)
regs->flags |= X86_EFLAGS_TF;
- /* do the secure computing check first */
- if (secure_computing(regs->orig_ax)) {
+#ifdef CONFIG_SECCOMP
+ /*
+ * Call seccomp_phase2 before running the other hooks so that
+ * they can see any changes made by a seccomp tracer.
+ */
+ if (phase1_result > 1 && seccomp_phase2(phase1_result)) {
/* seccomp failures shouldn't expose any additional code. */
- ret = -1L;
- goto out;
+ return -1;
}
+#endif
- if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
+ if (unlikely(work & _TIF_SYSCALL_EMU))
ret = -1L;
if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) &&
@@ -1487,23 +1593,22 @@ long syscall_trace_enter(struct pt_regs *regs)
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->orig_ax);
- if (IS_IA32)
- audit_syscall_entry(AUDIT_ARCH_I386,
- regs->orig_ax,
- regs->bx, regs->cx,
- regs->dx, regs->si);
-#ifdef CONFIG_X86_64
- else
- audit_syscall_entry(AUDIT_ARCH_X86_64,
- regs->orig_ax,
- regs->di, regs->si,
- regs->dx, regs->r10);
-#endif
+ do_audit_syscall_entry(regs, arch);
-out:
return ret ?: regs->orig_ax;
}
+long syscall_trace_enter(struct pt_regs *regs)
+{
+ u32 arch = is_ia32_task() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;
+ unsigned long phase1_result = syscall_trace_enter_phase1(regs, arch);
+
+ if (phase1_result == 0)
+ return regs->orig_ax;
+ else
+ return syscall_trace_enter_phase2(regs, arch, phase1_result);
+}
+
void syscall_trace_leave(struct pt_regs *regs)
{
bool step;
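For orientation, the two-phase contract above (0 = resume the syscall, 1 = run phase 2 without seccomp, anything else = run phase 2 and hand it the seccomp value) is consumed exactly as the legacy wrapper does; a minimal sketch, with the wrapper name invented for illustration:

static long example_syscall_entry(struct pt_regs *regs, u32 arch)
{
	/* Phase 1 covers the cheap cases: nohz, seccomp and audit-only. */
	unsigned long phase1_result = syscall_trace_enter_phase1(regs, arch);

	if (phase1_result == 0)
		return regs->orig_ax;	/* nothing else to do, run the syscall */

	/* 1 or a seccomp phase-1 cookie: fall back to the full slow path. */
	return syscall_trace_enter_phase2(regs, arch, phase1_result);
}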
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index ff898bbf579d..176a0f99d4da 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -498,6 +498,24 @@ void force_hpet_resume(void)
}
/*
+ * According to the datasheet, e6xx systems have the HPET hardwired to
+ * 0xfed00000
+ */
+static void e6xx_force_enable_hpet(struct pci_dev *dev)
+{
+ if (hpet_address || force_hpet_address)
+ return;
+
+ force_hpet_address = 0xFED00000;
+ force_hpet_resume_type = NONE_FORCE_HPET_RESUME;
+ dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
+ force_hpet_address);
+ return;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E6XX_CU,
+ e6xx_force_enable_hpet);
+
+/*
* HPET MSI on some boards (ATI SB700/SB800) has side effect on
* floppy DMA. Disable HPET MSI on such platforms.
* See erratum #27 (Misinterpreted MSI Requests May Result in
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 41ead8d3bc0b..235cfd39e0d7 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -879,6 +879,15 @@ void __init setup_arch(char **cmdline_p)
KERNEL_PGD_PTRS);
load_cr3(swapper_pg_dir);
+ /*
+ * Note: Quark X1000 CPUs advertise PGE incorrectly and require
+ * a cr3-based TLB flush, so the following __flush_tlb_all()
+ * will not flush anything, because the CPU quirk which clears
+ * X86_FEATURE_PGE has not been invoked yet. The TLB has,
+ * however, already been flushed by the load_cr3() above. The
+ * quirk is invoked before any subsequent call to __flush_tlb_all(),
+ * so proper operation is guaranteed.
+ */
__flush_tlb_all();
#else
printk(KERN_INFO "Command line: %s\n", boot_command_line);
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 2851d63c1202..ed37a768d0fc 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -675,6 +675,11 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
* handler too.
*/
regs->flags &= ~(X86_EFLAGS_DF|X86_EFLAGS_RF|X86_EFLAGS_TF);
+ /*
+ * Ensure the signal handler starts with the new fpu state.
+ */
+ if (used_math())
+ drop_init_fpu(current);
}
signal_setup_done(failed, ksig, test_thread_flag(TIF_SINGLESTEP));
}
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 42a2dca984b3..2d5200e56357 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -102,6 +102,8 @@ DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);
+static DEFINE_PER_CPU(struct completion, die_complete);
+
atomic_t init_deasserted;
/*
@@ -111,7 +113,6 @@ atomic_t init_deasserted;
static void smp_callin(void)
{
int cpuid, phys_id;
- unsigned long timeout;
/*
* If waken up by an INIT in an 82489DX configuration
@@ -130,37 +131,6 @@ static void smp_callin(void)
* (This works even if the APIC is not enabled.)
*/
phys_id = read_apic_id();
- if (cpumask_test_cpu(cpuid, cpu_callin_mask)) {
- panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
- phys_id, cpuid);
- }
- pr_debug("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);
-
- /*
- * STARTUP IPIs are fragile beasts as they might sometimes
- * trigger some glue motherboard logic. Complete APIC bus
- * silence for 1 second, this overestimates the time the
- * boot CPU is spending to send the up to 2 STARTUP IPIs
- * by a factor of two. This should be enough.
- */
-
- /*
- * Waiting 2s total for startup (udelay is not yet working)
- */
- timeout = jiffies + 2*HZ;
- while (time_before(jiffies, timeout)) {
- /*
- * Has the boot CPU finished it's STARTUP sequence?
- */
- if (cpumask_test_cpu(cpuid, cpu_callout_mask))
- break;
- cpu_relax();
- }
-
- if (!time_before(jiffies, timeout)) {
- panic("%s: CPU%d started up but did not get a callout!\n",
- __func__, cpuid);
- }
/*
* the boot CPU has finished the init stage and is spinning
@@ -296,11 +266,19 @@ void smp_store_cpu_info(int id)
}
static bool
+topology_same_node(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
+{
+ int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
+
+ return (cpu_to_node(cpu1) == cpu_to_node(cpu2));
+}
+
+static bool
topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name)
{
int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
- return !WARN_ONCE(cpu_to_node(cpu1) != cpu_to_node(cpu2),
+ return !WARN_ONCE(!topology_same_node(c, o),
"sched: CPU #%d's %s-sibling CPU #%d is not on the same node! "
"[node: %d != %d]. Ignoring dependency.\n",
cpu1, name, cpu2, cpu_to_node(cpu1), cpu_to_node(cpu2));
@@ -341,17 +319,44 @@ static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
return false;
}
-static bool match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
+/*
+ * Unlike the other levels, we do not enforce keeping a
+ * multicore group inside a NUMA node. If a multicore group does
+ * span NUMA nodes, we will discard the MC level of the topology later.
+ */
+static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
- if (c->phys_proc_id == o->phys_proc_id) {
- if (cpu_has(c, X86_FEATURE_AMD_DCM))
- return true;
-
- return topology_sane(c, o, "mc");
- }
+ if (c->phys_proc_id == o->phys_proc_id)
+ return true;
return false;
}
+static struct sched_domain_topology_level numa_inside_package_topology[] = {
+#ifdef CONFIG_SCHED_SMT
+ { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
+#endif
+#ifdef CONFIG_SCHED_MC
+ { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
+#endif
+ { NULL, },
+};
+/*
+ * set_sched_topology() sets the topology internal to a CPU. The
+ * NUMA topologies are layered on top of it to build the full
+ * system topology.
+ *
+ * If NUMA nodes are observed to occur within a CPU package, this
+ * function should be called. It forces the sched domain code to
+ * only use the SMT level for the CPU portion of the topology.
+ * This essentially falls back to relying on NUMA information
+ * from the SRAT table to describe the entire system topology
+ * (except for hyperthreads).
+ */
+static void primarily_use_numa_for_topology(void)
+{
+ set_sched_topology(numa_inside_package_topology);
+}
+
void set_cpu_sibling_map(int cpu)
{
bool has_smt = smp_num_siblings > 1;
@@ -388,7 +393,7 @@ void set_cpu_sibling_map(int cpu)
for_each_cpu(i, cpu_sibling_setup_mask) {
o = &cpu_data(i);
- if ((i == cpu) || (has_mp && match_mc(c, o))) {
+ if ((i == cpu) || (has_mp && match_die(c, o))) {
link_mask(core, cpu, i);
/*
@@ -410,6 +415,8 @@ void set_cpu_sibling_map(int cpu)
} else if (i != cpu && !c->booted_cores)
c->booted_cores = cpu_data(i).booted_cores;
}
+ if (match_die(c, o) && !topology_same_node(c, o))
+ primarily_use_numa_for_topology();
}
}
@@ -753,8 +760,8 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
unsigned long start_ip = real_mode_header->trampoline_start;
unsigned long boot_error = 0;
- int timeout;
int cpu0_nmi_registered = 0;
+ unsigned long timeout;
/* Just in case we booted with a single CPU. */
alternatives_enable_smp();
@@ -802,6 +809,15 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
}
/*
+ * The AP might wait on cpu_callout_mask in cpu_init() with
+ * cpu_initialized_mask set if a previous attempt to online
+ * it timed out. Clear cpu_initialized_mask so that after
+ * INIT/SIPI it can start with a clean state.
+ */
+ cpumask_clear_cpu(cpu, cpu_initialized_mask);
+ smp_mb();
+
+ /*
* Wake up a CPU in different cases:
* - Use the method in the APIC driver if it's defined
* Otherwise,
@@ -815,53 +831,38 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
if (!boot_error) {
/*
- * allow APs to start initializing.
+ * Wait 10s total for a response from AP
*/
- pr_debug("Before Callout %d\n", cpu);
- cpumask_set_cpu(cpu, cpu_callout_mask);
- pr_debug("After Callout %d\n", cpu);
+ boot_error = -1;
+ timeout = jiffies + 10*HZ;
+ while (time_before(jiffies, timeout)) {
+ if (cpumask_test_cpu(cpu, cpu_initialized_mask)) {
+ /*
+ * Tell AP to proceed with initialization
+ */
+ cpumask_set_cpu(cpu, cpu_callout_mask);
+ boot_error = 0;
+ break;
+ }
+ udelay(100);
+ schedule();
+ }
+ }
+ if (!boot_error) {
/*
- * Wait 5s total for a response
+ * Wait till AP completes initial initialization
*/
- for (timeout = 0; timeout < 50000; timeout++) {
- if (cpumask_test_cpu(cpu, cpu_callin_mask))
- break; /* It has booted */
- udelay(100);
+ while (!cpumask_test_cpu(cpu, cpu_callin_mask)) {
/*
* Allow other tasks to run while we wait for the
* AP to come online. This also gives a chance
* for the MTRR work (triggered by the AP coming online)
* to be completed in the stop machine context.
*/
+ udelay(100);
schedule();
}
-
- if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
- print_cpu_msr(&cpu_data(cpu));
- pr_debug("CPU%d: has booted.\n", cpu);
- } else {
- boot_error = 1;
- if (*trampoline_status == 0xA5A5A5A5)
- /* trampoline started but...? */
- pr_err("CPU%d: Stuck ??\n", cpu);
- else
- /* trampoline code not run */
- pr_err("CPU%d: Not responding\n", cpu);
- if (apic->inquire_remote_apic)
- apic->inquire_remote_apic(apicid);
- }
- }
-
- if (boot_error) {
- /* Try to put things back the way they were before ... */
- numa_remove_cpu(cpu); /* was set by numa_add_cpu */
-
- /* was set by do_boot_cpu() */
- cpumask_clear_cpu(cpu, cpu_callout_mask);
-
- /* was set by cpu_init() */
- cpumask_clear_cpu(cpu, cpu_initialized_mask);
}
/* mark "stuck" area as not stuck */
@@ -1326,26 +1327,24 @@ int native_cpu_disable(void)
return ret;
clear_local_APIC();
-
+ init_completion(&per_cpu(die_complete, smp_processor_id()));
cpu_disable_common();
+
return 0;
}
void native_cpu_die(unsigned int cpu)
{
/* We don't do anything here: idle task is faking death itself. */
- unsigned int i;
+ wait_for_completion_timeout(&per_cpu(die_complete, cpu), HZ);
- for (i = 0; i < 10; i++) {
- /* They ack this in play_dead by setting CPU_DEAD */
- if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
- if (system_state == SYSTEM_RUNNING)
- pr_info("CPU %u is now offline\n", cpu);
- return;
- }
- msleep(100);
+ /* They ack this in play_dead() by setting CPU_DEAD */
+ if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
+ if (system_state == SYSTEM_RUNNING)
+ pr_info("CPU %u is now offline\n", cpu);
+ } else {
+ pr_err("CPU %u didn't die...\n", cpu);
}
- pr_err("CPU %u didn't die...\n", cpu);
}
void play_dead_common(void)
@@ -1357,6 +1356,7 @@ void play_dead_common(void)
mb();
/* Ack it */
__this_cpu_write(cpu_state, CPU_DEAD);
+ complete(&per_cpu(die_complete, smp_processor_id()));
/*
* With physical CPU hotplug, we should halt the cpu
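Condensed, the die_complete handshake added in this file works as sketched below; the symbols are the ones from the hunks above, trimmed down only to make the control flow between the two CPUs explicit (not a standalone build):

static DEFINE_PER_CPU(struct completion, die_complete);

int native_cpu_disable(void)			/* on the CPU going offline */
{
	init_completion(&per_cpu(die_complete, smp_processor_id()));
	cpu_disable_common();
	return 0;
}

void play_dead_common(void)			/* on the CPU going offline */
{
	__this_cpu_write(cpu_state, CPU_DEAD);	/* ack */
	complete(&per_cpu(die_complete, smp_processor_id()));
}

void native_cpu_die(unsigned int cpu)		/* on the CPU doing the unplug */
{
	wait_for_completion_timeout(&per_cpu(die_complete, cpu), HZ);
	if (per_cpu(cpu_state, cpu) != CPU_DEAD)
		pr_err("CPU %u didn't die...\n", cpu);
}

This replaces the old ten-iteration msleep(100) polling loop with a wait that returns as soon as the dying CPU has acked CPU_DEAD.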
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index e1e1e80fc6a6..957779f4eb40 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -216,7 +216,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
*/
regs->orig_ax = syscall_nr;
regs->ax = -ENOSYS;
- tmp = secure_computing(syscall_nr);
+ tmp = secure_computing();
if ((!tmp && regs->orig_ax != syscall_nr) || regs->ip != address) {
warn_bad_vsyscall(KERN_DEBUG, regs,
"seccomp tried to change syscall nr or ip");
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index 940b142cc11f..4c540c4719d8 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -271,8 +271,6 @@ int save_xstate_sig(void __user *buf, void __user *buf_fx, int size)
if (use_fxsr() && save_xstate_epilog(buf_fx, ia32_fxstate))
return -1;
- drop_init_fpu(tsk); /* trigger finit */
-
return 0;
}
@@ -402,8 +400,11 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
set_used_math();
}
- if (use_eager_fpu())
+ if (use_eager_fpu()) {
+ preempt_disable();
math_state_restore();
+ preempt_enable();
+ }
return err;
} else {
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 4d4f96a27638..db92793b7e23 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -20,7 +20,6 @@ lib-y := delay.o misc.o cmdline.o
lib-y += thunk_$(BITS).o
lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o
lib-y += memcpy_$(BITS).o
-lib-$(CONFIG_SMP) += rwlock.o
lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o
@@ -39,7 +38,7 @@ endif
else
obj-y += iomap_copy_64.o
lib-y += csum-partial_64.o csum-copy_64.o csum-wrappers_64.o
- lib-y += thunk_64.o clear_page_64.o copy_page_64.o
+ lib-y += clear_page_64.o copy_page_64.o
lib-y += memmove_64.o memset_64.o
lib-y += copy_user_64.o copy_user_nocache_64.o
lib-y += cmpxchg16b_emu.o
diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
index 1e572c507d06..40a172541ee2 100644
--- a/arch/x86/lib/cmpxchg16b_emu.S
+++ b/arch/x86/lib/cmpxchg16b_emu.S
@@ -6,15 +6,8 @@
*
*/
#include <linux/linkage.h>
-#include <asm/alternative-asm.h>
-#include <asm/frame.h>
#include <asm/dwarf2.h>
-
-#ifdef CONFIG_SMP
-#define SEG_PREFIX %gs:
-#else
-#define SEG_PREFIX
-#endif
+#include <asm/percpu.h>
.text
@@ -39,24 +32,25 @@ CFI_STARTPROC
# *atomic* on a single cpu (as provided by the this_cpu_xx class of
# macros).
#
-this_cpu_cmpxchg16b_emu:
- pushf
+ pushfq_cfi
cli
- cmpq SEG_PREFIX(%rsi), %rax
- jne not_same
- cmpq SEG_PREFIX 8(%rsi), %rdx
- jne not_same
+ cmpq PER_CPU_VAR((%rsi)), %rax
+ jne .Lnot_same
+ cmpq PER_CPU_VAR(8(%rsi)), %rdx
+ jne .Lnot_same
- movq %rbx, SEG_PREFIX(%rsi)
- movq %rcx, SEG_PREFIX 8(%rsi)
+ movq %rbx, PER_CPU_VAR((%rsi))
+ movq %rcx, PER_CPU_VAR(8(%rsi))
- popf
+ CFI_REMEMBER_STATE
+ popfq_cfi
mov $1, %al
ret
- not_same:
- popf
+ CFI_RESTORE_STATE
+.Lnot_same:
+ popfq_cfi
xor %al,%al
ret
diff --git a/arch/x86/lib/cmpxchg8b_emu.S b/arch/x86/lib/cmpxchg8b_emu.S
index 828cb710dec2..b4807fce5177 100644
--- a/arch/x86/lib/cmpxchg8b_emu.S
+++ b/arch/x86/lib/cmpxchg8b_emu.S
@@ -7,11 +7,8 @@
*/
#include <linux/linkage.h>
-#include <asm/alternative-asm.h>
-#include <asm/frame.h>
#include <asm/dwarf2.h>
-
.text
/*
@@ -30,27 +27,28 @@ CFI_STARTPROC
# set the whole ZF thing (caller will just compare
# eax:edx with the expected value)
#
-cmpxchg8b_emu:
- pushfl
+ pushfl_cfi
cli
cmpl (%esi), %eax
- jne not_same
+ jne .Lnot_same
cmpl 4(%esi), %edx
- jne half_same
+ jne .Lhalf_same
movl %ebx, (%esi)
movl %ecx, 4(%esi)
- popfl
+ CFI_REMEMBER_STATE
+ popfl_cfi
ret
- not_same:
+ CFI_RESTORE_STATE
+.Lnot_same:
movl (%esi), %eax
- half_same:
+.Lhalf_same:
movl 4(%esi), %edx
- popfl
+ popfl_cfi
ret
CFI_ENDPROC
diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
deleted file mode 100644
index 1cad22139c88..000000000000
--- a/arch/x86/lib/rwlock.S
+++ /dev/null
@@ -1,44 +0,0 @@
-/* Slow paths of read/write spinlocks. */
-
-#include <linux/linkage.h>
-#include <asm/alternative-asm.h>
-#include <asm/frame.h>
-#include <asm/rwlock.h>
-
-#ifdef CONFIG_X86_32
-# define __lock_ptr eax
-#else
-# define __lock_ptr rdi
-#endif
-
-ENTRY(__write_lock_failed)
- CFI_STARTPROC
- FRAME
-0: LOCK_PREFIX
- WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
-1: rep; nop
- cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
- jne 1b
- LOCK_PREFIX
- WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
- jnz 0b
- ENDFRAME
- ret
- CFI_ENDPROC
-END(__write_lock_failed)
-
-ENTRY(__read_lock_failed)
- CFI_STARTPROC
- FRAME
-0: LOCK_PREFIX
- READ_LOCK_SIZE(inc) (%__lock_ptr)
-1: rep; nop
- READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
- js 1b
- LOCK_PREFIX
- READ_LOCK_SIZE(dec) (%__lock_ptr)
- js 0b
- ENDFRAME
- ret
- CFI_ENDPROC
-END(__read_lock_failed)
diff --git a/arch/x86/lib/thunk_32.S b/arch/x86/lib/thunk_32.S
index 28f85c916712..e28cdaf5ac2c 100644
--- a/arch/x86/lib/thunk_32.S
+++ b/arch/x86/lib/thunk_32.S
@@ -6,25 +6,46 @@
*/
#include <linux/linkage.h>
#include <asm/asm.h>
+ #include <asm/dwarf2.h>
-#ifdef CONFIG_TRACE_IRQFLAGS
/* put return address in eax (arg1) */
- .macro thunk_ra name,func
+ .macro THUNK name, func, put_ret_addr_in_eax=0
.globl \name
\name:
- pushl %eax
- pushl %ecx
- pushl %edx
+ CFI_STARTPROC
+ pushl_cfi %eax
+ CFI_REL_OFFSET eax, 0
+ pushl_cfi %ecx
+ CFI_REL_OFFSET ecx, 0
+ pushl_cfi %edx
+ CFI_REL_OFFSET edx, 0
+
+ .if \put_ret_addr_in_eax
/* Place EIP in the arg1 */
movl 3*4(%esp), %eax
+ .endif
+
call \func
- popl %edx
- popl %ecx
- popl %eax
+ popl_cfi %edx
+ CFI_RESTORE edx
+ popl_cfi %ecx
+ CFI_RESTORE ecx
+ popl_cfi %eax
+ CFI_RESTORE eax
ret
+ CFI_ENDPROC
_ASM_NOKPROBE(\name)
.endm
- thunk_ra trace_hardirqs_on_thunk,trace_hardirqs_on_caller
- thunk_ra trace_hardirqs_off_thunk,trace_hardirqs_off_caller
+#ifdef CONFIG_TRACE_IRQFLAGS
+ THUNK trace_hardirqs_on_thunk,trace_hardirqs_on_caller,1
+ THUNK trace_hardirqs_off_thunk,trace_hardirqs_off_caller,1
+#endif
+
+#ifdef CONFIG_PREEMPT
+ THUNK ___preempt_schedule, preempt_schedule
+#ifdef CONFIG_CONTEXT_TRACKING
+ THUNK ___preempt_schedule_context, preempt_schedule_context
#endif
+#endif
+
diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
index 92d9feaff42b..b30b5ebd614a 100644
--- a/arch/x86/lib/thunk_64.S
+++ b/arch/x86/lib/thunk_64.S
@@ -38,6 +38,13 @@
THUNK lockdep_sys_exit_thunk,lockdep_sys_exit
#endif
+#ifdef CONFIG_PREEMPT
+ THUNK ___preempt_schedule, preempt_schedule
+#ifdef CONFIG_CONTEXT_TRACKING
+ THUNK ___preempt_schedule_context, preempt_schedule_context
+#endif
+#endif
+
/* SAVE_ARGS below is used only for the .cfi directives it contains. */
CFI_STARTPROC
SAVE_ARGS
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index a24194681513..d973e61e450d 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -3,7 +3,6 @@
* Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
* Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
*/
-#include <linux/magic.h> /* STACK_END_MAGIC */
#include <linux/sched.h> /* test_thread_flag(), ... */
#include <linux/kdebug.h> /* oops_begin/end, ... */
#include <linux/module.h> /* search_exception_table */
@@ -350,7 +349,7 @@ out:
void vmalloc_sync_all(void)
{
- sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
+ sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END, 0);
}
/*
@@ -649,7 +648,6 @@ no_context(struct pt_regs *regs, unsigned long error_code,
unsigned long address, int signal, int si_code)
{
struct task_struct *tsk = current;
- unsigned long *stackend;
unsigned long flags;
int sig;
@@ -709,8 +707,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
show_fault_oops(regs, error_code, address);
- stackend = end_of_stack(tsk);
- if (tsk != &init_task && *stackend != STACK_END_MAGIC)
+ if (task_stack_end_corrupted(tsk))
printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
tsk->thread.cr2 = address;
@@ -933,8 +930,17 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
* cross-processor TLB flush, even if no stale TLB entries exist
* on other processors.
*
+ * Spurious faults may only occur if the TLB contains an entry with
+ * fewer permissions than the page table entry. Non-present (P = 0)
+ * and reserved bit (R = 1) faults are never spurious.
+ *
* There are no security implications to leaving a stale TLB when
* increasing the permissions on a page.
+ *
+ * Returns non-zero if a spurious fault was handled, zero otherwise.
+ *
+ * See Intel Developer's Manual Vol 3 Section 4.10.4.3, bullet 3
+ * (Optional Invalidation).
*/
static noinline int
spurious_fault(unsigned long error_code, unsigned long address)
@@ -945,8 +951,17 @@ spurious_fault(unsigned long error_code, unsigned long address)
pte_t *pte;
int ret;
- /* Reserved-bit violation or user access to kernel space? */
- if (error_code & (PF_USER | PF_RSVD))
+ /*
+ * Only writes to RO or instruction fetches from NX may cause
+ * spurious faults.
+ *
+ * These could be from user or supervisor accesses but the TLB
+ * is only lazily flushed after a kernel mapping protection
+ * change, so user accesses are not expected to cause spurious
+ * faults.
+ */
+ if (error_code != (PF_WRITE | PF_PROT)
+ && error_code != (PF_INSTR | PF_PROT))
return 0;
pgd = init_mm.pgd + pgd_index(address);
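The new filter boils down to a two-case predicate on the hardware error code; as a sketch (the helper name is invented for illustration, PF_WRITE/PF_PROT/PF_INSTR are the existing arch/x86 page-fault bits):

static bool fault_may_be_spurious(unsigned long error_code)
{
	/* Only a protection fault caused by a write to a read-only mapping
	 * or an instruction fetch from an NX mapping can be a stale-TLB
	 * artifact; everything else (non-present, reserved bit, other bit
	 * combinations) is handled by the normal fault path. */
	return error_code == (PF_WRITE | PF_PROT) ||
	       error_code == (PF_INSTR | PF_PROT);
}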
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 7d05565ba781..c8140e12816a 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -537,7 +537,7 @@ static void __init pagetable_init(void)
permanent_kmaps_init(pgd_base);
}
-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
+pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL);
EXPORT_SYMBOL_GPL(__supported_pte_mask);
/* user-defined highmem size */
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 5621c47d7a1a..4cb8763868fc 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -151,7 +151,7 @@ early_param("gbpages", parse_direct_gbpages_on);
* around without checking the pgd every time.
*/
-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
+pteval_t __supported_pte_mask __read_mostly = ~0;
EXPORT_SYMBOL_GPL(__supported_pte_mask);
int force_personality32;
@@ -178,7 +178,7 @@ __setup("noexec32=", nonx32_setup);
* When memory was added/removed make sure all the processes MM have
* suitable PGD entries in the local PGD level page.
*/
-void sync_global_pgds(unsigned long start, unsigned long end)
+void sync_global_pgds(unsigned long start, unsigned long end, int removed)
{
unsigned long address;
@@ -186,7 +186,12 @@ void sync_global_pgds(unsigned long start, unsigned long end)
const pgd_t *pgd_ref = pgd_offset_k(address);
struct page *page;
- if (pgd_none(*pgd_ref))
+ /*
+ * When it is called after memory hot remove, pgd_none()
+ * returns true. In this case (removed == 1), we must clear
+ * the PGD entries in the local PGD level page.
+ */
+ if (pgd_none(*pgd_ref) && !removed)
continue;
spin_lock(&pgd_lock);
@@ -199,12 +204,18 @@ void sync_global_pgds(unsigned long start, unsigned long end)
pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
spin_lock(pgt_lock);
- if (pgd_none(*pgd))
- set_pgd(pgd, *pgd_ref);
- else
+ if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
BUG_ON(pgd_page_vaddr(*pgd)
!= pgd_page_vaddr(*pgd_ref));
+ if (removed) {
+ if (pgd_none(*pgd_ref) && !pgd_none(*pgd))
+ pgd_clear(pgd);
+ } else {
+ if (pgd_none(*pgd))
+ set_pgd(pgd, *pgd_ref);
+ }
+
spin_unlock(pgt_lock);
}
spin_unlock(&pgd_lock);
@@ -633,7 +644,7 @@ kernel_physical_mapping_init(unsigned long start,
}
if (pgd_changed)
- sync_global_pgds(addr, end - 1);
+ sync_global_pgds(addr, end - 1, 0);
__flush_tlb_all();
@@ -976,25 +987,26 @@ static void __meminit
remove_pagetable(unsigned long start, unsigned long end, bool direct)
{
unsigned long next;
+ unsigned long addr;
pgd_t *pgd;
pud_t *pud;
bool pgd_changed = false;
- for (; start < end; start = next) {
- next = pgd_addr_end(start, end);
+ for (addr = start; addr < end; addr = next) {
+ next = pgd_addr_end(addr, end);
- pgd = pgd_offset_k(start);
+ pgd = pgd_offset_k(addr);
if (!pgd_present(*pgd))
continue;
pud = (pud_t *)pgd_page_vaddr(*pgd);
- remove_pud_table(pud, start, next, direct);
+ remove_pud_table(pud, addr, next, direct);
if (free_pud_table(pud, pgd))
pgd_changed = true;
}
if (pgd_changed)
- sync_global_pgds(start, end - 1);
+ sync_global_pgds(start, end - 1, 1);
flush_tlb_all();
}
@@ -1341,7 +1353,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
else
err = vmemmap_populate_basepages(start, end, node);
if (!err)
- sync_global_pgds(start, end - 1);
+ sync_global_pgds(start, end - 1, 0);
return err;
}
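With the new third argument, callers of sync_global_pgds() state explicitly whether PGD entries may also have to be torn down; the two call patterns used in this file are, in short:

	/* populate paths (vmalloc sync, physical mapping, vmemmap):
	 * only copy missing entries into each mm's PGD. */
	sync_global_pgds(start, end - 1, 0);

	/* memory hot-remove path: additionally clear entries that have
	 * disappeared from the reference (init_mm) page table. */
	sync_global_pgds(start, end - 1, 1);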
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index a32b706c401a..d221374d5ce8 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -185,8 +185,8 @@ int __init numa_add_memblk(int nid, u64 start, u64 end)
return numa_add_memblk_to(nid, start, end, &numa_meminfo);
}
-/* Initialize NODE_DATA for a node on the local memory */
-static void __init setup_node_data(int nid, u64 start, u64 end)
+/* Allocate NODE_DATA for a node on the local memory */
+static void __init alloc_node_data(int nid)
{
const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
u64 nd_pa;
@@ -194,18 +194,6 @@ static void __init setup_node_data(int nid, u64 start, u64 end)
int tnid;
/*
- * Don't confuse VM with a node that doesn't have the
- * minimum amount of memory:
- */
- if (end && (end - start) < NODE_MIN_SIZE)
- return;
-
- start = roundup(start, ZONE_ALIGN);
-
- printk(KERN_INFO "Initmem setup node %d [mem %#010Lx-%#010Lx]\n",
- nid, start, end - 1);
-
- /*
* Allocate node data. Try node-local memory and then any node.
* Never allocate in DMA zone.
*/
@@ -222,7 +210,7 @@ static void __init setup_node_data(int nid, u64 start, u64 end)
nd = __va(nd_pa);
/* report and initialize */
- printk(KERN_INFO " NODE_DATA [mem %#010Lx-%#010Lx]\n",
+ printk(KERN_INFO "NODE_DATA(%d) allocated [mem %#010Lx-%#010Lx]\n", nid,
nd_pa, nd_pa + nd_size - 1);
tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
if (tnid != nid)
@@ -230,9 +218,6 @@ static void __init setup_node_data(int nid, u64 start, u64 end)
node_data[nid] = nd;
memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
- NODE_DATA(nid)->node_id = nid;
- NODE_DATA(nid)->node_start_pfn = start >> PAGE_SHIFT;
- NODE_DATA(nid)->node_spanned_pages = (end - start) >> PAGE_SHIFT;
node_set_online(nid);
}
@@ -523,8 +508,17 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
end = max(mi->blk[i].end, end);
}
- if (start < end)
- setup_node_data(nid, start, end);
+ if (start >= end)
+ continue;
+
+ /*
+ * Don't confuse VM with a node that doesn't have the
+ * minimum amount of memory:
+ */
+ if (end && (end - start) < NODE_MIN_SIZE)
+ continue;
+
+ alloc_node_data(nid);
}
/* Dump memblock with node info and return. */
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index 4dd8cf652579..75cc0978d45d 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -59,41 +59,6 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
__flush_tlb_one(vaddr);
}
-/*
- * Associate a large virtual page frame with a given physical page frame
- * and protection flags for that frame. pfn is for the base of the page,
- * vaddr is what the page gets mapped to - both must be properly aligned.
- * The pmd must already be instantiated. Assumes PAE mode.
- */
-void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
-
- if (vaddr & (PMD_SIZE-1)) { /* vaddr is misaligned */
- printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
- return; /* BUG(); */
- }
- if (pfn & (PTRS_PER_PTE-1)) { /* pfn is misaligned */
- printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
- return; /* BUG(); */
- }
- pgd = swapper_pg_dir + pgd_index(vaddr);
- if (pgd_none(*pgd)) {
- printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
- return; /* BUG(); */
- }
- pud = pud_offset(pgd, vaddr);
- pmd = pmd_offset(pud, vaddr);
- set_pmd(pmd, pfn_pmd(pfn, flags));
- /*
- * It's enough to flush this one mapping.
- * (PGE mappings get flushed as well)
- */
- __flush_tlb_one(vaddr);
-}
-
unsigned long __FIXADDR_TOP = 0xfffff000;
EXPORT_SYMBOL(__FIXADDR_TOP);
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index 2ae525e0d8ba..37c1435889ce 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -442,8 +442,6 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
*/
prot |= _PAGE_CACHE_UC_MINUS;
- prot |= _PAGE_IOMAP; /* creating a mapping for IO */
-
vma->vm_page_prot = __pgprot(prot);
if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index bbb1d2259ecf..a5efb21d5228 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -695,7 +695,7 @@ static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
*
*/
static int per_cpu_shndx = -1;
-Elf_Addr per_cpu_load_addr;
+static Elf_Addr per_cpu_load_addr;
static void percpu_init(void)
{
diff --git a/arch/x86/vdso/vdso2c.h b/arch/x86/vdso/vdso2c.h
index fd57829b30d8..0224987556ce 100644
--- a/arch/x86/vdso/vdso2c.h
+++ b/arch/x86/vdso/vdso2c.h
@@ -109,16 +109,18 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
/* Validate mapping addresses. */
for (i = 0; i < sizeof(special_pages) / sizeof(special_pages[0]); i++) {
- if (!syms[i])
+ INT_BITS symval = syms[special_pages[i]];
+
+ if (!symval)
continue; /* The mapping isn't used; ignore it. */
- if (syms[i] % 4096)
+ if (symval % 4096)
fail("%s must be a multiple of 4096\n",
required_syms[i].name);
- if (syms[sym_vvar_start] > syms[i] + 4096)
- fail("%s underruns begin_vvar\n",
+ if (symval + 4096 < syms[sym_vvar_start])
+ fail("%s underruns vvar_start\n",
required_syms[i].name);
- if (syms[i] + 4096 > 0)
+ if (symval + 4096 > 0)
fail("%s is on the wrong side of the vdso text\n",
required_syms[i].name);
}
diff --git a/arch/x86/xen/efi.c b/arch/x86/xen/efi.c
index a02e09e18f57..be14cc3e48d5 100644
--- a/arch/x86/xen/efi.c
+++ b/arch/x86/xen/efi.c
@@ -15,12 +15,14 @@
* with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/bitops.h>
#include <linux/efi.h>
#include <linux/init.h>
#include <linux/string.h>
#include <xen/xen-ops.h>
+#include <asm/page.h>
#include <asm/setup.h>
void __init xen_efi_init(void)
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index c0cb11fb5008..acb0effd8077 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1463,6 +1463,7 @@ static void __ref xen_setup_gdt(int cpu)
pv_cpu_ops.load_gdt = xen_load_gdt;
}
+#ifdef CONFIG_XEN_PVH
/*
* A PV guest starts with default flags that are not set for PVH, set them
* here asap.
@@ -1508,17 +1509,21 @@ static void __init xen_pvh_early_guest_init(void)
return;
xen_have_vector_callback = 1;
+
+ xen_pvh_early_cpu_init(0, false);
xen_pvh_set_cr_flags(0);
#ifdef CONFIG_X86_32
BUG(); /* PVH: Implement proper support. */
#endif
}
+#endif /* CONFIG_XEN_PVH */
/* First C function to be called on Xen boot */
asmlinkage __visible void __init xen_start_kernel(void)
{
struct physdev_set_iopl set_iopl;
+ unsigned long initrd_start = 0;
int rc;
if (!xen_start_info)
@@ -1527,7 +1532,9 @@ asmlinkage __visible void __init xen_start_kernel(void)
xen_domain_type = XEN_PV_DOMAIN;
xen_setup_features();
+#ifdef CONFIG_XEN_PVH
xen_pvh_early_guest_init();
+#endif
xen_setup_machphys_mapping();
/* Install Xen paravirt ops */
@@ -1559,8 +1566,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
#endif
__supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);
- __supported_pte_mask |= _PAGE_IOMAP;
-
/*
* Prevent page tables from being allocated in highmem, even
* if CONFIG_HIGHPTE is enabled.
@@ -1667,10 +1672,16 @@ asmlinkage __visible void __init xen_start_kernel(void)
new_cpu_data.x86_capability[0] = cpuid_edx(1);
#endif
+ if (xen_start_info->mod_start) {
+ if (xen_start_info->flags & SIF_MOD_START_PFN)
+ initrd_start = PFN_PHYS(xen_start_info->mod_start);
+ else
+ initrd_start = __pa(xen_start_info->mod_start);
+ }
+
/* Poke various useful things into boot_params */
boot_params.hdr.type_of_loader = (9 << 4) | 0;
- boot_params.hdr.ramdisk_image = xen_start_info->mod_start
- ? __pa(xen_start_info->mod_start) : 0;
+ boot_params.hdr.ramdisk_image = initrd_start;
boot_params.hdr.ramdisk_size = xen_start_info->mod_len;
boot_params.hdr.cmd_line_ptr = __pa(xen_start_info->cmd_line);
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 16fb0099b7f2..f62af7647ec9 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -399,38 +399,14 @@ static pteval_t pte_pfn_to_mfn(pteval_t val)
if (unlikely(mfn == INVALID_P2M_ENTRY)) {
mfn = 0;
flags = 0;
- } else {
- /*
- * Paramount to do this test _after_ the
- * INVALID_P2M_ENTRY as INVALID_P2M_ENTRY &
- * IDENTITY_FRAME_BIT resolves to true.
- */
- mfn &= ~FOREIGN_FRAME_BIT;
- if (mfn & IDENTITY_FRAME_BIT) {
- mfn &= ~IDENTITY_FRAME_BIT;
- flags |= _PAGE_IOMAP;
- }
- }
+ } else
+ mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
}
return val;
}
-static pteval_t iomap_pte(pteval_t val)
-{
- if (val & _PAGE_PRESENT) {
- unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
- pteval_t flags = val & PTE_FLAGS_MASK;
-
- /* We assume the pte frame number is a MFN, so
- just use it as-is. */
- val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
- }
-
- return val;
-}
-
__visible pteval_t xen_pte_val(pte_t pte)
{
pteval_t pteval = pte.pte;
@@ -441,9 +417,6 @@ __visible pteval_t xen_pte_val(pte_t pte)
pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT;
}
#endif
- if (xen_initial_domain() && (pteval & _PAGE_IOMAP))
- return pteval;
-
return pte_mfn_to_pfn(pteval);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
@@ -481,7 +454,6 @@ void xen_set_pat(u64 pat)
__visible pte_t xen_make_pte(pteval_t pte)
{
- phys_addr_t addr = (pte & PTE_PFN_MASK);
#if 0
/* If Linux is trying to set a WC pte, then map to the Xen WC.
* If _PAGE_PAT is set, then it probably means it is really
@@ -496,19 +468,7 @@ __visible pte_t xen_make_pte(pteval_t pte)
pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT;
}
#endif
- /*
- * Unprivileged domains are allowed to do IOMAPpings for
- * PCI passthrough, but not map ISA space. The ISA
- * mappings are just dummy local mappings to keep other
- * parts of the kernel happy.
- */
- if (unlikely(pte & _PAGE_IOMAP) &&
- (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
- pte = iomap_pte(pte);
- } else {
- pte &= ~_PAGE_IOMAP;
- pte = pte_pfn_to_mfn(pte);
- }
+ pte = pte_pfn_to_mfn(pte);
return native_make_pte(pte);
}
@@ -2091,7 +2051,7 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
default:
/* By default, set_fixmap is used for hardware mappings */
- pte = mfn_pte(phys, __pgprot(pgprot_val(prot) | _PAGE_IOMAP));
+ pte = mfn_pte(phys, prot);
break;
}
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 3172692381ae..9f5983b01ed9 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -173,6 +173,7 @@
#include <xen/balloon.h>
#include <xen/grant_table.h>
+#include "p2m.h"
#include "multicalls.h"
#include "xen-ops.h"
@@ -180,12 +181,6 @@ static void __init m2p_override_init(void);
unsigned long xen_max_p2m_pfn __read_mostly;
-#define P2M_PER_PAGE (PAGE_SIZE / sizeof(unsigned long))
-#define P2M_MID_PER_PAGE (PAGE_SIZE / sizeof(unsigned long *))
-#define P2M_TOP_PER_PAGE (PAGE_SIZE / sizeof(unsigned long **))
-
-#define MAX_P2M_PFN (P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE)
-
/* Placeholders for holes in the address space */
static RESERVE_BRK_ARRAY(unsigned long, p2m_missing, P2M_PER_PAGE);
static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_missing, P2M_MID_PER_PAGE);
@@ -202,16 +197,12 @@ static RESERVE_BRK_ARRAY(unsigned long, p2m_mid_identity_mfn, P2M_MID_PER_PAGE);
RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
-/* We might hit two boundary violations at the start and end, at max each
- * boundary violation will require three middle nodes. */
-RESERVE_BRK(p2m_mid_extra, PAGE_SIZE * 2 * 3);
-
-/* When we populate back during bootup, the amount of pages can vary. The
- * max we have is seen is 395979, but that does not mean it can't be more.
- * Some machines can have 3GB I/O holes even. With early_can_reuse_p2m_middle
- * it can re-use Xen provided mfn_list array, so we only need to allocate at
- * most three P2M top nodes. */
-RESERVE_BRK(p2m_populated, PAGE_SIZE * 3);
+/* For each I/O range remapped we may lose up to two leaf pages for the boundary
+ * violations and three mid pages to cover up to 3GB. With
+ * early_can_reuse_p2m_middle() most of the leaf pages will be reused by the
+ * remapped region.
+ */
+RESERVE_BRK(p2m_identity_remap, PAGE_SIZE * 2 * 3 * MAX_REMAP_RANGES);
static inline unsigned p2m_top_index(unsigned long pfn)
{
diff --git a/arch/x86/xen/p2m.h b/arch/x86/xen/p2m.h
new file mode 100644
index 000000000000..ad8aee24ab72
--- /dev/null
+++ b/arch/x86/xen/p2m.h
@@ -0,0 +1,15 @@
+#ifndef _XEN_P2M_H
+#define _XEN_P2M_H
+
+#define P2M_PER_PAGE (PAGE_SIZE / sizeof(unsigned long))
+#define P2M_MID_PER_PAGE (PAGE_SIZE / sizeof(unsigned long *))
+#define P2M_TOP_PER_PAGE (PAGE_SIZE / sizeof(unsigned long **))
+
+#define MAX_P2M_PFN (P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE)
+
+#define MAX_REMAP_RANGES 10
+
+extern unsigned long __init set_phys_range_identity(unsigned long pfn_s,
+ unsigned long pfn_e);
+
+#endif /* _XEN_P2M_H */
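Plugging in the x86-64 values (4 KiB pages, 8-byte pointers) gives a feel for the sizes these macros imply; a rough worked calculation, not part of the header itself:

	/* P2M_PER_PAGE     = 4096 / sizeof(unsigned long)    = 512
	 * P2M_MID_PER_PAGE = 4096 / sizeof(unsigned long *)  = 512
	 * P2M_TOP_PER_PAGE = 4096 / sizeof(unsigned long **) = 512
	 * MAX_P2M_PFN      = 512 * 512 * 512 = 134,217,728 pfns
	 *                  = 512 GiB of guest pseudo-physical space
	 * p2m_identity_remap brk (see p2m.c above)
	 *                  = PAGE_SIZE * 2 * 3 * MAX_REMAP_RANGES
	 *                  = 4096 * 2 * 3 * 10 bytes = 240 KiB
	 */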
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 2e555163c2fe..af7216128d93 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -29,6 +29,7 @@
#include <xen/features.h>
#include "xen-ops.h"
#include "vdso.h"
+#include "p2m.h"
/* These are code, but not functions. Defined in entry.S */
extern const char xen_hypervisor_callback[];
@@ -46,6 +47,9 @@ struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;
+/* Buffer used to remap identity mapped pages */
+unsigned long xen_remap_buf[P2M_PER_PAGE] __initdata;
+
/*
* The maximum amount of extra memory compared to the base size. The
* main scaling factor is the size of struct page. At extreme ratios
@@ -151,107 +155,325 @@ static unsigned long __init xen_do_chunk(unsigned long start,
return len;
}
-static unsigned long __init xen_release_chunk(unsigned long start,
- unsigned long end)
-{
- return xen_do_chunk(start, end, true);
-}
-
-static unsigned long __init xen_populate_chunk(
+/*
+ * Finds the next RAM pfn available in the E820 map after min_pfn.
+ * This function updates min_pfn with the pfn found and returns
+ * the size of that range or zero if not found.
+ */
+static unsigned long __init xen_find_pfn_range(
const struct e820entry *list, size_t map_size,
- unsigned long max_pfn, unsigned long *last_pfn,
- unsigned long credits_left)
+ unsigned long *min_pfn)
{
const struct e820entry *entry;
unsigned int i;
unsigned long done = 0;
- unsigned long dest_pfn;
for (i = 0, entry = list; i < map_size; i++, entry++) {
unsigned long s_pfn;
unsigned long e_pfn;
- unsigned long pfns;
- long capacity;
-
- if (credits_left <= 0)
- break;
if (entry->type != E820_RAM)
continue;
e_pfn = PFN_DOWN(entry->addr + entry->size);
- /* We only care about E820 after the xen_start_info->nr_pages */
- if (e_pfn <= max_pfn)
+ /* We only care about E820 after this */
+ if (e_pfn < *min_pfn)
continue;
s_pfn = PFN_UP(entry->addr);
- /* If the E820 falls within the nr_pages, we want to start
- * at the nr_pages PFN.
- * If that would mean going past the E820 entry, skip it
+
+ /* If min_pfn falls within the E820 entry, we want to start
+ * at the min_pfn PFN.
*/
- if (s_pfn <= max_pfn) {
- capacity = e_pfn - max_pfn;
- dest_pfn = max_pfn;
+ if (s_pfn <= *min_pfn) {
+ done = e_pfn - *min_pfn;
} else {
- capacity = e_pfn - s_pfn;
- dest_pfn = s_pfn;
+ done = e_pfn - s_pfn;
+ *min_pfn = s_pfn;
}
+ break;
+ }
- if (credits_left < capacity)
- capacity = credits_left;
+ return done;
+}
- pfns = xen_do_chunk(dest_pfn, dest_pfn + capacity, false);
- done += pfns;
- *last_pfn = (dest_pfn + pfns);
- if (pfns < capacity)
- break;
- credits_left -= pfns;
+/*
+ * This releases a chunk of memory and then does the identity map. It's used
+ * as a fallback if the remapping fails.
+ */
+static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
+ unsigned long end_pfn, unsigned long nr_pages, unsigned long *identity,
+ unsigned long *released)
+{
+ WARN_ON(start_pfn > end_pfn);
+
+ /* Need to release pages first */
+ *released += xen_do_chunk(start_pfn, min(end_pfn, nr_pages), true);
+ *identity += set_phys_range_identity(start_pfn, end_pfn);
+}
+
+/*
+ * Helper function to update both the p2m and m2p tables.
+ */
+static unsigned long __init xen_update_mem_tables(unsigned long pfn,
+ unsigned long mfn)
+{
+ struct mmu_update update = {
+ .ptr = ((unsigned long long)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
+ .val = pfn
+ };
+
+ /* Update p2m */
+ if (!early_set_phys_to_machine(pfn, mfn)) {
+ WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld\n",
+ pfn, mfn);
+ return false;
}
- return done;
+
+ /* Update m2p */
+ if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0) {
+ WARN(1, "Failed to set m2p mapping for mfn=%ld pfn=%ld\n",
+ mfn, pfn);
+ return false;
+ }
+
+ return true;
}
-static void __init xen_set_identity_and_release_chunk(
- unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
- unsigned long *released, unsigned long *identity)
+/*
+ * This function updates the p2m and m2p tables with an identity map from
+ * start_pfn to start_pfn+size and remaps the underlying RAM of the original
+ * allocation at remap_pfn. It must do so carefully in P2M_PER_PAGE sized blocks
+ * to not exhaust the reserved brk space. Doing it in properly aligned blocks
+ * ensures we only allocate the minimum required leaf pages in the p2m table. It
+ * copies the existing mfns from the p2m table under the 1:1 map, overwrites
+ * them with the identity map and then updates the p2m and m2p tables with the
+ * remapped memory.
+ */
+static unsigned long __init xen_do_set_identity_and_remap_chunk(
+ unsigned long start_pfn, unsigned long size, unsigned long remap_pfn)
{
- unsigned long pfn;
+ unsigned long ident_pfn_iter, remap_pfn_iter;
+ unsigned long ident_start_pfn_align, remap_start_pfn_align;
+ unsigned long ident_end_pfn_align, remap_end_pfn_align;
+ unsigned long ident_boundary_pfn, remap_boundary_pfn;
+ unsigned long ident_cnt = 0;
+ unsigned long remap_cnt = 0;
+ unsigned long left = size;
+ unsigned long mod;
+ int i;
+
+ WARN_ON(size == 0);
+
+ BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));
/*
- * If the PFNs are currently mapped, clear the mappings
- * (except for the ISA region which must be 1:1 mapped) to
- * release the refcounts (in Xen) on the original frames.
+ * Determine the proper alignment to remap memory in P2M_PER_PAGE sized
+ * blocks. We need to keep track of both the existing pfn mapping and
+ * the new pfn remapping.
*/
- for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++) {
- pte_t pte = __pte_ma(0);
+ mod = start_pfn % P2M_PER_PAGE;
+ ident_start_pfn_align =
+ mod ? (start_pfn - mod + P2M_PER_PAGE) : start_pfn;
+ mod = remap_pfn % P2M_PER_PAGE;
+ remap_start_pfn_align =
+ mod ? (remap_pfn - mod + P2M_PER_PAGE) : remap_pfn;
+ mod = (start_pfn + size) % P2M_PER_PAGE;
+ ident_end_pfn_align = start_pfn + size - mod;
+ mod = (remap_pfn + size) % P2M_PER_PAGE;
+ remap_end_pfn_align = remap_pfn + size - mod;
+
+ /* Iterate over each p2m leaf node in each range */
+ for (ident_pfn_iter = ident_start_pfn_align, remap_pfn_iter = remap_start_pfn_align;
+ ident_pfn_iter < ident_end_pfn_align && remap_pfn_iter < remap_end_pfn_align;
+ ident_pfn_iter += P2M_PER_PAGE, remap_pfn_iter += P2M_PER_PAGE) {
+ /* Check we aren't past the end */
+ BUG_ON(ident_pfn_iter + P2M_PER_PAGE > start_pfn + size);
+ BUG_ON(remap_pfn_iter + P2M_PER_PAGE > remap_pfn + size);
+
+ /* Save p2m mappings */
+ for (i = 0; i < P2M_PER_PAGE; i++)
+ xen_remap_buf[i] = pfn_to_mfn(ident_pfn_iter + i);
+
+ /* Set identity map which will free a p2m leaf */
+ ident_cnt += set_phys_range_identity(ident_pfn_iter,
+ ident_pfn_iter + P2M_PER_PAGE);
+
+#ifdef DEBUG
+ /* Helps verify a p2m leaf has been freed */
+ for (i = 0; i < P2M_PER_PAGE; i++) {
+ unsigned int pfn = ident_pfn_iter + i;
+ BUG_ON(pfn_to_mfn(pfn) != pfn);
+ }
+#endif
+ /* Now remap memory */
+ for (i = 0; i < P2M_PER_PAGE; i++) {
+ unsigned long mfn = xen_remap_buf[i];
+
+ /* This will use the p2m leaf freed above */
+ if (!xen_update_mem_tables(remap_pfn_iter + i, mfn)) {
+ WARN(1, "Failed to update mem mapping for pfn=%ld mfn=%ld\n",
+ remap_pfn_iter + i, mfn);
+ return 0;
+ }
+
+ remap_cnt++;
+ }
- if (pfn < PFN_UP(ISA_END_ADDRESS))
- pte = mfn_pte(pfn, PAGE_KERNEL_IO);
+ left -= P2M_PER_PAGE;
+ }
- (void)HYPERVISOR_update_va_mapping(
- (unsigned long)__va(pfn << PAGE_SHIFT), pte, 0);
+ /* Max boundary space possible */
+ BUG_ON(left > (P2M_PER_PAGE - 1) * 2);
+
+ /* Now handle the boundary conditions */
+ ident_boundary_pfn = start_pfn;
+ remap_boundary_pfn = remap_pfn;
+ for (i = 0; i < left; i++) {
+ unsigned long mfn;
+
+ /* These two checks move from the start to end boundaries */
+ if (ident_boundary_pfn == ident_start_pfn_align)
+ ident_boundary_pfn = ident_pfn_iter;
+ if (remap_boundary_pfn == remap_start_pfn_align)
+ remap_boundary_pfn = remap_pfn_iter;
+
+ /* Check we aren't past the end */
+ BUG_ON(ident_boundary_pfn >= start_pfn + size);
+ BUG_ON(remap_boundary_pfn >= remap_pfn + size);
+
+ mfn = pfn_to_mfn(ident_boundary_pfn);
+
+ if (!xen_update_mem_tables(remap_boundary_pfn, mfn)) {
+ WARN(1, "Failed to update mem mapping for pfn=%ld mfn=%ld\n",
+ remap_pfn_iter + i, mfn);
+ return 0;
+ }
+ remap_cnt++;
+
+ ident_boundary_pfn++;
+ remap_boundary_pfn++;
}
- if (start_pfn < nr_pages)
- *released += xen_release_chunk(
- start_pfn, min(end_pfn, nr_pages));
+ /* Finish up the identity map */
+ if (ident_start_pfn_align >= ident_end_pfn_align) {
+ /*
+ * In this case we have an identity range which does not span an
+ * aligned block so everything needs to be identity mapped here.
+ * If we didn't check this we might remap too many pages since
+ * the align boundaries are not meaningful in this case.
+ */
+ ident_cnt += set_phys_range_identity(start_pfn,
+ start_pfn + size);
+ } else {
+ /* Remapped above so check each end of the chunk */
+ if (start_pfn < ident_start_pfn_align)
+ ident_cnt += set_phys_range_identity(start_pfn,
+ ident_start_pfn_align);
+ if (start_pfn + size > ident_pfn_iter)
+ ident_cnt += set_phys_range_identity(ident_pfn_iter,
+ start_pfn + size);
+ }
- *identity += set_phys_range_identity(start_pfn, end_pfn);
+ BUG_ON(ident_cnt != size);
+ BUG_ON(remap_cnt != size);
+
+ return size;
}
-static unsigned long __init xen_set_identity_and_release(
- const struct e820entry *list, size_t map_size, unsigned long nr_pages)
+/*
+ * This function takes a contiguous pfn range that needs to be identity mapped
+ * and:
+ *
+ * 1) Finds a new range of pfns to use to remap based on E820 and remap_pfn.
+ * 2) Calls the do_ function to actually do the mapping/remapping work.
+ *
+ * The goal is to not allocate additional memory but to remap the existing
+ * pages. In the case of an error the underlying memory is simply released back
+ * to Xen and not remapped.
+ */
+static unsigned long __init xen_set_identity_and_remap_chunk(
+ const struct e820entry *list, size_t map_size, unsigned long start_pfn,
+ unsigned long end_pfn, unsigned long nr_pages, unsigned long remap_pfn,
+ unsigned long *identity, unsigned long *remapped,
+ unsigned long *released)
+{
+ unsigned long pfn;
+ unsigned long i = 0;
+ unsigned long n = end_pfn - start_pfn;
+
+ while (i < n) {
+ unsigned long cur_pfn = start_pfn + i;
+ unsigned long left = n - i;
+ unsigned long size = left;
+ unsigned long remap_range_size;
+
+ /* Do not remap pages beyond the current allocation */
+ if (cur_pfn >= nr_pages) {
+ /* Identity map remaining pages */
+ *identity += set_phys_range_identity(cur_pfn,
+ cur_pfn + size);
+ break;
+ }
+ if (cur_pfn + size > nr_pages)
+ size = nr_pages - cur_pfn;
+
+ remap_range_size = xen_find_pfn_range(list, map_size,
+ &remap_pfn);
+ if (!remap_range_size) {
+ pr_warning("Unable to find available pfn range, not remapping identity pages\n");
+ xen_set_identity_and_release_chunk(cur_pfn,
+ cur_pfn + left, nr_pages, identity, released);
+ break;
+ }
+ /* Adjust size to fit in current e820 RAM region */
+ if (size > remap_range_size)
+ size = remap_range_size;
+
+ if (!xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn)) {
+ WARN(1, "Failed to remap 1:1 memory cur_pfn=%ld size=%ld remap_pfn=%ld\n",
+ cur_pfn, size, remap_pfn);
+ xen_set_identity_and_release_chunk(cur_pfn,
+ cur_pfn + left, nr_pages, identity, released);
+ break;
+ }
+
+ /* Update variables to reflect new mappings. */
+ i += size;
+ remap_pfn += size;
+ *identity += size;
+ *remapped += size;
+ }
+
+ /*
+ * If the PFNs are currently mapped, the VA mapping also needs
+ * to be updated to be 1:1.
+ */
+ for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
+ (void)HYPERVISOR_update_va_mapping(
+ (unsigned long)__va(pfn << PAGE_SHIFT),
+ mfn_pte(pfn, PAGE_KERNEL_IO), 0);
+
+ return remap_pfn;
+}
+
+static unsigned long __init xen_set_identity_and_remap(
+ const struct e820entry *list, size_t map_size, unsigned long nr_pages,
+ unsigned long *released)
{
phys_addr_t start = 0;
- unsigned long released = 0;
unsigned long identity = 0;
+ unsigned long remapped = 0;
+ unsigned long last_pfn = nr_pages;
const struct e820entry *entry;
+ unsigned long num_released = 0;
int i;
/*
* Combine non-RAM regions and gaps until a RAM region (or the
* end of the map) is reached, then set the 1:1 map and
- * release the pages (if available) in those non-RAM regions.
+ * remap the memory in those non-RAM regions.
*
* The combined non-RAM regions are rounded to a whole number
* of pages so any partial pages are accessible via the 1:1
@@ -269,22 +491,24 @@ static unsigned long __init xen_set_identity_and_release(
end_pfn = PFN_UP(entry->addr);
if (start_pfn < end_pfn)
- xen_set_identity_and_release_chunk(
- start_pfn, end_pfn, nr_pages,
- &released, &identity);
-
+ last_pfn = xen_set_identity_and_remap_chunk(
+ list, map_size, start_pfn,
+ end_pfn, nr_pages, last_pfn,
+ &identity, &remapped,
+ &num_released);
start = end;
}
}
- if (released)
- printk(KERN_INFO "Released %lu pages of unused memory\n", released);
- if (identity)
- printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);
+ *released = num_released;
- return released;
-}
+ pr_info("Set %ld page(s) to 1-1 mapping\n", identity);
+ pr_info("Remapped %ld page(s), last_pfn=%ld\n", remapped,
+ last_pfn);
+ pr_info("Released %ld page(s)\n", num_released);
+ return last_pfn;
+}
static unsigned long __init xen_get_max_pages(void)
{
unsigned long max_pages = MAX_DOMAIN_PAGES;
@@ -347,7 +571,6 @@ char * __init xen_memory_setup(void)
unsigned long max_pages;
unsigned long last_pfn = 0;
unsigned long extra_pages = 0;
- unsigned long populated;
int i;
int op;
@@ -392,20 +615,11 @@ char * __init xen_memory_setup(void)
extra_pages += max_pages - max_pfn;
/*
- * Set P2M for all non-RAM pages and E820 gaps to be identity
- * type PFNs. Any RAM pages that would be made inaccesible by
- * this are first released.
+ * Set identity map on non-RAM pages and remap the underlying RAM.
*/
- xen_released_pages = xen_set_identity_and_release(
- map, memmap.nr_entries, max_pfn);
-
- /*
- * Populate back the non-RAM pages and E820 gaps that had been
- * released. */
- populated = xen_populate_chunk(map, memmap.nr_entries,
- max_pfn, &last_pfn, xen_released_pages);
+ last_pfn = xen_set_identity_and_remap(map, memmap.nr_entries, max_pfn,
+ &xen_released_pages);
- xen_released_pages -= populated;
extra_pages += xen_released_pages;
if (last_pfn > max_pfn) {
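The per-chunk alignment that xen_do_set_identity_and_remap_chunk() spells out with a modulo is simply a round-up to the next p2m leaf boundary; a small sketch, with the helper name invented for illustration (P2M_PER_PAGE is 512 on x86-64):

static unsigned long p2m_leaf_align_up(unsigned long pfn)
{
	unsigned long mod = pfn % P2M_PER_PAGE;

	/* Round up to the next P2M_PER_PAGE boundary so that whole p2m
	 * leaf pages can be identity-filled and their previous contents
	 * remapped elsewhere. */
	return mod ? pfn - mod + P2M_PER_PAGE : pfn;
}

/* e.g. p2m_leaf_align_up(1000) == 1024, p2m_leaf_align_up(1024) == 1024 */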
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 7005974c3ff3..8650cdb53209 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -37,6 +37,7 @@
#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "mmu.h"
+#include "smp.h"
cpumask_var_t xen_cpu_initialized_map;
@@ -99,10 +100,14 @@ static void cpu_bringup(void)
wmb(); /* make sure everything is out */
}
-/* Note: cpu parameter is only relevant for PVH */
-static void cpu_bringup_and_idle(int cpu)
+/*
+ * Note: the cpu parameter is only relevant for PVH. The reason for passing
+ * it is that we can't call smp_processor_id() until the percpu segments are
+ * loaded, for which we need the cpu number! So we pass it in rdi as the
+ * first parameter.
+ */
+asmlinkage __visible void cpu_bringup_and_idle(int cpu)
{
-#ifdef CONFIG_X86_64
+#ifdef CONFIG_XEN_PVH
if (xen_feature(XENFEAT_auto_translated_physmap) &&
xen_feature(XENFEAT_supervisor_mode_kernel))
xen_pvh_secondary_vcpu_init(cpu);
@@ -360,6 +365,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
struct desc_struct *gdt;
unsigned long gdt_mfn;
+ /* used to tell cpu_init() that it can proceed with initialization */
+ cpumask_set_cpu(cpu, cpu_callout_mask);
if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
return 0;
@@ -374,11 +381,10 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
ctxt->user_regs.fs = __KERNEL_PERCPU;
ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
#endif
- ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
-
memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
ctxt->flags = VGCF_IN_KERNEL;
ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
ctxt->user_regs.ds = __USER_DS;
@@ -413,15 +419,18 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
(unsigned long)xen_failsafe_callback;
ctxt->user_regs.cs = __KERNEL_CS;
per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
-#ifdef CONFIG_X86_32
}
-#else
- } else
- /* N.B. The user_regs.eip (cpu_bringup_and_idle) is called with
- * %rdi having the cpu number - which means are passing in
- * as the first parameter the cpu. Subtle!
+#ifdef CONFIG_XEN_PVH
+ else {
+ /*
+ * The vcpu comes on kernel page tables which have the NX pte
+ * bit set. This means before DS/SS is touched, NX in
+ * EFER must be set. Hence the following assembly glue code.
*/
+ ctxt->user_regs.eip = (unsigned long)xen_pvh_early_cpu_init;
ctxt->user_regs.rdi = cpu;
+ ctxt->user_regs.rsi = true; /* entry == true */
+ }
#endif
ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));
diff --git a/arch/x86/xen/smp.h b/arch/x86/xen/smp.h
index c7c2d89efd76..963d62a35c82 100644
--- a/arch/x86/xen/smp.h
+++ b/arch/x86/xen/smp.h
@@ -8,4 +8,12 @@ extern void xen_send_IPI_allbutself(int vector);
extern void xen_send_IPI_all(int vector);
extern void xen_send_IPI_self(int vector);
+#ifdef CONFIG_XEN_PVH
+extern void xen_pvh_early_cpu_init(int cpu, bool entry);
+#else
+static inline void xen_pvh_early_cpu_init(int cpu, bool entry)
+{
+}
+#endif
+
#endif
diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
index 485b69585540..674b222544b7 100644
--- a/arch/x86/xen/xen-head.S
+++ b/arch/x86/xen/xen-head.S
@@ -47,6 +47,41 @@ ENTRY(startup_xen)
__FINIT
+#ifdef CONFIG_XEN_PVH
+/*
+ * xen_pvh_early_cpu_init() - early PVH VCPU initialization
+ * @cpu: this cpu number (%rdi)
+ * @entry: true if this is a secondary vcpu coming up on this entry
+ * point, false if this is the boot CPU being initialized for
+ * the first time (%rsi)
+ *
+ * Note: This is called as a function on the boot CPU, and is the entry point
+ * on the secondary CPU.
+ */
+ENTRY(xen_pvh_early_cpu_init)
+ mov %rsi, %r11
+
+ /* Gather features to see if NX implemented. */
+ mov $0x80000001, %eax
+ cpuid
+ mov %edx, %esi
+
+ mov $MSR_EFER, %ecx
+ rdmsr
+ bts $_EFER_SCE, %eax
+
+ bt $20, %esi
+ jnc 1f /* No NX, skip setting it */
+ bts $_EFER_NX, %eax
+1: wrmsr
+#ifdef CONFIG_SMP
+ cmp $0, %r11b
+ jne cpu_bringup_and_idle
+#endif
+ ret
+
+#endif /* CONFIG_XEN_PVH */
+
.pushsection .text
.balign PAGE_SIZE
ENTRY(hypercall_page)
@@ -124,6 +159,7 @@ NEXT_HYPERCALL(arch_6)
ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID,
.quad _PAGE_PRESENT; .quad _PAGE_PRESENT)
ELFNOTE(Xen, XEN_ELFNOTE_SUSPEND_CANCEL, .long 1)
+ ELFNOTE(Xen, XEN_ELFNOTE_MOD_START_PFN, .long 1)
ELFNOTE(Xen, XEN_ELFNOTE_HV_START_LOW, _ASM_PTR __HYPERVISOR_VIRT_START)
ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, _ASM_PTR 0)
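For readers less fluent in assembly, the CPUID/EFER sequence in xen_pvh_early_cpu_init() corresponds roughly to the C below; this is a sketch only (the function name is invented, and the real code has to be assembly because it runs before the kernel's C environment is usable on the secondary vcpu):

static void pvh_enable_sce_and_nx(void)
{
	unsigned int eax, ebx, ecx, edx;
	u64 efer;

	/* CPUID leaf 0x80000001: EDX bit 20 advertises NX support. */
	cpuid(0x80000001, &eax, &ebx, &ecx, &edx);

	rdmsrl(MSR_EFER, efer);
	efer |= EFER_SCE;		/* always enable SYSCALL/SYSRET */
	if (edx & (1U << 20))
		efer |= EFER_NX;	/* set NX only if the CPU has it */
	wrmsrl(MSR_EFER, efer);
}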
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index e5103b47a8ce..00b7d46b35b8 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -47,7 +47,7 @@
*
* Atomically reads the value of @v.
*/
-#define atomic_read(v) (*(volatile int *)&(v)->counter)
+#define atomic_read(v) ACCESS_ONCE((v)->counter)
/**
* atomic_set - set atomic variable
@@ -58,165 +58,96 @@
*/
#define atomic_set(v,i) ((v)->counter = (i))
-/**
- * atomic_add - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v.
- */
-static inline void atomic_add(int i, atomic_t * v)
-{
#if XCHAL_HAVE_S32C1I
- unsigned long tmp;
- int result;
-
- __asm__ __volatile__(
- "1: l32i %1, %3, 0\n"
- " wsr %1, scompare1\n"
- " add %0, %1, %2\n"
- " s32c1i %0, %3, 0\n"
- " bne %0, %1, 1b\n"
- : "=&a" (result), "=&a" (tmp)
- : "a" (i), "a" (v)
- : "memory"
- );
-#else
- unsigned int vval;
-
- __asm__ __volatile__(
- " rsil a15, "__stringify(LOCKLEVEL)"\n"
- " l32i %0, %2, 0\n"
- " add %0, %0, %1\n"
- " s32i %0, %2, 0\n"
- " wsr a15, ps\n"
- " rsync\n"
- : "=&a" (vval)
- : "a" (i), "a" (v)
- : "a15", "memory"
- );
-#endif
-}
-
-/**
- * atomic_sub - subtract the atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v.
- */
-static inline void atomic_sub(int i, atomic_t *v)
-{
-#if XCHAL_HAVE_S32C1I
- unsigned long tmp;
- int result;
-
- __asm__ __volatile__(
- "1: l32i %1, %3, 0\n"
- " wsr %1, scompare1\n"
- " sub %0, %1, %2\n"
- " s32c1i %0, %3, 0\n"
- " bne %0, %1, 1b\n"
- : "=&a" (result), "=&a" (tmp)
- : "a" (i), "a" (v)
- : "memory"
- );
-#else
- unsigned int vval;
-
- __asm__ __volatile__(
- " rsil a15, "__stringify(LOCKLEVEL)"\n"
- " l32i %0, %2, 0\n"
- " sub %0, %0, %1\n"
- " s32i %0, %2, 0\n"
- " wsr a15, ps\n"
- " rsync\n"
- : "=&a" (vval)
- : "a" (i), "a" (v)
- : "a15", "memory"
- );
-#endif
+#define ATOMIC_OP(op) \
+static inline void atomic_##op(int i, atomic_t * v) \
+{ \
+ unsigned long tmp; \
+ int result; \
+ \
+ __asm__ __volatile__( \
+ "1: l32i %1, %3, 0\n" \
+ " wsr %1, scompare1\n" \
+ " " #op " %0, %1, %2\n" \
+ " s32c1i %0, %3, 0\n" \
+ " bne %0, %1, 1b\n" \
+ : "=&a" (result), "=&a" (tmp) \
+ : "a" (i), "a" (v) \
+ : "memory" \
+ ); \
+} \
+
+#define ATOMIC_OP_RETURN(op) \
+static inline int atomic_##op##_return(int i, atomic_t * v) \
+{ \
+ unsigned long tmp; \
+ int result; \
+ \
+ __asm__ __volatile__( \
+ "1: l32i %1, %3, 0\n" \
+ " wsr %1, scompare1\n" \
+ " " #op " %0, %1, %2\n" \
+ " s32c1i %0, %3, 0\n" \
+ " bne %0, %1, 1b\n" \
+ " " #op " %0, %0, %2\n" \
+ : "=&a" (result), "=&a" (tmp) \
+ : "a" (i), "a" (v) \
+ : "memory" \
+ ); \
+ \
+ return result; \
}
-/*
- * We use atomic_{add|sub}_return to define other functions.
- */
-
-static inline int atomic_add_return(int i, atomic_t * v)
-{
-#if XCHAL_HAVE_S32C1I
- unsigned long tmp;
- int result;
-
- __asm__ __volatile__(
- "1: l32i %1, %3, 0\n"
- " wsr %1, scompare1\n"
- " add %0, %1, %2\n"
- " s32c1i %0, %3, 0\n"
- " bne %0, %1, 1b\n"
- " add %0, %0, %2\n"
- : "=&a" (result), "=&a" (tmp)
- : "a" (i), "a" (v)
- : "memory"
- );
-
- return result;
-#else
- unsigned int vval;
-
- __asm__ __volatile__(
- " rsil a15,"__stringify(LOCKLEVEL)"\n"
- " l32i %0, %2, 0\n"
- " add %0, %0, %1\n"
- " s32i %0, %2, 0\n"
- " wsr a15, ps\n"
- " rsync\n"
- : "=&a" (vval)
- : "a" (i), "a" (v)
- : "a15", "memory"
- );
-
- return vval;
-#endif
+#else /* XCHAL_HAVE_S32C1I */
+
+#define ATOMIC_OP(op) \
+static inline void atomic_##op(int i, atomic_t * v) \
+{ \
+ unsigned int vval; \
+ \
+ __asm__ __volatile__( \
+ " rsil a15, "__stringify(LOCKLEVEL)"\n"\
+ " l32i %0, %2, 0\n" \
+ " " #op " %0, %0, %1\n" \
+ " s32i %0, %2, 0\n" \
+ " wsr a15, ps\n" \
+ " rsync\n" \
+ : "=&a" (vval) \
+ : "a" (i), "a" (v) \
+ : "a15", "memory" \
+ ); \
+} \
+
+#define ATOMIC_OP_RETURN(op) \
+static inline int atomic_##op##_return(int i, atomic_t * v) \
+{ \
+ unsigned int vval; \
+ \
+ __asm__ __volatile__( \
+ " rsil a15,"__stringify(LOCKLEVEL)"\n" \
+ " l32i %0, %2, 0\n" \
+ " " #op " %0, %0, %1\n" \
+ " s32i %0, %2, 0\n" \
+ " wsr a15, ps\n" \
+ " rsync\n" \
+ : "=&a" (vval) \
+ : "a" (i), "a" (v) \
+ : "a15", "memory" \
+ ); \
+ \
+ return vval; \
}
-static inline int atomic_sub_return(int i, atomic_t * v)
-{
-#if XCHAL_HAVE_S32C1I
- unsigned long tmp;
- int result;
+#endif /* XCHAL_HAVE_S32C1I */
- __asm__ __volatile__(
- "1: l32i %1, %3, 0\n"
- " wsr %1, scompare1\n"
- " sub %0, %1, %2\n"
- " s32c1i %0, %3, 0\n"
- " bne %0, %1, 1b\n"
- " sub %0, %0, %2\n"
- : "=&a" (result), "=&a" (tmp)
- : "a" (i), "a" (v)
- : "memory"
- );
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
- return result;
-#else
- unsigned int vval;
-
- __asm__ __volatile__(
- " rsil a15,"__stringify(LOCKLEVEL)"\n"
- " l32i %0, %2, 0\n"
- " sub %0, %0, %1\n"
- " s32i %0, %2, 0\n"
- " wsr a15, ps\n"
- " rsync\n"
- : "=&a" (vval)
- : "a" (i), "a" (v)
- : "a15", "memory"
- );
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
- return vval;
-#endif
-}
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
/**
* atomic_sub_and_test - subtract value from variable and test result
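For readers less familiar with S32C1I, a minimal user-space sketch (C11 atomics, illustrative name) of the retry loop that ATOMIC_OP(add) generates on such cores: read the old value, attempt a compare-and-swap, and retry until no other CPU has modified the word in between.

#include <stdatomic.h>

static inline void sketch_atomic_add(int i, _Atomic int *v)
{
	int old = atomic_load_explicit(v, memory_order_relaxed);

	/* Mirrors the l32i / wsr scompare1 / s32c1i / bne retry loop;
	 * a failed CAS refreshes 'old' with the current value. */
	while (!atomic_compare_exchange_weak(v, &old, old + i))
		;
}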
diff --git a/crypto/asymmetric_keys/asymmetric_keys.h b/crypto/asymmetric_keys/asymmetric_keys.h
index a63c551c6557..f97330886d58 100644
--- a/crypto/asymmetric_keys/asymmetric_keys.h
+++ b/crypto/asymmetric_keys/asymmetric_keys.h
@@ -9,9 +9,10 @@
* 2 of the Licence, or (at your option) any later version.
*/
-int asymmetric_keyid_match(const char *kid, const char *id);
+extern struct asymmetric_key_id *asymmetric_key_hex_to_key_id(const char *id);
-static inline const char *asymmetric_key_id(const struct key *key)
+static inline
+const struct asymmetric_key_ids *asymmetric_key_ids(const struct key *key)
{
return key->type_data.p[1];
}
diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c
index eb8cd46961a5..bcbbbd794e1d 100644
--- a/crypto/asymmetric_keys/asymmetric_type.c
+++ b/crypto/asymmetric_keys/asymmetric_type.c
@@ -15,6 +15,7 @@
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/ctype.h>
#include "asymmetric_keys.h"
MODULE_LICENSE("GPL");
@@ -22,85 +23,209 @@ MODULE_LICENSE("GPL");
static LIST_HEAD(asymmetric_key_parsers);
static DECLARE_RWSEM(asymmetric_key_parsers_sem);
-/*
- * Match asymmetric key id with partial match
- * @id: key id to match in a form "id:<id>"
+/**
+ * asymmetric_key_generate_id: Construct an asymmetric key ID
+ * @val_1: First binary blob
+ * @len_1: Length of first binary blob
+ * @val_2: Second binary blob
+ * @len_2: Length of second binary blob
+ *
+ * Construct an asymmetric key ID from a pair of binary blobs.
*/
-int asymmetric_keyid_match(const char *kid, const char *id)
+struct asymmetric_key_id *asymmetric_key_generate_id(const void *val_1,
+ size_t len_1,
+ const void *val_2,
+ size_t len_2)
{
- size_t idlen, kidlen;
+ struct asymmetric_key_id *kid;
+
+ kid = kmalloc(sizeof(struct asymmetric_key_id) + len_1 + len_2,
+ GFP_KERNEL);
+ if (!kid)
+ return ERR_PTR(-ENOMEM);
+ kid->len = len_1 + len_2;
+ memcpy(kid->data, val_1, len_1);
+ memcpy(kid->data + len_1, val_2, len_2);
+ return kid;
+}
+EXPORT_SYMBOL_GPL(asymmetric_key_generate_id);
- if (!kid || !id)
- return 0;
+/**
+ * asymmetric_key_id_same - Return true if two asymmetric key IDs are the same.
+ * @kid1, @kid2: The key IDs to compare
+ */
+bool asymmetric_key_id_same(const struct asymmetric_key_id *kid1,
+ const struct asymmetric_key_id *kid2)
+{
+ if (!kid1 || !kid2)
+ return false;
+ if (kid1->len != kid2->len)
+ return false;
+ return memcmp(kid1->data, kid2->data, kid1->len) == 0;
+}
+EXPORT_SYMBOL_GPL(asymmetric_key_id_same);
- /* make it possible to use id as in the request: "id:<id>" */
- if (strncmp(id, "id:", 3) == 0)
- id += 3;
+/**
+ * asymmetric_key_id_partial - Return true if two asymmetric key IDs
+ * partially match
+ * @kid1, @kid2: The key IDs to compare
+ */
+bool asymmetric_key_id_partial(const struct asymmetric_key_id *kid1,
+ const struct asymmetric_key_id *kid2)
+{
+ if (!kid1 || !kid2)
+ return false;
+ if (kid1->len < kid2->len)
+ return false;
+ return memcmp(kid1->data + (kid1->len - kid2->len),
+ kid2->data, kid2->len) == 0;
+}
+EXPORT_SYMBOL_GPL(asymmetric_key_id_partial);
- /* Anything after here requires a partial match on the ID string */
- idlen = strlen(id);
- kidlen = strlen(kid);
- if (idlen > kidlen)
- return 0;
+/**
+ * asymmetric_match_key_ids - Search asymmetric key IDs
+ * @kids: The list of key IDs to check
+ * @match_id: The key ID we're looking for
+ * @match: The match function to use
+ */
+static bool asymmetric_match_key_ids(
+ const struct asymmetric_key_ids *kids,
+ const struct asymmetric_key_id *match_id,
+ bool (*match)(const struct asymmetric_key_id *kid1,
+ const struct asymmetric_key_id *kid2))
+{
+ int i;
+
+ if (!kids || !match_id)
+ return false;
+ for (i = 0; i < ARRAY_SIZE(kids->id); i++)
+ if (match(kids->id[i], match_id))
+ return true;
+ return false;
+}
- kid += kidlen - idlen;
- if (strcasecmp(id, kid) != 0)
- return 0;
+/**
+ * asymmetric_key_hex_to_key_id - Convert a hex string into a key ID.
+ * @id: The ID as a hex string.
+ */
+struct asymmetric_key_id *asymmetric_key_hex_to_key_id(const char *id)
+{
+ struct asymmetric_key_id *match_id;
+ size_t hexlen;
+ int ret;
- return 1;
+ if (!*id)
+ return ERR_PTR(-EINVAL);
+ hexlen = strlen(id);
+ if (hexlen & 1)
+ return ERR_PTR(-EINVAL);
+
+ match_id = kmalloc(sizeof(struct asymmetric_key_id) + hexlen / 2,
+ GFP_KERNEL);
+ if (!match_id)
+ return ERR_PTR(-ENOMEM);
+ match_id->len = hexlen / 2;
+ ret = hex2bin(match_id->data, id, hexlen / 2);
+ if (ret < 0) {
+ kfree(match_id);
+ return ERR_PTR(-EINVAL);
+ }
+ return match_id;
}
-EXPORT_SYMBOL_GPL(asymmetric_keyid_match);
/*
- * Match asymmetric keys on (part of) their name
- * We have some shorthand methods for matching keys. We allow:
- *
- * "<desc>" - request a key by description
- * "id:<id>" - request a key matching the ID
- * "<subtype>:<id>" - request a key of a subtype
+ * Match asymmetric keys by an exact match on an ID.
*/
-static int asymmetric_key_match(const struct key *key, const void *description)
+static bool asymmetric_key_cmp(const struct key *key,
+ const struct key_match_data *match_data)
{
- const struct asymmetric_key_subtype *subtype = asymmetric_key_subtype(key);
- const char *spec = description;
- const char *id;
- ptrdiff_t speclen;
+ const struct asymmetric_key_ids *kids = asymmetric_key_ids(key);
+ const struct asymmetric_key_id *match_id = match_data->preparsed;
- if (!subtype || !spec || !*spec)
- return 0;
+ return asymmetric_match_key_ids(kids, match_id,
+ asymmetric_key_id_same);
+}
- /* See if the full key description matches as is */
- if (key->description && strcmp(key->description, description) == 0)
- return 1;
+/*
+ * Match asymmetric keys by a partial match on an ID.
+ */
+static bool asymmetric_key_cmp_partial(const struct key *key,
+ const struct key_match_data *match_data)
+{
+ const struct asymmetric_key_ids *kids = asymmetric_key_ids(key);
+ const struct asymmetric_key_id *match_id = match_data->preparsed;
- /* All tests from here on break the criterion description into a
- * specifier, a colon and then an identifier.
- */
- id = strchr(spec, ':');
- if (!id)
- return 0;
+ return asymmetric_match_key_ids(kids, match_id,
+ asymmetric_key_id_partial);
+}
+
+/*
+ * Preparse the match criterion. If we don't set lookup_type and cmp,
+ * the default will be an exact match on the key description.
+ *
+ * There are some specifiers for matching key IDs rather than by the key
+ * description:
+ *
+ * "id:<id>" - find a key by partial match on any available ID
+ * "ex:<id>" - find a key by exact match on any available ID
+ *
+ * These have to be searched by iteration rather than by direct lookup because
+ * the key is hashed according to its description.
+ */
+static int asymmetric_key_match_preparse(struct key_match_data *match_data)
+{
+ struct asymmetric_key_id *match_id;
+ const char *spec = match_data->raw_data;
+ const char *id;
+ bool (*cmp)(const struct key *, const struct key_match_data *) =
+ asymmetric_key_cmp;
- speclen = id - spec;
- id++;
+ if (!spec || !*spec)
+ return -EINVAL;
+ if (spec[0] == 'i' &&
+ spec[1] == 'd' &&
+ spec[2] == ':') {
+ id = spec + 3;
+ cmp = asymmetric_key_cmp_partial;
+ } else if (spec[0] == 'e' &&
+ spec[1] == 'x' &&
+ spec[2] == ':') {
+ id = spec + 3;
+ } else {
+ goto default_match;
+ }
- if (speclen == 2 && memcmp(spec, "id", 2) == 0)
- return asymmetric_keyid_match(asymmetric_key_id(key), id);
+ match_id = asymmetric_key_hex_to_key_id(id);
+ if (IS_ERR(match_id))
+ return PTR_ERR(match_id);
- if (speclen == subtype->name_len &&
- memcmp(spec, subtype->name, speclen) == 0)
- return 1;
+ match_data->preparsed = match_id;
+ match_data->cmp = cmp;
+ match_data->lookup_type = KEYRING_SEARCH_LOOKUP_ITERATE;
+ return 0;
+default_match:
return 0;
}
/*
+ * Free the preparsed match criterion.
+ */
+static void asymmetric_key_match_free(struct key_match_data *match_data)
+{
+ kfree(match_data->preparsed);
+}
+
+/*
* Describe the asymmetric key
*/
static void asymmetric_key_describe(const struct key *key, struct seq_file *m)
{
const struct asymmetric_key_subtype *subtype = asymmetric_key_subtype(key);
- const char *kid = asymmetric_key_id(key);
- size_t n;
+ const struct asymmetric_key_ids *kids = asymmetric_key_ids(key);
+ const struct asymmetric_key_id *kid;
+ const unsigned char *p;
+ int n;
seq_puts(m, key->description);
@@ -108,13 +233,16 @@ static void asymmetric_key_describe(const struct key *key, struct seq_file *m)
seq_puts(m, ": ");
subtype->describe(key, m);
- if (kid) {
+ if (kids && kids->id[1]) {
+ kid = kids->id[1];
seq_putc(m, ' ');
- n = strlen(kid);
- if (n <= 8)
- seq_puts(m, kid);
- else
- seq_puts(m, kid + n - 8);
+ n = kid->len;
+ p = kid->data;
+ if (n > 4) {
+ p += n - 4;
+ n = 4;
+ }
+ seq_printf(m, "%*phN", n, p);
}
seq_puts(m, " [");
@@ -165,6 +293,8 @@ static int asymmetric_key_preparse(struct key_preparsed_payload *prep)
static void asymmetric_key_free_preparse(struct key_preparsed_payload *prep)
{
struct asymmetric_key_subtype *subtype = prep->type_data[0];
+ struct asymmetric_key_ids *kids = prep->type_data[1];
+ int i;
pr_devel("==>%s()\n", __func__);
@@ -172,7 +302,11 @@ static void asymmetric_key_free_preparse(struct key_preparsed_payload *prep)
subtype->destroy(prep->payload[0]);
module_put(subtype->owner);
}
- kfree(prep->type_data[1]);
+ if (kids) {
+ for (i = 0; i < ARRAY_SIZE(kids->id); i++)
+ kfree(kids->id[i]);
+ kfree(kids);
+ }
kfree(prep->description);
}
@@ -182,13 +316,20 @@ static void asymmetric_key_free_preparse(struct key_preparsed_payload *prep)
static void asymmetric_key_destroy(struct key *key)
{
struct asymmetric_key_subtype *subtype = asymmetric_key_subtype(key);
+ struct asymmetric_key_ids *kids = key->type_data.p[1];
+
if (subtype) {
subtype->destroy(key->payload.data);
module_put(subtype->owner);
key->type_data.p[0] = NULL;
}
- kfree(key->type_data.p[1]);
- key->type_data.p[1] = NULL;
+
+ if (kids) {
+ kfree(kids->id[0]);
+ kfree(kids->id[1]);
+ kfree(kids);
+ key->type_data.p[1] = NULL;
+ }
}
struct key_type key_type_asymmetric = {
@@ -196,10 +337,10 @@ struct key_type key_type_asymmetric = {
.preparse = asymmetric_key_preparse,
.free_preparse = asymmetric_key_free_preparse,
.instantiate = generic_key_instantiate,
- .match = asymmetric_key_match,
+ .match_preparse = asymmetric_key_match_preparse,
+ .match_free = asymmetric_key_match_free,
.destroy = asymmetric_key_destroy,
.describe = asymmetric_key_describe,
- .def_lookup_type = KEYRING_SEARCH_LOOKUP_ITERATE,
};
EXPORT_SYMBOL_GPL(key_type_asymmetric);
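A hedged usage sketch for the ID helpers introduced above: rebuild the issuer-plus-serial composite ID the way the X.509 parser does, then compare it exactly against the ID recorded in a PKCS#7 SignedInfo block. The wrapper function and the header path are assumptions made for illustration.

#include <linux/err.h>
#include <linux/slab.h>
#include <keys/asymmetric-type.h>	/* assumed home of the key-ID helpers */

static bool cert_matches_sinfo_id(const void *raw_serial, size_t serial_len,
				  const void *raw_issuer, size_t issuer_len,
				  const struct asymmetric_key_id *signing_cert_id)
{
	struct asymmetric_key_id *kid;
	bool same;

	/* Composite ID: serial number followed by issuer name. */
	kid = asymmetric_key_generate_id(raw_serial, serial_len,
					 raw_issuer, issuer_len);
	if (IS_ERR(kid))
		return false;

	same = asymmetric_key_id_same(kid, signing_cert_id);
	kfree(kid);
	return same;
}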
diff --git a/crypto/asymmetric_keys/pkcs7_key_type.c b/crypto/asymmetric_keys/pkcs7_key_type.c
index 3de5fb011de0..751f8fd7335d 100644
--- a/crypto/asymmetric_keys/pkcs7_key_type.c
+++ b/crypto/asymmetric_keys/pkcs7_key_type.c
@@ -72,11 +72,9 @@ error:
*/
static struct key_type key_type_pkcs7 = {
.name = "pkcs7_test",
- .def_lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
.preparse = pkcs7_preparse,
.free_preparse = user_free_preparse,
.instantiate = generic_key_instantiate,
- .match = user_match,
.revoke = user_revoke,
.destroy = user_destroy,
.describe = user_describe,
diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c
index 42e56aa7d277..3bd5a1e4c493 100644
--- a/crypto/asymmetric_keys/pkcs7_parser.c
+++ b/crypto/asymmetric_keys/pkcs7_parser.c
@@ -29,8 +29,25 @@ struct pkcs7_parse_context {
enum OID last_oid; /* Last OID encountered */
unsigned x509_index;
unsigned sinfo_index;
+ const void *raw_serial;
+ unsigned raw_serial_size;
+ unsigned raw_issuer_size;
+ const void *raw_issuer;
};
+/*
+ * Free a signed information block.
+ */
+static void pkcs7_free_signed_info(struct pkcs7_signed_info *sinfo)
+{
+ if (sinfo) {
+ mpi_free(sinfo->sig.mpi[0]);
+ kfree(sinfo->sig.digest);
+ kfree(sinfo->signing_cert_id);
+ kfree(sinfo);
+ }
+}
+
/**
* pkcs7_free_message - Free a PKCS#7 message
* @pkcs7: The PKCS#7 message to free
@@ -54,9 +71,7 @@ void pkcs7_free_message(struct pkcs7_message *pkcs7)
while (pkcs7->signed_infos) {
sinfo = pkcs7->signed_infos;
pkcs7->signed_infos = sinfo->next;
- mpi_free(sinfo->sig.mpi[0]);
- kfree(sinfo->sig.digest);
- kfree(sinfo);
+ pkcs7_free_signed_info(sinfo);
}
kfree(pkcs7);
}
@@ -71,51 +86,46 @@ EXPORT_SYMBOL_GPL(pkcs7_free_message);
struct pkcs7_message *pkcs7_parse_message(const void *data, size_t datalen)
{
struct pkcs7_parse_context *ctx;
- struct pkcs7_message *msg;
- long ret;
+ struct pkcs7_message *msg = ERR_PTR(-ENOMEM);
+ int ret;
- ret = -ENOMEM;
- msg = kzalloc(sizeof(struct pkcs7_message), GFP_KERNEL);
- if (!msg)
- goto error_no_sig;
ctx = kzalloc(sizeof(struct pkcs7_parse_context), GFP_KERNEL);
if (!ctx)
- goto error_no_ctx;
+ goto out_no_ctx;
+ ctx->msg = kzalloc(sizeof(struct pkcs7_message), GFP_KERNEL);
+ if (!ctx->msg)
+ goto out_no_msg;
ctx->sinfo = kzalloc(sizeof(struct pkcs7_signed_info), GFP_KERNEL);
if (!ctx->sinfo)
- goto error_no_sinfo;
+ goto out_no_sinfo;
- ctx->msg = msg;
ctx->data = (unsigned long)data;
ctx->ppcerts = &ctx->certs;
ctx->ppsinfo = &ctx->msg->signed_infos;
/* Attempt to decode the signature */
ret = asn1_ber_decoder(&pkcs7_decoder, ctx, data, datalen);
- if (ret < 0)
- goto error_decode;
+ if (ret < 0) {
+ msg = ERR_PTR(ret);
+ goto out;
+ }
+ msg = ctx->msg;
+ ctx->msg = NULL;
+
+out:
while (ctx->certs) {
struct x509_certificate *cert = ctx->certs;
ctx->certs = cert->next;
x509_free_certificate(cert);
}
- mpi_free(ctx->sinfo->sig.mpi[0]);
- kfree(ctx->sinfo->sig.digest);
- kfree(ctx->sinfo);
+ pkcs7_free_signed_info(ctx->sinfo);
+out_no_sinfo:
+ pkcs7_free_message(ctx->msg);
+out_no_msg:
kfree(ctx);
+out_no_ctx:
return msg;
-
-error_decode:
- mpi_free(ctx->sinfo->sig.mpi[0]);
- kfree(ctx->sinfo->sig.digest);
- kfree(ctx->sinfo);
-error_no_sinfo:
- kfree(ctx);
-error_no_ctx:
- pkcs7_free_message(msg);
-error_no_sig:
- return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(pkcs7_parse_message);
@@ -246,10 +256,10 @@ int pkcs7_extract_cert(void *context, size_t hdrlen,
if (IS_ERR(x509))
return PTR_ERR(x509);
- pr_debug("Got cert for %s\n", x509->subject);
- pr_debug("- fingerprint %s\n", x509->fingerprint);
-
x509->index = ++ctx->x509_index;
+ pr_debug("Got cert %u for %s\n", x509->index, x509->subject);
+ pr_debug("- fingerprint %*phN\n", x509->id->len, x509->id->data);
+
*ctx->ppcerts = x509;
ctx->ppcerts = &x509->next;
return 0;
@@ -338,8 +348,8 @@ int pkcs7_sig_note_serial(void *context, size_t hdrlen,
const void *value, size_t vlen)
{
struct pkcs7_parse_context *ctx = context;
- ctx->sinfo->raw_serial = value;
- ctx->sinfo->raw_serial_size = vlen;
+ ctx->raw_serial = value;
+ ctx->raw_serial_size = vlen;
return 0;
}
@@ -351,8 +361,8 @@ int pkcs7_sig_note_issuer(void *context, size_t hdrlen,
const void *value, size_t vlen)
{
struct pkcs7_parse_context *ctx = context;
- ctx->sinfo->raw_issuer = value;
- ctx->sinfo->raw_issuer_size = vlen;
+ ctx->raw_issuer = value;
+ ctx->raw_issuer_size = vlen;
return 0;
}
@@ -385,10 +395,21 @@ int pkcs7_note_signed_info(void *context, size_t hdrlen,
const void *value, size_t vlen)
{
struct pkcs7_parse_context *ctx = context;
-
- ctx->sinfo->index = ++ctx->sinfo_index;
- *ctx->ppsinfo = ctx->sinfo;
- ctx->ppsinfo = &ctx->sinfo->next;
+ struct pkcs7_signed_info *sinfo = ctx->sinfo;
+ struct asymmetric_key_id *kid;
+
+ /* Generate cert issuer + serial number key ID */
+ kid = asymmetric_key_generate_id(ctx->raw_serial,
+ ctx->raw_serial_size,
+ ctx->raw_issuer,
+ ctx->raw_issuer_size);
+ if (IS_ERR(kid))
+ return PTR_ERR(kid);
+
+ sinfo->signing_cert_id = kid;
+ sinfo->index = ++ctx->sinfo_index;
+ *ctx->ppsinfo = sinfo;
+ ctx->ppsinfo = &sinfo->next;
ctx->sinfo = kzalloc(sizeof(struct pkcs7_signed_info), GFP_KERNEL);
if (!ctx->sinfo)
return -ENOMEM;
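The rewritten pkcs7_parse_message() above replaces per-error cleanup with a single unwind shared by the success and failure paths. A hedged, self-contained sketch of that shape (all names hypothetical) follows: ownership of ctx->msg is handed to the caller by NULLing the pointer, so the common unwind only frees what the context still owns.

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_msg { int payload; };
struct demo_ctx { struct demo_msg *msg; };

static struct demo_msg *demo_parse(bool decode_ok)
{
	struct demo_msg *msg = ERR_PTR(-ENOMEM);
	struct demo_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		goto out_no_ctx;
	ctx->msg = kzalloc(sizeof(*ctx->msg), GFP_KERNEL);
	if (!ctx->msg)
		goto out_no_msg;

	if (!decode_ok) {		/* decode failed */
		msg = ERR_PTR(-EBADMSG);
		goto out;
	}
	msg = ctx->msg;			/* hand the message to the caller */
	ctx->msg = NULL;		/* ...so the unwind below skips it */
out:
	kfree(ctx->msg);		/* no-op once ownership has moved */
out_no_msg:
	kfree(ctx);
out_no_ctx:
	return msg;
}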
diff --git a/crypto/asymmetric_keys/pkcs7_parser.h b/crypto/asymmetric_keys/pkcs7_parser.h
index d25f4d15370f..efc7dc9b8f9c 100644
--- a/crypto/asymmetric_keys/pkcs7_parser.h
+++ b/crypto/asymmetric_keys/pkcs7_parser.h
@@ -23,6 +23,7 @@ struct pkcs7_signed_info {
struct x509_certificate *signer; /* Signing certificate (in msg->certs) */
unsigned index;
bool trusted;
+ bool unsupported_crypto; /* T if not usable due to missing crypto */
/* Message digest - the digest of the Content Data (or NULL) */
const void *msgdigest;
@@ -33,10 +34,7 @@ struct pkcs7_signed_info {
const void *authattrs;
/* Issuing cert serial number and issuer's name */
- const void *raw_serial;
- unsigned raw_serial_size;
- unsigned raw_issuer_size;
- const void *raw_issuer;
+ struct asymmetric_key_id *signing_cert_id;
/* Message signature.
*
diff --git a/crypto/asymmetric_keys/pkcs7_trust.c b/crypto/asymmetric_keys/pkcs7_trust.c
index e666eb011a85..1d29376072da 100644
--- a/crypto/asymmetric_keys/pkcs7_trust.c
+++ b/crypto/asymmetric_keys/pkcs7_trust.c
@@ -23,9 +23,9 @@
/**
* Check the trust on one PKCS#7 SignedInfo block.
*/
-int pkcs7_validate_trust_one(struct pkcs7_message *pkcs7,
- struct pkcs7_signed_info *sinfo,
- struct key *trust_keyring)
+static int pkcs7_validate_trust_one(struct pkcs7_message *pkcs7,
+ struct pkcs7_signed_info *sinfo,
+ struct key *trust_keyring)
{
struct public_key_signature *sig = &sinfo->sig;
struct x509_certificate *x509, *last = NULL, *p;
@@ -35,6 +35,11 @@ int pkcs7_validate_trust_one(struct pkcs7_message *pkcs7,
kenter(",%u,", sinfo->index);
+ if (sinfo->unsupported_crypto) {
+ kleave(" = -ENOPKG [cached]");
+ return -ENOPKG;
+ }
+
for (x509 = sinfo->signer; x509; x509 = x509->signer) {
if (x509->seen) {
if (x509->verified) {
@@ -49,15 +54,18 @@ int pkcs7_validate_trust_one(struct pkcs7_message *pkcs7,
/* Look to see if this certificate is present in the trusted
* keys.
*/
- key = x509_request_asymmetric_key(trust_keyring, x509->subject,
- x509->fingerprint);
- if (!IS_ERR(key))
+ key = x509_request_asymmetric_key(trust_keyring, x509->id,
+ false);
+ if (!IS_ERR(key)) {
/* One of the X.509 certificates in the PKCS#7 message
* is apparently the same as one we already trust.
* Verify that the trusted variant can also validate
* the signature on the descendant.
*/
+ pr_devel("sinfo %u: Cert %u as key %x\n",
+ sinfo->index, x509->index, key_serial(key));
goto matched;
+ }
if (key == ERR_PTR(-ENOMEM))
return -ENOMEM;
@@ -77,16 +85,36 @@ int pkcs7_validate_trust_one(struct pkcs7_message *pkcs7,
/* No match - see if the root certificate has a signer amongst the
* trusted keys.
*/
- if (!last || !last->issuer || !last->authority) {
- kleave(" = -ENOKEY [no backref]");
- return -ENOKEY;
+ if (last && last->authority) {
+ key = x509_request_asymmetric_key(trust_keyring, last->authority,
+ false);
+ if (!IS_ERR(key)) {
+ x509 = last;
+ pr_devel("sinfo %u: Root cert %u signer is key %x\n",
+ sinfo->index, x509->index, key_serial(key));
+ goto matched;
+ }
+ if (PTR_ERR(key) != -ENOKEY)
+ return PTR_ERR(key);
+ }
+
+ /* As a last resort, see if we have a trusted public key that matches
+ * the signed info directly.
+ */
+ key = x509_request_asymmetric_key(trust_keyring,
+ sinfo->signing_cert_id,
+ false);
+ if (!IS_ERR(key)) {
+ pr_devel("sinfo %u: Direct signer is key %x\n",
+ sinfo->index, key_serial(key));
+ x509 = NULL;
+ goto matched;
}
+ if (PTR_ERR(key) != -ENOKEY)
+ return PTR_ERR(key);
- key = x509_request_asymmetric_key(trust_keyring, last->issuer,
- last->authority);
- if (IS_ERR(key))
- return PTR_ERR(key) == -ENOMEM ? -ENOMEM : -ENOKEY;
- x509 = last;
+ kleave(" = -ENOKEY [no backref]");
+ return -ENOKEY;
matched:
ret = verify_signature(key, sig);
@@ -100,10 +128,12 @@ matched:
}
verified:
- x509->verified = true;
- for (p = sinfo->signer; p != x509; p = p->signer) {
- p->verified = true;
- p->trusted = trusted;
+ if (x509) {
+ x509->verified = true;
+ for (p = sinfo->signer; p != x509; p = p->signer) {
+ p->verified = true;
+ p->trusted = trusted;
+ }
}
sinfo->trusted = trusted;
kleave(" = 0");
@@ -141,24 +171,28 @@ int pkcs7_validate_trust(struct pkcs7_message *pkcs7,
{
struct pkcs7_signed_info *sinfo;
struct x509_certificate *p;
- int cached_ret = 0, ret;
+ int cached_ret = -ENOKEY;
+ int ret;
for (p = pkcs7->certs; p; p = p->next)
p->seen = false;
for (sinfo = pkcs7->signed_infos; sinfo; sinfo = sinfo->next) {
ret = pkcs7_validate_trust_one(pkcs7, sinfo, trust_keyring);
- if (ret < 0) {
- if (ret == -ENOPKG) {
+ switch (ret) {
+ case -ENOKEY:
+ continue;
+ case -ENOPKG:
+ if (cached_ret == -ENOKEY)
cached_ret = -ENOPKG;
- } else if (ret == -ENOKEY) {
- if (cached_ret == 0)
- cached_ret = -ENOKEY;
- } else {
- return ret;
- }
+ continue;
+ case 0:
+ *_trusted |= sinfo->trusted;
+ cached_ret = 0;
+ continue;
+ default:
+ return ret;
}
- *_trusted |= sinfo->trusted;
}
return cached_ret;
diff --git a/crypto/asymmetric_keys/pkcs7_verify.c b/crypto/asymmetric_keys/pkcs7_verify.c
index c62cf8006e1f..cd455450b069 100644
--- a/crypto/asymmetric_keys/pkcs7_verify.c
+++ b/crypto/asymmetric_keys/pkcs7_verify.c
@@ -131,8 +131,7 @@ static int pkcs7_find_key(struct pkcs7_message *pkcs7,
struct x509_certificate *x509;
unsigned certix = 1;
- kenter("%u,%u,%u",
- sinfo->index, sinfo->raw_serial_size, sinfo->raw_issuer_size);
+ kenter("%u", sinfo->index);
for (x509 = pkcs7->certs; x509; x509 = x509->next, certix++) {
/* I'm _assuming_ that the generator of the PKCS#7 message will
@@ -140,21 +139,11 @@ static int pkcs7_find_key(struct pkcs7_message *pkcs7,
* PKCS#7 message - but I can't be 100% sure of that. It's
* possible this will need element-by-element comparison.
*/
- if (x509->raw_serial_size != sinfo->raw_serial_size ||
- memcmp(x509->raw_serial, sinfo->raw_serial,
- sinfo->raw_serial_size) != 0)
+ if (!asymmetric_key_id_same(x509->id, sinfo->signing_cert_id))
continue;
pr_devel("Sig %u: Found cert serial match X.509[%u]\n",
sinfo->index, certix);
- if (x509->raw_issuer_size != sinfo->raw_issuer_size ||
- memcmp(x509->raw_issuer, sinfo->raw_issuer,
- sinfo->raw_issuer_size) != 0) {
- pr_warn("Sig %u: X.509 subject and PKCS#7 issuer don't match\n",
- sinfo->index);
- continue;
- }
-
if (x509->pub->pkey_algo != sinfo->sig.pkey_algo) {
pr_warn("Sig %u: X.509 algo and PKCS#7 sig algo don't match\n",
sinfo->index);
@@ -164,9 +153,14 @@ static int pkcs7_find_key(struct pkcs7_message *pkcs7,
sinfo->signer = x509;
return 0;
}
- pr_warn("Sig %u: Issuing X.509 cert not found (#%*ph)\n",
- sinfo->index, sinfo->raw_serial_size, sinfo->raw_serial);
- return -ENOKEY;
+
+ /* The relevant X.509 cert isn't found here, but it might be found in
+ * the trust keyring.
+ */
+ pr_debug("Sig %u: Issuing X.509 cert not found (#%*phN)\n",
+ sinfo->index,
+ sinfo->signing_cert_id->len, sinfo->signing_cert_id->data);
+ return 0;
}
/*
@@ -184,15 +178,18 @@ static int pkcs7_verify_sig_chain(struct pkcs7_message *pkcs7,
p->seen = false;
for (;;) {
- pr_debug("verify %s: %s\n", x509->subject, x509->fingerprint);
+ pr_debug("verify %s: %*phN\n",
+ x509->subject,
+ x509->raw_serial_size, x509->raw_serial);
x509->seen = true;
ret = x509_get_sig_params(x509);
if (ret < 0)
- return ret;
+ goto maybe_missing_crypto_in_x509;
pr_debug("- issuer %s\n", x509->issuer);
if (x509->authority)
- pr_debug("- authkeyid %s\n", x509->authority);
+ pr_debug("- authkeyid %*phN\n",
+ x509->authority->len, x509->authority->data);
if (!x509->authority ||
strcmp(x509->subject, x509->issuer) == 0) {
@@ -209,7 +206,7 @@ static int pkcs7_verify_sig_chain(struct pkcs7_message *pkcs7,
ret = x509_check_signature(x509->pub, x509);
if (ret < 0)
- return ret;
+ goto maybe_missing_crypto_in_x509;
x509->signer = x509;
pr_debug("- self-signed\n");
return 0;
@@ -218,13 +215,14 @@ static int pkcs7_verify_sig_chain(struct pkcs7_message *pkcs7,
/* Look through the X.509 certificates in the PKCS#7 message's
* list to see if the next one is there.
*/
- pr_debug("- want %s\n", x509->authority);
+ pr_debug("- want %*phN\n",
+ x509->authority->len, x509->authority->data);
for (p = pkcs7->certs; p; p = p->next) {
- pr_debug("- cmp [%u] %s\n", p->index, p->fingerprint);
- if (p->raw_subject_size == x509->raw_issuer_size &&
- strcmp(p->fingerprint, x509->authority) == 0 &&
- memcmp(p->raw_subject, x509->raw_issuer,
- x509->raw_issuer_size) == 0)
+ if (!p->skid)
+ continue;
+ pr_debug("- cmp [%u] %*phN\n",
+ p->index, p->skid->len, p->skid->data);
+ if (asymmetric_key_id_same(p->skid, x509->authority))
goto found_issuer;
}
@@ -233,7 +231,7 @@ static int pkcs7_verify_sig_chain(struct pkcs7_message *pkcs7,
return 0;
found_issuer:
- pr_debug("- issuer %s\n", p->subject);
+ pr_debug("- subject %s\n", p->subject);
if (p->seen) {
pr_warn("Sig %u: X.509 chain contains loop\n",
sinfo->index);
@@ -250,6 +248,17 @@ static int pkcs7_verify_sig_chain(struct pkcs7_message *pkcs7,
x509 = p;
might_sleep();
}
+
+maybe_missing_crypto_in_x509:
+ /* Just prune the certificate chain at this point if we lack some
+ * crypto module to go further. Note, however, we don't want to set
+ * sinfo->unsupported_crypto as the signed info block may still be
+ * validatable against an X.509 cert lower in the chain that we have a
+ * trusted copy of.
+ */
+ if (ret == -ENOPKG)
+ return 0;
+ return ret;
}
/*
@@ -269,11 +278,14 @@ static int pkcs7_verify_one(struct pkcs7_message *pkcs7,
if (ret < 0)
return ret;
- /* Find the key for the signature */
+ /* Find the key for the signature if there is one */
ret = pkcs7_find_key(pkcs7, sinfo);
if (ret < 0)
return ret;
+ if (!sinfo->signer)
+ return 0;
+
pr_devel("Using X.509[%u] for sig %u\n",
sinfo->signer->index, sinfo->index);
@@ -291,11 +303,33 @@ static int pkcs7_verify_one(struct pkcs7_message *pkcs7,
/**
* pkcs7_verify - Verify a PKCS#7 message
* @pkcs7: The PKCS#7 message to be verified
+ *
+ * Verify that a PKCS#7 message is internally consistent - that is, that the
+ * data digest matches the digest in the AuthAttrs, and that any signature in
+ * the message (or in one of the X.509 certificates it carries) that matches
+ * another X.509 cert in the message can be verified.
+ *
+ * This does not look to match the contents of the PKCS#7 message against any
+ * external public keys.
+ *
+ * Returns, in order of descending priority:
+ *
+ * (*) -EKEYREJECTED if a signature failed to match for which we found an
+ * appropriate X.509 certificate, or:
+ *
+ * (*) -EBADMSG if some part of the message was invalid, or:
+ *
+ * (*) -ENOPKG if none of the signature chains are verifiable because suitable
+ * crypto modules couldn't be found, or:
+ *
+ * (*) 0 if all the signature chains that don't incur -ENOPKG can be verified
+ * (note that a signature chain may be of zero length).
*/
int pkcs7_verify(struct pkcs7_message *pkcs7)
{
struct pkcs7_signed_info *sinfo;
struct x509_certificate *x509;
+ int enopkg = -ENOPKG;
int ret, n;
kenter("");
@@ -304,18 +338,24 @@ int pkcs7_verify(struct pkcs7_message *pkcs7)
ret = x509_get_sig_params(x509);
if (ret < 0)
return ret;
- pr_debug("X.509[%u] %s\n", n, x509->authority);
+ pr_debug("X.509[%u] %*phN\n",
+ n, x509->authority->len, x509->authority->data);
}
for (sinfo = pkcs7->signed_infos; sinfo; sinfo = sinfo->next) {
ret = pkcs7_verify_one(pkcs7, sinfo);
if (ret < 0) {
+ if (ret == -ENOPKG) {
+ sinfo->unsupported_crypto = true;
+ continue;
+ }
kleave(" = %d", ret);
return ret;
}
+ enopkg = 0;
}
- kleave(" = 0");
- return 0;
+ kleave(" = %d", enopkg);
+ return enopkg;
}
EXPORT_SYMBOL_GPL(pkcs7_verify);
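Given the return-code priorities documented above, a hypothetical caller might triage them as sketched below. Whether missing crypto (-ENOPKG) is fatal is a policy decision left to the caller; the function name and the <crypto/pkcs7.h> include are assumptions for illustration.

#include <linux/errno.h>
#include <linux/types.h>
#include <crypto/pkcs7.h>	/* assumed declaration of pkcs7_verify() */

static int demo_check_pkcs7(struct pkcs7_message *pkcs7,
			    bool allow_missing_crypto)
{
	int ret = pkcs7_verify(pkcs7);

	switch (ret) {
	case 0:			/* internally consistent */
		return 0;
	case -ENOPKG:		/* no usable crypto for any chain */
		return allow_missing_crypto ? 0 : ret;
	case -EKEYREJECTED:	/* a matched signature failed to verify */
	case -EBADMSG:		/* malformed message */
	default:
		return ret;
	}
}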
diff --git a/crypto/asymmetric_keys/signature.c b/crypto/asymmetric_keys/signature.c
index 50b3f880b4ff..7525fd183574 100644
--- a/crypto/asymmetric_keys/signature.c
+++ b/crypto/asymmetric_keys/signature.c
@@ -11,6 +11,7 @@
* 2 of the Licence, or (at your option) any later version.
*/
+#define pr_fmt(fmt) "SIG: "fmt
#include <keys/asymmetric-subtype.h>
#include <linux/module.h>
#include <linux/err.h>
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index ac72348c186a..a668d90302d3 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -46,7 +46,8 @@ void x509_free_certificate(struct x509_certificate *cert)
public_key_destroy(cert->pub);
kfree(cert->issuer);
kfree(cert->subject);
- kfree(cert->fingerprint);
+ kfree(cert->id);
+ kfree(cert->skid);
kfree(cert->authority);
kfree(cert->sig.digest);
mpi_free(cert->sig.rsa.s);
@@ -62,6 +63,7 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen)
{
struct x509_certificate *cert;
struct x509_parse_context *ctx;
+ struct asymmetric_key_id *kid;
long ret;
ret = -ENOMEM;
@@ -89,6 +91,17 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen)
if (ret < 0)
goto error_decode;
+ /* Generate cert issuer + serial number key ID */
+ kid = asymmetric_key_generate_id(cert->raw_serial,
+ cert->raw_serial_size,
+ cert->raw_issuer,
+ cert->raw_issuer_size);
+ if (IS_ERR(kid)) {
+ ret = PTR_ERR(kid);
+ goto error_decode;
+ }
+ cert->id = kid;
+
kfree(ctx);
return cert;
@@ -407,36 +420,36 @@ int x509_process_extension(void *context, size_t hdrlen,
const void *value, size_t vlen)
{
struct x509_parse_context *ctx = context;
+ struct asymmetric_key_id *kid;
const unsigned char *v = value;
- char *f;
int i;
pr_debug("Extension: %u\n", ctx->last_oid);
if (ctx->last_oid == OID_subjectKeyIdentifier) {
/* Get hold of the key fingerprint */
- if (vlen < 3)
+ if (ctx->cert->skid || vlen < 3)
return -EBADMSG;
if (v[0] != ASN1_OTS || v[1] != vlen - 2)
return -EBADMSG;
v += 2;
vlen -= 2;
- f = kmalloc(vlen * 2 + 1, GFP_KERNEL);
- if (!f)
- return -ENOMEM;
- for (i = 0; i < vlen; i++)
- sprintf(f + i * 2, "%02x", v[i]);
- pr_debug("fingerprint %s\n", f);
- ctx->cert->fingerprint = f;
+ ctx->cert->raw_skid_size = vlen;
+ ctx->cert->raw_skid = v;
+ kid = asymmetric_key_generate_id(ctx->cert->raw_subject,
+ ctx->cert->raw_subject_size,
+ v, vlen);
+ if (IS_ERR(kid))
+ return PTR_ERR(kid);
+ ctx->cert->skid = kid;
+ pr_debug("subjkeyid %*phN\n", kid->len, kid->data);
return 0;
}
if (ctx->last_oid == OID_authorityKeyIdentifier) {
- size_t key_len;
-
/* Get hold of the CA key fingerprint */
- if (vlen < 5)
+ if (ctx->cert->authority || vlen < 5)
return -EBADMSG;
/* Authority Key Identifier must be a Constructed SEQUENCE */
@@ -454,7 +467,7 @@ int x509_process_extension(void *context, size_t hdrlen,
v[3] > vlen - 4)
return -EBADMSG;
- key_len = v[3];
+ vlen = v[3];
v += 4;
} else {
/* Long Form length */
@@ -476,17 +489,17 @@ int x509_process_extension(void *context, size_t hdrlen,
v[sub + 1] > vlen - 4 - sub)
return -EBADMSG;
- key_len = v[sub + 1];
+ vlen = v[sub + 1];
v += (sub + 2);
}
- f = kmalloc(key_len * 2 + 1, GFP_KERNEL);
- if (!f)
- return -ENOMEM;
- for (i = 0; i < key_len; i++)
- sprintf(f + i * 2, "%02x", v[i]);
- pr_debug("authority %s\n", f);
- ctx->cert->authority = f;
+ kid = asymmetric_key_generate_id(ctx->cert->raw_issuer,
+ ctx->cert->raw_issuer_size,
+ v, vlen);
+ if (IS_ERR(kid))
+ return PTR_ERR(kid);
+ pr_debug("authkeyid %*phN\n", kid->len, kid->data);
+ ctx->cert->authority = kid;
return 0;
}
diff --git a/crypto/asymmetric_keys/x509_parser.h b/crypto/asymmetric_keys/x509_parser.h
index 1b76f207c1f3..3dfe6b5d6f0b 100644
--- a/crypto/asymmetric_keys/x509_parser.h
+++ b/crypto/asymmetric_keys/x509_parser.h
@@ -19,8 +19,9 @@ struct x509_certificate {
struct public_key_signature sig; /* Signature parameters */
char *issuer; /* Name of certificate issuer */
char *subject; /* Name of certificate subject */
- char *fingerprint; /* Key fingerprint as hex */
- char *authority; /* Authority key fingerprint as hex */
+ struct asymmetric_key_id *id; /* Serial number + issuer */
+ struct asymmetric_key_id *skid; /* Subject + subjectKeyId (optional) */
+ struct asymmetric_key_id *authority; /* Authority key identifier (optional) */
struct tm valid_from;
struct tm valid_to;
const void *tbs; /* Signed data */
@@ -33,10 +34,13 @@ struct x509_certificate {
const void *raw_issuer; /* Raw issuer name in ASN.1 */
const void *raw_subject; /* Raw subject name in ASN.1 */
unsigned raw_subject_size;
+ unsigned raw_skid_size;
+ const void *raw_skid; /* Raw subjectKeyId in ASN.1 */
unsigned index;
bool seen; /* Infinite recursion prevention */
bool verified;
bool trusted;
+ bool unsupported_crypto; /* T if can't be verified due to missing crypto */
};
/*
diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
index f3d62307e6ee..a6c42031628e 100644
--- a/crypto/asymmetric_keys/x509_public_key.c
+++ b/crypto/asymmetric_keys/x509_public_key.c
@@ -25,7 +25,7 @@
#include "x509_parser.h"
static bool use_builtin_keys;
-static char *ca_keyid;
+static struct asymmetric_key_id *ca_keyid;
#ifndef MODULE
static int __init ca_keys_setup(char *str)
@@ -33,10 +33,16 @@ static int __init ca_keys_setup(char *str)
if (!str) /* default system keyring */
return 1;
- if (strncmp(str, "id:", 3) == 0)
- ca_keyid = str; /* owner key 'id:xxxxxx' */
- else if (strcmp(str, "builtin") == 0)
+ if (strncmp(str, "id:", 3) == 0) {
+ struct asymmetric_key_id *p;
+ p = asymmetric_key_hex_to_key_id(str + 3);
+ if (p == ERR_PTR(-EINVAL))
+ pr_err("Unparsable hex string in ca_keys\n");
+ else if (!IS_ERR(p))
+ ca_keyid = p; /* owner key 'id:xxxxxx' */
+ } else if (strcmp(str, "builtin") == 0) {
use_builtin_keys = true;
+ }
return 1;
}
@@ -46,31 +52,35 @@ __setup("ca_keys=", ca_keys_setup);
/**
* x509_request_asymmetric_key - Request a key by X.509 certificate params.
* @keyring: The keys to search.
- * @subject: The name of the subject to whom the key belongs.
- * @key_id: The subject key ID as a hex string.
+ * @kid: The key ID.
+ * @partial: Use partial match if true, exact if false.
*
* Find a key in the given keyring by subject name and key ID. These might,
* for instance, be the issuer name and the authority key ID of an X.509
* certificate that needs to be verified.
*/
struct key *x509_request_asymmetric_key(struct key *keyring,
- const char *subject,
- const char *key_id)
+ const struct asymmetric_key_id *kid,
+ bool partial)
{
key_ref_t key;
- size_t subject_len = strlen(subject), key_id_len = strlen(key_id);
- char *id;
+ char *id, *p;
- /* Construct an identifier "<subjname>:<keyid>". */
- id = kmalloc(subject_len + 2 + key_id_len + 1, GFP_KERNEL);
+ /* Construct an identifier "id:<keyid>". */
+ p = id = kmalloc(2 + 1 + kid->len * 2 + 1, GFP_KERNEL);
if (!id)
return ERR_PTR(-ENOMEM);
- memcpy(id, subject, subject_len);
- id[subject_len + 0] = ':';
- id[subject_len + 1] = ' ';
- memcpy(id + subject_len + 2, key_id, key_id_len);
- id[subject_len + 2 + key_id_len] = 0;
+ if (partial) {
+ *p++ = 'i';
+ *p++ = 'd';
+ } else {
+ *p++ = 'e';
+ *p++ = 'x';
+ }
+ *p++ = ':';
+ p = bin2hex(p, kid->data, kid->len);
+ *p = 0;
pr_debug("Look up: \"%s\"\n", id);
@@ -112,6 +122,8 @@ int x509_get_sig_params(struct x509_certificate *cert)
pr_devel("==>%s()\n", __func__);
+ if (cert->unsupported_crypto)
+ return -ENOPKG;
if (cert->sig.rsa.s)
return 0;
@@ -124,8 +136,13 @@ int x509_get_sig_params(struct x509_certificate *cert)
* big the hash operational data will be.
*/
tfm = crypto_alloc_shash(hash_algo_name[cert->sig.pkey_hash_algo], 0, 0);
- if (IS_ERR(tfm))
- return (PTR_ERR(tfm) == -ENOENT) ? -ENOPKG : PTR_ERR(tfm);
+ if (IS_ERR(tfm)) {
+ if (PTR_ERR(tfm) == -ENOENT) {
+ cert->unsupported_crypto = true;
+ return -ENOPKG;
+ }
+ return PTR_ERR(tfm);
+ }
desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
digest_size = crypto_shash_digestsize(tfm);
@@ -172,6 +189,8 @@ int x509_check_signature(const struct public_key *pub,
return ret;
ret = public_key_verify_signature(pub, &cert->sig);
+ if (ret == -ENOPKG)
+ cert->unsupported_crypto = true;
pr_debug("Cert Verification: %d\n", ret);
return ret;
}
@@ -195,11 +214,11 @@ static int x509_validate_trust(struct x509_certificate *cert,
if (!trust_keyring)
return -EOPNOTSUPP;
- if (ca_keyid && !asymmetric_keyid_match(cert->authority, ca_keyid))
+ if (ca_keyid && !asymmetric_key_id_partial(cert->authority, ca_keyid))
return -EPERM;
- key = x509_request_asymmetric_key(trust_keyring,
- cert->issuer, cert->authority);
+ key = x509_request_asymmetric_key(trust_keyring, cert->authority,
+ false);
if (!IS_ERR(key)) {
if (!use_builtin_keys
|| test_bit(KEY_FLAG_BUILTIN, &key->flags))
@@ -214,9 +233,11 @@ static int x509_validate_trust(struct x509_certificate *cert,
*/
static int x509_key_preparse(struct key_preparsed_payload *prep)
{
+ struct asymmetric_key_ids *kids;
struct x509_certificate *cert;
+ const char *q;
size_t srlen, sulen;
- char *desc = NULL;
+ char *desc = NULL, *p;
int ret;
cert = x509_cert_parse(prep->data, prep->datalen);
@@ -249,19 +270,12 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)
pkey_algo_name[cert->sig.pkey_algo],
hash_algo_name[cert->sig.pkey_hash_algo]);
- if (!cert->fingerprint) {
- pr_warn("Cert for '%s' must have a SubjKeyId extension\n",
- cert->subject);
- ret = -EKEYREJECTED;
- goto error_free_cert;
- }
-
cert->pub->algo = pkey_algo[cert->pub->pkey_algo];
cert->pub->id_type = PKEY_ID_X509;
/* Check the signature on the key if it appears to be self-signed */
if (!cert->authority ||
- strcmp(cert->fingerprint, cert->authority) == 0) {
+ asymmetric_key_id_same(cert->skid, cert->authority)) {
ret = x509_check_signature(cert->pub, cert); /* self-signed */
if (ret < 0)
goto error_free_cert;
@@ -273,31 +287,52 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)
/* Propose a description */
sulen = strlen(cert->subject);
- srlen = strlen(cert->fingerprint);
+ if (cert->raw_skid) {
+ srlen = cert->raw_skid_size;
+ q = cert->raw_skid;
+ } else {
+ srlen = cert->raw_serial_size;
+ q = cert->raw_serial;
+ }
+ if (srlen > 1 && *q == 0) {
+ srlen--;
+ q++;
+ }
+
ret = -ENOMEM;
- desc = kmalloc(sulen + 2 + srlen + 1, GFP_KERNEL);
+ desc = kmalloc(sulen + 2 + srlen * 2 + 1, GFP_KERNEL);
if (!desc)
goto error_free_cert;
- memcpy(desc, cert->subject, sulen);
- desc[sulen] = ':';
- desc[sulen + 1] = ' ';
- memcpy(desc + sulen + 2, cert->fingerprint, srlen);
- desc[sulen + 2 + srlen] = 0;
+ p = memcpy(desc, cert->subject, sulen);
+ p += sulen;
+ *p++ = ':';
+ *p++ = ' ';
+ p = bin2hex(p, q, srlen);
+ *p = 0;
+
+ kids = kmalloc(sizeof(struct asymmetric_key_ids), GFP_KERNEL);
+ if (!kids)
+ goto error_free_desc;
+ kids->id[0] = cert->id;
+ kids->id[1] = cert->skid;
/* We're pinning the module by being linked against it */
__module_get(public_key_subtype.owner);
prep->type_data[0] = &public_key_subtype;
- prep->type_data[1] = cert->fingerprint;
+ prep->type_data[1] = kids;
prep->payload[0] = cert->pub;
prep->description = desc;
prep->quotalen = 100;
/* We've finished with the certificate */
cert->pub = NULL;
- cert->fingerprint = NULL;
+ cert->id = NULL;
+ cert->skid = NULL;
desc = NULL;
ret = 0;
+error_free_desc:
+ kfree(desc);
error_free_cert:
x509_free_certificate(cert);
return ret;
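The ca_keys= handling above now goes through asymmetric_key_hex_to_key_id() and asymmetric_key_id_partial(). A hedged sketch of that pairing in isolation, where the wrapper name and header path are illustrative assumptions: parse the hex that followed "id:" into a key ID, then partial-match it against a certificate's authority key ID.

#include <linux/err.h>
#include <linux/slab.h>
#include <keys/asymmetric-type.h>	/* assumed home of the key-ID helpers */

static bool demo_ca_keyid_allows(const struct asymmetric_key_id *authority,
				 const char *hex_id)
{
	struct asymmetric_key_id *want;
	bool ok;

	want = asymmetric_key_hex_to_key_id(hex_id);	/* e.g. "b1c2d3..." */
	if (IS_ERR(want))
		return false;

	/* Partial match: the tail of the cert's authority ID must equal it. */
	ok = asymmetric_key_id_partial(authority, want);
	kfree(want);
	return ok;
}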
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index 820b4009d5f7..3265ce94d282 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -62,12 +62,6 @@ static DEFINE_SPINLOCK(rsxx_ida_lock);
/* --------------------Debugfs Setup ------------------- */
-struct rsxx_cram {
- u32 f_pos;
- u32 offset;
- void *i_private;
-};
-
static int rsxx_attr_pci_regs_show(struct seq_file *m, void *p)
{
struct rsxx_cardinfo *card = m->private;
@@ -184,93 +178,50 @@ static int rsxx_attr_pci_regs_open(struct inode *inode, struct file *file)
static ssize_t rsxx_cram_read(struct file *fp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
- struct rsxx_cram *info = fp->private_data;
- struct rsxx_cardinfo *card = info->i_private;
+ struct rsxx_cardinfo *card = file_inode(fp)->i_private;
char *buf;
- int st;
+ ssize_t st;
- buf = kzalloc(sizeof(*buf) * cnt, GFP_KERNEL);
+ buf = kzalloc(cnt, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- info->f_pos = (u32)*ppos + info->offset;
-
- st = rsxx_creg_read(card, CREG_ADD_CRAM + info->f_pos, cnt, buf, 1);
- if (st)
- return st;
-
- st = copy_to_user(ubuf, buf, cnt);
+ st = rsxx_creg_read(card, CREG_ADD_CRAM + (u32)*ppos, cnt, buf, 1);
+ if (!st)
+ st = copy_to_user(ubuf, buf, cnt);
+ kfree(buf);
if (st)
return st;
-
- info->offset += cnt;
-
- kfree(buf);
-
+ *ppos += cnt;
return cnt;
}
static ssize_t rsxx_cram_write(struct file *fp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
- struct rsxx_cram *info = fp->private_data;
- struct rsxx_cardinfo *card = info->i_private;
+ struct rsxx_cardinfo *card = file_inode(fp)->i_private;
char *buf;
- int st;
+ ssize_t st;
- buf = kzalloc(sizeof(*buf) * cnt, GFP_KERNEL);
+ buf = kzalloc(cnt, GFP_KERNEL);
if (!buf)
return -ENOMEM;
st = copy_from_user(buf, ubuf, cnt);
+ if (!st)
+ st = rsxx_creg_write(card, CREG_ADD_CRAM + (u32)*ppos, cnt,
+ buf, 1);
+ kfree(buf);
if (st)
return st;
-
- info->f_pos = (u32)*ppos + info->offset;
-
- st = rsxx_creg_write(card, CREG_ADD_CRAM + info->f_pos, cnt, buf, 1);
- if (st)
- return st;
-
- info->offset += cnt;
-
- kfree(buf);
-
+ *ppos += cnt;
return cnt;
}
-static int rsxx_cram_open(struct inode *inode, struct file *file)
-{
- struct rsxx_cram *info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- info->i_private = inode->i_private;
- info->f_pos = file->f_pos;
- file->private_data = info;
-
- return 0;
-}
-
-static int rsxx_cram_release(struct inode *inode, struct file *file)
-{
- struct rsxx_cram *info = file->private_data;
-
- if (!info)
- return 0;
-
- kfree(info);
- file->private_data = NULL;
-
- return 0;
-}
-
static const struct file_operations debugfs_cram_fops = {
.owner = THIS_MODULE,
- .open = rsxx_cram_open,
.read = rsxx_cram_read,
.write = rsxx_cram_write,
- .release = rsxx_cram_release,
};
static const struct file_operations debugfs_stats_fops = {
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 5814deb6963d..756b8ec00f16 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -9,6 +9,7 @@
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/genhd.h>
+#include <linux/cdrom.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
@@ -22,8 +23,8 @@
#define DRV_MODULE_NAME "sunvdc"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "1.0"
-#define DRV_MODULE_RELDATE "June 25, 2007"
+#define DRV_MODULE_VERSION "1.1"
+#define DRV_MODULE_RELDATE "February 13, 2013"
static char version[] =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@@ -32,7 +33,7 @@ MODULE_DESCRIPTION("Sun LDOM virtual disk client driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
-#define VDC_TX_RING_SIZE 256
+#define VDC_TX_RING_SIZE 512
#define WAITING_FOR_LINK_UP 0x01
#define WAITING_FOR_TX_SPACE 0x02
@@ -65,10 +66,10 @@ struct vdc_port {
u64 operations;
u32 vdisk_size;
u8 vdisk_type;
+ u8 vdisk_mtype;
char disk_name[32];
- struct vio_disk_geom geom;
struct vio_disk_vtoc label;
};
@@ -79,9 +80,16 @@ static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
/* Ordered from largest major to lowest */
static struct vio_version vdc_versions[] = {
+ { .major = 1, .minor = 1 },
{ .major = 1, .minor = 0 },
};
+static inline int vdc_version_supported(struct vdc_port *port,
+ u16 major, u16 minor)
+{
+ return port->vio.ver.major == major && port->vio.ver.minor >= minor;
+}
+
#define VDCBLK_NAME "vdisk"
static int vdc_major;
#define PARTITION_SHIFT 3
@@ -94,18 +102,54 @@ static inline u32 vdc_tx_dring_avail(struct vio_dring_state *dr)
static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
struct gendisk *disk = bdev->bd_disk;
- struct vdc_port *port = disk->private_data;
+ sector_t nsect = get_capacity(disk);
+ sector_t cylinders = nsect;
- geo->heads = (u8) port->geom.num_hd;
- geo->sectors = (u8) port->geom.num_sec;
- geo->cylinders = port->geom.num_cyl;
+ geo->heads = 0xff;
+ geo->sectors = 0x3f;
+ sector_div(cylinders, geo->heads * geo->sectors);
+ geo->cylinders = cylinders;
+ if ((sector_t)(geo->cylinders + 1) * geo->heads * geo->sectors < nsect)
+ geo->cylinders = 0xffff;
return 0;
}
+/* Add ioctl/CDROM_GET_CAPABILITY to support cdrom_id in udev
+ * when vdisk_mtype is VD_MEDIA_TYPE_CD or VD_MEDIA_TYPE_DVD.
+ * Needed to be able to install inside an ldom from an iso image.
+ */
+static int vdc_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned command, unsigned long argument)
+{
+ int i;
+ struct gendisk *disk;
+
+ switch (command) {
+ case CDROMMULTISESSION:
+ pr_debug(PFX "Multisession CDs not supported\n");
+ for (i = 0; i < sizeof(struct cdrom_multisession); i++)
+ if (put_user(0, (char __user *)(argument + i)))
+ return -EFAULT;
+ return 0;
+
+ case CDROM_GET_CAPABILITY:
+ disk = bdev->bd_disk;
+
+ if (bdev->bd_disk && (disk->flags & GENHD_FL_CD))
+ return 0;
+ return -EINVAL;
+
+ default:
+ pr_debug(PFX "ioctl %08x not supported\n", command);
+ return -EINVAL;
+ }
+}
+
static const struct block_device_operations vdc_fops = {
.owner = THIS_MODULE,
.getgeo = vdc_getgeo,
+ .ioctl = vdc_ioctl,
};
static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)
@@ -165,9 +209,9 @@ static int vdc_handle_attr(struct vio_driver_state *vio, void *arg)
struct vio_disk_attr_info *pkt = arg;
viodbg(HS, "GOT ATTR stype[0x%x] ops[%llx] disk_size[%llu] disk_type[%x] "
- "xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n",
+ "mtype[0x%x] xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n",
pkt->tag.stype, pkt->operations,
- pkt->vdisk_size, pkt->vdisk_type,
+ pkt->vdisk_size, pkt->vdisk_type, pkt->vdisk_mtype,
pkt->xfer_mode, pkt->vdisk_block_size,
pkt->max_xfer_size);
@@ -192,8 +236,11 @@ static int vdc_handle_attr(struct vio_driver_state *vio, void *arg)
}
port->operations = pkt->operations;
- port->vdisk_size = pkt->vdisk_size;
port->vdisk_type = pkt->vdisk_type;
+ if (vdc_version_supported(port, 1, 1)) {
+ port->vdisk_size = pkt->vdisk_size;
+ port->vdisk_mtype = pkt->vdisk_mtype;
+ }
if (pkt->max_xfer_size < port->max_xfer_size)
port->max_xfer_size = pkt->max_xfer_size;
port->vdisk_block_size = pkt->vdisk_block_size;
@@ -236,7 +283,9 @@ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
__blk_end_request(req, (desc->status ? -EIO : 0), desc->size);
- if (blk_queue_stopped(port->disk->queue))
+ /* restart blk queue when ring is half emptied */
+ if (blk_queue_stopped(port->disk->queue) &&
+ vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50)
blk_start_queue(port->disk->queue);
}
@@ -388,12 +437,6 @@ static int __send_request(struct request *req)
for (i = 0; i < nsg; i++)
len += sg[i].length;
- if (unlikely(vdc_tx_dring_avail(dr) < 1)) {
- blk_stop_queue(port->disk->queue);
- err = -ENOMEM;
- goto out;
- }
-
desc = vio_dring_cur(dr);
err = ldc_map_sg(port->vio.lp, sg, nsg,
@@ -433,21 +476,32 @@ static int __send_request(struct request *req)
port->req_id++;
dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1);
}
-out:
return err;
}
-static void do_vdc_request(struct request_queue *q)
+static void do_vdc_request(struct request_queue *rq)
{
- while (1) {
- struct request *req = blk_fetch_request(q);
+ struct request *req;
- if (!req)
- break;
+ while ((req = blk_peek_request(rq)) != NULL) {
+ struct vdc_port *port;
+ struct vio_dring_state *dr;
+
+ port = req->rq_disk->private_data;
+ dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ if (unlikely(vdc_tx_dring_avail(dr) < 1))
+ goto wait;
+
+ blk_start_request(req);
- if (__send_request(req) < 0)
- __blk_end_request_all(req, -EIO);
+ if (__send_request(req) < 0) {
+ blk_requeue_request(rq, req);
+wait:
+ /* Avoid pointless unplugs. */
+ blk_stop_queue(rq);
+ break;
+ }
}
}
@@ -663,18 +717,27 @@ static int probe_disk(struct vdc_port *port)
return err;
}
- err = generic_request(port, VD_OP_GET_DISKGEOM,
- &port->geom, sizeof(port->geom));
- if (err < 0) {
- printk(KERN_ERR PFX "VD_OP_GET_DISKGEOM returns "
- "error %d\n", err);
- return err;
+ if (vdc_version_supported(port, 1, 1)) {
+ /* vdisk_size should be set during the handshake; if it wasn't,
+ * then the underlying disk is reserved by another system.
+ */
+ if (port->vdisk_size == -1)
+ return -ENODEV;
+ } else {
+ struct vio_disk_geom geom;
+
+ err = generic_request(port, VD_OP_GET_DISKGEOM,
+ &geom, sizeof(geom));
+ if (err < 0) {
+ printk(KERN_ERR PFX "VD_OP_GET_DISKGEOM returns "
+ "error %d\n", err);
+ return err;
+ }
+ port->vdisk_size = ((u64)geom.num_cyl *
+ (u64)geom.num_hd *
+ (u64)geom.num_sec);
}
- port->vdisk_size = ((u64)port->geom.num_cyl *
- (u64)port->geom.num_hd *
- (u64)port->geom.num_sec);
-
q = blk_init_queue(do_vdc_request, &port->vio.lock);
if (!q) {
printk(KERN_ERR PFX "%s: Could not allocate queue.\n",
@@ -691,6 +754,10 @@ static int probe_disk(struct vdc_port *port)
port->disk = g;
+ /* Each segment in a request is up to an aligned page in size. */
+ blk_queue_segment_boundary(q, PAGE_SIZE - 1);
+ blk_queue_max_segment_size(q, PAGE_SIZE);
+
blk_queue_max_segments(q, port->ring_cookies);
blk_queue_max_hw_sectors(q, port->max_xfer_size);
g->major = vdc_major;
@@ -704,9 +771,32 @@ static int probe_disk(struct vdc_port *port)
set_capacity(g, port->vdisk_size);
- printk(KERN_INFO PFX "%s: %u sectors (%u MB)\n",
+ if (vdc_version_supported(port, 1, 1)) {
+ switch (port->vdisk_mtype) {
+ case VD_MEDIA_TYPE_CD:
+ pr_info(PFX "Virtual CDROM %s\n", port->disk_name);
+ g->flags |= GENHD_FL_CD;
+ g->flags |= GENHD_FL_REMOVABLE;
+ set_disk_ro(g, 1);
+ break;
+
+ case VD_MEDIA_TYPE_DVD:
+ pr_info(PFX "Virtual DVD %s\n", port->disk_name);
+ g->flags |= GENHD_FL_CD;
+ g->flags |= GENHD_FL_REMOVABLE;
+ set_disk_ro(g, 1);
+ break;
+
+ case VD_MEDIA_TYPE_FIXED:
+ pr_info(PFX "Virtual Hard disk %s\n", port->disk_name);
+ break;
+ }
+ }
+
+ pr_info(PFX "%s: %u sectors (%u MB) protocol %d.%d\n",
g->disk_name,
- port->vdisk_size, (port->vdisk_size >> (20 - 9)));
+ port->vdisk_size, (port->vdisk_size >> (20 - 9)),
+ port->vio.ver.major, port->vio.ver.minor);
add_disk(g);
@@ -765,6 +855,7 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
else
snprintf(port->disk_name, sizeof(port->disk_name),
VDCBLK_NAME "%c", 'a' + ((int)vdev->dev_no % 26));
+ port->vdisk_size = -1;
err = vio_driver_init(&port->vio, vdev, VDEV_DISK,
vdc_versions, ARRAY_SIZE(vdc_versions),
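The new vdc_getgeo() above no longer asks the server for geometry; it synthesizes a 255-head, 63-sector layout and derives cylinders from capacity, clamping when the disk is too large to express in CHS. A small user-space sketch of that arithmetic (types and names illustrative):

#include <stdint.h>

struct demo_chs { uint16_t cylinders; uint8_t heads, sectors; };

static struct demo_chs demo_fake_geometry(uint64_t nsect)
{
	struct demo_chs g = { .heads = 0xff, .sectors = 0x3f };
	uint64_t cyl = nsect / (g.heads * g.sectors);

	g.cylinders = cyl;	/* may truncate, like the u16 field in hd_geometry */
	if ((uint64_t)(g.cylinders + 1) * g.heads * g.sectors < nsect)
		g.cylinders = 0xffff;	/* capacity exceeds what CHS can express */
	return g;
}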
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 3a8b810b4980..0b13b1c9a01e 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -907,22 +907,17 @@ static int connect_ring(struct backend_info *be)
return 0;
}
-
-/* ** Driver Registration ** */
-
-
static const struct xenbus_device_id xen_blkbk_ids[] = {
{ "vbd" },
{ "" }
};
-
-static DEFINE_XENBUS_DRIVER(xen_blkbk, ,
+static struct xenbus_driver xen_blkbk_driver = {
+ .ids = xen_blkbk_ids,
.probe = xen_blkbk_probe,
.remove = xen_blkbk_remove,
.otherend_changed = frontend_changed
-);
-
+};
int xen_blkif_xenbus_init(void)
{
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 5deb235bd18f..37af03e9d859 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -2055,13 +2055,14 @@ static const struct xenbus_device_id blkfront_ids[] = {
{ "" }
};
-static DEFINE_XENBUS_DRIVER(blkfront, ,
+static struct xenbus_driver blkfront_driver = {
+ .ids = blkfront_ids,
.probe = blkfront_probe,
.remove = blkfront_remove,
.resume = blkfront_resume,
.otherend_changed = blkback_changed,
.is_ready = blkfront_is_ready,
-);
+};
static int __init xlblk_init(void)
{
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 5bb5872ffee6..6653473f2757 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -160,13 +160,11 @@ static int vhci_create_device(struct vhci_data *data, __u8 opcode)
}
static inline ssize_t vhci_get_user(struct vhci_data *data,
- const struct iovec *iov,
- unsigned long count)
+ struct iov_iter *from)
{
- size_t len = iov_length(iov, count);
+ size_t len = iov_iter_count(from);
struct sk_buff *skb;
__u8 pkt_type, opcode;
- unsigned long i;
int ret;
if (len < 2 || len > HCI_MAX_FRAME_SIZE)
@@ -176,12 +174,9 @@ static inline ssize_t vhci_get_user(struct vhci_data *data,
if (!skb)
return -ENOMEM;
- for (i = 0; i < count; i++) {
- if (copy_from_user(skb_put(skb, iov[i].iov_len),
- iov[i].iov_base, iov[i].iov_len)) {
- kfree_skb(skb);
- return -EFAULT;
- }
+ if (copy_from_iter(skb_put(skb, len), len, from) != len) {
+ kfree_skb(skb);
+ return -EFAULT;
}
pkt_type = *((__u8 *) skb->data);
@@ -294,13 +289,12 @@ static ssize_t vhci_read(struct file *file,
return ret;
}
-static ssize_t vhci_write(struct kiocb *iocb, const struct iovec *iov,
- unsigned long count, loff_t pos)
+static ssize_t vhci_write(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct vhci_data *data = file->private_data;
- return vhci_get_user(data, iov, count);
+ return vhci_get_user(data, from);
}
static unsigned int vhci_poll(struct file *file, poll_table *wait)
@@ -365,7 +359,7 @@ static int vhci_release(struct inode *inode, struct file *file)
static const struct file_operations vhci_fops = {
.owner = THIS_MODULE,
.read = vhci_read,
- .aio_write = vhci_write,
+ .write_iter = vhci_write,
.poll = vhci_poll,
.open = vhci_open,
.release = vhci_release,
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 917403fe10da..524b707894ef 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -622,53 +622,23 @@ static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}
-static ssize_t read_zero(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
- size_t written;
-
- if (!count)
- return 0;
-
- if (!access_ok(VERIFY_WRITE, buf, count))
- return -EFAULT;
-
- written = 0;
- while (count) {
- unsigned long unwritten;
- size_t chunk = count;
+ size_t written = 0;
+ while (iov_iter_count(iter)) {
+ size_t chunk = iov_iter_count(iter), n;
if (chunk > PAGE_SIZE)
chunk = PAGE_SIZE; /* Just for latency reasons */
- unwritten = __clear_user(buf, chunk);
- written += chunk - unwritten;
- if (unwritten)
- break;
+ n = iov_iter_zero(chunk, iter);
+ if (!n && iov_iter_count(iter))
+ return written ? written : -EFAULT;
+ written += n;
if (signal_pending(current))
return written ? written : -ERESTARTSYS;
- buf += chunk;
- count -= chunk;
cond_resched();
}
- return written ? written : -EFAULT;
-}
-
-static ssize_t aio_read_zero(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos)
-{
- size_t written = 0;
- unsigned long i;
- ssize_t ret;
-
- for (i = 0; i < nr_segs; i++) {
- ret = read_zero(iocb->ki_filp, iov[i].iov_base, iov[i].iov_len,
- &pos);
- if (ret < 0)
- break;
- written += ret;
- }
-
- return written ? written : -EFAULT;
+ return written;
}
static int mmap_zero(struct file *file, struct vm_area_struct *vma)
@@ -738,7 +708,6 @@ static int open_port(struct inode *inode, struct file *filp)
#define zero_lseek null_lseek
#define full_lseek null_lseek
#define write_zero write_null
-#define read_full read_zero
#define aio_write_zero aio_write_null
#define open_mem open_port
#define open_kmem open_mem
@@ -783,9 +752,9 @@ static const struct file_operations port_fops = {
static const struct file_operations zero_fops = {
.llseek = zero_lseek,
- .read = read_zero,
+ .read = new_sync_read,
.write = write_zero,
- .aio_read = aio_read_zero,
+ .read_iter = read_iter_zero,
.aio_write = aio_write_zero,
.mmap = mmap_zero,
};
@@ -802,7 +771,8 @@ static struct backing_dev_info zero_bdi = {
static const struct file_operations full_fops = {
.llseek = full_lseek,
- .read = read_full,
+ .read = new_sync_read,
+ .read_iter = read_iter_zero,
.write = write_full,
};
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index 2064b4527040..441b44e54226 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -367,12 +367,13 @@ static const struct xenbus_device_id tpmfront_ids[] = {
};
MODULE_ALIAS("xen:vtpm");
-static DEFINE_XENBUS_DRIVER(tpmfront, ,
- .probe = tpmfront_probe,
- .remove = tpmfront_remove,
- .resume = tpmfront_resume,
- .otherend_changed = backend_changed,
- );
+static struct xenbus_driver tpmfront_driver = {
+ .ids = tpmfront_ids,
+ .probe = tpmfront_probe,
+ .remove = tpmfront_remove,
+ .resume = tpmfront_resume,
+ .otherend_changed = backend_changed,
+};
static int __init xen_tpmfront_init(void)
{
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
index 7615180d7ee3..1f49d97a70ea 100644
--- a/drivers/cpufreq/pmac32-cpufreq.c
+++ b/drivers/cpufreq/pmac32-cpufreq.c
@@ -611,7 +611,7 @@ static int __init pmac_cpufreq_setup(void)
struct device_node *cpunode;
const u32 *value;
- if (strstr(cmd_line, "nocpufreq"))
+ if (strstr(boot_command_line, "nocpufreq"))
return 0;
/* Get first CPU node */
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index ee9df5e3f5eb..125150dc6e81 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -223,8 +223,14 @@ void cpuidle_uninstall_idle_handler(void)
{
if (enabled_devices) {
initialized = 0;
- kick_all_cpus_sync();
+ wake_up_all_idle_cpus();
}
+
+ /*
+ * Make sure external observers (such as the scheduler)
+ * are done looking at pointed idle states.
+ */
+ synchronize_rcu();
}
/**
@@ -530,11 +536,6 @@ EXPORT_SYMBOL_GPL(cpuidle_register);
#ifdef CONFIG_SMP
-static void smp_callback(void *v)
-{
- /* we already woke the CPU up, nothing more to do */
-}
-
/*
* This function gets called when a part of the kernel has a new latency
* requirement. This means we need to get all processors out of their C-state,
@@ -544,7 +545,7 @@ static void smp_callback(void *v)
static int cpuidle_latency_notify(struct notifier_block *b,
unsigned long l, void *v)
{
- smp_call_function(smp_callback, NULL, 1);
+ wake_up_all_idle_cpus();
return NOTIFY_OK;
}
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index f3014c448e1e..5be225c2ba98 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -799,7 +799,7 @@ static int dma_buf_describe(struct seq_file *s)
seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\n",
buf_obj->size,
buf_obj->file->f_flags, buf_obj->file->f_mode,
- (long)(buf_obj->file->f_count.counter),
+ file_count(buf_obj->file),
buf_obj->exp_name);
seq_puts(s, "\tAttached Devices:\n");
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 0034c4844428..e9bb1af67c8d 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -52,36 +52,6 @@ static int probed;
#define GET_BITFIELD(v, lo, hi) \
(((v) & GENMASK_ULL(hi, lo)) >> (lo))
-/*
- * sbridge Memory Controller Registers
- */
-
-/*
- * FIXME: For now, let's order by device function, as it makes
- * easier for driver's development process. This table should be
- * moved to pci_id.h when submitted upstream
- */
-#define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0 0x3cf4 /* 12.6 */
-#define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1 0x3cf6 /* 12.7 */
-#define PCI_DEVICE_ID_INTEL_SBRIDGE_BR 0x3cf5 /* 13.6 */
-#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0 0x3ca0 /* 14.0 */
-#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA 0x3ca8 /* 15.0 */
-#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS 0x3c71 /* 15.1 */
-#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0 0x3caa /* 15.2 */
-#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1 0x3cab /* 15.3 */
-#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2 0x3cac /* 15.4 */
-#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3 0x3cad /* 15.5 */
-#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO 0x3cb8 /* 17.0 */
-
- /*
- * Currently, unused, but will be needed in the future
- * implementations, as they hold the error counters
- */
-#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR0 0x3c72 /* 16.2 */
-#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR1 0x3c73 /* 16.3 */
-#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR2 0x3c76 /* 16.6 */
-#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR3 0x3c77 /* 16.7 */
-
/* Devices 12 Function 6, Offsets 0x80 to 0xcc */
static const u32 sbridge_dram_rule[] = {
0x80, 0x88, 0x90, 0x98, 0xa0,
@@ -283,8 +253,9 @@ static const u32 correrrthrsld[] = {
* sbridge structs
*/
-#define NUM_CHANNELS 4
-#define MAX_DIMMS 3 /* Max DIMMS per channel */
+#define NUM_CHANNELS 4
+#define MAX_DIMMS 3 /* Max DIMMS per channel */
+#define CHANNEL_UNSPECIFIED 0xf /* Intel IA32 SDM 15-14 */
enum type {
SANDY_BRIDGE,
@@ -529,7 +500,7 @@ static const struct pci_id_table pci_dev_descr_haswell_table[] = {
* pci_device_id table for which devices we are looking for
*/
static const struct pci_device_id sbridge_pci_tbl[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0)},
{0,} /* 0 terminated list. */
@@ -1991,6 +1962,9 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
/* FIXME: need support for channel mask */
+ if (channel == CHANNEL_UNSPECIFIED)
+ channel = -1;
+
/* Call the helper to output message */
edac_mc_handle_error(tp_event, mci, core_err_cnt,
m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index 0dc57d5ecd10..3a02e5e3e9f3 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -35,7 +35,7 @@
* of extra utility/tracking out of our acquire-ctx. This is provided
* by drm_modeset_lock / drm_modeset_acquire_ctx.
*
- * For basic principles of ww_mutex, see: Documentation/ww-mutex-design.txt
+ * For basic principles of ww_mutex, see: Documentation/locking/ww-mutex-design.txt
*
* The basic usage pattern is to:
*
diff --git a/drivers/gpu/drm/nouveau/nouveau_nvif.c b/drivers/gpu/drm/nouveau/nouveau_nvif.c
index 47ca88623753..6544b84f0303 100644
--- a/drivers/gpu/drm/nouveau/nouveau_nvif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_nvif.c
@@ -40,12 +40,12 @@
#include "nouveau_usif.h"
static void
-nvkm_client_unmap(void *priv, void *ptr, u32 size)
+nvkm_client_unmap(void *priv, void __iomem *ptr, u32 size)
{
iounmap(ptr);
}
-static void *
+static void __iomem *
nvkm_client_map(void *priv, u64 handle, u32 size)
{
return ioremap(handle, size);
diff --git a/drivers/gpu/drm/nouveau/nvif/driver.h b/drivers/gpu/drm/nouveau/nvif/driver.h
index b72a8f0c2758..ac4bdb3ea506 100644
--- a/drivers/gpu/drm/nouveau/nvif/driver.h
+++ b/drivers/gpu/drm/nouveau/nvif/driver.h
@@ -9,8 +9,8 @@ struct nvif_driver {
int (*suspend)(void *priv);
int (*resume)(void *priv);
int (*ioctl)(void *priv, bool super, void *data, u32 size, void **hack);
- void *(*map)(void *priv, u64 handle, u32 size);
- void (*unmap)(void *priv, void *ptr, u32 size);
+ void __iomem *(*map)(void *priv, u64 handle, u32 size);
+ void (*unmap)(void *priv, void __iomem *ptr, u32 size);
bool keep;
};
diff --git a/drivers/gpu/drm/nouveau/nvif/object.h b/drivers/gpu/drm/nouveau/nvif/object.h
index fac3a3bbec44..fe519179b76c 100644
--- a/drivers/gpu/drm/nouveau/nvif/object.h
+++ b/drivers/gpu/drm/nouveau/nvif/object.h
@@ -14,7 +14,7 @@ struct nvif_object {
void *priv; /*XXX: hack */
void (*dtor)(struct nvif_object *);
struct {
- void *ptr;
+ void __iomem *ptr;
u32 size;
} map;
};
@@ -42,7 +42,7 @@ void nvif_object_unmap(struct nvif_object *);
struct nvif_object *_object = nvif_object(a); \
u32 _data; \
if (likely(_object->map.ptr)) \
- _data = ioread##b##_native((u8 *)_object->map.ptr + (c)); \
+ _data = ioread##b##_native((u8 __iomem *)_object->map.ptr + (c)); \
else \
_data = nvif_object_rd(_object, (b) / 8, (c)); \
_data; \
@@ -50,7 +50,7 @@ void nvif_object_unmap(struct nvif_object *);
#define nvif_wr(a,b,c,d) ({ \
struct nvif_object *_object = nvif_object(a); \
if (likely(_object->map.ptr)) \
- iowrite##b##_native((d), (u8 *)_object->map.ptr + (c)); \
+ iowrite##b##_native((d), (u8 __iomem *)_object->map.ptr + (c)); \
else \
nvif_object_wr(_object, (b) / 8, (c), (d)); \
})
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 77711623b973..7bcbf863656e 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -400,7 +400,6 @@ int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible)
}
schedule();
remove_wait_queue(&vga_wait_queue, &wait);
- set_current_state(TASK_RUNNING);
}
return rc;
}
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index f00d048aa583..5286d7ce1f9e 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -280,8 +280,8 @@ config SENSORS_K10TEMP
If you say yes here you get support for the temperature
sensor(s) inside your CPU. Supported are later revisions of
the AMD Family 10h and all revisions of the AMD Family 11h,
- 12h (Llano), 14h (Brazos), 15h (Bulldozer/Trinity/Kaveri) and
- 16h (Kabini/Mullins) microarchitectures.
+ 12h (Llano), 14h (Brazos), 15h (Bulldozer/Trinity/Kaveri/Carrizo)
+ and 16h (Kabini/Mullins) microarchitectures.
This driver can also be built as a module. If so, the module
will be called k10temp.
@@ -839,6 +839,16 @@ config SENSORS_MCP3021
This driver can also be built as a module. If so, the module
will be called mcp3021.
+config SENSORS_MENF21BMC_HWMON
+ tristate "MEN 14F021P00 BMC Hardware Monitoring"
+ depends on MFD_MENF21BMC
+ help
+ Say Y here to include support for the MEN 14F021P00 BMC
+ hardware monitoring.
+
+ This driver can also be built as a module. If so, the module
+ will be called menf21bmc_hwmon.
+
config SENSORS_ADCXX
tristate "National Semiconductor ADCxxxSxxx"
depends on SPI_MASTER
@@ -1077,6 +1087,7 @@ config SENSORS_PC87427
config SENSORS_NTC_THERMISTOR
tristate "NTC thermistor support from Murata"
depends on !OF || IIO=n || IIO
+ depends on THERMAL || !THERMAL_OF
help
This driver supports NTC thermistors sensor reading and its
interpretation. The driver can also monitor the temperature and
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index be28152c9848..c90a7611efaa 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -115,6 +115,7 @@ obj-$(CONFIG_SENSORS_MAX6650) += max6650.o
obj-$(CONFIG_SENSORS_MAX6697) += max6697.o
obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o
obj-$(CONFIG_SENSORS_MCP3021) += mcp3021.o
+obj-$(CONFIG_SENSORS_MENF21BMC_HWMON) += menf21bmc_hwmon.o
obj-$(CONFIG_SENSORS_NCT6683) += nct6683.o
obj-$(CONFIG_SENSORS_NCT6775) += nct6775.o
obj-$(CONFIG_SENSORS_NTC_THERMISTOR) += ntc_thermistor.o
diff --git a/drivers/hwmon/ab8500.c b/drivers/hwmon/ab8500.c
index d844dc806853..8b6a4f4c8774 100644
--- a/drivers/hwmon/ab8500.c
+++ b/drivers/hwmon/ab8500.c
@@ -6,7 +6,7 @@
*
* When the AB8500 thermal warning temperature is reached (threshold cannot
* be changed by SW), an interrupt is set, and if no further action is taken
- * within a certain time frame, pm_power off will be called.
+ * within a certain time frame, kernel_power_off will be called.
*
* When AB8500 thermal shutdown temperature is reached a hardware shutdown of
* the AB8500 will occur.
@@ -21,6 +21,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/power/ab8500.h>
+#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include "abx500.h"
@@ -106,7 +107,7 @@ static void ab8500_thermal_power_off(struct work_struct *work)
dev_warn(&abx500_data->pdev->dev, "Power off due to critical temp\n");
- pm_power_off();
+ kernel_power_off();
}
static ssize_t ab8500_show_name(struct device *dev,
diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c
index 126516414c11..f155b8380481 100644
--- a/drivers/hwmon/ads1015.c
+++ b/drivers/hwmon/ads1015.c
@@ -184,20 +184,18 @@ static int ads1015_get_channels_config_of(struct i2c_client *client)
return -EINVAL;
for_each_child_of_node(client->dev.of_node, node) {
- const __be32 *property;
- int len;
+ u32 pval;
unsigned int channel;
unsigned int pga = ADS1015_DEFAULT_PGA;
unsigned int data_rate = ADS1015_DEFAULT_DATA_RATE;
- property = of_get_property(node, "reg", &len);
- if (!property || len != sizeof(int)) {
+ if (of_property_read_u32(node, "reg", &pval)) {
dev_err(&client->dev, "invalid reg on %s\n",
node->full_name);
continue;
}
- channel = be32_to_cpup(property);
+ channel = pval;
if (channel >= ADS1015_CHANNELS) {
dev_err(&client->dev,
"invalid channel index %d on %s\n",
@@ -205,20 +203,17 @@ static int ads1015_get_channels_config_of(struct i2c_client *client)
continue;
}
- property = of_get_property(node, "ti,gain", &len);
- if (property && len == sizeof(int)) {
- pga = be32_to_cpup(property);
+ if (!of_property_read_u32(node, "ti,gain", &pval)) {
+ pga = pval;
if (pga > 6) {
- dev_err(&client->dev,
- "invalid gain on %s\n",
+ dev_err(&client->dev, "invalid gain on %s\n",
node->full_name);
return -EINVAL;
}
}
- property = of_get_property(node, "ti,datarate", &len);
- if (property && len == sizeof(int)) {
- data_rate = be32_to_cpup(property);
+ if (!of_property_read_u32(node, "ti,datarate", &pval)) {
+ data_rate = pval;
if (data_rate > 7) {
dev_err(&client->dev,
"invalid data_rate on %s\n",
diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c
index d14ab3c45daa..692b3f34d88c 100644
--- a/drivers/hwmon/da9052-hwmon.c
+++ b/drivers/hwmon/da9052-hwmon.c
@@ -26,7 +26,6 @@
struct da9052_hwmon {
struct da9052 *da9052;
- struct device *class_device;
struct mutex hwmon_lock;
};
@@ -190,13 +189,6 @@ static ssize_t da9052_read_vbbat(struct device *dev,
return sprintf(buf, "%d\n", vbbat_reg_to_mv(ret));
}
-static ssize_t da9052_hwmon_show_name(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
-{
- return sprintf(buf, "da9052\n");
-}
-
static ssize_t show_label(struct device *dev,
struct device_attribute *devattr, char *buf)
{
@@ -243,10 +235,7 @@ static SENSOR_DEVICE_ATTR(temp8_input, S_IRUGO, da9052_read_tjunc, NULL,
static SENSOR_DEVICE_ATTR(temp8_label, S_IRUGO, show_label, NULL,
DA9052_ADC_TJUNC);
-static DEVICE_ATTR(name, S_IRUGO, da9052_hwmon_show_name, NULL);
-
-static struct attribute *da9052_attr[] = {
- &dev_attr_name.attr,
+static struct attribute *da9052_attrs[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
&sensor_dev_attr_in0_label.dev_attr.attr,
&sensor_dev_attr_in3_input.dev_attr.attr,
@@ -268,54 +257,29 @@ static struct attribute *da9052_attr[] = {
NULL
};
-static const struct attribute_group da9052_attr_group = {.attrs = da9052_attr};
+ATTRIBUTE_GROUPS(da9052);
static int da9052_hwmon_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct da9052_hwmon *hwmon;
- int ret;
+ struct device *hwmon_dev;
- hwmon = devm_kzalloc(&pdev->dev, sizeof(struct da9052_hwmon),
- GFP_KERNEL);
+ hwmon = devm_kzalloc(dev, sizeof(struct da9052_hwmon), GFP_KERNEL);
if (!hwmon)
return -ENOMEM;
mutex_init(&hwmon->hwmon_lock);
hwmon->da9052 = dev_get_drvdata(pdev->dev.parent);
- platform_set_drvdata(pdev, hwmon);
-
- ret = sysfs_create_group(&pdev->dev.kobj, &da9052_attr_group);
- if (ret)
- goto err_mem;
-
- hwmon->class_device = hwmon_device_register(&pdev->dev);
- if (IS_ERR(hwmon->class_device)) {
- ret = PTR_ERR(hwmon->class_device);
- goto err_sysfs;
- }
-
- return 0;
-
-err_sysfs:
- sysfs_remove_group(&pdev->dev.kobj, &da9052_attr_group);
-err_mem:
- return ret;
-}
-
-static int da9052_hwmon_remove(struct platform_device *pdev)
-{
- struct da9052_hwmon *hwmon = platform_get_drvdata(pdev);
-
- hwmon_device_unregister(hwmon->class_device);
- sysfs_remove_group(&pdev->dev.kobj, &da9052_attr_group);
-
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, "da9052",
+ hwmon,
+ da9052_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
static struct platform_driver da9052_hwmon_driver = {
.probe = da9052_hwmon_probe,
- .remove = da9052_hwmon_remove,
.driver = {
.name = "da9052-hwmon",
.owner = THIS_MODULE,
diff --git a/drivers/hwmon/da9055-hwmon.c b/drivers/hwmon/da9055-hwmon.c
index 35eb7738d711..9916a3fb4bb9 100644
--- a/drivers/hwmon/da9055-hwmon.c
+++ b/drivers/hwmon/da9055-hwmon.c
@@ -36,7 +36,6 @@
struct da9055_hwmon {
struct da9055 *da9055;
- struct device *class_device;
struct mutex hwmon_lock;
struct mutex irq_lock;
struct completion done;
@@ -200,13 +199,6 @@ static ssize_t da9055_read_tjunc(struct device *dev,
+ 3076332, 10000));
}
-static ssize_t da9055_hwmon_show_name(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
-{
- return sprintf(buf, "da9055\n");
-}
-
static ssize_t show_label(struct device *dev,
struct device_attribute *devattr, char *buf)
{
@@ -236,10 +228,7 @@ static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, da9055_read_tjunc, NULL,
static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_label, NULL,
DA9055_ADC_TJUNC);
-static DEVICE_ATTR(name, S_IRUGO, da9055_hwmon_show_name, NULL);
-
-static struct attribute *da9055_attr[] = {
- &dev_attr_name.attr,
+static struct attribute *da9055_attrs[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
&sensor_dev_attr_in0_label.dev_attr.attr,
&sensor_dev_attr_in1_input.dev_attr.attr,
@@ -254,15 +243,16 @@ static struct attribute *da9055_attr[] = {
NULL
};
-static const struct attribute_group da9055_attr_group = {.attrs = da9055_attr};
+ATTRIBUTE_GROUPS(da9055);
static int da9055_hwmon_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct da9055_hwmon *hwmon;
+ struct device *hwmon_dev;
int hwmon_irq, ret;
- hwmon = devm_kzalloc(&pdev->dev, sizeof(struct da9055_hwmon),
- GFP_KERNEL);
+ hwmon = devm_kzalloc(dev, sizeof(struct da9055_hwmon), GFP_KERNEL);
if (!hwmon)
return -ENOMEM;
@@ -272,8 +262,6 @@ static int da9055_hwmon_probe(struct platform_device *pdev)
init_completion(&hwmon->done);
hwmon->da9055 = dev_get_drvdata(pdev->dev.parent);
- platform_set_drvdata(pdev, hwmon);
-
hwmon_irq = platform_get_irq_byname(pdev, "HWMON");
if (hwmon_irq < 0)
return hwmon_irq;
@@ -288,36 +276,14 @@ static int da9055_hwmon_probe(struct platform_device *pdev)
return ret;
}
- ret = sysfs_create_group(&pdev->dev.kobj, &da9055_attr_group);
- if (ret)
- return ret;
-
- hwmon->class_device = hwmon_device_register(&pdev->dev);
- if (IS_ERR(hwmon->class_device)) {
- ret = PTR_ERR(hwmon->class_device);
- goto err;
- }
-
- return 0;
-
-err:
- sysfs_remove_group(&pdev->dev.kobj, &da9055_attr_group);
- return ret;
-}
-
-static int da9055_hwmon_remove(struct platform_device *pdev)
-{
- struct da9055_hwmon *hwmon = platform_get_drvdata(pdev);
-
- sysfs_remove_group(&pdev->dev.kobj, &da9055_attr_group);
- hwmon_device_unregister(hwmon->class_device);
-
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, "da9055",
+ hwmon,
+ da9055_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
static struct platform_driver da9055_hwmon_driver = {
.probe = da9055_hwmon_probe,
- .remove = da9055_hwmon_remove,
.driver = {
.name = "da9055-hwmon",
.owner = THIS_MODULE,
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index f7b46f68ef43..1e7bdcdcb295 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -33,6 +33,9 @@ static bool force;
module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "force loading on processors with erratum 319");
+/* Provide lock for writing to NB_SMU_IND_ADDR */
+static DEFINE_MUTEX(nb_smu_ind_mutex);
+
/* CPUID function 0x80000001, ebx */
#define CPUID_PKGTYPE_MASK 0xf0000000
#define CPUID_PKGTYPE_F 0x00000000
@@ -51,13 +54,38 @@ MODULE_PARM_DESC(force, "force loading on processors with erratum 319");
#define REG_NORTHBRIDGE_CAPABILITIES 0xe8
#define NB_CAP_HTC 0x00000400
+/*
+ * For F15h M60h, functionality of REG_REPORTED_TEMPERATURE
+ * has been moved to D0F0xBC_xD820_0CA4 [Reported Temperature
+ * Control]
+ */
+#define F15H_M60H_REPORTED_TEMP_CTRL_OFFSET 0xd8200ca4
+#define PCI_DEVICE_ID_AMD_15H_M60H_NB_F3 0x1573
+
+static void amd_nb_smu_index_read(struct pci_dev *pdev, unsigned int devfn,
+ int offset, u32 *val)
+{
+ mutex_lock(&nb_smu_ind_mutex);
+ pci_bus_write_config_dword(pdev->bus, devfn,
+ 0xb8, offset);
+ pci_bus_read_config_dword(pdev->bus, devfn,
+ 0xbc, val);
+ mutex_unlock(&nb_smu_ind_mutex);
+}
+
static ssize_t show_temp(struct device *dev,
struct device_attribute *attr, char *buf)
{
u32 regval;
-
- pci_read_config_dword(to_pci_dev(dev),
- REG_REPORTED_TEMPERATURE, &regval);
+ struct pci_dev *pdev = dev_get_drvdata(dev);
+
+ if (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model == 0x60) {
+ amd_nb_smu_index_read(pdev, PCI_DEVFN(0, 0),
+ F15H_M60H_REPORTED_TEMP_CTRL_OFFSET,
+ &regval);
+ } else {
+ pci_read_config_dword(pdev, REG_REPORTED_TEMPERATURE, &regval);
+ }
return sprintf(buf, "%u\n", (regval >> 21) * 125);
}
@@ -75,7 +103,7 @@ static ssize_t show_temp_crit(struct device *dev,
u32 regval;
int value;
- pci_read_config_dword(to_pci_dev(dev),
+ pci_read_config_dword(dev_get_drvdata(dev),
REG_HARDWARE_THERMAL_CONTROL, &regval);
value = ((regval >> 16) & 0x7f) * 500 + 52000;
if (show_hyst)
@@ -83,17 +111,43 @@ static ssize_t show_temp_crit(struct device *dev,
return sprintf(buf, "%d\n", value);
}
-static ssize_t show_name(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "k10temp\n");
-}
-
static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL);
static DEVICE_ATTR(temp1_max, S_IRUGO, show_temp_max, NULL);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_temp_crit, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, show_temp_crit, NULL, 1);
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+
+static umode_t k10temp_is_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct pci_dev *pdev = dev_get_drvdata(dev);
+
+ if (index >= 2) {
+ u32 reg_caps, reg_htc;
+
+ pci_read_config_dword(pdev, REG_NORTHBRIDGE_CAPABILITIES,
+ &reg_caps);
+ pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL,
+ &reg_htc);
+ if (!(reg_caps & NB_CAP_HTC) || !(reg_htc & HTC_ENABLE))
+ return 0;
+ }
+ return attr->mode;
+}
+
+static struct attribute *k10temp_attrs[] = {
+ &dev_attr_temp1_input.attr,
+ &dev_attr_temp1_max.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group k10temp_group = {
+ .attrs = k10temp_attrs,
+ .is_visible = k10temp_is_visible,
+};
+__ATTRIBUTE_GROUPS(k10temp);
static bool has_erratum_319(struct pci_dev *pdev)
{
@@ -132,76 +186,23 @@ static bool has_erratum_319(struct pci_dev *pdev)
static int k10temp_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
- struct device *hwmon_dev;
- u32 reg_caps, reg_htc;
int unreliable = has_erratum_319(pdev);
- int err;
-
- if (unreliable && !force) {
- dev_err(&pdev->dev,
- "unreliable CPU thermal sensor; monitoring disabled\n");
- err = -ENODEV;
- goto exit;
- }
-
- err = device_create_file(&pdev->dev, &dev_attr_temp1_input);
- if (err)
- goto exit;
- err = device_create_file(&pdev->dev, &dev_attr_temp1_max);
- if (err)
- goto exit_remove;
-
- pci_read_config_dword(pdev, REG_NORTHBRIDGE_CAPABILITIES, &reg_caps);
- pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL, &reg_htc);
- if ((reg_caps & NB_CAP_HTC) && (reg_htc & HTC_ENABLE)) {
- err = device_create_file(&pdev->dev,
- &sensor_dev_attr_temp1_crit.dev_attr);
- if (err)
- goto exit_remove;
- err = device_create_file(&pdev->dev,
- &sensor_dev_attr_temp1_crit_hyst.dev_attr);
- if (err)
- goto exit_remove;
- }
-
- err = device_create_file(&pdev->dev, &dev_attr_name);
- if (err)
- goto exit_remove;
-
- hwmon_dev = hwmon_device_register(&pdev->dev);
- if (IS_ERR(hwmon_dev)) {
- err = PTR_ERR(hwmon_dev);
- goto exit_remove;
- }
- pci_set_drvdata(pdev, hwmon_dev);
+ struct device *dev = &pdev->dev;
+ struct device *hwmon_dev;
- if (unreliable && force)
- dev_warn(&pdev->dev,
+ if (unreliable) {
+ if (!force) {
+ dev_err(dev,
+ "unreliable CPU thermal sensor; monitoring disabled\n");
+ return -ENODEV;
+ }
+ dev_warn(dev,
"unreliable CPU thermal sensor; check erratum 319\n");
- return 0;
-
-exit_remove:
- device_remove_file(&pdev->dev, &dev_attr_name);
- device_remove_file(&pdev->dev, &dev_attr_temp1_input);
- device_remove_file(&pdev->dev, &dev_attr_temp1_max);
- device_remove_file(&pdev->dev,
- &sensor_dev_attr_temp1_crit.dev_attr);
- device_remove_file(&pdev->dev,
- &sensor_dev_attr_temp1_crit_hyst.dev_attr);
-exit:
- return err;
-}
+ }
-static void k10temp_remove(struct pci_dev *pdev)
-{
- hwmon_device_unregister(pci_get_drvdata(pdev));
- device_remove_file(&pdev->dev, &dev_attr_name);
- device_remove_file(&pdev->dev, &dev_attr_temp1_input);
- device_remove_file(&pdev->dev, &dev_attr_temp1_max);
- device_remove_file(&pdev->dev,
- &sensor_dev_attr_temp1_crit.dev_attr);
- device_remove_file(&pdev->dev,
- &sensor_dev_attr_temp1_crit_hyst.dev_attr);
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, "k10temp", pdev,
+ k10temp_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
static const struct pci_device_id k10temp_id_table[] = {
@@ -211,6 +212,7 @@ static const struct pci_device_id k10temp_id_table[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
{}
@@ -221,7 +223,6 @@ static struct pci_driver k10temp_driver = {
.name = "k10temp",
.id_table = k10temp_id_table,
.probe = k10temp_probe,
- .remove = k10temp_remove,
};
module_pci_driver(k10temp_driver);
diff --git a/drivers/hwmon/menf21bmc_hwmon.c b/drivers/hwmon/menf21bmc_hwmon.c
new file mode 100644
index 000000000000..c92229d321c9
--- /dev/null
+++ b/drivers/hwmon/menf21bmc_hwmon.c
@@ -0,0 +1,230 @@
+/*
+ * MEN 14F021P00 Board Management Controller (BMC) hwmon driver.
+ *
+ * This is the core hwmon driver of the MEN 14F021P00 BMC.
+ * The BMC monitors the board voltages which can be accessed with this
+ * driver through sysfs.
+ *
+ * Copyright (C) 2014 MEN Mikro Elektronik Nuernberg GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+
+#define DRV_NAME "menf21bmc_hwmon"
+
+#define BMC_VOLT_COUNT 5
+#define MENF21BMC_V33 0
+#define MENF21BMC_V5 1
+#define MENF21BMC_V12 2
+#define MENF21BMC_V5_SB 3
+#define MENF21BMC_VBAT 4
+
+#define IDX_TO_VOLT_MIN_CMD(idx) (0x40 + idx)
+#define IDX_TO_VOLT_MAX_CMD(idx) (0x50 + idx)
+#define IDX_TO_VOLT_INP_CMD(idx) (0x60 + idx)
+
+struct menf21bmc_hwmon {
+ bool valid;
+ struct i2c_client *i2c_client;
+ unsigned long last_update;
+ int in_val[BMC_VOLT_COUNT];
+ int in_min[BMC_VOLT_COUNT];
+ int in_max[BMC_VOLT_COUNT];
+};
+
+static const char *const input_names[] = {
+ [MENF21BMC_V33] = "MON_3_3V",
+ [MENF21BMC_V5] = "MON_5V",
+ [MENF21BMC_V12] = "MON_12V",
+ [MENF21BMC_V5_SB] = "5V_STANDBY",
+ [MENF21BMC_VBAT] = "VBAT"
+};
+
+static struct menf21bmc_hwmon *menf21bmc_hwmon_update(struct device *dev)
+{
+ int i;
+ int val;
+ struct menf21bmc_hwmon *drv_data = dev_get_drvdata(dev);
+ struct menf21bmc_hwmon *data_ret = drv_data;
+
+ if (time_after(jiffies, drv_data->last_update + HZ)
+ || !drv_data->valid) {
+ for (i = 0; i < BMC_VOLT_COUNT; i++) {
+ val = i2c_smbus_read_word_data(drv_data->i2c_client,
+ IDX_TO_VOLT_INP_CMD(i));
+ if (val < 0) {
+ data_ret = ERR_PTR(val);
+ goto abort;
+ }
+ drv_data->in_val[i] = val;
+ }
+ drv_data->last_update = jiffies;
+ drv_data->valid = true;
+ }
+abort:
+ return data_ret;
+}
+
+static int menf21bmc_hwmon_get_volt_limits(struct menf21bmc_hwmon *drv_data)
+{
+ int i, val;
+
+ for (i = 0; i < BMC_VOLT_COUNT; i++) {
+ val = i2c_smbus_read_word_data(drv_data->i2c_client,
+ IDX_TO_VOLT_MIN_CMD(i));
+ if (val < 0)
+ return val;
+
+ drv_data->in_min[i] = val;
+
+ val = i2c_smbus_read_word_data(drv_data->i2c_client,
+ IDX_TO_VOLT_MAX_CMD(i));
+ if (val < 0)
+ return val;
+
+ drv_data->in_max[i] = val;
+ }
+ return 0;
+}
+
+static ssize_t
+show_label(struct device *dev, struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+
+ return sprintf(buf, "%s\n", input_names[attr->index]);
+}
+
+static ssize_t
+show_in(struct device *dev, struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct menf21bmc_hwmon *drv_data = menf21bmc_hwmon_update(dev);
+
+ if (IS_ERR(drv_data))
+ return PTR_ERR(drv_data);
+
+ return sprintf(buf, "%d\n", drv_data->in_val[attr->index]);
+}
+
+static ssize_t
+show_min(struct device *dev, struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct menf21bmc_hwmon *drv_data = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", drv_data->in_min[attr->index]);
+}
+
+static ssize_t
+show_max(struct device *dev, struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct menf21bmc_hwmon *drv_data = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", drv_data->in_max[attr->index]);
+}
+
+#define create_voltage_sysfs(idx) \
+static SENSOR_DEVICE_ATTR(in##idx##_input, S_IRUGO, \
+ show_in, NULL, idx); \
+static SENSOR_DEVICE_ATTR(in##idx##_min, S_IRUGO, \
+ show_min, NULL, idx); \
+static SENSOR_DEVICE_ATTR(in##idx##_max, S_IRUGO, \
+ show_max, NULL, idx); \
+static SENSOR_DEVICE_ATTR(in##idx##_label, S_IRUGO, \
+ show_label, NULL, idx);
+
+create_voltage_sysfs(0);
+create_voltage_sysfs(1);
+create_voltage_sysfs(2);
+create_voltage_sysfs(3);
+create_voltage_sysfs(4);
+
+static struct attribute *menf21bmc_hwmon_attrs[] = {
+ &sensor_dev_attr_in0_input.dev_attr.attr,
+ &sensor_dev_attr_in0_min.dev_attr.attr,
+ &sensor_dev_attr_in0_max.dev_attr.attr,
+ &sensor_dev_attr_in0_label.dev_attr.attr,
+
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+ &sensor_dev_attr_in1_min.dev_attr.attr,
+ &sensor_dev_attr_in1_max.dev_attr.attr,
+ &sensor_dev_attr_in1_label.dev_attr.attr,
+
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_in2_min.dev_attr.attr,
+ &sensor_dev_attr_in2_max.dev_attr.attr,
+ &sensor_dev_attr_in2_label.dev_attr.attr,
+
+ &sensor_dev_attr_in3_input.dev_attr.attr,
+ &sensor_dev_attr_in3_min.dev_attr.attr,
+ &sensor_dev_attr_in3_max.dev_attr.attr,
+ &sensor_dev_attr_in3_label.dev_attr.attr,
+
+ &sensor_dev_attr_in4_input.dev_attr.attr,
+ &sensor_dev_attr_in4_min.dev_attr.attr,
+ &sensor_dev_attr_in4_max.dev_attr.attr,
+ &sensor_dev_attr_in4_label.dev_attr.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(menf21bmc_hwmon);
+
+static int menf21bmc_hwmon_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct menf21bmc_hwmon *drv_data;
+ struct i2c_client *i2c_client = to_i2c_client(pdev->dev.parent);
+ struct device *hwmon_dev;
+
+ drv_data = devm_kzalloc(&pdev->dev, sizeof(struct menf21bmc_hwmon),
+ GFP_KERNEL);
+ if (!drv_data)
+ return -ENOMEM;
+
+ drv_data->i2c_client = i2c_client;
+
+ ret = menf21bmc_hwmon_get_volt_limits(drv_data);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to read sensor limits");
+ return ret;
+ }
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(&pdev->dev,
+ "menf21bmc", drv_data,
+ menf21bmc_hwmon_groups);
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
+
+ dev_info(&pdev->dev, "MEN 14F021P00 BMC hwmon device enabled");
+
+ return 0;
+}
+
+static struct platform_driver menf21bmc_hwmon = {
+ .probe = menf21bmc_hwmon_probe,
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(menf21bmc_hwmon);
+
+MODULE_AUTHOR("Andreas Werner <andreas.werner@men.de>");
+MODULE_DESCRIPTION("MEN 14F021P00 BMC hwmon");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:menf21bmc_hwmon");
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index bd410722cd4b..4ff89b2482e4 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -38,6 +38,7 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
+#include <linux/thermal.h>
struct ntc_compensation {
int temp_c;
@@ -182,6 +183,7 @@ struct ntc_data {
struct device *dev;
int n_comp;
char name[PLATFORM_NAME_SIZE];
+ struct thermal_zone_device *tz;
};
#if defined(CONFIG_OF) && IS_ENABLED(CONFIG_IIO)
@@ -428,6 +430,20 @@ static int ntc_thermistor_get_ohm(struct ntc_data *data)
return -EINVAL;
}
+static int ntc_read_temp(void *dev, long *temp)
+{
+ struct ntc_data *data = dev_get_drvdata(dev);
+ int ohm;
+
+ ohm = ntc_thermistor_get_ohm(data);
+ if (ohm < 0)
+ return ohm;
+
+ *temp = get_temp_mc(data, ohm);
+
+ return 0;
+}
+
static ssize_t ntc_show_name(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -562,6 +578,13 @@ static int ntc_thermistor_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Thermistor type: %s successfully probed.\n",
pdev_id->name);
+ data->tz = thermal_zone_of_sensor_register(data->dev, 0, data->dev,
+ ntc_read_temp, NULL);
+ if (IS_ERR(data->tz)) {
+ dev_dbg(&pdev->dev, "Failed to register to thermal fw.\n");
+ data->tz = NULL;
+ }
+
return 0;
err_after_sysfs:
sysfs_remove_group(&data->dev->kobj, &ntc_attr_group);
@@ -578,6 +601,8 @@ static int ntc_thermistor_remove(struct platform_device *pdev)
sysfs_remove_group(&data->dev->kobj, &ntc_attr_group);
ntc_iio_channel_release(pdata);
+ thermal_zone_of_sensor_unregister(data->dev, data->tz);
+
return 0;
}
diff --git a/drivers/hwmon/smsc47b397.c b/drivers/hwmon/smsc47b397.c
index bd89e87bd6ae..221f0931bf1c 100644
--- a/drivers/hwmon/smsc47b397.c
+++ b/drivers/hwmon/smsc47b397.c
@@ -100,8 +100,6 @@ static u8 smsc47b397_reg_temp[] = {0x25, 0x26, 0x27, 0x80};
struct smsc47b397_data {
unsigned short addr;
- const char *name;
- struct device *hwmon_dev;
struct mutex lock;
struct mutex update_lock;
@@ -202,15 +200,7 @@ static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1);
static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, show_fan, NULL, 2);
static SENSOR_DEVICE_ATTR(fan4_input, S_IRUGO, show_fan, NULL, 3);
-static ssize_t show_name(struct device *dev, struct device_attribute
- *devattr, char *buf)
-{
- struct smsc47b397_data *data = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", data->name);
-}
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
-
-static struct attribute *smsc47b397_attributes[] = {
+static struct attribute *smsc47b397_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp2_input.dev_attr.attr,
&sensor_dev_attr_temp3_input.dev_attr.attr,
@@ -220,23 +210,10 @@ static struct attribute *smsc47b397_attributes[] = {
&sensor_dev_attr_fan3_input.dev_attr.attr,
&sensor_dev_attr_fan4_input.dev_attr.attr,
- &dev_attr_name.attr,
NULL
};
-static const struct attribute_group smsc47b397_group = {
- .attrs = smsc47b397_attributes,
-};
-
-static int smsc47b397_remove(struct platform_device *pdev)
-{
- struct smsc47b397_data *data = platform_get_drvdata(pdev);
-
- hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&pdev->dev.kobj, &smsc47b397_group);
-
- return 0;
-}
+ATTRIBUTE_GROUPS(smsc47b397);
static int smsc47b397_probe(struct platform_device *pdev);
@@ -246,15 +223,14 @@ static struct platform_driver smsc47b397_driver = {
.name = DRVNAME,
},
.probe = smsc47b397_probe,
- .remove = smsc47b397_remove,
};
static int smsc47b397_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct smsc47b397_data *data;
+ struct device *hwmon_dev;
struct resource *res;
- int err = 0;
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!devm_request_region(dev, res->start, SMSC_EXTENT,
@@ -270,26 +246,13 @@ static int smsc47b397_probe(struct platform_device *pdev)
return -ENOMEM;
data->addr = res->start;
- data->name = "smsc47b397";
mutex_init(&data->lock);
mutex_init(&data->update_lock);
- platform_set_drvdata(pdev, data);
-
- err = sysfs_create_group(&dev->kobj, &smsc47b397_group);
- if (err)
- return err;
- data->hwmon_dev = hwmon_device_register(dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto error_remove;
- }
-
- return 0;
-
-error_remove:
- sysfs_remove_group(&dev->kobj, &smsc47b397_group);
- return err;
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, "smsc47b397",
+ data,
+ smsc47b397_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
static int __init smsc47b397_device_add(unsigned short address)
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index e0c404bdc4a8..4977082e081f 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -82,7 +82,6 @@ static int create_file(const char *name, umode_t mode,
{
int error;
- *dentry = NULL;
mutex_lock(&parent->d_inode->i_mutex);
*dentry = lookup_one_len(name, parent, strlen(name));
if (!IS_ERR(*dentry))
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index cab610ccd50e..81854586c081 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -89,7 +89,6 @@ static int create_file(const char *name, umode_t mode,
{
int error;
- *dentry = NULL;
mutex_lock(&parent->d_inode->i_mutex);
*dentry = lookup_one_len(name, parent, strlen(name));
if (!IS_ERR(*dentry))
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index fbfdc10573be..1af28b06c713 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -365,12 +365,13 @@ static const struct xenbus_device_id xenkbd_ids[] = {
{ "" }
};
-static DEFINE_XENBUS_DRIVER(xenkbd, ,
+static struct xenbus_driver xenkbd_driver = {
+ .ids = xenkbd_ids,
.probe = xenkbd_probe,
.remove = xenkbd_remove,
.resume = xenkbd_resume,
.otherend_changed = xenkbd_backend_changed,
-);
+};
static int __init xenkbd_init(void)
{
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 90e108f9e22e..a210338cfeb1 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -468,6 +468,15 @@ config LEDS_OT200
This option enables support for the LEDs on the Bachmann OT200.
Say Y to enable LEDs on the Bachmann OT200.
+config LEDS_MENF21BMC
+ tristate "LED support for the MEN 14F021P00 BMC"
+ depends on LEDS_CLASS && MFD_MENF21BMC
+ help
+ Say Y here to include support for the MEN 14F021P00 BMC LEDs.
+
+ This driver can also be built as a module. If so, the module
+ will be called leds-menf21bmc.
+
comment "LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM)"
config LEDS_BLINKM
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 822dd83ef97a..a2b164741465 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -55,6 +55,7 @@ obj-$(CONFIG_LEDS_LM355x) += leds-lm355x.o
obj-$(CONFIG_LEDS_BLINKM) += leds-blinkm.o
obj-$(CONFIG_LEDS_SYSCON) += leds-syscon.o
obj-$(CONFIG_LEDS_VERSATILE) += leds-versatile.o
+obj-$(CONFIG_LEDS_MENF21BMC) += leds-menf21bmc.o
# LED SPI Drivers
obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o
diff --git a/drivers/leds/leds-menf21bmc.c b/drivers/leds/leds-menf21bmc.c
new file mode 100644
index 000000000000..89dd57769e3b
--- /dev/null
+++ b/drivers/leds/leds-menf21bmc.c
@@ -0,0 +1,131 @@
+/*
+ * MEN 14F021P00 Board Management Controller (BMC) LEDs Driver.
+ *
+ * This is the core LED driver of the MEN 14F021P00 BMC.
+ * There are four LEDs available which can be switched on and off.
+ * STATUS LED, HOT SWAP LED, USER LED 1, USER LED 2
+ *
+ * Copyright (C) 2014 MEN Mikro Elektronik Nuernberg GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/i2c.h>
+
+#define BMC_CMD_LED_GET_SET 0xA0
+#define BMC_BIT_LED_STATUS BIT(0)
+#define BMC_BIT_LED_HOTSWAP BIT(1)
+#define BMC_BIT_LED_USER1 BIT(2)
+#define BMC_BIT_LED_USER2 BIT(3)
+
+struct menf21bmc_led {
+ struct led_classdev cdev;
+ u8 led_bit;
+ const char *name;
+ struct i2c_client *i2c_client;
+};
+
+static struct menf21bmc_led leds[] = {
+ {
+ .name = "menf21bmc:led_status",
+ .led_bit = BMC_BIT_LED_STATUS,
+ },
+ {
+ .name = "menf21bmc:led_hotswap",
+ .led_bit = BMC_BIT_LED_HOTSWAP,
+ },
+ {
+ .name = "menf21bmc:led_user1",
+ .led_bit = BMC_BIT_LED_USER1,
+ },
+ {
+ .name = "menf21bmc:led_user2",
+ .led_bit = BMC_BIT_LED_USER2,
+ }
+};
+
+static DEFINE_MUTEX(led_lock);
+
+static void
+menf21bmc_led_set(struct led_classdev *led_cdev, enum led_brightness value)
+{
+ int led_val;
+ struct menf21bmc_led *led = container_of(led_cdev,
+ struct menf21bmc_led, cdev);
+
+ mutex_lock(&led_lock);
+ led_val = i2c_smbus_read_byte_data(led->i2c_client,
+ BMC_CMD_LED_GET_SET);
+ if (led_val < 0)
+ goto err_out;
+
+ if (value == LED_OFF)
+ led_val &= ~led->led_bit;
+ else
+ led_val |= led->led_bit;
+
+ i2c_smbus_write_byte_data(led->i2c_client,
+ BMC_CMD_LED_GET_SET, led_val);
+err_out:
+ mutex_unlock(&led_lock);
+}
+
+static int menf21bmc_led_probe(struct platform_device *pdev)
+{
+ int i;
+ int ret;
+ struct i2c_client *i2c_client = to_i2c_client(pdev->dev.parent);
+
+ for (i = 0; i < ARRAY_SIZE(leds); i++) {
+ leds[i].cdev.name = leds[i].name;
+ leds[i].cdev.brightness_set = menf21bmc_led_set;
+ leds[i].i2c_client = i2c_client;
+ ret = led_classdev_register(&pdev->dev, &leds[i].cdev);
+ if (ret < 0)
+ goto err_free_leds;
+ }
+ dev_info(&pdev->dev, "MEN 14F021P00 BMC LED device enabled\n");
+
+ return 0;
+
+err_free_leds:
+ dev_err(&pdev->dev, "failed to register LED device\n");
+
+ for (i = i - 1; i >= 0; i--)
+ led_classdev_unregister(&leds[i].cdev);
+
+ return ret;
+}
+
+static int menf21bmc_led_remove(struct platform_device *pdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(leds); i++)
+ led_classdev_unregister(&leds[i].cdev);
+
+ return 0;
+}
+
+static struct platform_driver menf21bmc_led = {
+ .probe = menf21bmc_led_probe,
+ .remove = menf21bmc_led_remove,
+ .driver = {
+ .name = "menf21bmc_led",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(menf21bmc_led);
+
+MODULE_AUTHOR("Andreas Werner <andreas.werner@men.de>");
+MODULE_DESCRIPTION("MEN 14F021P00 BMC led driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:menf21bmc_led");
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
index 9e9c56758a08..226179b975a0 100644
--- a/drivers/macintosh/adb.c
+++ b/drivers/macintosh/adb.c
@@ -411,6 +411,7 @@ adb_poll(void)
return;
adb_controller->poll();
}
+EXPORT_SYMBOL(adb_poll);
static void adb_sync_req_done(struct adb_request *req)
{
@@ -460,6 +461,7 @@ adb_request(struct adb_request *req, void (*done)(struct adb_request *),
return rc;
}
+EXPORT_SYMBOL(adb_request);
/* Ultimately this should return the number of devices with
the given default id.
@@ -495,6 +497,7 @@ adb_register(int default_id, int handler_id, struct adb_ids *ids,
mutex_unlock(&adb_handler_mutex);
return ids->nids;
}
+EXPORT_SYMBOL(adb_register);
int
adb_unregister(int index)
@@ -516,6 +519,7 @@ adb_unregister(int index)
mutex_unlock(&adb_handler_mutex);
return ret;
}
+EXPORT_SYMBOL(adb_unregister);
void
adb_input(unsigned char *buf, int nb, int autopoll)
@@ -582,6 +586,7 @@ adb_try_handler_change(int address, int new_id)
mutex_unlock(&adb_handler_mutex);
return ret;
}
+EXPORT_SYMBOL(adb_try_handler_change);
int
adb_get_infos(int address, int *original_address, int *handler_id)
diff --git a/drivers/macintosh/via-cuda.c b/drivers/macintosh/via-cuda.c
index d61f271d2207..bad18130f125 100644
--- a/drivers/macintosh/via-cuda.c
+++ b/drivers/macintosh/via-cuda.c
@@ -379,6 +379,7 @@ cuda_request(struct adb_request *req, void (*done)(struct adb_request *),
req->reply_expected = 1;
return cuda_write(req);
}
+EXPORT_SYMBOL(cuda_request);
static int
cuda_write(struct adb_request *req)
@@ -441,6 +442,7 @@ cuda_poll(void)
if (cuda_irq)
enable_irq(cuda_irq);
}
+EXPORT_SYMBOL(cuda_poll);
static irqreturn_t
cuda_interrupt(int irq, void *arg)
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index ab472c557d18..0505559f0965 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -720,7 +720,6 @@ static void __wait_for_free_buffer(struct dm_bufio_client *c)
io_schedule();
- set_task_state(current, TASK_RUNNING);
remove_wait_queue(&c->free_buffer_wait, &wait);
dm_bufio_lock(c);
diff --git a/drivers/media/common/b2c2/flexcop.h b/drivers/media/common/b2c2/flexcop.h
index 897b10c85ad9..8942bdacbf61 100644
--- a/drivers/media/common/b2c2/flexcop.h
+++ b/drivers/media/common/b2c2/flexcop.h
@@ -4,7 +4,7 @@
* see flexcop.c for copyright information
*/
#ifndef __FLEXCOP_H__
-#define __FLEXCOP_H___
+#define __FLEXCOP_H__
#define FC_LOG_PREFIX "b2c2-flexcop"
#include "flexcop-common.h"
diff --git a/drivers/media/common/saa7146/saa7146_fops.c b/drivers/media/common/saa7146/saa7146_fops.c
index 6c47f3fe9b0f..b7d63933dae6 100644
--- a/drivers/media/common/saa7146/saa7146_fops.c
+++ b/drivers/media/common/saa7146/saa7146_fops.c
@@ -311,7 +311,6 @@ static int fops_mmap(struct file *file, struct vm_area_struct * vma)
}
default:
BUG();
- return 0;
}
if (mutex_lock_interruptible(vdev->lock))
@@ -399,7 +398,6 @@ static ssize_t fops_read(struct file *file, char __user *data, size_t count, lof
return -EINVAL;
default:
BUG();
- return 0;
}
}
@@ -423,7 +421,6 @@ static ssize_t fops_write(struct file *file, const char __user *data, size_t cou
return -EINVAL;
default:
BUG();
- return -EINVAL;
}
}
diff --git a/drivers/media/common/siano/sms-cards.c b/drivers/media/common/siano/sms-cards.c
index 82769993eeb7..82c7a1289f05 100644
--- a/drivers/media/common/siano/sms-cards.c
+++ b/drivers/media/common/siano/sms-cards.c
@@ -157,6 +157,12 @@ static struct sms_board sms_boards[] = {
.type = SMS_DENVER_2160,
.default_mode = DEVICE_MODE_DAB_TDMB,
},
+ [SMS1XXX_BOARD_PCTV_77E] = {
+ .name = "Hauppauge microStick 77e",
+ .type = SMS_NOVA_B0,
+ .fw[DEVICE_MODE_DVBT_BDA] = SMS_FW_DVB_NOVA_12MHZ_B0,
+ .default_mode = DEVICE_MODE_DVBT_BDA,
+ },
};
struct sms_board *sms_get_board(unsigned id)
diff --git a/drivers/media/common/siano/sms-cards.h b/drivers/media/common/siano/sms-cards.h
index c63b544c49c5..4c4caddf9869 100644
--- a/drivers/media/common/siano/sms-cards.h
+++ b/drivers/media/common/siano/sms-cards.h
@@ -45,6 +45,7 @@
#define SMS1XXX_BOARD_SIANO_RIO 18
#define SMS1XXX_BOARD_SIANO_DENVER_1530 19
#define SMS1XXX_BOARD_SIANO_DENVER_2160 20
+#define SMS1XXX_BOARD_PCTV_77E 21
struct sms_board_gpio_cfg {
int lna_vhf_exist;
diff --git a/drivers/media/common/siano/smscoreapi.c b/drivers/media/common/siano/smscoreapi.c
index 050984c5b1e3..a3677438205e 100644
--- a/drivers/media/common/siano/smscoreapi.c
+++ b/drivers/media/common/siano/smscoreapi.c
@@ -2129,8 +2129,6 @@ int smscore_gpio_get_level(struct smscore_device_t *coredev, u8 pin_num,
static int __init smscore_module_init(void)
{
- int rc = 0;
-
INIT_LIST_HEAD(&g_smscore_notifyees);
INIT_LIST_HEAD(&g_smscore_devices);
kmutex_init(&g_smscore_deviceslock);
@@ -2138,7 +2136,7 @@ static int __init smscore_module_init(void)
INIT_LIST_HEAD(&g_smscore_registry);
kmutex_init(&g_smscore_registrylock);
- return rc;
+ return 0;
}
static void __exit smscore_module_exit(void)
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index c0363f1b6c90..abff803ad69a 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -1087,8 +1087,8 @@ static unsigned int dvb_demux_poll(struct file *file, poll_table *wait)
struct dmxdev_filter *dmxdevfilter = file->private_data;
unsigned int mask = 0;
- if (!dmxdevfilter)
- return -EINVAL;
+ if ((!dmxdevfilter) || dmxdevfilter->dev->exit)
+ return POLLERR;
poll_wait(file, &dmxdevfilter->buffer.queue, wait);
@@ -1181,6 +1181,9 @@ static unsigned int dvb_dvr_poll(struct file *file, poll_table *wait)
dprintk("function : %s\n", __func__);
+ if (dmxdev->exit)
+ return POLLERR;
+
poll_wait(file, &dmxdev->dvr_buffer.queue, wait);
if ((file->f_flags & O_ACCMODE) == O_RDONLY) {
diff --git a/drivers/media/dvb-core/dvb-usb-ids.h b/drivers/media/dvb-core/dvb-usb-ids.h
index 12ce19c98ded..e07a84e7bc56 100644
--- a/drivers/media/dvb-core/dvb-usb-ids.h
+++ b/drivers/media/dvb-core/dvb-usb-ids.h
@@ -144,6 +144,7 @@
#define USB_PID_ITETECH_IT9135 0x9135
#define USB_PID_ITETECH_IT9135_9005 0x9005
#define USB_PID_ITETECH_IT9135_9006 0x9006
+#define USB_PID_ITETECH_IT9303 0x9306
#define USB_PID_KWORLD_399U 0xe399
#define USB_PID_KWORLD_399U_2 0xe400
#define USB_PID_KWORLD_395U 0xe396
@@ -244,6 +245,7 @@
#define USB_PID_TECHNOTREND_CONNECT_S2400 0x3006
#define USB_PID_TECHNOTREND_CONNECT_S2400_8KEEPROM 0x3009
#define USB_PID_TECHNOTREND_CONNECT_CT3650 0x300d
+#define USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI 0x3012
#define USB_PID_TECHNOTREND_TVSTICK_CT2_4400 0x3014
#define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY 0x005a
#define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY_2 0x0081
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index c2a6a0a85813..b8579ee68bd6 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -1934,15 +1934,13 @@ static int dvb_frontend_ioctl_properties(struct file *file,
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int err = 0;
- struct dtv_properties *tvps = NULL;
+ struct dtv_properties *tvps = parg;
struct dtv_property *tvp = NULL;
int i;
dev_dbg(fe->dvb->device, "%s:\n", __func__);
- if(cmd == FE_SET_PROPERTY) {
- tvps = (struct dtv_properties __user *)parg;
-
+ if (cmd == FE_SET_PROPERTY) {
dev_dbg(fe->dvb->device, "%s: properties.num = %d\n", __func__, tvps->num);
dev_dbg(fe->dvb->device, "%s: properties.props = %p\n", __func__, tvps->props);
@@ -1957,7 +1955,8 @@ static int dvb_frontend_ioctl_properties(struct file *file,
goto out;
}
- if (copy_from_user(tvp, tvps->props, tvps->num * sizeof(struct dtv_property))) {
+ if (copy_from_user(tvp, (void __user *)tvps->props,
+ tvps->num * sizeof(struct dtv_property))) {
err = -EFAULT;
goto out;
}
@@ -1972,10 +1971,7 @@ static int dvb_frontend_ioctl_properties(struct file *file,
if (c->state == DTV_TUNE)
dev_dbg(fe->dvb->device, "%s: Property cache is full, tuning\n", __func__);
- } else
- if(cmd == FE_GET_PROPERTY) {
- tvps = (struct dtv_properties __user *)parg;
-
+ } else if (cmd == FE_GET_PROPERTY) {
dev_dbg(fe->dvb->device, "%s: properties.num = %d\n", __func__, tvps->num);
dev_dbg(fe->dvb->device, "%s: properties.props = %p\n", __func__, tvps->props);
@@ -1990,7 +1986,8 @@ static int dvb_frontend_ioctl_properties(struct file *file,
goto out;
}
- if (copy_from_user(tvp, tvps->props, tvps->num * sizeof(struct dtv_property))) {
+ if (copy_from_user(tvp, (void __user *)tvps->props,
+ tvps->num * sizeof(struct dtv_property))) {
err = -EFAULT;
goto out;
}
@@ -2012,7 +2009,8 @@ static int dvb_frontend_ioctl_properties(struct file *file,
(tvp + i)->result = err;
}
- if (copy_to_user(tvps->props, tvp, tvps->num * sizeof(struct dtv_property))) {
+ if (copy_to_user((void __user *)tvps->props, tvp,
+ tvps->num * sizeof(struct dtv_property))) {
err = -EFAULT;
goto out;
}
@@ -2072,6 +2070,23 @@ static int dtv_set_frontend(struct dvb_frontend *fe)
case SYS_DVBC_ANNEX_C:
rolloff = 113;
break;
+ case SYS_DVBS:
+ case SYS_TURBO:
+ rolloff = 135;
+ break;
+ case SYS_DVBS2:
+ switch (c->rolloff) {
+ case ROLLOFF_20:
+ rolloff = 120;
+ break;
+ case ROLLOFF_25:
+ rolloff = 125;
+ break;
+ default:
+ case ROLLOFF_35:
+ rolloff = 135;
+ }
+ break;
default:
break;
}
@@ -2550,7 +2565,9 @@ int dvb_frontend_suspend(struct dvb_frontend *fe)
dev_dbg(fe->dvb->device, "%s: adap=%d fe=%d\n", __func__, fe->dvb->num,
fe->id);
- if (fe->ops.tuner_ops.sleep)
+ if (fe->ops.tuner_ops.suspend)
+ ret = fe->ops.tuner_ops.suspend(fe);
+ else if (fe->ops.tuner_ops.sleep)
ret = fe->ops.tuner_ops.sleep(fe);
if (fe->ops.sleep)
@@ -2572,7 +2589,9 @@ int dvb_frontend_resume(struct dvb_frontend *fe)
if (fe->ops.init)
ret = fe->ops.init(fe);
- if (fe->ops.tuner_ops.init)
+ if (fe->ops.tuner_ops.resume)
+ ret = fe->ops.tuner_ops.resume(fe);
+ else if (fe->ops.tuner_ops.init)
ret = fe->ops.tuner_ops.init(fe);
fe->exit = DVB_FE_NO_EXIT;
diff --git a/drivers/media/dvb-core/dvb_frontend.h b/drivers/media/dvb-core/dvb_frontend.h
index d398de4b6ef4..816269e5f706 100644
--- a/drivers/media/dvb-core/dvb_frontend.h
+++ b/drivers/media/dvb-core/dvb_frontend.h
@@ -201,6 +201,8 @@ struct dvb_tuner_ops {
int (*release)(struct dvb_frontend *fe);
int (*init)(struct dvb_frontend *fe);
int (*sleep)(struct dvb_frontend *fe);
+ int (*suspend)(struct dvb_frontend *fe);
+ int (*resume)(struct dvb_frontend *fe);
/** This is for simple PLLs - set all parameters in one go. */
int (*set_params)(struct dvb_frontend *fe);
diff --git a/drivers/media/dvb-core/dvb_ringbuffer.c b/drivers/media/dvb-core/dvb_ringbuffer.c
index a5712cd7c65f..1100e98a7b1d 100644
--- a/drivers/media/dvb-core/dvb_ringbuffer.c
+++ b/drivers/media/dvb-core/dvb_ringbuffer.c
@@ -166,6 +166,31 @@ ssize_t dvb_ringbuffer_write(struct dvb_ringbuffer *rbuf, const u8 *buf, size_t
return len;
}
+ssize_t dvb_ringbuffer_write_user(struct dvb_ringbuffer *rbuf,
+ const u8 __user *buf, size_t len)
+{
+ int status;
+ size_t todo = len;
+ size_t split;
+
+ split = (rbuf->pwrite + len > rbuf->size) ? rbuf->size - rbuf->pwrite : 0;
+
+ if (split > 0) {
+ status = copy_from_user(rbuf->data+rbuf->pwrite, buf, split);
+ if (status)
+ return len - todo;
+ buf += split;
+ todo -= split;
+ rbuf->pwrite = 0;
+ }
+ status = copy_from_user(rbuf->data+rbuf->pwrite, buf, todo);
+ if (status)
+ return len - todo;
+ rbuf->pwrite = (rbuf->pwrite + todo) % rbuf->size;
+
+ return len;
+}
+
ssize_t dvb_ringbuffer_pkt_write(struct dvb_ringbuffer *rbuf, u8* buf, size_t len)
{
int status;
@@ -297,3 +322,4 @@ EXPORT_SYMBOL(dvb_ringbuffer_flush_spinlock_wakeup);
EXPORT_SYMBOL(dvb_ringbuffer_read_user);
EXPORT_SYMBOL(dvb_ringbuffer_read);
EXPORT_SYMBOL(dvb_ringbuffer_write);
+EXPORT_SYMBOL(dvb_ringbuffer_write_user);
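dvb_ringbuffer_write_user() is the __user counterpart of dvb_ringbuffer_write(): it fills the ring with copy_from_user() and, if a copy fails part way, returns the number of bytes it managed to commit. A minimal sketch of a write() handler built on it (the mydev_* name and the private_data layout are illustrative):

#include <linux/fs.h>
#include "dvb_ringbuffer.h"

/* Hypothetical char-device write() that feeds a DVB ring buffer. */
static ssize_t mydev_write(struct file *file, const char __user *buf,
			   size_t count, loff_t *ppos)
{
	struct dvb_ringbuffer *rb = file->private_data;

	/* refuse writes that cannot fit into the current free space */
	if (dvb_ringbuffer_free(rb) < (ssize_t)count)
		return -ENOSPC;

	/* copies straight from user space, no intermediate bounce buffer */
	return dvb_ringbuffer_write_user(rb, buf, count);
}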
diff --git a/drivers/media/dvb-core/dvb_ringbuffer.h b/drivers/media/dvb-core/dvb_ringbuffer.h
index 41f04dae69b6..9e1e11b7c39c 100644
--- a/drivers/media/dvb-core/dvb_ringbuffer.h
+++ b/drivers/media/dvb-core/dvb_ringbuffer.h
@@ -133,6 +133,8 @@ extern void dvb_ringbuffer_read(struct dvb_ringbuffer *rbuf,
*/
extern ssize_t dvb_ringbuffer_write(struct dvb_ringbuffer *rbuf, const u8 *buf,
size_t len);
+extern ssize_t dvb_ringbuffer_write_user(struct dvb_ringbuffer *rbuf,
+ const u8 __user *buf, size_t len);
/**
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index fe0ddcca192c..5a134547e325 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -471,6 +471,11 @@ config DVB_SI2168
help
Say Y when you want to support this frontend.
+config DVB_AS102_FE
+ tristate
+ depends on DVB_CORE
+ default DVB_AS102
+
comment "DVB-C (cable) frontends"
depends on DVB_CORE
@@ -643,6 +648,14 @@ config DVB_MB86A20S
A driver for Fujitsu mb86a20s ISDB-T/ISDB-Tsb demodulator.
Say Y when you want to support this frontend.
+config DVB_TC90522
+ tristate "Toshiba TC90522"
+ depends on DVB_CORE && I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ A Toshiba TC90522 2xISDB-T + 2xISDB-S demodulator.
+ Say Y when you want to support this frontend.
+
comment "Digital terrestrial only tuners/PLL"
depends on DVB_CORE
@@ -720,6 +733,13 @@ config DVB_A8293
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
+config DVB_SP2
+ tristate "CIMaX SP2"
+ depends on DVB_CORE && I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ CIMaX SP2/SP2HF Common Interface module.
+
config DVB_LGS8GL5
tristate "Silicon Legend LGS-8GL5 demodulator (OFDM)"
depends on DVB_CORE && I2C
diff --git a/drivers/media/dvb-frontends/Makefile b/drivers/media/dvb-frontends/Makefile
index edf103d45920..ba59df63d050 100644
--- a/drivers/media/dvb-frontends/Makefile
+++ b/drivers/media/dvb-frontends/Makefile
@@ -107,10 +107,12 @@ obj-$(CONFIG_DVB_DRXK) += drxk.o
obj-$(CONFIG_DVB_TDA18271C2DD) += tda18271c2dd.o
obj-$(CONFIG_DVB_SI2165) += si2165.o
obj-$(CONFIG_DVB_A8293) += a8293.o
+obj-$(CONFIG_DVB_SP2) += sp2.o
obj-$(CONFIG_DVB_TDA10071) += tda10071.o
obj-$(CONFIG_DVB_RTL2830) += rtl2830.o
obj-$(CONFIG_DVB_RTL2832) += rtl2832.o
obj-$(CONFIG_DVB_RTL2832_SDR) += rtl2832_sdr.o
obj-$(CONFIG_DVB_M88RS2000) += m88rs2000.o
obj-$(CONFIG_DVB_AF9033) += af9033.o
-
+obj-$(CONFIG_DVB_AS102_FE) += as102_fe.o
+obj-$(CONFIG_DVB_TC90522) += tc90522.o
diff --git a/drivers/media/dvb-frontends/af9013.c b/drivers/media/dvb-frontends/af9013.c
index ecf6388d2200..8001690d7576 100644
--- a/drivers/media/dvb-frontends/af9013.c
+++ b/drivers/media/dvb-frontends/af9013.c
@@ -683,7 +683,7 @@ static int af9013_set_frontend(struct dvb_frontend *fe)
switch (c->transmission_mode) {
case TRANSMISSION_MODE_AUTO:
- auto_mode = 1;
+ auto_mode = true;
break;
case TRANSMISSION_MODE_2K:
break;
@@ -693,12 +693,12 @@ static int af9013_set_frontend(struct dvb_frontend *fe)
default:
dev_dbg(&state->i2c->dev, "%s: invalid transmission_mode\n",
__func__);
- auto_mode = 1;
+ auto_mode = true;
}
switch (c->guard_interval) {
case GUARD_INTERVAL_AUTO:
- auto_mode = 1;
+ auto_mode = true;
break;
case GUARD_INTERVAL_1_32:
break;
@@ -714,12 +714,12 @@ static int af9013_set_frontend(struct dvb_frontend *fe)
default:
dev_dbg(&state->i2c->dev, "%s: invalid guard_interval\n",
__func__);
- auto_mode = 1;
+ auto_mode = true;
}
switch (c->hierarchy) {
case HIERARCHY_AUTO:
- auto_mode = 1;
+ auto_mode = true;
break;
case HIERARCHY_NONE:
break;
@@ -734,12 +734,12 @@ static int af9013_set_frontend(struct dvb_frontend *fe)
break;
default:
dev_dbg(&state->i2c->dev, "%s: invalid hierarchy\n", __func__);
- auto_mode = 1;
+ auto_mode = true;
}
switch (c->modulation) {
case QAM_AUTO:
- auto_mode = 1;
+ auto_mode = true;
break;
case QPSK:
break;
@@ -751,7 +751,7 @@ static int af9013_set_frontend(struct dvb_frontend *fe)
break;
default:
dev_dbg(&state->i2c->dev, "%s: invalid modulation\n", __func__);
- auto_mode = 1;
+ auto_mode = true;
}
/* Use HP. How and which case we can switch to LP? */
@@ -759,7 +759,7 @@ static int af9013_set_frontend(struct dvb_frontend *fe)
switch (c->code_rate_HP) {
case FEC_AUTO:
- auto_mode = 1;
+ auto_mode = true;
break;
case FEC_1_2:
break;
@@ -778,12 +778,12 @@ static int af9013_set_frontend(struct dvb_frontend *fe)
default:
dev_dbg(&state->i2c->dev, "%s: invalid code_rate_HP\n",
__func__);
- auto_mode = 1;
+ auto_mode = true;
}
switch (c->code_rate_LP) {
case FEC_AUTO:
- auto_mode = 1;
+ auto_mode = true;
break;
case FEC_1_2:
break;
@@ -804,7 +804,7 @@ static int af9013_set_frontend(struct dvb_frontend *fe)
default:
dev_dbg(&state->i2c->dev, "%s: invalid code_rate_LP\n",
__func__);
- auto_mode = 1;
+ auto_mode = true;
}
switch (c->bandwidth_hz) {
diff --git a/drivers/media/dvb-frontends/af9033.c b/drivers/media/dvb-frontends/af9033.c
index 5c90ea683a7e..63a89c1c59ff 100644
--- a/drivers/media/dvb-frontends/af9033.c
+++ b/drivers/media/dvb-frontends/af9033.c
@@ -24,29 +24,35 @@
/* Max transfer size done by I2C transfer functions */
#define MAX_XFER_SIZE 64
-struct af9033_state {
- struct i2c_adapter *i2c;
+struct af9033_dev {
+ struct i2c_client *client;
struct dvb_frontend fe;
struct af9033_config cfg;
+ bool is_af9035;
+ bool is_it9135;
u32 bandwidth_hz;
bool ts_mode_parallel;
bool ts_mode_serial;
- u32 ber;
- u32 ucb;
- unsigned long last_stat_check;
+ fe_status_t fe_status;
+ u64 post_bit_error_prev; /* for old read_ber we return (curr - prev) */
+ u64 post_bit_error;
+ u64 post_bit_count;
+ u64 error_block_count;
+ u64 total_block_count;
+ struct delayed_work stat_work;
};
/* write multiple registers */
-static int af9033_wr_regs(struct af9033_state *state, u32 reg, const u8 *val,
+static int af9033_wr_regs(struct af9033_dev *dev, u32 reg, const u8 *val,
int len)
{
int ret;
u8 buf[MAX_XFER_SIZE];
struct i2c_msg msg[1] = {
{
- .addr = state->cfg.i2c_addr,
+ .addr = dev->client->addr,
.flags = 0,
.len = 3 + len,
.buf = buf,
@@ -54,9 +60,9 @@ static int af9033_wr_regs(struct af9033_state *state, u32 reg, const u8 *val,
};
if (3 + len > sizeof(buf)) {
- dev_warn(&state->i2c->dev,
- "%s: i2c wr reg=%04x: len=%d is too big!\n",
- KBUILD_MODNAME, reg, len);
+ dev_warn(&dev->client->dev,
+ "i2c wr reg=%04x: len=%d is too big!\n",
+ reg, len);
return -EINVAL;
}
@@ -65,12 +71,12 @@ static int af9033_wr_regs(struct af9033_state *state, u32 reg, const u8 *val,
buf[2] = (reg >> 0) & 0xff;
memcpy(&buf[3], val, len);
- ret = i2c_transfer(state->i2c, msg, 1);
+ ret = i2c_transfer(dev->client->adapter, msg, 1);
if (ret == 1) {
ret = 0;
} else {
- dev_warn(&state->i2c->dev, "%s: i2c wr failed=%d reg=%06x " \
- "len=%d\n", KBUILD_MODNAME, ret, reg, len);
+ dev_warn(&dev->client->dev, "i2c wr failed=%d reg=%06x len=%d\n",
+ ret, reg, len);
ret = -EREMOTEIO;
}
@@ -78,31 +84,31 @@ static int af9033_wr_regs(struct af9033_state *state, u32 reg, const u8 *val,
}
/* read multiple registers */
-static int af9033_rd_regs(struct af9033_state *state, u32 reg, u8 *val, int len)
+static int af9033_rd_regs(struct af9033_dev *dev, u32 reg, u8 *val, int len)
{
int ret;
u8 buf[3] = { (reg >> 16) & 0xff, (reg >> 8) & 0xff,
(reg >> 0) & 0xff };
struct i2c_msg msg[2] = {
{
- .addr = state->cfg.i2c_addr,
+ .addr = dev->client->addr,
.flags = 0,
.len = sizeof(buf),
.buf = buf
}, {
- .addr = state->cfg.i2c_addr,
+ .addr = dev->client->addr,
.flags = I2C_M_RD,
.len = len,
.buf = val
}
};
- ret = i2c_transfer(state->i2c, msg, 2);
+ ret = i2c_transfer(dev->client->adapter, msg, 2);
if (ret == 2) {
ret = 0;
} else {
- dev_warn(&state->i2c->dev, "%s: i2c rd failed=%d reg=%06x " \
- "len=%d\n", KBUILD_MODNAME, ret, reg, len);
+ dev_warn(&dev->client->dev, "i2c rd failed=%d reg=%06x len=%d\n",
+ ret, reg, len);
ret = -EREMOTEIO;
}
@@ -111,19 +117,19 @@ static int af9033_rd_regs(struct af9033_state *state, u32 reg, u8 *val, int len)
/* write single register */
-static int af9033_wr_reg(struct af9033_state *state, u32 reg, u8 val)
+static int af9033_wr_reg(struct af9033_dev *dev, u32 reg, u8 val)
{
- return af9033_wr_regs(state, reg, &val, 1);
+ return af9033_wr_regs(dev, reg, &val, 1);
}
/* read single register */
-static int af9033_rd_reg(struct af9033_state *state, u32 reg, u8 *val)
+static int af9033_rd_reg(struct af9033_dev *dev, u32 reg, u8 *val)
{
- return af9033_rd_regs(state, reg, val, 1);
+ return af9033_rd_regs(dev, reg, val, 1);
}
/* write single register with mask */
-static int af9033_wr_reg_mask(struct af9033_state *state, u32 reg, u8 val,
+static int af9033_wr_reg_mask(struct af9033_dev *dev, u32 reg, u8 val,
u8 mask)
{
int ret;
@@ -131,7 +137,7 @@ static int af9033_wr_reg_mask(struct af9033_state *state, u32 reg, u8 val,
/* no need for read if whole reg is written */
if (mask != 0xff) {
- ret = af9033_rd_regs(state, reg, &tmp, 1);
+ ret = af9033_rd_regs(dev, reg, &tmp, 1);
if (ret)
return ret;
@@ -140,17 +146,17 @@ static int af9033_wr_reg_mask(struct af9033_state *state, u32 reg, u8 val,
val |= tmp;
}
- return af9033_wr_regs(state, reg, &val, 1);
+ return af9033_wr_regs(dev, reg, &val, 1);
}
/* read single register with mask */
-static int af9033_rd_reg_mask(struct af9033_state *state, u32 reg, u8 *val,
+static int af9033_rd_reg_mask(struct af9033_dev *dev, u32 reg, u8 *val,
u8 mask)
{
int ret, i;
u8 tmp;
- ret = af9033_rd_regs(state, reg, &tmp, 1);
+ ret = af9033_rd_regs(dev, reg, &tmp, 1);
if (ret)
return ret;
@@ -167,18 +173,17 @@ static int af9033_rd_reg_mask(struct af9033_state *state, u32 reg, u8 *val,
}
/* write reg val table using reg addr auto increment */
-static int af9033_wr_reg_val_tab(struct af9033_state *state,
+static int af9033_wr_reg_val_tab(struct af9033_dev *dev,
const struct reg_val *tab, int tab_len)
{
#define MAX_TAB_LEN 212
int ret, i, j;
u8 buf[1 + MAX_TAB_LEN];
- dev_dbg(&state->i2c->dev, "%s: tab_len=%d\n", __func__, tab_len);
+ dev_dbg(&dev->client->dev, "tab_len=%d\n", tab_len);
if (tab_len > sizeof(buf)) {
- dev_warn(&state->i2c->dev, "%s: tab len %d is too big\n",
- KBUILD_MODNAME, tab_len);
+ dev_warn(&dev->client->dev, "tab len %d is too big\n", tab_len);
return -EINVAL;
}
@@ -186,7 +191,7 @@ static int af9033_wr_reg_val_tab(struct af9033_state *state,
buf[j] = tab[i].val;
if (i == tab_len - 1 || tab[i].reg != tab[i + 1].reg - 1) {
- ret = af9033_wr_regs(state, tab[i].reg - j, buf, j + 1);
+ ret = af9033_wr_regs(dev, tab[i].reg - j, buf, j + 1);
if (ret < 0)
goto err;
@@ -199,16 +204,16 @@ static int af9033_wr_reg_val_tab(struct af9033_state *state,
return 0;
err:
- dev_dbg(&state->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&dev->client->dev, "failed=%d\n", ret);
return ret;
}
-static u32 af9033_div(struct af9033_state *state, u32 a, u32 b, u32 x)
+static u32 af9033_div(struct af9033_dev *dev, u32 a, u32 b, u32 x)
{
u32 r = 0, c = 0, i;
- dev_dbg(&state->i2c->dev, "%s: a=%d b=%d x=%d\n", __func__, a, b, x);
+ dev_dbg(&dev->client->dev, "a=%d b=%d x=%d\n", a, b, x);
if (a > b) {
c = a / b;
@@ -225,22 +230,15 @@ static u32 af9033_div(struct af9033_state *state, u32 a, u32 b, u32 x)
}
r = (c << (u32)x) + r;
- dev_dbg(&state->i2c->dev, "%s: a=%d b=%d x=%d r=%d r=%x\n",
- __func__, a, b, x, r, r);
+ dev_dbg(&dev->client->dev, "a=%d b=%d x=%d r=%d r=%x\n", a, b, x, r, r);
return r;
}
-static void af9033_release(struct dvb_frontend *fe)
-{
- struct af9033_state *state = fe->demodulator_priv;
-
- kfree(state);
-}
-
static int af9033_init(struct dvb_frontend *fe)
{
- struct af9033_state *state = fe->demodulator_priv;
+ struct af9033_dev *dev = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret, i, len;
const struct reg_val *init;
u8 buf[4];
@@ -248,7 +246,7 @@ static int af9033_init(struct dvb_frontend *fe)
struct reg_val_mask tab[] = {
{ 0x80fb24, 0x00, 0x08 },
{ 0x80004c, 0x00, 0xff },
- { 0x00f641, state->cfg.tuner, 0xff },
+ { 0x00f641, dev->cfg.tuner, 0xff },
{ 0x80f5ca, 0x01, 0x01 },
{ 0x80f715, 0x01, 0x01 },
{ 0x00f41f, 0x04, 0x04 },
@@ -267,88 +265,82 @@ static int af9033_init(struct dvb_frontend *fe)
{ 0x00d830, 0x01, 0xff },
{ 0x00d831, 0x00, 0xff },
{ 0x00d832, 0x00, 0xff },
- { 0x80f985, state->ts_mode_serial, 0x01 },
- { 0x80f986, state->ts_mode_parallel, 0x01 },
+ { 0x80f985, dev->ts_mode_serial, 0x01 },
+ { 0x80f986, dev->ts_mode_parallel, 0x01 },
{ 0x00d827, 0x00, 0xff },
{ 0x00d829, 0x00, 0xff },
- { 0x800045, state->cfg.adc_multiplier, 0xff },
+ { 0x800045, dev->cfg.adc_multiplier, 0xff },
};
/* program clock control */
- clock_cw = af9033_div(state, state->cfg.clock, 1000000ul, 19ul);
+ clock_cw = af9033_div(dev, dev->cfg.clock, 1000000ul, 19ul);
buf[0] = (clock_cw >> 0) & 0xff;
buf[1] = (clock_cw >> 8) & 0xff;
buf[2] = (clock_cw >> 16) & 0xff;
buf[3] = (clock_cw >> 24) & 0xff;
- dev_dbg(&state->i2c->dev, "%s: clock=%d clock_cw=%08x\n",
- __func__, state->cfg.clock, clock_cw);
+ dev_dbg(&dev->client->dev, "clock=%d clock_cw=%08x\n",
+ dev->cfg.clock, clock_cw);
- ret = af9033_wr_regs(state, 0x800025, buf, 4);
+ ret = af9033_wr_regs(dev, 0x800025, buf, 4);
if (ret < 0)
goto err;
/* program ADC control */
for (i = 0; i < ARRAY_SIZE(clock_adc_lut); i++) {
- if (clock_adc_lut[i].clock == state->cfg.clock)
+ if (clock_adc_lut[i].clock == dev->cfg.clock)
break;
}
- adc_cw = af9033_div(state, clock_adc_lut[i].adc, 1000000ul, 19ul);
+ adc_cw = af9033_div(dev, clock_adc_lut[i].adc, 1000000ul, 19ul);
buf[0] = (adc_cw >> 0) & 0xff;
buf[1] = (adc_cw >> 8) & 0xff;
buf[2] = (adc_cw >> 16) & 0xff;
- dev_dbg(&state->i2c->dev, "%s: adc=%d adc_cw=%06x\n",
- __func__, clock_adc_lut[i].adc, adc_cw);
+ dev_dbg(&dev->client->dev, "adc=%d adc_cw=%06x\n",
+ clock_adc_lut[i].adc, adc_cw);
- ret = af9033_wr_regs(state, 0x80f1cd, buf, 3);
+ ret = af9033_wr_regs(dev, 0x80f1cd, buf, 3);
if (ret < 0)
goto err;
/* program register table */
for (i = 0; i < ARRAY_SIZE(tab); i++) {
- ret = af9033_wr_reg_mask(state, tab[i].reg, tab[i].val,
+ ret = af9033_wr_reg_mask(dev, tab[i].reg, tab[i].val,
tab[i].mask);
if (ret < 0)
goto err;
}
- /* feed clock to RF tuner */
- switch (state->cfg.tuner) {
- case AF9033_TUNER_IT9135_38:
- case AF9033_TUNER_IT9135_51:
- case AF9033_TUNER_IT9135_52:
- case AF9033_TUNER_IT9135_60:
- case AF9033_TUNER_IT9135_61:
- case AF9033_TUNER_IT9135_62:
- ret = af9033_wr_reg(state, 0x80fba8, 0x00);
+ /* clock output */
+ if (dev->cfg.dyn0_clk) {
+ ret = af9033_wr_reg(dev, 0x80fba8, 0x00);
if (ret < 0)
goto err;
}
/* settings for TS interface */
- if (state->cfg.ts_mode == AF9033_TS_MODE_USB) {
- ret = af9033_wr_reg_mask(state, 0x80f9a5, 0x00, 0x01);
+ if (dev->cfg.ts_mode == AF9033_TS_MODE_USB) {
+ ret = af9033_wr_reg_mask(dev, 0x80f9a5, 0x00, 0x01);
if (ret < 0)
goto err;
- ret = af9033_wr_reg_mask(state, 0x80f9b5, 0x01, 0x01);
+ ret = af9033_wr_reg_mask(dev, 0x80f9b5, 0x01, 0x01);
if (ret < 0)
goto err;
} else {
- ret = af9033_wr_reg_mask(state, 0x80f990, 0x00, 0x01);
+ ret = af9033_wr_reg_mask(dev, 0x80f990, 0x00, 0x01);
if (ret < 0)
goto err;
- ret = af9033_wr_reg_mask(state, 0x80f9b5, 0x00, 0x01);
+ ret = af9033_wr_reg_mask(dev, 0x80f9b5, 0x00, 0x01);
if (ret < 0)
goto err;
}
/* load OFSM settings */
- dev_dbg(&state->i2c->dev, "%s: load ofsm settings\n", __func__);
- switch (state->cfg.tuner) {
+ dev_dbg(&dev->client->dev, "load ofsm settings\n");
+ switch (dev->cfg.tuner) {
case AF9033_TUNER_IT9135_38:
case AF9033_TUNER_IT9135_51:
case AF9033_TUNER_IT9135_52:
@@ -367,14 +359,13 @@ static int af9033_init(struct dvb_frontend *fe)
break;
}
- ret = af9033_wr_reg_val_tab(state, init, len);
+ ret = af9033_wr_reg_val_tab(dev, init, len);
if (ret < 0)
goto err;
/* load tuner specific settings */
- dev_dbg(&state->i2c->dev, "%s: load tuner specific settings\n",
- __func__);
- switch (state->cfg.tuner) {
+ dev_dbg(&dev->client->dev, "load tuner specific settings\n");
+ switch (dev->cfg.tuner) {
case AF9033_TUNER_TUA9001:
len = ARRAY_SIZE(tuner_init_tua9001);
init = tuner_init_tua9001;
@@ -424,90 +415,108 @@ static int af9033_init(struct dvb_frontend *fe)
init = tuner_init_it9135_62;
break;
default:
- dev_dbg(&state->i2c->dev, "%s: unsupported tuner ID=%d\n",
- __func__, state->cfg.tuner);
+ dev_dbg(&dev->client->dev, "unsupported tuner ID=%d\n",
+ dev->cfg.tuner);
ret = -ENODEV;
goto err;
}
- ret = af9033_wr_reg_val_tab(state, init, len);
+ ret = af9033_wr_reg_val_tab(dev, init, len);
if (ret < 0)
goto err;
- if (state->cfg.ts_mode == AF9033_TS_MODE_SERIAL) {
- ret = af9033_wr_reg_mask(state, 0x00d91c, 0x01, 0x01);
+ if (dev->cfg.ts_mode == AF9033_TS_MODE_SERIAL) {
+ ret = af9033_wr_reg_mask(dev, 0x00d91c, 0x01, 0x01);
if (ret < 0)
goto err;
- ret = af9033_wr_reg_mask(state, 0x00d917, 0x00, 0x01);
+ ret = af9033_wr_reg_mask(dev, 0x00d917, 0x00, 0x01);
if (ret < 0)
goto err;
- ret = af9033_wr_reg_mask(state, 0x00d916, 0x00, 0x01);
+ ret = af9033_wr_reg_mask(dev, 0x00d916, 0x00, 0x01);
if (ret < 0)
goto err;
}
- switch (state->cfg.tuner) {
+ switch (dev->cfg.tuner) {
case AF9033_TUNER_IT9135_60:
case AF9033_TUNER_IT9135_61:
case AF9033_TUNER_IT9135_62:
- ret = af9033_wr_reg(state, 0x800000, 0x01);
+ ret = af9033_wr_reg(dev, 0x800000, 0x01);
if (ret < 0)
goto err;
}
- state->bandwidth_hz = 0; /* force to program all parameters */
+ dev->bandwidth_hz = 0; /* force to program all parameters */
+ /* init stats here in order to signal the app which stats are supported */
+ c->strength.len = 1;
+ c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->cnr.len = 1;
+ c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->block_count.len = 1;
+ c->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->block_error.len = 1;
+ c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_count.len = 1;
+ c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_error.len = 1;
+ c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ /* start statistics polling */
+ schedule_delayed_work(&dev->stat_work, msecs_to_jiffies(2000));
return 0;
err:
- dev_dbg(&state->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&dev->client->dev, "failed=%d\n", ret);
return ret;
}
static int af9033_sleep(struct dvb_frontend *fe)
{
- struct af9033_state *state = fe->demodulator_priv;
+ struct af9033_dev *dev = fe->demodulator_priv;
int ret, i;
u8 tmp;
- ret = af9033_wr_reg(state, 0x80004c, 1);
+ /* stop statistics polling */
+ cancel_delayed_work_sync(&dev->stat_work);
+
+ ret = af9033_wr_reg(dev, 0x80004c, 1);
if (ret < 0)
goto err;
- ret = af9033_wr_reg(state, 0x800000, 0);
+ ret = af9033_wr_reg(dev, 0x800000, 0);
if (ret < 0)
goto err;
for (i = 100, tmp = 1; i && tmp; i--) {
- ret = af9033_rd_reg(state, 0x80004c, &tmp);
+ ret = af9033_rd_reg(dev, 0x80004c, &tmp);
if (ret < 0)
goto err;
usleep_range(200, 10000);
}
- dev_dbg(&state->i2c->dev, "%s: loop=%d\n", __func__, i);
+ dev_dbg(&dev->client->dev, "loop=%d\n", i);
if (i == 0) {
ret = -ETIMEDOUT;
goto err;
}
- ret = af9033_wr_reg_mask(state, 0x80fb24, 0x08, 0x08);
+ ret = af9033_wr_reg_mask(dev, 0x80fb24, 0x08, 0x08);
if (ret < 0)
goto err;
/* prevent current leak (?) */
- if (state->cfg.ts_mode == AF9033_TS_MODE_SERIAL) {
+ if (dev->cfg.ts_mode == AF9033_TS_MODE_SERIAL) {
/* enable parallel TS */
- ret = af9033_wr_reg_mask(state, 0x00d917, 0x00, 0x01);
+ ret = af9033_wr_reg_mask(dev, 0x00d917, 0x00, 0x01);
if (ret < 0)
goto err;
- ret = af9033_wr_reg_mask(state, 0x00d916, 0x01, 0x01);
+ ret = af9033_wr_reg_mask(dev, 0x00d916, 0x01, 0x01);
if (ret < 0)
goto err;
}
@@ -515,7 +524,7 @@ static int af9033_sleep(struct dvb_frontend *fe)
return 0;
err:
- dev_dbg(&state->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&dev->client->dev, "failed=%d\n", ret);
return ret;
}
@@ -533,14 +542,14 @@ static int af9033_get_tune_settings(struct dvb_frontend *fe,
static int af9033_set_frontend(struct dvb_frontend *fe)
{
- struct af9033_state *state = fe->demodulator_priv;
+ struct af9033_dev *dev = fe->demodulator_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret, i, spec_inv, sampling_freq;
u8 tmp, buf[3], bandwidth_reg_val;
u32 if_frequency, freq_cw, adc_freq;
- dev_dbg(&state->i2c->dev, "%s: frequency=%d bandwidth_hz=%d\n",
- __func__, c->frequency, c->bandwidth_hz);
+ dev_dbg(&dev->client->dev, "frequency=%d bandwidth_hz=%d\n",
+ c->frequency, c->bandwidth_hz);
/* check bandwidth */
switch (c->bandwidth_hz) {
@@ -554,8 +563,7 @@ static int af9033_set_frontend(struct dvb_frontend *fe)
bandwidth_reg_val = 0x02;
break;
default:
- dev_dbg(&state->i2c->dev, "%s: invalid bandwidth_hz\n",
- __func__);
+ dev_dbg(&dev->client->dev, "invalid bandwidth_hz\n");
ret = -EINVAL;
goto err;
}
@@ -565,23 +573,23 @@ static int af9033_set_frontend(struct dvb_frontend *fe)
fe->ops.tuner_ops.set_params(fe);
/* program CFOE coefficients */
- if (c->bandwidth_hz != state->bandwidth_hz) {
+ if (c->bandwidth_hz != dev->bandwidth_hz) {
for (i = 0; i < ARRAY_SIZE(coeff_lut); i++) {
- if (coeff_lut[i].clock == state->cfg.clock &&
+ if (coeff_lut[i].clock == dev->cfg.clock &&
coeff_lut[i].bandwidth_hz == c->bandwidth_hz) {
break;
}
}
- ret = af9033_wr_regs(state, 0x800001,
+ ret = af9033_wr_regs(dev, 0x800001,
coeff_lut[i].val, sizeof(coeff_lut[i].val));
}
/* program frequency control */
- if (c->bandwidth_hz != state->bandwidth_hz) {
- spec_inv = state->cfg.spec_inv ? -1 : 1;
+ if (c->bandwidth_hz != dev->bandwidth_hz) {
+ spec_inv = dev->cfg.spec_inv ? -1 : 1;
for (i = 0; i < ARRAY_SIZE(clock_adc_lut); i++) {
- if (clock_adc_lut[i].clock == state->cfg.clock)
+ if (clock_adc_lut[i].clock == dev->cfg.clock)
break;
}
adc_freq = clock_adc_lut[i].adc;
@@ -602,12 +610,12 @@ static int af9033_set_frontend(struct dvb_frontend *fe)
else
sampling_freq *= -1;
- freq_cw = af9033_div(state, sampling_freq, adc_freq, 23ul);
+ freq_cw = af9033_div(dev, sampling_freq, adc_freq, 23ul);
if (spec_inv == -1)
freq_cw = 0x800000 - freq_cw;
- if (state->cfg.adc_multiplier == AF9033_ADC_MULTIPLIER_2X)
+ if (dev->cfg.adc_multiplier == AF9033_ADC_MULTIPLIER_2X)
freq_cw /= 2;
buf[0] = (freq_cw >> 0) & 0xff;
@@ -618,26 +626,26 @@ static int af9033_set_frontend(struct dvb_frontend *fe)
if (if_frequency == 0)
buf[2] = 0;
- ret = af9033_wr_regs(state, 0x800029, buf, 3);
+ ret = af9033_wr_regs(dev, 0x800029, buf, 3);
if (ret < 0)
goto err;
- state->bandwidth_hz = c->bandwidth_hz;
+ dev->bandwidth_hz = c->bandwidth_hz;
}
- ret = af9033_wr_reg_mask(state, 0x80f904, bandwidth_reg_val, 0x03);
+ ret = af9033_wr_reg_mask(dev, 0x80f904, bandwidth_reg_val, 0x03);
if (ret < 0)
goto err;
- ret = af9033_wr_reg(state, 0x800040, 0x00);
+ ret = af9033_wr_reg(dev, 0x800040, 0x00);
if (ret < 0)
goto err;
- ret = af9033_wr_reg(state, 0x800047, 0x00);
+ ret = af9033_wr_reg(dev, 0x800047, 0x00);
if (ret < 0)
goto err;
- ret = af9033_wr_reg_mask(state, 0x80f999, 0x00, 0x01);
+ ret = af9033_wr_reg_mask(dev, 0x80f999, 0x00, 0x01);
if (ret < 0)
goto err;
@@ -646,33 +654,33 @@ static int af9033_set_frontend(struct dvb_frontend *fe)
else
tmp = 0x01; /* UHF */
- ret = af9033_wr_reg(state, 0x80004b, tmp);
+ ret = af9033_wr_reg(dev, 0x80004b, tmp);
if (ret < 0)
goto err;
- ret = af9033_wr_reg(state, 0x800000, 0x00);
+ ret = af9033_wr_reg(dev, 0x800000, 0x00);
if (ret < 0)
goto err;
return 0;
err:
- dev_dbg(&state->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&dev->client->dev, "failed=%d\n", ret);
return ret;
}
static int af9033_get_frontend(struct dvb_frontend *fe)
{
- struct af9033_state *state = fe->demodulator_priv;
+ struct af9033_dev *dev = fe->demodulator_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret;
u8 buf[8];
- dev_dbg(&state->i2c->dev, "%s:\n", __func__);
+ dev_dbg(&dev->client->dev, "\n");
/* read all needed registers */
- ret = af9033_rd_regs(state, 0x80f900, buf, sizeof(buf));
+ ret = af9033_rd_regs(dev, 0x80f900, buf, sizeof(buf));
if (ret < 0)
goto err;
@@ -784,21 +792,21 @@ static int af9033_get_frontend(struct dvb_frontend *fe)
return 0;
err:
- dev_dbg(&state->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&dev->client->dev, "failed=%d\n", ret);
return ret;
}
static int af9033_read_status(struct dvb_frontend *fe, fe_status_t *status)
{
- struct af9033_state *state = fe->demodulator_priv;
+ struct af9033_dev *dev = fe->demodulator_priv;
int ret;
u8 tmp;
*status = 0;
/* radio channel status, 0=no result, 1=has signal, 2=no signal */
- ret = af9033_rd_reg(state, 0x800047, &tmp);
+ ret = af9033_rd_reg(dev, 0x800047, &tmp);
if (ret < 0)
goto err;
@@ -808,7 +816,7 @@ static int af9033_read_status(struct dvb_frontend *fe, fe_status_t *status)
if (tmp != 0x02) {
/* TPS lock */
- ret = af9033_rd_reg_mask(state, 0x80f5a9, &tmp, 0x01);
+ ret = af9033_rd_reg_mask(dev, 0x80f5a9, &tmp, 0x01);
if (ret < 0)
goto err;
@@ -817,7 +825,7 @@ static int af9033_read_status(struct dvb_frontend *fe, fe_status_t *status)
FE_HAS_VITERBI;
/* full lock */
- ret = af9033_rd_reg_mask(state, 0x80f999, &tmp, 0x01);
+ ret = af9033_rd_reg_mask(dev, 0x80f999, &tmp, 0x01);
if (ret < 0)
goto err;
@@ -827,76 +835,38 @@ static int af9033_read_status(struct dvb_frontend *fe, fe_status_t *status)
FE_HAS_LOCK;
}
+ dev->fe_status = *status;
+
return 0;
err:
- dev_dbg(&state->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&dev->client->dev, "failed=%d\n", ret);
return ret;
}
static int af9033_read_snr(struct dvb_frontend *fe, u16 *snr)
{
- struct af9033_state *state = fe->demodulator_priv;
- int ret, i, len;
- u8 buf[3], tmp;
- u32 snr_val;
- const struct val_snr *uninitialized_var(snr_lut);
-
- /* read value */
- ret = af9033_rd_regs(state, 0x80002c, buf, 3);
- if (ret < 0)
- goto err;
+ struct af9033_dev *dev = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &dev->fe.dtv_property_cache;
- snr_val = (buf[2] << 16) | (buf[1] << 8) | buf[0];
-
- /* read current modulation */
- ret = af9033_rd_reg(state, 0x80f903, &tmp);
- if (ret < 0)
- goto err;
-
- switch ((tmp >> 0) & 3) {
- case 0:
- len = ARRAY_SIZE(qpsk_snr_lut);
- snr_lut = qpsk_snr_lut;
- break;
- case 1:
- len = ARRAY_SIZE(qam16_snr_lut);
- snr_lut = qam16_snr_lut;
- break;
- case 2:
- len = ARRAY_SIZE(qam64_snr_lut);
- snr_lut = qam64_snr_lut;
- break;
- default:
- goto err;
- }
-
- for (i = 0; i < len; i++) {
- tmp = snr_lut[i].snr;
-
- if (snr_val < snr_lut[i].val)
- break;
- }
-
- *snr = tmp * 10; /* dB/10 */
+ /* use DVBv5 CNR */
+ if (c->cnr.stat[0].scale == FE_SCALE_DECIBEL)
+ *snr = div_s64(c->cnr.stat[0].svalue, 100); /* 1000x => 10x */
+ else
+ *snr = 0;
return 0;
-
-err:
- dev_dbg(&state->i2c->dev, "%s: failed=%d\n", __func__, ret);
-
- return ret;
}
static int af9033_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
{
- struct af9033_state *state = fe->demodulator_priv;
+ struct af9033_dev *dev = fe->demodulator_priv;
int ret;
u8 strength2;
/* read signal strength of 0-100 scale */
- ret = af9033_rd_reg(state, 0x800048, &strength2);
+ ret = af9033_rd_reg(dev, 0x800048, &strength2);
if (ret < 0)
goto err;
@@ -906,244 +876,225 @@ static int af9033_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
return 0;
err:
- dev_dbg(&state->i2c->dev, "%s: failed=%d\n", __func__, ret);
-
- return ret;
-}
-
-static int af9033_update_ch_stat(struct af9033_state *state)
-{
- int ret = 0;
- u32 err_cnt, bit_cnt;
- u16 abort_cnt;
- u8 buf[7];
-
- /* only update data every half second */
- if (time_after(jiffies, state->last_stat_check + msecs_to_jiffies(500))) {
- ret = af9033_rd_regs(state, 0x800032, buf, sizeof(buf));
- if (ret < 0)
- goto err;
- /* in 8 byte packets? */
- abort_cnt = (buf[1] << 8) + buf[0];
- /* in bits */
- err_cnt = (buf[4] << 16) + (buf[3] << 8) + buf[2];
- /* in 8 byte packets? always(?) 0x2710 = 10000 */
- bit_cnt = (buf[6] << 8) + buf[5];
-
- if (bit_cnt < abort_cnt) {
- abort_cnt = 1000;
- state->ber = 0xffffffff;
- } else {
- /* 8 byte packets, that have not been rejected already */
- bit_cnt -= (u32)abort_cnt;
- if (bit_cnt == 0) {
- state->ber = 0xffffffff;
- } else {
- err_cnt -= (u32)abort_cnt * 8 * 8;
- bit_cnt *= 8 * 8;
- state->ber = err_cnt * (0xffffffff / bit_cnt);
- }
- }
- state->ucb += abort_cnt;
- state->last_stat_check = jiffies;
- }
-
- return 0;
-err:
- dev_dbg(&state->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&dev->client->dev, "failed=%d\n", ret);
return ret;
}
static int af9033_read_ber(struct dvb_frontend *fe, u32 *ber)
{
- struct af9033_state *state = fe->demodulator_priv;
- int ret;
-
- ret = af9033_update_ch_stat(state);
- if (ret < 0)
- return ret;
+ struct af9033_dev *dev = fe->demodulator_priv;
- *ber = state->ber;
+ *ber = (dev->post_bit_error - dev->post_bit_error_prev);
+ dev->post_bit_error_prev = dev->post_bit_error;
return 0;
}
static int af9033_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
{
- struct af9033_state *state = fe->demodulator_priv;
- int ret;
-
- ret = af9033_update_ch_stat(state);
- if (ret < 0)
- return ret;
-
- *ucblocks = state->ucb;
+ struct af9033_dev *dev = fe->demodulator_priv;
+ *ucblocks = dev->error_block_count;
return 0;
}
static int af9033_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
{
- struct af9033_state *state = fe->demodulator_priv;
+ struct af9033_dev *dev = fe->demodulator_priv;
int ret;
- dev_dbg(&state->i2c->dev, "%s: enable=%d\n", __func__, enable);
+ dev_dbg(&dev->client->dev, "enable=%d\n", enable);
- ret = af9033_wr_reg_mask(state, 0x00fa04, enable, 0x01);
+ ret = af9033_wr_reg_mask(dev, 0x00fa04, enable, 0x01);
if (ret < 0)
goto err;
return 0;
err:
- dev_dbg(&state->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&dev->client->dev, "failed=%d\n", ret);
return ret;
}
static int af9033_pid_filter_ctrl(struct dvb_frontend *fe, int onoff)
{
- struct af9033_state *state = fe->demodulator_priv;
+ struct af9033_dev *dev = fe->demodulator_priv;
int ret;
- dev_dbg(&state->i2c->dev, "%s: onoff=%d\n", __func__, onoff);
+ dev_dbg(&dev->client->dev, "onoff=%d\n", onoff);
- ret = af9033_wr_reg_mask(state, 0x80f993, onoff, 0x01);
+ ret = af9033_wr_reg_mask(dev, 0x80f993, onoff, 0x01);
if (ret < 0)
goto err;
return 0;
err:
- dev_dbg(&state->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&dev->client->dev, "failed=%d\n", ret);
return ret;
}
-static int af9033_pid_filter(struct dvb_frontend *fe, int index, u16 pid, int onoff)
+static int af9033_pid_filter(struct dvb_frontend *fe, int index, u16 pid,
+ int onoff)
{
- struct af9033_state *state = fe->demodulator_priv;
+ struct af9033_dev *dev = fe->demodulator_priv;
int ret;
u8 wbuf[2] = {(pid >> 0) & 0xff, (pid >> 8) & 0xff};
- dev_dbg(&state->i2c->dev, "%s: index=%d pid=%04x onoff=%d\n",
- __func__, index, pid, onoff);
+ dev_dbg(&dev->client->dev, "index=%d pid=%04x onoff=%d\n",
+ index, pid, onoff);
if (pid > 0x1fff)
return 0;
- ret = af9033_wr_regs(state, 0x80f996, wbuf, 2);
+ ret = af9033_wr_regs(dev, 0x80f996, wbuf, 2);
if (ret < 0)
goto err;
- ret = af9033_wr_reg(state, 0x80f994, onoff);
+ ret = af9033_wr_reg(dev, 0x80f994, onoff);
if (ret < 0)
goto err;
- ret = af9033_wr_reg(state, 0x80f995, index);
+ ret = af9033_wr_reg(dev, 0x80f995, index);
if (ret < 0)
goto err;
return 0;
err:
- dev_dbg(&state->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&dev->client->dev, "failed=%d\n", ret);
return ret;
}
-static struct dvb_frontend_ops af9033_ops;
-
-struct dvb_frontend *af9033_attach(const struct af9033_config *config,
- struct i2c_adapter *i2c,
- struct af9033_ops *ops)
+static void af9033_stat_work(struct work_struct *work)
{
- int ret;
- struct af9033_state *state;
- u8 buf[8];
+ struct af9033_dev *dev = container_of(work, struct af9033_dev, stat_work.work);
+ struct dtv_frontend_properties *c = &dev->fe.dtv_property_cache;
+ int ret, tmp, i, len;
+ u8 u8tmp, buf[7];
+
+ dev_dbg(&dev->client->dev, "\n");
+
+ /* signal strength */
+ if (dev->fe_status & FE_HAS_SIGNAL) {
+ if (dev->is_af9035) {
+ ret = af9033_rd_reg(dev, 0x80004a, &u8tmp);
+ tmp = -u8tmp * 1000;
+ } else {
+ ret = af9033_rd_reg(dev, 0x8000f7, &u8tmp);
+ tmp = (u8tmp - 100) * 1000;
+ }
+ if (ret)
+ goto err;
- dev_dbg(&i2c->dev, "%s:\n", __func__);
+ c->strength.len = 1;
+ c->strength.stat[0].scale = FE_SCALE_DECIBEL;
+ c->strength.stat[0].svalue = tmp;
+ } else {
+ c->strength.len = 1;
+ c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ }
- /* allocate memory for the internal state */
- state = kzalloc(sizeof(struct af9033_state), GFP_KERNEL);
- if (state == NULL)
- goto err;
+ /* CNR */
+ if (dev->fe_status & FE_HAS_VITERBI) {
+ u32 snr_val;
+ const struct val_snr *snr_lut;
- /* setup the state */
- state->i2c = i2c;
- memcpy(&state->cfg, config, sizeof(struct af9033_config));
+ /* read value */
+ ret = af9033_rd_regs(dev, 0x80002c, buf, 3);
+ if (ret)
+ goto err;
- if (state->cfg.clock != 12000000) {
- dev_err(&state->i2c->dev, "%s: af9033: unsupported clock=%d, " \
- "only 12000000 Hz is supported currently\n",
- KBUILD_MODNAME, state->cfg.clock);
- goto err;
- }
+ snr_val = (buf[2] << 16) | (buf[1] << 8) | (buf[0] << 0);
- /* firmware version */
- ret = af9033_rd_regs(state, 0x0083e9, &buf[0], 4);
- if (ret < 0)
- goto err;
+ /* read current modulation */
+ ret = af9033_rd_reg(dev, 0x80f903, &u8tmp);
+ if (ret)
+ goto err;
- ret = af9033_rd_regs(state, 0x804191, &buf[4], 4);
- if (ret < 0)
- goto err;
+ switch ((u8tmp >> 0) & 3) {
+ case 0:
+ len = ARRAY_SIZE(qpsk_snr_lut);
+ snr_lut = qpsk_snr_lut;
+ break;
+ case 1:
+ len = ARRAY_SIZE(qam16_snr_lut);
+ snr_lut = qam16_snr_lut;
+ break;
+ case 2:
+ len = ARRAY_SIZE(qam64_snr_lut);
+ snr_lut = qam64_snr_lut;
+ break;
+ default:
+ goto err_schedule_delayed_work;
+ }
- dev_info(&state->i2c->dev, "%s: firmware version: LINK=%d.%d.%d.%d " \
- "OFDM=%d.%d.%d.%d\n", KBUILD_MODNAME, buf[0], buf[1],
- buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]);
+ for (i = 0; i < len; i++) {
+ tmp = snr_lut[i].snr * 1000;
+ if (snr_val < snr_lut[i].val)
+ break;
+ }
- /* sleep */
- switch (state->cfg.tuner) {
- case AF9033_TUNER_IT9135_38:
- case AF9033_TUNER_IT9135_51:
- case AF9033_TUNER_IT9135_52:
- case AF9033_TUNER_IT9135_60:
- case AF9033_TUNER_IT9135_61:
- case AF9033_TUNER_IT9135_62:
- /* IT9135 did not like to sleep at that early */
- break;
- default:
- ret = af9033_wr_reg(state, 0x80004c, 1);
- if (ret < 0)
- goto err;
+ c->cnr.len = 1;
+ c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+ c->cnr.stat[0].svalue = tmp;
+ } else {
+ c->cnr.len = 1;
+ c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ }
- ret = af9033_wr_reg(state, 0x800000, 0);
- if (ret < 0)
+ /* UCB/PER/BER */
+ if (dev->fe_status & FE_HAS_LOCK) {
+ /* outer FEC, 204 byte packets */
+ u16 abort_packet_count, rsd_packet_count;
+ /* inner FEC, bits */
+ u32 rsd_bit_err_count;
+
+ /*
+ * Packet count used for measurement is 10000
+ * (rsd_packet_count). Maybe it should be increased?
+ */
+
+ ret = af9033_rd_regs(dev, 0x800032, buf, 7);
+ if (ret)
goto err;
- }
- /* configure internal TS mode */
- switch (state->cfg.ts_mode) {
- case AF9033_TS_MODE_PARALLEL:
- state->ts_mode_parallel = true;
- break;
- case AF9033_TS_MODE_SERIAL:
- state->ts_mode_serial = true;
- break;
- case AF9033_TS_MODE_USB:
- /* usb mode for AF9035 */
- default:
- break;
- }
+ abort_packet_count = (buf[1] << 8) | (buf[0] << 0);
+ rsd_bit_err_count = (buf[4] << 16) | (buf[3] << 8) | buf[2];
+ rsd_packet_count = (buf[6] << 8) | (buf[5] << 0);
- /* create dvb_frontend */
- memcpy(&state->fe.ops, &af9033_ops, sizeof(struct dvb_frontend_ops));
- state->fe.demodulator_priv = state;
+ dev->error_block_count += abort_packet_count;
+ dev->total_block_count += rsd_packet_count;
+ dev->post_bit_error += rsd_bit_err_count;
+ dev->post_bit_count += rsd_packet_count * 204 * 8;
- if (ops) {
- ops->pid_filter = af9033_pid_filter;
- ops->pid_filter_ctrl = af9033_pid_filter_ctrl;
- }
+ c->block_count.len = 1;
+ c->block_count.stat[0].scale = FE_SCALE_COUNTER;
+ c->block_count.stat[0].uvalue = dev->total_block_count;
+
+ c->block_error.len = 1;
+ c->block_error.stat[0].scale = FE_SCALE_COUNTER;
+ c->block_error.stat[0].uvalue = dev->error_block_count;
+
+ c->post_bit_count.len = 1;
+ c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+ c->post_bit_count.stat[0].uvalue = dev->post_bit_count;
- return &state->fe;
+ c->post_bit_error.len = 1;
+ c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+ c->post_bit_error.stat[0].uvalue = dev->post_bit_error;
+ }
+err_schedule_delayed_work:
+ schedule_delayed_work(&dev->stat_work, msecs_to_jiffies(2000));
+ return;
err:
- kfree(state);
- return NULL;
+ dev_dbg(&dev->client->dev, "failed=%d\n", ret);
}
-EXPORT_SYMBOL(af9033_attach);
static struct dvb_frontend_ops af9033_ops = {
.delsys = { SYS_DVBT },
@@ -1170,8 +1121,6 @@ static struct dvb_frontend_ops af9033_ops = {
FE_CAN_MUTE_TS
},
- .release = af9033_release,
-
.init = af9033_init,
.sleep = af9033_sleep,
@@ -1188,6 +1137,150 @@ static struct dvb_frontend_ops af9033_ops = {
.i2c_gate_ctrl = af9033_i2c_gate_ctrl,
};
+static int af9033_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct af9033_config *cfg = client->dev.platform_data;
+ struct af9033_dev *dev;
+ int ret;
+ u8 buf[8];
+ u32 reg;
+
+ /* allocate memory for the internal state */
+ dev = kzalloc(sizeof(struct af9033_dev), GFP_KERNEL);
+ if (dev == NULL) {
+ ret = -ENOMEM;
+ dev_err(&client->dev, "Could not allocate memory for state\n");
+ goto err;
+ }
+
+ /* setup the state */
+ dev->client = client;
+ INIT_DELAYED_WORK(&dev->stat_work, af9033_stat_work);
+ memcpy(&dev->cfg, cfg, sizeof(struct af9033_config));
+
+ if (dev->cfg.clock != 12000000) {
+ ret = -ENODEV;
+ dev_err(&dev->client->dev,
+ "unsupported clock %d Hz, only 12000000 Hz is supported currently\n",
+ dev->cfg.clock);
+ goto err_kfree;
+ }
+
+ /* firmware version */
+ switch (dev->cfg.tuner) {
+ case AF9033_TUNER_IT9135_38:
+ case AF9033_TUNER_IT9135_51:
+ case AF9033_TUNER_IT9135_52:
+ case AF9033_TUNER_IT9135_60:
+ case AF9033_TUNER_IT9135_61:
+ case AF9033_TUNER_IT9135_62:
+ dev->is_it9135 = true;
+ reg = 0x004bfc;
+ break;
+ default:
+ dev->is_af9035 = true;
+ reg = 0x0083e9;
+ break;
+ }
+
+ ret = af9033_rd_regs(dev, reg, &buf[0], 4);
+ if (ret < 0)
+ goto err_kfree;
+
+ ret = af9033_rd_regs(dev, 0x804191, &buf[4], 4);
+ if (ret < 0)
+ goto err_kfree;
+
+ dev_info(&dev->client->dev,
+ "firmware version: LINK %d.%d.%d.%d - OFDM %d.%d.%d.%d\n",
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
+ buf[7]);
+
+ /* sleep */
+ switch (dev->cfg.tuner) {
+ case AF9033_TUNER_IT9135_38:
+ case AF9033_TUNER_IT9135_51:
+ case AF9033_TUNER_IT9135_52:
+ case AF9033_TUNER_IT9135_60:
+ case AF9033_TUNER_IT9135_61:
+ case AF9033_TUNER_IT9135_62:
+ /* IT9135 does not like to sleep this early */
+ break;
+ default:
+ ret = af9033_wr_reg(dev, 0x80004c, 1);
+ if (ret < 0)
+ goto err_kfree;
+
+ ret = af9033_wr_reg(dev, 0x800000, 0);
+ if (ret < 0)
+ goto err_kfree;
+ }
+
+ /* configure internal TS mode */
+ switch (dev->cfg.ts_mode) {
+ case AF9033_TS_MODE_PARALLEL:
+ dev->ts_mode_parallel = true;
+ break;
+ case AF9033_TS_MODE_SERIAL:
+ dev->ts_mode_serial = true;
+ break;
+ case AF9033_TS_MODE_USB:
+ /* usb mode for AF9035 */
+ default:
+ break;
+ }
+
+ /* create dvb_frontend */
+ memcpy(&dev->fe.ops, &af9033_ops, sizeof(struct dvb_frontend_ops));
+ dev->fe.demodulator_priv = dev;
+ *cfg->fe = &dev->fe;
+ if (cfg->ops) {
+ cfg->ops->pid_filter = af9033_pid_filter;
+ cfg->ops->pid_filter_ctrl = af9033_pid_filter_ctrl;
+ }
+ i2c_set_clientdata(client, dev);
+
+ dev_info(&dev->client->dev, "Afatech AF9033 successfully attached\n");
+ return 0;
+err_kfree:
+ kfree(dev);
+err:
+ dev_dbg(&client->dev, "failed=%d\n", ret);
+ return ret;
+}
+
+static int af9033_remove(struct i2c_client *client)
+{
+ struct af9033_dev *dev = i2c_get_clientdata(client);
+
+ dev_dbg(&dev->client->dev, "\n");
+
+ dev->fe.ops.release = NULL;
+ dev->fe.demodulator_priv = NULL;
+ kfree(dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id af9033_id_table[] = {
+ {"af9033", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, af9033_id_table);
+
+static struct i2c_driver af9033_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "af9033",
+ },
+ .probe = af9033_probe,
+ .remove = af9033_remove,
+ .id_table = af9033_id_table,
+};
+
+module_i2c_driver(af9033_driver);
+
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_DESCRIPTION("Afatech AF9033 DVB-T demodulator driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/af9033.h b/drivers/media/dvb-frontends/af9033.h
index 539f4db678b8..6ad22b69a636 100644
--- a/drivers/media/dvb-frontends/af9033.h
+++ b/drivers/media/dvb-frontends/af9033.h
@@ -24,13 +24,12 @@
#include <linux/kconfig.h>
+/*
+ * I2C address (TODO: are these in 8-bit format?)
+ * 0x38, 0x3a, 0x3c, 0x3e
+ */
struct af9033_config {
/*
- * I2C address
- */
- u8 i2c_addr;
-
- /*
* clock Hz
* 12000000, 22000000, 24000000, 34000000, 32000000, 28000000, 26000000,
* 30000000, 36000000, 20480000, 16384000
@@ -75,8 +74,23 @@ struct af9033_config {
* input spectrum inversion
*/
bool spec_inv;
-};
+ /*
+ * enable the DYN0 clock output (see the "clock output" block in af9033_init)
+ */
+ bool dyn0_clk;
+
+ /*
+ * PID filter ops
+ */
+ struct af9033_ops *ops;
+
+ /*
+ * pointer used to return the allocated frontend to the caller
+ * (set by this driver's probe)
+ */
+ struct dvb_frontend **fe;
+};
struct af9033_ops {
int (*pid_filter_ctrl)(struct dvb_frontend *fe, int onoff);
@@ -84,36 +98,4 @@ struct af9033_ops {
int onoff);
};
-
-#if IS_ENABLED(CONFIG_DVB_AF9033)
-extern
-struct dvb_frontend *af9033_attach(const struct af9033_config *config,
- struct i2c_adapter *i2c,
- struct af9033_ops *ops);
-
-#else
-static inline
-struct dvb_frontend *af9033_attach(const struct af9033_config *config,
- struct i2c_adapter *i2c,
- struct af9033_ops *ops)
-{
- pr_warn("%s: driver disabled by Kconfig\n", __func__);
- return NULL;
-}
-
-static inline int af9033_pid_filter_ctrl(struct dvb_frontend *fe, int onoff)
-{
- pr_warn("%s: driver disabled by Kconfig\n", __func__);
- return -ENODEV;
-}
-
-static inline int af9033_pid_filter(struct dvb_frontend *fe, int index, u16 pid,
- int onoff)
-{
- pr_warn("%s: driver disabled by Kconfig\n", __func__);
- return -ENODEV;
-}
-
-#endif
-
#endif /* AF9033_H */
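With af9033_attach() removed, a bridge driver binds the demodulator by registering an I2C client named "af9033" whose platform data is the af9033_config above; the frontend comes back through cfg.fe and the PID-filter hooks through cfg.ops, matching what the new probe does. A rough sketch of that binding (the bridge_* names and the config values are illustrative):

#include <linux/i2c.h>
#include <linux/kmod.h>
#include "af9033.h"

/* Hypothetical bridge helper: instantiate the af9033 demod on 'adap'. */
static int bridge_bind_af9033(struct i2c_adapter *adap, u8 addr,
			      struct dvb_frontend **fe,
			      struct af9033_ops *ops)
{
	struct af9033_config cfg = {
		.clock   = 12000000,		/* the only clock probe accepts */
		.tuner   = AF9033_TUNER_TUA9001,
		.ts_mode = AF9033_TS_MODE_USB,
		.fe      = fe,			/* probe stores the frontend here */
		.ops     = ops,			/* probe fills in the PID filter ops */
	};
	struct i2c_board_info info = {
		.addr = addr,
		.platform_data = &cfg,		/* copied by af9033_probe() */
	};
	struct i2c_client *client;

	strlcpy(info.type, "af9033", I2C_NAME_SIZE);
	request_module("%s", info.type);

	client = i2c_new_device(adap, &info);
	if (!client)
		return -ENODEV;
	if (!client->dev.driver) {
		i2c_unregister_device(client);
		return -ENODEV;
	}

	return 0;
}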
diff --git a/drivers/media/dvb-frontends/af9033_priv.h b/drivers/media/dvb-frontends/af9033_priv.h
index ded7b67d7526..c12c92cb5855 100644
--- a/drivers/media/dvb-frontends/af9033_priv.h
+++ b/drivers/media/dvb-frontends/af9033_priv.h
@@ -24,6 +24,7 @@
#include "dvb_frontend.h"
#include "af9033.h"
+#include <linux/math64.h>
struct reg_val {
u32 reg;
diff --git a/drivers/media/dvb-frontends/as102_fe.c b/drivers/media/dvb-frontends/as102_fe.c
new file mode 100644
index 000000000000..493665899565
--- /dev/null
+++ b/drivers/media/dvb-frontends/as102_fe.c
@@ -0,0 +1,480 @@
+/*
+ * Abilis Systems Single DVB-T Receiver
+ * Copyright (C) 2008 Pierrick Hascoet <pierrick.hascoet@abilis.com>
+ * Copyright (C) 2010 Devin Heitmueller <dheitmueller@kernellabs.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dvb_frontend.h>
+
+#include "as102_fe.h"
+
+struct as102_state {
+ struct dvb_frontend frontend;
+ struct as10x_demod_stats demod_stats;
+
+ const struct as102_fe_ops *ops;
+ void *priv;
+ uint8_t elna_cfg;
+
+ /* signal strength */
+ uint16_t signal_strength;
+ /* bit error rate */
+ uint32_t ber;
+};
+
+static uint8_t as102_fe_get_code_rate(fe_code_rate_t arg)
+{
+ uint8_t c;
+
+ switch (arg) {
+ case FEC_1_2:
+ c = CODE_RATE_1_2;
+ break;
+ case FEC_2_3:
+ c = CODE_RATE_2_3;
+ break;
+ case FEC_3_4:
+ c = CODE_RATE_3_4;
+ break;
+ case FEC_5_6:
+ c = CODE_RATE_5_6;
+ break;
+ case FEC_7_8:
+ c = CODE_RATE_7_8;
+ break;
+ default:
+ c = CODE_RATE_UNKNOWN;
+ break;
+ }
+
+ return c;
+}
+
+static int as102_fe_set_frontend(struct dvb_frontend *fe)
+{
+ struct as102_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ struct as10x_tune_args tune_args = { 0 };
+
+ /* set frequency */
+ tune_args.freq = c->frequency / 1000;
+
+ /* fix interleaving_mode */
+ tune_args.interleaving_mode = INTLV_NATIVE;
+
+ switch (c->bandwidth_hz) {
+ case 8000000:
+ tune_args.bandwidth = BW_8_MHZ;
+ break;
+ case 7000000:
+ tune_args.bandwidth = BW_7_MHZ;
+ break;
+ case 6000000:
+ tune_args.bandwidth = BW_6_MHZ;
+ break;
+ default:
+ tune_args.bandwidth = BW_8_MHZ;
+ }
+
+ switch (c->guard_interval) {
+ case GUARD_INTERVAL_1_32:
+ tune_args.guard_interval = GUARD_INT_1_32;
+ break;
+ case GUARD_INTERVAL_1_16:
+ tune_args.guard_interval = GUARD_INT_1_16;
+ break;
+ case GUARD_INTERVAL_1_8:
+ tune_args.guard_interval = GUARD_INT_1_8;
+ break;
+ case GUARD_INTERVAL_1_4:
+ tune_args.guard_interval = GUARD_INT_1_4;
+ break;
+ case GUARD_INTERVAL_AUTO:
+ default:
+ tune_args.guard_interval = GUARD_UNKNOWN;
+ break;
+ }
+
+ switch (c->modulation) {
+ case QPSK:
+ tune_args.modulation = CONST_QPSK;
+ break;
+ case QAM_16:
+ tune_args.modulation = CONST_QAM16;
+ break;
+ case QAM_64:
+ tune_args.modulation = CONST_QAM64;
+ break;
+ default:
+ tune_args.modulation = CONST_UNKNOWN;
+ break;
+ }
+
+ switch (c->transmission_mode) {
+ case TRANSMISSION_MODE_2K:
+ tune_args.transmission_mode = TRANS_MODE_2K;
+ break;
+ case TRANSMISSION_MODE_8K:
+ tune_args.transmission_mode = TRANS_MODE_8K;
+ break;
+ default:
+ tune_args.transmission_mode = TRANS_MODE_UNKNOWN;
+ }
+
+ switch (c->hierarchy) {
+ case HIERARCHY_NONE:
+ tune_args.hierarchy = HIER_NONE;
+ break;
+ case HIERARCHY_1:
+ tune_args.hierarchy = HIER_ALPHA_1;
+ break;
+ case HIERARCHY_2:
+ tune_args.hierarchy = HIER_ALPHA_2;
+ break;
+ case HIERARCHY_4:
+ tune_args.hierarchy = HIER_ALPHA_4;
+ break;
+ case HIERARCHY_AUTO:
+ tune_args.hierarchy = HIER_UNKNOWN;
+ break;
+ }
+
+ pr_debug("as102: tuner parameters: freq: %d bw: 0x%02x gi: 0x%02x\n",
+ c->frequency,
+ tune_args.bandwidth,
+ tune_args.guard_interval);
+
+ /*
+ * Detect a hierarchy selection
+ * if HP/LP are both set to FEC_NONE, HP will be selected.
+ */
+ if ((tune_args.hierarchy != HIER_NONE) &&
+ ((c->code_rate_LP == FEC_NONE) ||
+ (c->code_rate_HP == FEC_NONE))) {
+
+ if (c->code_rate_LP == FEC_NONE) {
+ tune_args.hier_select = HIER_HIGH_PRIORITY;
+ tune_args.code_rate =
+ as102_fe_get_code_rate(c->code_rate_HP);
+ }
+
+ if (c->code_rate_HP == FEC_NONE) {
+ tune_args.hier_select = HIER_LOW_PRIORITY;
+ tune_args.code_rate =
+ as102_fe_get_code_rate(c->code_rate_LP);
+ }
+
+ pr_debug("as102: \thierarchy: 0x%02x selected: %s code_rate_%s: 0x%02x\n",
+ tune_args.hierarchy,
+ tune_args.hier_select == HIER_HIGH_PRIORITY ?
+ "HP" : "LP",
+ tune_args.hier_select == HIER_HIGH_PRIORITY ?
+ "HP" : "LP",
+ tune_args.code_rate);
+ } else {
+ tune_args.code_rate =
+ as102_fe_get_code_rate(c->code_rate_HP);
+ }
+
+ /* Set frontend arguments */
+ return state->ops->set_tune(state->priv, &tune_args);
+}
+
+static int as102_fe_get_frontend(struct dvb_frontend *fe)
+{
+ struct as102_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int ret = 0;
+ struct as10x_tps tps = { 0 };
+
+ /* send abilis command: GET_TPS */
+ ret = state->ops->get_tps(state->priv, &tps);
+ if (ret < 0)
+ return ret;
+
+ /* extract constellation */
+ switch (tps.modulation) {
+ case CONST_QPSK:
+ c->modulation = QPSK;
+ break;
+ case CONST_QAM16:
+ c->modulation = QAM_16;
+ break;
+ case CONST_QAM64:
+ c->modulation = QAM_64;
+ break;
+ }
+
+ /* extract hierarchy */
+ switch (tps.hierarchy) {
+ case HIER_NONE:
+ c->hierarchy = HIERARCHY_NONE;
+ break;
+ case HIER_ALPHA_1:
+ c->hierarchy = HIERARCHY_1;
+ break;
+ case HIER_ALPHA_2:
+ c->hierarchy = HIERARCHY_2;
+ break;
+ case HIER_ALPHA_4:
+ c->hierarchy = HIERARCHY_4;
+ break;
+ }
+
+ /* extract code rate HP */
+ switch (tps.code_rate_HP) {
+ case CODE_RATE_1_2:
+ c->code_rate_HP = FEC_1_2;
+ break;
+ case CODE_RATE_2_3:
+ c->code_rate_HP = FEC_2_3;
+ break;
+ case CODE_RATE_3_4:
+ c->code_rate_HP = FEC_3_4;
+ break;
+ case CODE_RATE_5_6:
+ c->code_rate_HP = FEC_5_6;
+ break;
+ case CODE_RATE_7_8:
+ c->code_rate_HP = FEC_7_8;
+ break;
+ }
+
+ /* extract code rate LP */
+ switch (tps.code_rate_LP) {
+ case CODE_RATE_1_2:
+ c->code_rate_LP = FEC_1_2;
+ break;
+ case CODE_RATE_2_3:
+ c->code_rate_LP = FEC_2_3;
+ break;
+ case CODE_RATE_3_4:
+ c->code_rate_LP = FEC_3_4;
+ break;
+ case CODE_RATE_5_6:
+ c->code_rate_LP = FEC_5_6;
+ break;
+ case CODE_RATE_7_8:
+ c->code_rate_LP = FEC_7_8;
+ break;
+ }
+
+ /* extract guard interval */
+ switch (tps.guard_interval) {
+ case GUARD_INT_1_32:
+ c->guard_interval = GUARD_INTERVAL_1_32;
+ break;
+ case GUARD_INT_1_16:
+ c->guard_interval = GUARD_INTERVAL_1_16;
+ break;
+ case GUARD_INT_1_8:
+ c->guard_interval = GUARD_INTERVAL_1_8;
+ break;
+ case GUARD_INT_1_4:
+ c->guard_interval = GUARD_INTERVAL_1_4;
+ break;
+ }
+
+ /* extract transmission mode */
+ switch (tps.transmission_mode) {
+ case TRANS_MODE_2K:
+ c->transmission_mode = TRANSMISSION_MODE_2K;
+ break;
+ case TRANS_MODE_8K:
+ c->transmission_mode = TRANSMISSION_MODE_8K;
+ break;
+ }
+
+ return 0;
+}
+
+static int as102_fe_get_tune_settings(struct dvb_frontend *fe,
+ struct dvb_frontend_tune_settings *settings) {
+
+ settings->min_delay_ms = 1000;
+
+ return 0;
+}
+
+static int as102_fe_read_status(struct dvb_frontend *fe, fe_status_t *status)
+{
+ int ret = 0;
+ struct as102_state *state = fe->demodulator_priv;
+ struct as10x_tune_status tstate = { 0 };
+
+ /* send abilis command: GET_TUNE_STATUS */
+ ret = state->ops->get_status(state->priv, &tstate);
+ if (ret < 0)
+ return ret;
+
+ state->signal_strength = tstate.signal_strength;
+ state->ber = tstate.BER;
+
+ switch (tstate.tune_state) {
+ case TUNE_STATUS_SIGNAL_DVB_OK:
+ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER;
+ break;
+ case TUNE_STATUS_STREAM_DETECTED:
+ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_SYNC |
+ FE_HAS_VITERBI;
+ break;
+ case TUNE_STATUS_STREAM_TUNED:
+ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_SYNC |
+ FE_HAS_LOCK | FE_HAS_VITERBI;
+ break;
+ default:
+ *status = TUNE_STATUS_NOT_TUNED;
+ }
+
+ pr_debug("as102: tuner status: 0x%02x, strength %d, per: %d, ber: %d\n",
+ tstate.tune_state, tstate.signal_strength,
+ tstate.PER, tstate.BER);
+
+ if (!(*status & FE_HAS_LOCK)) {
+ memset(&state->demod_stats, 0, sizeof(state->demod_stats));
+ return 0;
+ }
+
+ ret = state->ops->get_stats(state->priv, &state->demod_stats);
+ if (ret < 0)
+ memset(&state->demod_stats, 0, sizeof(state->demod_stats));
+
+ return ret;
+}
+
+/*
+ * Note:
+ * - in AS102 SNR=MER
+ * - the SNR will be returned in linear terms, i.e. not in dB
+ * - the accuracy equals ±2dB for an SNR range from 4dB to 30dB
+ * - the accuracy is >2dB for SNR values outside this range
+ */
+static int as102_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
+{
+ struct as102_state *state = fe->demodulator_priv;
+
+ *snr = state->demod_stats.mer;
+
+ return 0;
+}
+
+static int as102_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
+{
+ struct as102_state *state = fe->demodulator_priv;
+
+ *ber = state->ber;
+
+ return 0;
+}
+
+static int as102_fe_read_signal_strength(struct dvb_frontend *fe,
+ u16 *strength)
+{
+ struct as102_state *state = fe->demodulator_priv;
+
+ *strength = (((0xffff * 400) * state->signal_strength + 41000) * 2);
+
+ return 0;
+}
+
+static int as102_fe_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
+{
+ struct as102_state *state = fe->demodulator_priv;
+
+ if (state->demod_stats.has_started)
+ *ucblocks = state->demod_stats.bad_frame_count;
+ else
+ *ucblocks = 0;
+
+ return 0;
+}
+
+static int as102_fe_ts_bus_ctrl(struct dvb_frontend *fe, int acquire)
+{
+ struct as102_state *state = fe->demodulator_priv;
+
+ return state->ops->stream_ctrl(state->priv, acquire,
+ state->elna_cfg);
+}
+
+static void as102_fe_release(struct dvb_frontend *fe)
+{
+ struct as102_state *state = fe->demodulator_priv;
+
+ kfree(state);
+}
+
+
+static struct dvb_frontend_ops as102_fe_ops = {
+ .delsys = { SYS_DVBT },
+ .info = {
+ .name = "Abilis AS102 DVB-T",
+ .frequency_min = 174000000,
+ .frequency_max = 862000000,
+ .frequency_stepsize = 166667,
+ .caps = FE_CAN_INVERSION_AUTO
+ | FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4
+ | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO
+ | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QPSK
+ | FE_CAN_QAM_AUTO
+ | FE_CAN_TRANSMISSION_MODE_AUTO
+ | FE_CAN_GUARD_INTERVAL_AUTO
+ | FE_CAN_HIERARCHY_AUTO
+ | FE_CAN_RECOVER
+ | FE_CAN_MUTE_TS
+ },
+
+ .set_frontend = as102_fe_set_frontend,
+ .get_frontend = as102_fe_get_frontend,
+ .get_tune_settings = as102_fe_get_tune_settings,
+
+ .read_status = as102_fe_read_status,
+ .read_snr = as102_fe_read_snr,
+ .read_ber = as102_fe_read_ber,
+ .read_signal_strength = as102_fe_read_signal_strength,
+ .read_ucblocks = as102_fe_read_ucblocks,
+ .ts_bus_ctrl = as102_fe_ts_bus_ctrl,
+ .release = as102_fe_release,
+};
+
+struct dvb_frontend *as102_attach(const char *name,
+ const struct as102_fe_ops *ops,
+ void *priv,
+ uint8_t elna_cfg)
+{
+ struct as102_state *state;
+ struct dvb_frontend *fe;
+
+ state = kzalloc(sizeof(struct as102_state), GFP_KERNEL);
+ if (state == NULL) {
+ pr_err("%s: unable to allocate memory for state\n", __func__);
+ return NULL;
+ }
+ fe = &state->frontend;
+ fe->demodulator_priv = state;
+ state->ops = ops;
+ state->priv = priv;
+ state->elna_cfg = elna_cfg;
+
+ /* init frontend callback ops */
+ memcpy(&fe->ops, &as102_fe_ops, sizeof(struct dvb_frontend_ops));
+ strncpy(fe->ops.info.name, name, sizeof(fe->ops.info.name));
+
+ return fe;
+
+}
+EXPORT_SYMBOL_GPL(as102_attach);
+
+MODULE_DESCRIPTION("as102-fe");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pierrick Hascoet <pierrick.hascoet@abilis.com>");
diff --git a/drivers/media/dvb-frontends/as102_fe.h b/drivers/media/dvb-frontends/as102_fe.h
new file mode 100644
index 000000000000..a7c91430ca3d
--- /dev/null
+++ b/drivers/media/dvb-frontends/as102_fe.h
@@ -0,0 +1,29 @@
+/*
+ * Abilis Systems Single DVB-T Receiver
+ * Copyright (C) 2014 Mauro Carvalho Chehab <m.chehab@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "as102_fe_types.h"
+
+struct as102_fe_ops {
+ int (*set_tune)(void *priv, struct as10x_tune_args *tune_args);
+ int (*get_tps)(void *priv, struct as10x_tps *tps);
+ int (*get_status)(void *priv, struct as10x_tune_status *tstate);
+ int (*get_stats)(void *priv, struct as10x_demod_stats *demod_stats);
+ int (*stream_ctrl)(void *priv, int acquire, uint32_t elna_cfg);
+};
+
+struct dvb_frontend *as102_attach(const char *name,
+ const struct as102_fe_ops *ops,
+ void *priv,
+ uint8_t elna_cfg);
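A bridge driver hands as102_fe the five transport callbacks declared above and receives a dvb_frontend back from as102_attach(); everything bus specific stays behind the as102_fe_ops vtable. A minimal sketch with stubbed callbacks (the bus_* names are illustrative placeholders for the real transport code):

#include "as102_fe.h"

/* Hypothetical bridge glue: each stub would forward to the real transport. */
static int bus_set_tune(void *priv, struct as10x_tune_args *args)    { return 0; }
static int bus_get_tps(void *priv, struct as10x_tps *tps)            { return 0; }
static int bus_get_status(void *priv, struct as10x_tune_status *ts)  { return 0; }
static int bus_get_stats(void *priv, struct as10x_demod_stats *st)   { return 0; }
static int bus_stream_ctrl(void *priv, int acquire, uint32_t elna)   { return 0; }

static const struct as102_fe_ops bus_fe_ops = {
	.set_tune    = bus_set_tune,
	.get_tps     = bus_get_tps,
	.get_status  = bus_get_status,
	.get_stats   = bus_get_stats,
	.stream_ctrl = bus_stream_ctrl,
};

/* Called from the bridge's probe; priv is handed back to every callback. */
static struct dvb_frontend *bridge_attach_as102(void *priv, uint8_t elna_cfg)
{
	return as102_attach("Abilis AS102 DVB-T", &bus_fe_ops, priv, elna_cfg);
}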
diff --git a/drivers/staging/media/as102/as10x_types.h b/drivers/media/dvb-frontends/as102_fe_types.h
index af26e057d9a2..80a5398b580f 100644
--- a/drivers/staging/media/as102/as10x_types.h
+++ b/drivers/media/dvb-frontends/as102_fe_types.h
@@ -11,16 +11,10 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _AS10X_TYPES_H_
#define _AS10X_TYPES_H_
-#include "as10x_handle.h"
-
/*********************************/
/* MACRO DEFINITIONS */
/*********************************/
diff --git a/drivers/media/dvb-frontends/bcm3510.c b/drivers/media/dvb-frontends/bcm3510.c
index 39a29dd29519..638c7aa0fb7e 100644
--- a/drivers/media/dvb-frontends/bcm3510.c
+++ b/drivers/media/dvb-frontends/bcm3510.c
@@ -639,12 +639,12 @@ static int bcm3510_download_firmware(struct dvb_frontend* fe)
err("could not load firmware (%s): %d",BCM3510_DEFAULT_FIRMWARE,ret);
return ret;
}
- deb_info("got firmware: %zd\n",fw->size);
+ deb_info("got firmware: %zu\n", fw->size);
b = fw->data;
for (i = 0; i < fw->size;) {
- addr = le16_to_cpu( *( (u16 *)&b[i] ) );
- len = le16_to_cpu( *( (u16 *)&b[i+2] ) );
+ addr = le16_to_cpu(*((__le16 *)&b[i]));
+ len = le16_to_cpu(*((__le16 *)&b[i+2]));
deb_info("firmware chunk, addr: 0x%04x, len: 0x%04x, total length: 0x%04zx\n",addr,len,fw->size);
if ((ret = bcm3510_write_ram(st,addr,&b[i+4],len)) < 0) {
err("firmware download failed: %d\n",ret);
diff --git a/drivers/media/dvb-frontends/cxd2820r_c.c b/drivers/media/dvb-frontends/cxd2820r_c.c
index 0f4657e01cde..149fdca3fb44 100644
--- a/drivers/media/dvb-frontends/cxd2820r_c.c
+++ b/drivers/media/dvb-frontends/cxd2820r_c.c
@@ -65,7 +65,7 @@ int cxd2820r_set_frontend_c(struct dvb_frontend *fe)
}
priv->delivery_system = SYS_DVBC_ANNEX_A;
- priv->ber_running = 0; /* tune stops BER counter */
+ priv->ber_running = false; /* tune stops BER counter */
/* program IF frequency */
if (fe->ops.tuner_ops.get_if_frequency) {
@@ -168,7 +168,7 @@ int cxd2820r_read_ber_c(struct dvb_frontend *fe, u32 *ber)
start_ber = 1;
}
} else {
- priv->ber_running = 1;
+ priv->ber_running = true;
start_ber = 1;
}
diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c
index 51ef89312615..422e84bbb008 100644
--- a/drivers/media/dvb-frontends/cxd2820r_core.c
+++ b/drivers/media/dvb-frontends/cxd2820r_core.c
@@ -564,10 +564,10 @@ static enum dvbfe_search cxd2820r_search(struct dvb_frontend *fe)
/* check if we have a valid signal */
if (status & FE_HAS_LOCK) {
- priv->last_tune_failed = 0;
+ priv->last_tune_failed = false;
return DVBFE_ALGO_SEARCH_SUCCESS;
} else {
- priv->last_tune_failed = 1;
+ priv->last_tune_failed = true;
return DVBFE_ALGO_SEARCH_AGAIN;
}
diff --git a/drivers/media/dvb-frontends/cxd2820r_t.c b/drivers/media/dvb-frontends/cxd2820r_t.c
index 9b5a45b907bc..51401d036530 100644
--- a/drivers/media/dvb-frontends/cxd2820r_t.c
+++ b/drivers/media/dvb-frontends/cxd2820r_t.c
@@ -89,7 +89,7 @@ int cxd2820r_set_frontend_t(struct dvb_frontend *fe)
}
priv->delivery_system = SYS_DVBT;
- priv->ber_running = 0; /* tune stops BER counter */
+ priv->ber_running = false; /* tune stops BER counter */
/* program IF frequency */
if (fe->ops.tuner_ops.get_if_frequency) {
@@ -272,7 +272,7 @@ int cxd2820r_read_ber_t(struct dvb_frontend *fe, u32 *ber)
start_ber = 1;
}
} else {
- priv->ber_running = 1;
+ priv->ber_running = true;
start_ber = 1;
}
diff --git a/drivers/media/dvb-frontends/dib7000p.c b/drivers/media/dvb-frontends/dib7000p.c
index 661760d60232..589134e95175 100644
--- a/drivers/media/dvb-frontends/dib7000p.c
+++ b/drivers/media/dvb-frontends/dib7000p.c
@@ -2559,7 +2559,7 @@ static void dib7090_setHostBusMux(struct dib7000p_state *state, int mode)
dib7000p_write_word(state, 1288, reg_1288);
}
-int dib7090_set_diversity_in(struct dvb_frontend *fe, int onoff)
+static int dib7090_set_diversity_in(struct dvb_frontend *fe, int onoff)
{
struct dib7000p_state *state = fe->demodulator_priv;
u16 reg_1287;
diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.c b/drivers/media/dvb-frontends/drx39xyj/drxj.c
index 7ca7a21df183..5ec221ffdfca 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drxj.c
+++ b/drivers/media/dvb-frontends/drx39xyj/drxj.c
@@ -2174,7 +2174,7 @@ int drxj_dap_atomic_read_reg32(struct i2c_device_addr *dev_addr,
u32 addr,
u32 *data, u32 flags)
{
- u8 buf[sizeof(*data)];
+ u8 buf[sizeof(*data)] = { 0 };
int rc = -EIO;
u32 word = 0;
@@ -4193,7 +4193,7 @@ int drxj_dap_scu_atomic_read_reg16(struct i2c_device_addr *dev_addr,
u32 addr,
u16 *data, u32 flags)
{
- u8 buf[2];
+ u8 buf[2] = { 0 };
int rc = -EIO;
u16 word = 0;
@@ -10667,7 +10667,7 @@ ctrl_sig_quality(struct drx_demod_instance *demod,
enum drx_standard standard = ext_attr->standard;
int rc;
u32 ber, cnt, err, pkt;
- u16 mer, strength;
+ u16 mer, strength = 0;
rc = get_sig_strength(demod, &strength);
if (rc < 0) {
@@ -11602,7 +11602,7 @@ static u16 drx_u_code_compute_crc(u8 *block_data, u16 nr_words)
u32 carry = 0;
while (i < nr_words) {
- crc_word |= (u32)be16_to_cpu(*(u32 *)(block_data));
+ crc_word |= (u32)be16_to_cpu(*(__be16 *)(block_data));
for (j = 0; j < 16; j++) {
crc_word <<= 1;
if (carry != 0)
@@ -11629,7 +11629,7 @@ static int drx_check_firmware(struct drx_demod_instance *demod, u8 *mc_data,
int i;
unsigned count = 2 * sizeof(u16);
u32 mc_dev_type, mc_version, mc_base_version;
- u16 mc_nr_of_blks = be16_to_cpu(*(u32 *)(mc_data + sizeof(u16)));
+ u16 mc_nr_of_blks = be16_to_cpu(*(__be16 *)(mc_data + sizeof(u16)));
/*
* Scan microcode blocks first for version info
@@ -11647,13 +11647,13 @@ static int drx_check_firmware(struct drx_demod_instance *demod, u8 *mc_data,
goto eof;
/* Process block header */
- block_hdr.addr = be32_to_cpu(*(u32 *)(mc_data + count));
+ block_hdr.addr = be32_to_cpu(*(__be32 *)(mc_data + count));
count += sizeof(u32);
- block_hdr.size = be16_to_cpu(*(u32 *)(mc_data + count));
+ block_hdr.size = be16_to_cpu(*(__be16 *)(mc_data + count));
count += sizeof(u16);
- block_hdr.flags = be16_to_cpu(*(u32 *)(mc_data + count));
+ block_hdr.flags = be16_to_cpu(*(__be16 *)(mc_data + count));
count += sizeof(u16);
- block_hdr.CRC = be16_to_cpu(*(u32 *)(mc_data + count));
+ block_hdr.CRC = be16_to_cpu(*(__be16 *)(mc_data + count));
count += sizeof(u16);
pr_debug("%u: addr %u, size %u, flags 0x%04x, CRC 0x%04x\n",
@@ -11667,7 +11667,7 @@ static int drx_check_firmware(struct drx_demod_instance *demod, u8 *mc_data,
if (block_hdr.addr + sizeof(u16) > size)
goto eof;
- auxtype = be16_to_cpu(*(u32 *)(auxblk));
+ auxtype = be16_to_cpu(*(__be16 *)(auxblk));
/* Aux block. Check type */
if (DRX_ISMCVERTYPE(auxtype)) {
@@ -11675,11 +11675,11 @@ static int drx_check_firmware(struct drx_demod_instance *demod, u8 *mc_data,
goto eof;
auxblk += sizeof(u16);
- mc_dev_type = be32_to_cpu(*(u32 *)(auxblk));
+ mc_dev_type = be32_to_cpu(*(__be32 *)(auxblk));
auxblk += sizeof(u32);
- mc_version = be32_to_cpu(*(u32 *)(auxblk));
+ mc_version = be32_to_cpu(*(__be32 *)(auxblk));
auxblk += sizeof(u32);
- mc_base_version = be32_to_cpu(*(u32 *)(auxblk));
+ mc_base_version = be32_to_cpu(*(__be32 *)(auxblk));
DRX_ATTR_MCRECORD(demod).aux_type = auxtype;
DRX_ATTR_MCRECORD(demod).mc_dev_type = mc_dev_type;
@@ -11765,9 +11765,9 @@ static int drx_ctrl_u_code(struct drx_demod_instance *demod,
mc_data = (void *)mc_data_init;
/* Check data */
- mc_magic_word = be16_to_cpu(*(u32 *)(mc_data));
+ mc_magic_word = be16_to_cpu(*(__be16 *)(mc_data));
mc_data += sizeof(u16);
- mc_nr_of_blks = be16_to_cpu(*(u32 *)(mc_data));
+ mc_nr_of_blks = be16_to_cpu(*(__be16 *)(mc_data));
mc_data += sizeof(u16);
if ((mc_magic_word != DRX_UCODE_MAGIC_WORD) || (mc_nr_of_blks == 0)) {
@@ -11791,13 +11791,13 @@ static int drx_ctrl_u_code(struct drx_demod_instance *demod,
u16 mc_block_nr_bytes = 0;
/* Process block header */
- block_hdr.addr = be32_to_cpu(*(u32 *)(mc_data));
+ block_hdr.addr = be32_to_cpu(*(__be32 *)(mc_data));
mc_data += sizeof(u32);
- block_hdr.size = be16_to_cpu(*(u32 *)(mc_data));
+ block_hdr.size = be16_to_cpu(*(__be16 *)(mc_data));
mc_data += sizeof(u16);
- block_hdr.flags = be16_to_cpu(*(u32 *)(mc_data));
+ block_hdr.flags = be16_to_cpu(*(__be16 *)(mc_data));
mc_data += sizeof(u16);
- block_hdr.CRC = be16_to_cpu(*(u32 *)(mc_data));
+ block_hdr.CRC = be16_to_cpu(*(__be16 *)(mc_data));
mc_data += sizeof(u16);
pr_debug("%u: addr %u, size %u, flags 0x%04x, CRC 0x%04x\n",
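A note on the casts in the hunks above: be16_to_cpu()/be32_to_cpu() take __be16/__be32 operands, so loading through a pointer of the matching __beXX type both silences the sparse endianness warnings and reads exactly the intended number of bytes (the old *(u32 *) form over-read and, on big-endian CPUs, would even have kept the wrong two bytes after truncation). Where buffer alignment is not guaranteed, the unaligned helpers are an equivalent alternative; shown only as a sketch, not as a change proposed by this patch:

#include <linux/types.h>
#include <asm/unaligned.h>

static u16 example_read_be16(const u8 *p)
{
	/* same value as be16_to_cpu(*(__be16 *)p), but with no
	 * alignment requirement on p */
	return get_unaligned_be16(p);
}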
diff --git a/drivers/media/dvb-frontends/drxd_hard.c b/drivers/media/dvb-frontends/drxd_hard.c
index ae2276db77bc..687e893d29fe 100644
--- a/drivers/media/dvb-frontends/drxd_hard.c
+++ b/drivers/media/dvb-frontends/drxd_hard.c
@@ -2628,10 +2628,11 @@ static int DRXD_init(struct drxd_state *state, const u8 *fw, u32 fw_size)
break;
/* Apply I2c address patch to B1 */
- if (!state->type_A && state->m_HiI2cPatch != NULL)
+ if (!state->type_A && state->m_HiI2cPatch != NULL) {
status = WriteTable(state, state->m_HiI2cPatch);
if (status < 0)
break;
+ }
if (state->type_A) {
/* HI firmware patch for UIO readout,
@@ -2830,14 +2831,8 @@ static int drxd_read_status(struct dvb_frontend *fe, fe_status_t * status)
static int drxd_init(struct dvb_frontend *fe)
{
struct drxd_state *state = fe->demodulator_priv;
- int err = 0;
-/* if (request_firmware(&state->fw, "drxd.fw", state->dev)<0) */
return DRXD_init(state, NULL, 0);
-
- err = DRXD_init(state, state->fw->data, state->fw->size);
- release_firmware(state->fw);
- return err;
}
static int drxd_config_i2c(struct dvb_frontend *fe, int onoff)
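The braces added above fix a classic scoping bug: without them only the first statement after the if was conditional, so the following error check ran unconditionally against whatever value status happened to hold (the same treatment is applied to set_agc_rf() in drxk_hard.c further below). A stand-alone illustration of the pattern, with write_table() as a hypothetical helper:

#include <linux/types.h>

extern int write_table(void);	/* hypothetical */

static int buggy_form(bool apply_patch)
{
	int status = 0;

	if (apply_patch)
		status = write_table();
	if (status < 0)		/* runs even when apply_patch is false;
				 * in the driver it tested a stale value */
		return status;

	return 0;
}

static int fixed_form(bool apply_patch)
{
	int status;

	if (apply_patch) {
		status = write_table();
		if (status < 0)	/* only checked when write_table() ran */
			return status;
	}

	return 0;
}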
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
index cce94a75b2e1..672195147d01 100644
--- a/drivers/media/dvb-frontends/drxk_hard.c
+++ b/drivers/media/dvb-frontends/drxk_hard.c
@@ -1028,7 +1028,7 @@ static int hi_command(struct drxk_state *state, u16 cmd, u16 *p_result)
((state->m_hi_cfg_ctrl) &
SIO_HI_RA_RAM_PAR_5_CFG_SLEEP__M) ==
SIO_HI_RA_RAM_PAR_5_CFG_SLEEP_ZZZ);
- if (powerdown_cmd == false) {
+ if (!powerdown_cmd) {
/* Wait until command rdy */
u32 retry_count = 0;
u16 wait_cmd;
@@ -1129,7 +1129,7 @@ static int mpegts_configure_pins(struct drxk_state *state, bool mpeg_enable)
if (status < 0)
goto error;
- if (mpeg_enable == false) {
+ if (!mpeg_enable) {
/* Set MPEG TS pads to inputmode */
status = write16(state, SIO_PDR_MSTRT_CFG__A, 0x0000);
if (status < 0)
@@ -1190,7 +1190,7 @@ static int mpegts_configure_pins(struct drxk_state *state, bool mpeg_enable)
if (status < 0)
goto error;
- if (state->m_enable_parallel == true) {
+ if (state->m_enable_parallel) {
/* parallel -> enable MD1 to MD7 */
status = write16(state, SIO_PDR_MD1_CFG__A,
sio_pdr_mdx_cfg);
@@ -1392,7 +1392,7 @@ static int dvbt_enable_ofdm_token_ring(struct drxk_state *state, bool enable)
dprintk(1, "\n");
- if (enable == false) {
+ if (!enable) {
desired_ctrl = SIO_OFDM_SH_OFDM_RING_ENABLE_OFF;
desired_status = SIO_OFDM_SH_OFDM_RING_STATUS_DOWN;
}
@@ -2012,7 +2012,7 @@ static int mpegts_dto_setup(struct drxk_state *state,
goto error;
fec_oc_reg_mode &= (~FEC_OC_MODE_PARITY__M);
fec_oc_reg_ipr_mode &= (~FEC_OC_IPR_MODE_MVAL_DIS_PAR__M);
- if (state->m_insert_rs_byte == true) {
+ if (state->m_insert_rs_byte) {
/* enable parity symbol forward */
fec_oc_reg_mode |= FEC_OC_MODE_PARITY__M;
/* MVAL disable during parity bytes */
@@ -2023,7 +2023,7 @@ static int mpegts_dto_setup(struct drxk_state *state,
/* Check serial or parallel output */
fec_oc_reg_ipr_mode &= (~(FEC_OC_IPR_MODE_SERIAL__M));
- if (state->m_enable_parallel == false) {
+ if (!state->m_enable_parallel) {
/* MPEG data output is serial -> set ipr_mode[0] */
fec_oc_reg_ipr_mode |= FEC_OC_IPR_MODE_SERIAL__M;
}
@@ -2136,19 +2136,19 @@ static int mpegts_configure_polarity(struct drxk_state *state)
/* Control selective inversion of output bits */
fec_oc_reg_ipr_invert &= (~(invert_data_mask));
- if (state->m_invert_data == true)
+ if (state->m_invert_data)
fec_oc_reg_ipr_invert |= invert_data_mask;
fec_oc_reg_ipr_invert &= (~(FEC_OC_IPR_INVERT_MERR__M));
- if (state->m_invert_err == true)
+ if (state->m_invert_err)
fec_oc_reg_ipr_invert |= FEC_OC_IPR_INVERT_MERR__M;
fec_oc_reg_ipr_invert &= (~(FEC_OC_IPR_INVERT_MSTRT__M));
- if (state->m_invert_str == true)
+ if (state->m_invert_str)
fec_oc_reg_ipr_invert |= FEC_OC_IPR_INVERT_MSTRT__M;
fec_oc_reg_ipr_invert &= (~(FEC_OC_IPR_INVERT_MVAL__M));
- if (state->m_invert_val == true)
+ if (state->m_invert_val)
fec_oc_reg_ipr_invert |= FEC_OC_IPR_INVERT_MVAL__M;
fec_oc_reg_ipr_invert &= (~(FEC_OC_IPR_INVERT_MCLK__M));
- if (state->m_invert_clk == true)
+ if (state->m_invert_clk)
fec_oc_reg_ipr_invert |= FEC_OC_IPR_INVERT_MCLK__M;
return write16(state, FEC_OC_IPR_INVERT__A, fec_oc_reg_ipr_invert);
@@ -2220,12 +2220,13 @@ static int set_agc_rf(struct drxk_state *state,
}
/* Set TOP, only if IF-AGC is in AUTO mode */
- if (p_if_agc_settings->ctrl_mode == DRXK_AGC_CTRL_AUTO)
+ if (p_if_agc_settings->ctrl_mode == DRXK_AGC_CTRL_AUTO) {
status = write16(state,
SCU_RAM_AGC_IF_IACCU_HI_TGT_MAX__A,
p_agc_cfg->top);
if (status < 0)
goto error;
+ }
/* Cut-Off current */
status = write16(state, SCU_RAM_AGC_RF_IACCU_HI_CO__A,
@@ -3352,7 +3353,7 @@ static int dvbt_ctrl_set_inc_enable(struct drxk_state *state, bool *enabled)
int status;
dprintk(1, "\n");
- if (*enabled == true)
+ if (*enabled)
status = write16(state, IQM_CF_BYPASSDET__A, 0);
else
status = write16(state, IQM_CF_BYPASSDET__A, 1);
@@ -3368,7 +3369,7 @@ static int dvbt_ctrl_set_fr_enable(struct drxk_state *state, bool *enabled)
int status;
dprintk(1, "\n");
- if (*enabled == true) {
+ if (*enabled) {
/* write mask to 1 */
status = write16(state, OFDM_SC_RA_RAM_FR_THRES_8K__A,
DEFAULT_FR_THRES_8K);
@@ -6794,11 +6795,11 @@ struct dvb_frontend *drxk_attach(const struct drxk_config *config,
state->enable_merr_cfg = config->enable_merr_cfg;
if (config->dynamic_clk) {
- state->m_dvbt_static_clk = 0;
- state->m_dvbc_static_clk = 0;
+ state->m_dvbt_static_clk = false;
+ state->m_dvbc_static_clk = false;
} else {
- state->m_dvbt_static_clk = 1;
- state->m_dvbc_static_clk = 1;
+ state->m_dvbt_static_clk = true;
+ state->m_dvbc_static_clk = true;
}
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index dfe0c2f7f1ef..81657e94c5a4 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -159,6 +159,7 @@ static int m88ds3103_wr_reg_val_tab(struct m88ds3103_priv *priv,
{
int ret, i, j;
u8 buf[83];
+
dev_dbg(&priv->i2c->dev, "%s: tab_len=%d\n", __func__, tab_len);
if (tab_len > 83) {
@@ -247,8 +248,9 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
u8 u8tmp, u8tmp1, u8tmp2;
u8 buf[2];
u16 u16tmp, divide_ratio;
- u32 tuner_frequency, target_mclk, ts_clk;
+ u32 tuner_frequency, target_mclk;
s32 s32tmp;
+
dev_dbg(&priv->i2c->dev,
"%s: delivery_system=%d modulation=%d frequency=%d symbol_rate=%d inversion=%d pilot=%d rolloff=%d\n",
__func__, c->delivery_system,
@@ -316,9 +318,6 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
target_mclk = 144000;
break;
case M88DS3103_TS_PARALLEL:
- case M88DS3103_TS_PARALLEL_12:
- case M88DS3103_TS_PARALLEL_16:
- case M88DS3103_TS_PARALLEL_19_2:
case M88DS3103_TS_CI:
if (c->symbol_rate < 18000000)
target_mclk = 96000;
@@ -352,33 +351,17 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
switch (priv->cfg->ts_mode) {
case M88DS3103_TS_SERIAL:
u8tmp1 = 0x00;
- ts_clk = 0;
- u8tmp = 0x46;
+ u8tmp = 0x06;
break;
case M88DS3103_TS_SERIAL_D7:
u8tmp1 = 0x20;
- ts_clk = 0;
- u8tmp = 0x46;
+ u8tmp = 0x06;
break;
case M88DS3103_TS_PARALLEL:
- ts_clk = 24000;
- u8tmp = 0x42;
- break;
- case M88DS3103_TS_PARALLEL_12:
- ts_clk = 12000;
- u8tmp = 0x42;
- break;
- case M88DS3103_TS_PARALLEL_16:
- ts_clk = 16000;
- u8tmp = 0x42;
- break;
- case M88DS3103_TS_PARALLEL_19_2:
- ts_clk = 19200;
- u8tmp = 0x42;
+ u8tmp = 0x02;
break;
case M88DS3103_TS_CI:
- ts_clk = 6000;
- u8tmp = 0x43;
+ u8tmp = 0x03;
break;
default:
dev_dbg(&priv->i2c->dev, "%s: invalid ts_mode\n", __func__);
@@ -386,6 +369,9 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
goto err;
}
+ if (priv->cfg->ts_clk_pol)
+ u8tmp |= 0x40;
+
/* TS mode */
ret = m88ds3103_wr_reg(priv, 0xfd, u8tmp);
if (ret)
@@ -399,8 +385,8 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
goto err;
}
- if (ts_clk) {
- divide_ratio = DIV_ROUND_UP(target_mclk, ts_clk);
+ if (priv->cfg->ts_clk) {
+ divide_ratio = DIV_ROUND_UP(target_mclk, priv->cfg->ts_clk);
u8tmp1 = divide_ratio / 2;
u8tmp2 = DIV_ROUND_UP(divide_ratio, 2);
} else {
@@ -411,7 +397,7 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
dev_dbg(&priv->i2c->dev,
"%s: target_mclk=%d ts_clk=%d divide_ratio=%d\n",
- __func__, target_mclk, ts_clk, divide_ratio);
+ __func__, target_mclk, priv->cfg->ts_clk, divide_ratio);
u8tmp1--;
u8tmp2--;
@@ -536,6 +522,7 @@ static int m88ds3103_init(struct dvb_frontend *fe)
const struct firmware *fw = NULL;
u8 *fw_file = M88DS3103_FIRMWARE;
u8 u8tmp;
+
dev_dbg(&priv->i2c->dev, "%s:\n", __func__);
/* set cold state by default */
@@ -648,6 +635,7 @@ static int m88ds3103_sleep(struct dvb_frontend *fe)
{
struct m88ds3103_priv *priv = fe->demodulator_priv;
int ret;
+
dev_dbg(&priv->i2c->dev, "%s:\n", __func__);
priv->delivery_system = SYS_UNDEFINED;
@@ -682,6 +670,7 @@ static int m88ds3103_get_frontend(struct dvb_frontend *fe)
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret;
u8 buf[3];
+
dev_dbg(&priv->i2c->dev, "%s:\n", __func__);
if (!priv->warm || !(priv->fe_status & FE_HAS_LOCK)) {
@@ -857,6 +846,7 @@ static int m88ds3103_read_snr(struct dvb_frontend *fe, u16 *snr)
u8 buf[3];
u16 noise, signal;
u32 noise_tot, signal_tot;
+
dev_dbg(&priv->i2c->dev, "%s:\n", __func__);
/* reports SNR in resolution of 0.1 dB */
@@ -933,6 +923,7 @@ static int m88ds3103_read_ber(struct dvb_frontend *fe, u32 *ber)
int ret;
unsigned int utmp;
u8 buf[3], u8tmp;
+
dev_dbg(&priv->i2c->dev, "%s:\n", __func__);
switch (c->delivery_system) {
@@ -1013,6 +1004,7 @@ static int m88ds3103_set_tone(struct dvb_frontend *fe,
struct m88ds3103_priv *priv = fe->demodulator_priv;
int ret;
u8 u8tmp, tone, reg_a1_mask;
+
dev_dbg(&priv->i2c->dev, "%s: fe_sec_tone_mode=%d\n", __func__,
fe_sec_tone_mode);
@@ -1053,12 +1045,64 @@ err:
return ret;
}
+static int m88ds3103_set_voltage(struct dvb_frontend *fe,
+ fe_sec_voltage_t fe_sec_voltage)
+{
+ struct m88ds3103_priv *priv = fe->demodulator_priv;
+ int ret;
+ u8 u8tmp;
+ bool voltage_sel, voltage_dis;
+
+ dev_dbg(&priv->i2c->dev, "%s: fe_sec_voltage=%d\n", __func__,
+ fe_sec_voltage);
+
+ if (!priv->warm) {
+ ret = -EAGAIN;
+ goto err;
+ }
+
+ switch (fe_sec_voltage) {
+ case SEC_VOLTAGE_18:
+ voltage_sel = true;
+ voltage_dis = false;
+ break;
+ case SEC_VOLTAGE_13:
+ voltage_sel = false;
+ voltage_dis = false;
+ break;
+ case SEC_VOLTAGE_OFF:
+ voltage_sel = false;
+ voltage_dis = true;
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s: invalid fe_sec_voltage\n",
+ __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* output pin polarity */
+ voltage_sel ^= priv->cfg->lnb_hv_pol;
+ voltage_dis ^= priv->cfg->lnb_en_pol;
+
+ u8tmp = voltage_dis << 1 | voltage_sel << 0;
+ ret = m88ds3103_wr_reg_mask(priv, 0xa2, u8tmp, 0x03);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ return ret;
+}
+
static int m88ds3103_diseqc_send_master_cmd(struct dvb_frontend *fe,
struct dvb_diseqc_master_cmd *diseqc_cmd)
{
struct m88ds3103_priv *priv = fe->demodulator_priv;
int ret, i;
u8 u8tmp;
+
dev_dbg(&priv->i2c->dev, "%s: msg=%*ph\n", __func__,
diseqc_cmd->msg_len, diseqc_cmd->msg);
@@ -1130,6 +1174,7 @@ static int m88ds3103_diseqc_send_burst(struct dvb_frontend *fe,
struct m88ds3103_priv *priv = fe->demodulator_priv;
int ret, i;
u8 u8tmp, burst;
+
dev_dbg(&priv->i2c->dev, "%s: fe_sec_mini_cmd=%d\n", __func__,
fe_sec_mini_cmd);
@@ -1202,6 +1247,7 @@ static int m88ds3103_get_tune_settings(struct dvb_frontend *fe,
static void m88ds3103_release(struct dvb_frontend *fe)
{
struct m88ds3103_priv *priv = fe->demodulator_priv;
+
i2c_del_mux_adapter(priv->i2c_adapter);
kfree(priv);
}
@@ -1370,6 +1416,7 @@ static struct dvb_frontend_ops m88ds3103_ops = {
.diseqc_send_burst = m88ds3103_diseqc_send_burst,
.set_tone = m88ds3103_set_tone,
+ .set_voltage = m88ds3103_set_voltage,
};
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
diff --git a/drivers/media/dvb-frontends/m88ds3103.h b/drivers/media/dvb-frontends/m88ds3103.h
index bbb7e3aa5675..9b3b4962da7c 100644
--- a/drivers/media/dvb-frontends/m88ds3103.h
+++ b/drivers/media/dvb-frontends/m88ds3103.h
@@ -47,14 +47,23 @@ struct m88ds3103_config {
*/
#define M88DS3103_TS_SERIAL 0 /* TS output pin D0, normal */
#define M88DS3103_TS_SERIAL_D7 1 /* TS output pin D7 */
-#define M88DS3103_TS_PARALLEL 2 /* 24 MHz, normal */
-#define M88DS3103_TS_PARALLEL_12 3 /* 12 MHz */
-#define M88DS3103_TS_PARALLEL_16 4 /* 16 MHz */
-#define M88DS3103_TS_PARALLEL_19_2 5 /* 19.2 MHz */
-#define M88DS3103_TS_CI 6 /* 6 MHz */
+#define M88DS3103_TS_PARALLEL 2 /* TS parallel mode */
+#define M88DS3103_TS_CI 3 /* TS CI mode */
u8 ts_mode;
/*
+ * TS clk in kHz
+ * Default: 0.
+ */
+ u32 ts_clk;
+
+ /*
+ * TS clk polarity.
+ * Default: 0. 1: active at falling edge; 0: active at rising edge.
+ */
+ u8 ts_clk_pol:1;
+
+ /*
* spectrum inversion
* Default: 0
*/
@@ -86,6 +95,22 @@ struct m88ds3103_config {
* Default: none, must set
*/
u8 agc;
+
+ /*
+ * LNB H/V pin polarity
+ * Default: 0.
+ * 1: pin high selects VOLTAGE_13, pin low selects VOLTAGE_18.
+ * 0: pin high selects VOLTAGE_18, pin low selects VOLTAGE_13.
+ */
+ u8 lnb_hv_pol:1;
+
+ /*
+ * LNB enable pin polarity
+ * Default: 0.
+ * 1: pin high to enable, pin low to disable.
+ * 0: pin high to disable, pin low to enable.
+ */
+ u8 lnb_en_pol:1;
};
/*
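For illustration only (all values made up, not taken from any real board): a configuration exercising the new ts_clk, ts_clk_pol and LNB polarity fields:

static const struct m88ds3103_config my_board_m88ds3103_config = {
	.ts_mode    = M88DS3103_TS_PARALLEL,
	.ts_clk     = 16000,	/* TS clock in kHz -> 16 MHz */
	.ts_clk_pol = 0,	/* data valid on rising edge */
	.agc        = 0x99,	/* board specific, must be set */
	.lnb_hv_pol = 1,	/* pin high selects 13 V */
	.lnb_en_pol = 1,	/* pin high enables LNB power */
	/* i2c address, clock and the remaining members omitted here */
};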
diff --git a/drivers/media/dvb-frontends/mb86a16.c b/drivers/media/dvb-frontends/mb86a16.c
index 9ae40abfd71a..3ddea4471d2b 100644
--- a/drivers/media/dvb-frontends/mb86a16.c
+++ b/drivers/media/dvb-frontends/mb86a16.c
@@ -28,7 +28,7 @@
#include "mb86a16.h"
#include "mb86a16_priv.h"
-unsigned int verbose = 5;
+static unsigned int verbose = 5;
module_param(verbose, int, 0644);
#define ABS(x) ((x) < 0 ? (-x) : (x))
@@ -115,9 +115,11 @@ static int mb86a16_read(struct mb86a16_state *state, u8 reg, u8 *val)
};
ret = i2c_transfer(state->i2c_adap, msg, 2);
if (ret != 2) {
- dprintk(verbose, MB86A16_ERROR, 1, "read error(reg=0x%02x, ret=0x%i)",
+ dprintk(verbose, MB86A16_ERROR, 1, "read error(reg=0x%02x, ret=%i)",
reg, ret);
+ if (ret < 0)
+ return ret;
return -EREMOTEIO;
}
*val = b1[0];
diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
index b931179c70a4..e6f165a5b90d 100644
--- a/drivers/media/dvb-frontends/mb86a20s.c
+++ b/drivers/media/dvb-frontends/mb86a20s.c
@@ -33,7 +33,7 @@ enum mb86a20s_bandwidth {
MB86A20S_3SEG = 3,
};
-u8 mb86a20s_subchannel[] = {
+static u8 mb86a20s_subchannel[] = {
0xb0, 0xc0, 0xd0, 0xe0,
0xf0, 0x00, 0x10, 0x20,
};
@@ -1228,7 +1228,7 @@ struct linear_segments {
* All tables below return a dB/1000 measurement
*/
-static struct linear_segments cnr_to_db_table[] = {
+static const struct linear_segments cnr_to_db_table[] = {
{ 19648, 0},
{ 18187, 1000},
{ 16534, 2000},
@@ -1262,7 +1262,7 @@ static struct linear_segments cnr_to_db_table[] = {
{ 788, 30000},
};
-static struct linear_segments cnr_64qam_table[] = {
+static const struct linear_segments cnr_64qam_table[] = {
{ 3922688, 0},
{ 3920384, 1000},
{ 3902720, 2000},
@@ -1296,7 +1296,7 @@ static struct linear_segments cnr_64qam_table[] = {
{ 388864, 30000},
};
-static struct linear_segments cnr_16qam_table[] = {
+static const struct linear_segments cnr_16qam_table[] = {
{ 5314816, 0},
{ 5219072, 1000},
{ 5118720, 2000},
@@ -1330,7 +1330,7 @@ static struct linear_segments cnr_16qam_table[] = {
{ 95744, 30000},
};
-struct linear_segments cnr_qpsk_table[] = {
+static const struct linear_segments cnr_qpsk_table[] = {
{ 2834176, 0},
{ 2683648, 1000},
{ 2536960, 2000},
@@ -1364,7 +1364,7 @@ struct linear_segments cnr_qpsk_table[] = {
{ 11520, 30000},
};
-static u32 interpolate_value(u32 value, struct linear_segments *segments,
+static u32 interpolate_value(u32 value, const struct linear_segments *segments,
unsigned len)
{
u64 tmp64;
@@ -1448,7 +1448,7 @@ static int mb86a20s_get_blk_error_layer_CNR(struct dvb_frontend *fe)
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
u32 mer, cnr;
int rc, val, layer;
- struct linear_segments *segs;
+ const struct linear_segments *segs;
unsigned segs_len;
dev_dbg(&state->i2c->dev, "%s called.\n", __func__);
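interpolate_value() itself is unchanged by this patch (only its tables become static const); for readers of the dB/1000 tables above, a generic, self-contained sketch of this kind of piecewise-linear lookup over descending (raw value, dB * 1000) pairs. The struct and field names are illustrative, not the driver's own:

#include <linux/types.h>
#include <linux/math64.h>

struct example_segment {
	u32 raw;	/* register reading, descending down the table */
	u32 db_x1000;	/* corresponding dB * 1000, ascending */
};

static u32 example_interpolate(u32 raw, const struct example_segment *seg,
			       unsigned int len)
{
	unsigned int i;
	u64 tmp;

	if (raw >= seg[0].raw)
		return seg[0].db_x1000;
	if (raw <= seg[len - 1].raw)
		return seg[len - 1].db_x1000;

	for (i = 1; i < len; i++)
		if (raw >= seg[i].raw)
			break;

	/* linear interpolation between seg[i - 1] and seg[i] */
	tmp = (u64)(seg[i].db_x1000 - seg[i - 1].db_x1000) *
	      (seg[i - 1].raw - raw);
	tmp = div_u64(tmp, seg[i - 1].raw - seg[i].raw);

	return seg[i - 1].db_x1000 + (u32)tmp;
}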
diff --git a/drivers/media/dvb-frontends/mt312.c b/drivers/media/dvb-frontends/mt312.c
index a74ac0ddb833..2163490c1e6b 100644
--- a/drivers/media/dvb-frontends/mt312.c
+++ b/drivers/media/dvb-frontends/mt312.c
@@ -103,7 +103,7 @@ static int mt312_write(struct mt312_state *state, const enum mt312_reg_addr reg,
if (1 + count > sizeof(buf)) {
printk(KERN_WARNING
- "mt312: write: len=%zd is too big!\n", count);
+ "mt312: write: len=%zu is too big!\n", count);
return -EINVAL;
}
diff --git a/drivers/media/dvb-frontends/or51211.c b/drivers/media/dvb-frontends/or51211.c
index 10cfc0579168..873ea1da844b 100644
--- a/drivers/media/dvb-frontends/or51211.c
+++ b/drivers/media/dvb-frontends/or51211.c
@@ -111,7 +111,7 @@ static int or51211_load_firmware (struct dvb_frontend* fe,
u8 tudata[585];
int i;
- dprintk("Firmware is %zd bytes\n",fw->size);
+ dprintk("Firmware is %zu bytes\n", fw->size);
/* Get eprom data */
tudata[0] = 17;
diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c
index fdbed35c87fa..eb737cf29a36 100644
--- a/drivers/media/dvb-frontends/rtl2832.c
+++ b/drivers/media/dvb-frontends/rtl2832.c
@@ -936,7 +936,7 @@ static void rtl2832_i2c_gate_work(struct work_struct *work)
if (ret != 1)
goto err;
- priv->i2c_gate_state = 0;
+ priv->i2c_gate_state = false;
return;
err:
diff --git a/drivers/media/dvb-frontends/rtl2832_sdr.c b/drivers/media/dvb-frontends/rtl2832_sdr.c
index 023e0f49c786..7bf98cf6bbe1 100644
--- a/drivers/media/dvb-frontends/rtl2832_sdr.c
+++ b/drivers/media/dvb-frontends/rtl2832_sdr.c
@@ -329,7 +329,7 @@ static int rtl2832_sdr_rd_reg_mask(struct rtl2832_sdr_state *s, u16 reg,
static struct rtl2832_sdr_frame_buf *rtl2832_sdr_get_next_fill_buf(
struct rtl2832_sdr_state *s)
{
- unsigned long flags = 0;
+ unsigned long flags;
struct rtl2832_sdr_frame_buf *buf = NULL;
spin_lock_irqsave(&s->queued_bufs_lock, flags);
@@ -365,17 +365,19 @@ static unsigned int rtl2832_sdr_convert_stream(struct rtl2832_sdr_state *s,
dst_len = 0;
}
- /* calculate samping rate and output it in 10 seconds intervals */
+ /* calculate sample rate and output it in 10-second intervals */
if (unlikely(time_is_before_jiffies(s->jiffies_next))) {
-#define MSECS 10000UL
+ #define MSECS 10000UL
+ unsigned int msecs = jiffies_to_msecs(jiffies -
+ s->jiffies_next + msecs_to_jiffies(MSECS));
unsigned int samples = s->sample - s->sample_measured;
s->jiffies_next = jiffies + msecs_to_jiffies(MSECS);
s->sample_measured = s->sample;
dev_dbg(&s->udev->dev,
- "slen=%d samples=%u msecs=%lu sampling rate=%lu\n",
- src_len, samples, MSECS,
- samples * 1000UL / MSECS);
+ "slen=%u samples=%u msecs=%u sample rate=%lu\n",
+ src_len, samples, msecs,
+ samples * 1000UL / msecs);
}
/* total number of I+Q pairs */
@@ -394,8 +396,8 @@ static void rtl2832_sdr_urb_complete(struct urb *urb)
struct rtl2832_sdr_frame_buf *fbuf;
dev_dbg_ratelimited(&s->udev->dev,
- "%s: status=%d length=%d/%d errors=%d\n",
- __func__, urb->status, urb->actual_length,
+ "status=%d length=%d/%d errors=%d\n",
+ urb->status, urb->actual_length,
urb->transfer_buffer_length, urb->error_count);
switch (urb->status) {
@@ -443,7 +445,7 @@ static int rtl2832_sdr_kill_urbs(struct rtl2832_sdr_state *s)
int i;
for (i = s->urbs_submitted - 1; i >= 0; i--) {
- dev_dbg(&s->udev->dev, "%s: kill urb=%d\n", __func__, i);
+ dev_dbg(&s->udev->dev, "kill urb=%d\n", i);
/* stop the URB */
usb_kill_urb(s->urb_list[i]);
}
@@ -457,7 +459,7 @@ static int rtl2832_sdr_submit_urbs(struct rtl2832_sdr_state *s)
int i, ret;
for (i = 0; i < s->urbs_initialized; i++) {
- dev_dbg(&s->udev->dev, "%s: submit urb=%d\n", __func__, i);
+ dev_dbg(&s->udev->dev, "submit urb=%d\n", i);
ret = usb_submit_urb(s->urb_list[i], GFP_ATOMIC);
if (ret) {
dev_err(&s->udev->dev,
@@ -477,8 +479,7 @@ static int rtl2832_sdr_free_stream_bufs(struct rtl2832_sdr_state *s)
if (s->flags & USB_STATE_URB_BUF) {
while (s->buf_num) {
s->buf_num--;
- dev_dbg(&s->udev->dev, "%s: free buf=%d\n",
- __func__, s->buf_num);
+ dev_dbg(&s->udev->dev, "free buf=%d\n", s->buf_num);
usb_free_coherent(s->udev, s->buf_size,
s->buf_list[s->buf_num],
s->dma_addr[s->buf_num]);
@@ -494,24 +495,22 @@ static int rtl2832_sdr_alloc_stream_bufs(struct rtl2832_sdr_state *s)
s->buf_num = 0;
s->buf_size = BULK_BUFFER_SIZE;
- dev_dbg(&s->udev->dev,
- "%s: all in all I will use %u bytes for streaming\n",
- __func__, MAX_BULK_BUFS * BULK_BUFFER_SIZE);
+ dev_dbg(&s->udev->dev, "all in all I will use %u bytes for streaming\n",
+ MAX_BULK_BUFS * BULK_BUFFER_SIZE);
for (s->buf_num = 0; s->buf_num < MAX_BULK_BUFS; s->buf_num++) {
s->buf_list[s->buf_num] = usb_alloc_coherent(s->udev,
BULK_BUFFER_SIZE, GFP_ATOMIC,
&s->dma_addr[s->buf_num]);
if (!s->buf_list[s->buf_num]) {
- dev_dbg(&s->udev->dev, "%s: alloc buf=%d failed\n",
- __func__, s->buf_num);
+ dev_dbg(&s->udev->dev, "alloc buf=%d failed\n",
+ s->buf_num);
rtl2832_sdr_free_stream_bufs(s);
return -ENOMEM;
}
- dev_dbg(&s->udev->dev, "%s: alloc buf=%d %p (dma %llu)\n",
- __func__, s->buf_num,
- s->buf_list[s->buf_num],
+ dev_dbg(&s->udev->dev, "alloc buf=%d %p (dma %llu)\n",
+ s->buf_num, s->buf_list[s->buf_num],
(long long)s->dma_addr[s->buf_num]);
s->flags |= USB_STATE_URB_BUF;
}
@@ -527,8 +526,7 @@ static int rtl2832_sdr_free_urbs(struct rtl2832_sdr_state *s)
for (i = s->urbs_initialized - 1; i >= 0; i--) {
if (s->urb_list[i]) {
- dev_dbg(&s->udev->dev, "%s: free urb=%d\n",
- __func__, i);
+ dev_dbg(&s->udev->dev, "free urb=%d\n", i);
/* free the URBs */
usb_free_urb(s->urb_list[i]);
}
@@ -544,10 +542,10 @@ static int rtl2832_sdr_alloc_urbs(struct rtl2832_sdr_state *s)
/* allocate the URBs */
for (i = 0; i < MAX_BULK_BUFS; i++) {
- dev_dbg(&s->udev->dev, "%s: alloc urb=%d\n", __func__, i);
+ dev_dbg(&s->udev->dev, "alloc urb=%d\n", i);
s->urb_list[i] = usb_alloc_urb(0, GFP_ATOMIC);
if (!s->urb_list[i]) {
- dev_dbg(&s->udev->dev, "%s: failed\n", __func__);
+ dev_dbg(&s->udev->dev, "failed\n");
for (j = 0; j < i; j++)
usb_free_urb(s->urb_list[j]);
return -ENOMEM;
@@ -570,9 +568,9 @@ static int rtl2832_sdr_alloc_urbs(struct rtl2832_sdr_state *s)
/* Must be called with vb_queue_lock hold */
static void rtl2832_sdr_cleanup_queued_bufs(struct rtl2832_sdr_state *s)
{
- unsigned long flags = 0;
+ unsigned long flags;
- dev_dbg(&s->udev->dev, "%s:\n", __func__);
+ dev_dbg(&s->udev->dev, "\n");
spin_lock_irqsave(&s->queued_bufs_lock, flags);
while (!list_empty(&s->queued_bufs)) {
@@ -591,7 +589,7 @@ static void rtl2832_sdr_release_sec(struct dvb_frontend *fe)
{
struct rtl2832_sdr_state *s = fe->sec_priv;
- dev_dbg(&s->udev->dev, "%s:\n", __func__);
+ dev_dbg(&s->udev->dev, "\n");
mutex_lock(&s->vb_queue_lock);
mutex_lock(&s->v4l2_lock);
@@ -613,7 +611,7 @@ static int rtl2832_sdr_querycap(struct file *file, void *fh,
{
struct rtl2832_sdr_state *s = video_drvdata(file);
- dev_dbg(&s->udev->dev, "%s:\n", __func__);
+ dev_dbg(&s->udev->dev, "\n");
strlcpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
strlcpy(cap->card, s->vdev.name, sizeof(cap->card));
@@ -631,15 +629,15 @@ static int rtl2832_sdr_queue_setup(struct vb2_queue *vq,
{
struct rtl2832_sdr_state *s = vb2_get_drv_priv(vq);
- dev_dbg(&s->udev->dev, "%s: *nbuffers=%d\n", __func__, *nbuffers);
+ dev_dbg(&s->udev->dev, "nbuffers=%d\n", *nbuffers);
/* Need at least 8 buffers */
if (vq->num_buffers + *nbuffers < 8)
*nbuffers = 8 - vq->num_buffers;
*nplanes = 1;
sizes[0] = PAGE_ALIGN(s->buffersize);
- dev_dbg(&s->udev->dev, "%s: nbuffers=%d sizes[0]=%d\n",
- __func__, *nbuffers, sizes[0]);
+ dev_dbg(&s->udev->dev, "nbuffers=%d sizes[0]=%d\n",
+ *nbuffers, sizes[0]);
return 0;
}
@@ -659,7 +657,7 @@ static void rtl2832_sdr_buf_queue(struct vb2_buffer *vb)
struct rtl2832_sdr_state *s = vb2_get_drv_priv(vb->vb2_queue);
struct rtl2832_sdr_frame_buf *buf =
container_of(vb, struct rtl2832_sdr_frame_buf, vb);
- unsigned long flags = 0;
+ unsigned long flags;
/* Check the device has not disconnected between prep and queuing */
if (!s->udev) {
@@ -681,7 +679,7 @@ static int rtl2832_sdr_set_adc(struct rtl2832_sdr_state *s)
u64 u64tmp;
u32 u32tmp;
- dev_dbg(&s->udev->dev, "%s: f_adc=%u\n", __func__, s->f_adc);
+ dev_dbg(&s->udev->dev, "f_adc=%u\n", s->f_adc);
if (!test_bit(POWER_ON, &s->flags))
return 0;
@@ -715,8 +713,7 @@ static int rtl2832_sdr_set_adc(struct rtl2832_sdr_state *s)
u64tmp = -u64tmp;
u32tmp = u64tmp & 0x3fffff;
- dev_dbg(&s->udev->dev, "%s: f_if=%u if_ctl=%08x\n",
- __func__, f_if, u32tmp);
+ dev_dbg(&s->udev->dev, "f_if=%u if_ctl=%08x\n", f_if, u32tmp);
buf[0] = (u32tmp >> 16) & 0xff;
buf[1] = (u32tmp >> 8) & 0xff;
@@ -903,7 +900,7 @@ static void rtl2832_sdr_unset_adc(struct rtl2832_sdr_state *s)
{
int ret;
- dev_dbg(&s->udev->dev, "%s:\n", __func__);
+ dev_dbg(&s->udev->dev, "\n");
/* PID filter */
ret = rtl2832_sdr_wr_regs(s, 0x061, "\xe0", 1);
@@ -964,8 +961,8 @@ static int rtl2832_sdr_set_tuner_freq(struct rtl2832_sdr_state *s)
c->frequency = s->f_tuner;
c->delivery_system = SYS_DVBT;
- dev_dbg(&s->udev->dev, "%s: frequency=%u bandwidth=%d\n",
- __func__, c->frequency, c->bandwidth_hz);
+ dev_dbg(&s->udev->dev, "frequency=%u bandwidth=%d\n",
+ c->frequency, c->bandwidth_hz);
if (!test_bit(POWER_ON, &s->flags))
return 0;
@@ -980,7 +977,7 @@ static int rtl2832_sdr_set_tuner(struct rtl2832_sdr_state *s)
{
struct dvb_frontend *fe = s->fe;
- dev_dbg(&s->udev->dev, "%s:\n", __func__);
+ dev_dbg(&s->udev->dev, "\n");
if (fe->ops.tuner_ops.init)
fe->ops.tuner_ops.init(fe);
@@ -992,7 +989,7 @@ static void rtl2832_sdr_unset_tuner(struct rtl2832_sdr_state *s)
{
struct dvb_frontend *fe = s->fe;
- dev_dbg(&s->udev->dev, "%s:\n", __func__);
+ dev_dbg(&s->udev->dev, "\n");
if (fe->ops.tuner_ops.sleep)
fe->ops.tuner_ops.sleep(fe);
@@ -1005,7 +1002,7 @@ static int rtl2832_sdr_start_streaming(struct vb2_queue *vq, unsigned int count)
struct rtl2832_sdr_state *s = vb2_get_drv_priv(vq);
int ret;
- dev_dbg(&s->udev->dev, "%s:\n", __func__);
+ dev_dbg(&s->udev->dev, "\n");
if (!s->udev)
return -ENODEV;
@@ -1054,7 +1051,7 @@ static void rtl2832_sdr_stop_streaming(struct vb2_queue *vq)
{
struct rtl2832_sdr_state *s = vb2_get_drv_priv(vq);
- dev_dbg(&s->udev->dev, "%s:\n", __func__);
+ dev_dbg(&s->udev->dev, "\n");
mutex_lock(&s->v4l2_lock);
@@ -1088,8 +1085,7 @@ static int rtl2832_sdr_g_tuner(struct file *file, void *priv,
{
struct rtl2832_sdr_state *s = video_drvdata(file);
- dev_dbg(&s->udev->dev, "%s: index=%d type=%d\n",
- __func__, v->index, v->type);
+ dev_dbg(&s->udev->dev, "index=%d type=%d\n", v->index, v->type);
if (v->index == 0) {
strlcpy(v->name, "ADC: Realtek RTL2832", sizeof(v->name));
@@ -1115,7 +1111,7 @@ static int rtl2832_sdr_s_tuner(struct file *file, void *priv,
{
struct rtl2832_sdr_state *s = video_drvdata(file);
- dev_dbg(&s->udev->dev, "%s:\n", __func__);
+ dev_dbg(&s->udev->dev, "\n");
if (v->index > 1)
return -EINVAL;
@@ -1127,8 +1123,8 @@ static int rtl2832_sdr_enum_freq_bands(struct file *file, void *priv,
{
struct rtl2832_sdr_state *s = video_drvdata(file);
- dev_dbg(&s->udev->dev, "%s: tuner=%d type=%d index=%d\n",
- __func__, band->tuner, band->type, band->index);
+ dev_dbg(&s->udev->dev, "tuner=%d type=%d index=%d\n",
+ band->tuner, band->type, band->index);
if (band->tuner == 0) {
if (band->index >= ARRAY_SIZE(bands_adc))
@@ -1153,8 +1149,8 @@ static int rtl2832_sdr_g_frequency(struct file *file, void *priv,
struct rtl2832_sdr_state *s = video_drvdata(file);
int ret = 0;
- dev_dbg(&s->udev->dev, "%s: tuner=%d type=%d\n",
- __func__, f->tuner, f->type);
+ dev_dbg(&s->udev->dev, "tuner=%d type=%d\n",
+ f->tuner, f->type);
if (f->tuner == 0) {
f->frequency = s->f_adc;
@@ -1175,8 +1171,8 @@ static int rtl2832_sdr_s_frequency(struct file *file, void *priv,
struct rtl2832_sdr_state *s = video_drvdata(file);
int ret, band;
- dev_dbg(&s->udev->dev, "%s: tuner=%d type=%d frequency=%u\n",
- __func__, f->tuner, f->type, f->frequency);
+ dev_dbg(&s->udev->dev, "tuner=%d type=%d frequency=%u\n",
+ f->tuner, f->type, f->frequency);
/* ADC band midpoints */
#define BAND_ADC_0 ((bands_adc[0].rangehigh + bands_adc[1].rangelow) / 2)
@@ -1194,15 +1190,13 @@ static int rtl2832_sdr_s_frequency(struct file *file, void *priv,
bands_adc[band].rangelow,
bands_adc[band].rangehigh);
- dev_dbg(&s->udev->dev, "%s: ADC frequency=%u Hz\n",
- __func__, s->f_adc);
+ dev_dbg(&s->udev->dev, "ADC frequency=%u Hz\n", s->f_adc);
ret = rtl2832_sdr_set_adc(s);
} else if (f->tuner == 1) {
s->f_tuner = clamp_t(unsigned int, f->frequency,
bands_fm[0].rangelow,
bands_fm[0].rangehigh);
- dev_dbg(&s->udev->dev, "%s: RF frequency=%u Hz\n",
- __func__, f->frequency);
+ dev_dbg(&s->udev->dev, "RF frequency=%u Hz\n", f->frequency);
ret = rtl2832_sdr_set_tuner_freq(s);
} else {
@@ -1217,7 +1211,7 @@ static int rtl2832_sdr_enum_fmt_sdr_cap(struct file *file, void *priv,
{
struct rtl2832_sdr_state *s = video_drvdata(file);
- dev_dbg(&s->udev->dev, "%s:\n", __func__);
+ dev_dbg(&s->udev->dev, "\n");
if (f->index >= s->num_formats)
return -EINVAL;
@@ -1233,7 +1227,7 @@ static int rtl2832_sdr_g_fmt_sdr_cap(struct file *file, void *priv,
{
struct rtl2832_sdr_state *s = video_drvdata(file);
- dev_dbg(&s->udev->dev, "%s:\n", __func__);
+ dev_dbg(&s->udev->dev, "\n");
f->fmt.sdr.pixelformat = s->pixelformat;
f->fmt.sdr.buffersize = s->buffersize;
@@ -1250,7 +1244,7 @@ static int rtl2832_sdr_s_fmt_sdr_cap(struct file *file, void *priv,
struct vb2_queue *q = &s->vb_queue;
int i;
- dev_dbg(&s->udev->dev, "%s: pixelformat fourcc %4.4s\n", __func__,
+ dev_dbg(&s->udev->dev, "pixelformat fourcc %4.4s\n",
(char *)&f->fmt.sdr.pixelformat);
if (vb2_is_busy(q))
@@ -1280,7 +1274,7 @@ static int rtl2832_sdr_try_fmt_sdr_cap(struct file *file, void *priv,
struct rtl2832_sdr_state *s = video_drvdata(file);
int i;
- dev_dbg(&s->udev->dev, "%s: pixelformat fourcc %4.4s\n", __func__,
+ dev_dbg(&s->udev->dev, "pixelformat fourcc %4.4s\n",
(char *)&f->fmt.sdr.pixelformat);
memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
@@ -1354,8 +1348,8 @@ static int rtl2832_sdr_s_ctrl(struct v4l2_ctrl *ctrl)
int ret;
dev_dbg(&s->udev->dev,
- "%s: id=%d name=%s val=%d min=%lld max=%lld step=%lld\n",
- __func__, ctrl->id, ctrl->name, ctrl->val,
+ "id=%d name=%s val=%d min=%lld max=%lld step=%lld\n",
+ ctrl->id, ctrl->name, ctrl->val,
ctrl->minimum, ctrl->maximum, ctrl->step);
switch (ctrl->id) {
@@ -1432,7 +1426,7 @@ struct dvb_frontend *rtl2832_sdr_attach(struct dvb_frontend *fe,
s->pixelformat = formats[0].pixelformat;
s->buffersize = formats[0].buffersize;
s->num_formats = NUM_FORMATS;
- if (rtl2832_sdr_emulated_fmt == false)
+ if (!rtl2832_sdr_emulated_fmt)
s->num_formats -= 1;
mutex_init(&s->v4l2_lock);
diff --git a/drivers/media/dvb-frontends/si2165.c b/drivers/media/dvb-frontends/si2165.c
index 3a2d6c5aded6..98ddb49ad52b 100644
--- a/drivers/media/dvb-frontends/si2165.c
+++ b/drivers/media/dvb-frontends/si2165.c
@@ -1,5 +1,5 @@
/*
- Driver for Silicon Labs SI2165 DVB-C/-T Demodulator
+ Driver for Silicon Labs Si2161 DVB-T and Si2165 DVB-C/-T Demodulator
Copyright (C) 2013-2014 Matthias Schwarzott <zzam@gentoo.org>
@@ -44,9 +44,7 @@ struct si2165_state {
struct si2165_config config;
- /* chip revision */
- u8 revcode;
- /* chip type */
+ u8 chip_revcode;
u8 chip_type;
/* calculated by xtal and div settings */
@@ -312,7 +310,7 @@ static u32 si2165_get_fe_clk(struct si2165_state *state)
return state->adc_clk;
}
-static bool si2165_wait_init_done(struct si2165_state *state)
+static int si2165_wait_init_done(struct si2165_state *state)
{
int ret = -EINVAL;
u8 val = 0;
@@ -407,7 +405,7 @@ static int si2165_upload_firmware(struct si2165_state *state)
int ret;
const struct firmware *fw = NULL;
- u8 *fw_file = SI2165_FIRMWARE;
+ u8 *fw_file;
const u8 *data;
u32 len;
u32 offset;
@@ -415,10 +413,20 @@ static int si2165_upload_firmware(struct si2165_state *state)
u8 block_count;
u16 crc_expected;
+ switch (state->chip_revcode) {
+ case 0x03: /* revision D */
+ fw_file = SI2165_FIRMWARE_REV_D;
+ break;
+ default:
+ dev_info(&state->i2c->dev, "%s: no firmware file for revision=%d\n",
+ KBUILD_MODNAME, state->chip_revcode);
+ return 0;
+ }
+
/* request the firmware, this will block and timeout */
ret = request_firmware(&fw, fw_file, state->i2c->dev.parent);
if (ret) {
- dev_warn(&state->i2c->dev, "%s: firmare file '%s' not found\n",
+ dev_warn(&state->i2c->dev, "%s: firmware file '%s' not found\n",
KBUILD_MODNAME, fw_file);
goto error;
}
@@ -908,7 +916,7 @@ static void si2165_release(struct dvb_frontend *fe)
static struct dvb_frontend_ops si2165_ops = {
.info = {
- .name = "Silicon Labs Si2165",
+ .name = "Silicon Labs ",
.caps = FE_CAN_FEC_1_2 |
FE_CAN_FEC_2_3 |
FE_CAN_FEC_3_4 |
@@ -948,6 +956,8 @@ struct dvb_frontend *si2165_attach(const struct si2165_config *config,
int n;
int io_ret;
u8 val;
+ char rev_char;
+ const char *chip_name;
if (config == NULL || i2c == NULL)
goto error;
@@ -984,7 +994,7 @@ struct dvb_frontend *si2165_attach(const struct si2165_config *config,
if (val != state->config.chip_mode)
goto error;
- io_ret = si2165_readreg8(state, 0x0023 , &state->revcode);
+ io_ret = si2165_readreg8(state, 0x0023, &state->chip_revcode);
if (io_ret < 0)
goto error;
@@ -997,22 +1007,35 @@ struct dvb_frontend *si2165_attach(const struct si2165_config *config,
if (io_ret < 0)
goto error;
- dev_info(&state->i2c->dev, "%s: hardware revision 0x%02x, chip type 0x%02x\n",
- KBUILD_MODNAME, state->revcode, state->chip_type);
+ if (state->chip_revcode < 26)
+ rev_char = 'A' + state->chip_revcode;
+ else
+ rev_char = '?';
- /* It is a guess that register 0x0118 (chip type?) can be used to
- * differ between si2161, si2163 and si2165
- * Only si2165 has been tested.
- */
- if (state->revcode == 0x03 && state->chip_type == 0x07) {
+ switch (state->chip_type) {
+ case 0x06:
+ chip_name = "Si2161";
+ state->has_dvbt = true;
+ break;
+ case 0x07:
+ chip_name = "Si2165";
state->has_dvbt = true;
state->has_dvbc = true;
- } else {
- dev_err(&state->i2c->dev, "%s: Unsupported chip.\n",
- KBUILD_MODNAME);
+ break;
+ default:
+ dev_err(&state->i2c->dev, "%s: Unsupported Silicon Labs chip (type %d, rev %d)\n",
+ KBUILD_MODNAME, state->chip_type, state->chip_revcode);
goto error;
}
+ dev_info(&state->i2c->dev,
+ "%s: Detected Silicon Labs %s-%c (type %d, rev %d)\n",
+ KBUILD_MODNAME, chip_name, rev_char, state->chip_type,
+ state->chip_revcode);
+
+ strlcat(state->frontend.ops.info.name, chip_name,
+ sizeof(state->frontend.ops.info.name));
+
n = 0;
if (state->has_dvbt) {
state->frontend.ops.delsys[n++] = SYS_DVBT;
@@ -1037,4 +1060,4 @@ MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
MODULE_DESCRIPTION("Silicon Labs Si2165 DVB-C/-T Demodulator driver");
MODULE_AUTHOR("Matthias Schwarzott <zzam@gentoo.org>");
MODULE_LICENSE("GPL");
-MODULE_FIRMWARE(SI2165_FIRMWARE);
+MODULE_FIRMWARE(SI2165_FIRMWARE_REV_D);
diff --git a/drivers/media/dvb-frontends/si2165_priv.h b/drivers/media/dvb-frontends/si2165_priv.h
index d4cc93fe1096..2b70cf12cd79 100644
--- a/drivers/media/dvb-frontends/si2165_priv.h
+++ b/drivers/media/dvb-frontends/si2165_priv.h
@@ -18,6 +18,6 @@
#ifndef _DVB_SI2165_PRIV
#define _DVB_SI2165_PRIV
-#define SI2165_FIRMWARE "dvb-demod-si2165.fw"
+#define SI2165_FIRMWARE_REV_D "dvb-demod-si2165.fw"
#endif /* _DVB_SI2165_PRIV */
diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c
index 8f81d979de30..1cd93be281ed 100644
--- a/drivers/media/dvb-frontends/si2168.c
+++ b/drivers/media/dvb-frontends/si2168.c
@@ -55,8 +55,7 @@ static int si2168_cmd_execute(struct si2168 *s, struct si2168_cmd *cmd)
break;
}
- dev_dbg(&s->client->dev, "%s: cmd execution took %d ms\n",
- __func__,
+ dev_dbg(&s->client->dev, "cmd execution took %d ms\n",
jiffies_to_msecs(jiffies) -
(jiffies_to_msecs(timeout) - TIMEOUT));
@@ -75,7 +74,7 @@ err_mutex_unlock:
return 0;
err:
- dev_dbg(&s->client->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&s->client->dev, "failed=%d\n", ret);
return ret;
}
@@ -150,12 +149,12 @@ static int si2168_read_status(struct dvb_frontend *fe, fe_status_t *status)
c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
}
- dev_dbg(&s->client->dev, "%s: status=%02x args=%*ph\n",
- __func__, *status, cmd.rlen, cmd.args);
+ dev_dbg(&s->client->dev, "status=%02x args=%*ph\n",
+ *status, cmd.rlen, cmd.args);
return 0;
err:
- dev_dbg(&s->client->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&s->client->dev, "failed=%d\n", ret);
return ret;
}
@@ -168,10 +167,10 @@ static int si2168_set_frontend(struct dvb_frontend *fe)
u8 bandwidth, delivery_system;
dev_dbg(&s->client->dev,
- "%s: delivery_system=%u modulation=%u frequency=%u bandwidth_hz=%u symbol_rate=%u inversion=%u\n",
- __func__, c->delivery_system, c->modulation,
+ "delivery_system=%u modulation=%u frequency=%u bandwidth_hz=%u symbol_rate=%u inversion=%u, stream_id=%d\n",
+ c->delivery_system, c->modulation,
c->frequency, c->bandwidth_hz, c->symbol_rate,
- c->inversion);
+ c->inversion, c->stream_id);
if (!s->active) {
ret = -EAGAIN;
@@ -235,6 +234,18 @@ static int si2168_set_frontend(struct dvb_frontend *fe)
if (ret)
goto err;
+ if (c->delivery_system == SYS_DVBT2) {
+ /* select PLP */
+ cmd.args[0] = 0x52;
+ cmd.args[1] = c->stream_id & 0xff;
+ cmd.args[2] = c->stream_id == NO_STREAM_ID_FILTER ? 0 : 1;
+ cmd.wlen = 3;
+ cmd.rlen = 1;
+ ret = si2168_cmd_execute(s, &cmd);
+ if (ret)
+ goto err;
+ }
+
memcpy(cmd.args, "\x51\x03", 2);
cmd.wlen = 2;
cmd.rlen = 12;
@@ -297,13 +308,6 @@ static int si2168_set_frontend(struct dvb_frontend *fe)
if (ret)
goto err;
- memcpy(cmd.args, "\x14\x00\x01\x10\x16\x00", 6);
- cmd.wlen = 6;
- cmd.rlen = 4;
- ret = si2168_cmd_execute(s, &cmd);
- if (ret)
- goto err;
-
memcpy(cmd.args, "\x14\x00\x09\x10\xe3\x18", 6);
cmd.wlen = 6;
cmd.rlen = 4;
@@ -343,7 +347,7 @@ static int si2168_set_frontend(struct dvb_frontend *fe)
return 0;
err:
- dev_dbg(&s->client->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&s->client->dev, "failed=%d\n", ret);
return ret;
}
@@ -357,8 +361,9 @@ static int si2168_init(struct dvb_frontend *fe)
struct si2168_cmd cmd;
unsigned int chip_id;
- dev_dbg(&s->client->dev, "%s:\n", __func__);
+ dev_dbg(&s->client->dev, "\n");
+ /* initialize */
memcpy(cmd.args, "\xc0\x12\x00\x0c\x00\x0d\x16\x00\x00\x00\x00\x00\x00", 13);
cmd.wlen = 13;
cmd.rlen = 0;
@@ -366,6 +371,26 @@ static int si2168_init(struct dvb_frontend *fe)
if (ret)
goto err;
+ if (s->fw_loaded) {
+ /* resume */
+ memcpy(cmd.args, "\xc0\x06\x08\x0f\x00\x20\x21\x01", 8);
+ cmd.wlen = 8;
+ cmd.rlen = 1;
+ ret = si2168_cmd_execute(s, &cmd);
+ if (ret)
+ goto err;
+
+ memcpy(cmd.args, "\x85", 1);
+ cmd.wlen = 1;
+ cmd.rlen = 1;
+ ret = si2168_cmd_execute(s, &cmd);
+ if (ret)
+ goto err;
+
+ goto warm;
+ }
+
+ /* power up */
memcpy(cmd.args, "\xc0\x06\x01\x0f\x00\x20\x20\x01", 8);
cmd.wlen = 8;
cmd.rlen = 1;
@@ -400,16 +425,16 @@ static int si2168_init(struct dvb_frontend *fe)
break;
default:
dev_err(&s->client->dev,
- "%s: unkown chip version Si21%d-%c%c%c\n",
- KBUILD_MODNAME, cmd.args[2], cmd.args[1],
+ "unknown chip version Si21%d-%c%c%c\n",
+ cmd.args[2], cmd.args[1],
cmd.args[3], cmd.args[4]);
ret = -EINVAL;
goto err;
}
/* cold state - try to download firmware */
- dev_info(&s->client->dev, "%s: found a '%s' in cold state\n",
- KBUILD_MODNAME, si2168_ops.info.name);
+ dev_info(&s->client->dev, "found a '%s' in cold state\n",
+ si2168_ops.info.name);
/* request the firmware, this will block and timeout */
ret = request_firmware(&fw, fw_file, &s->client->dev);
@@ -422,18 +447,18 @@ static int si2168_init(struct dvb_frontend *fe)
if (ret == 0) {
dev_notice(&s->client->dev,
- "%s: please install firmware file '%s'\n",
- KBUILD_MODNAME, SI2168_B40_FIRMWARE);
+ "please install firmware file '%s'\n",
+ SI2168_B40_FIRMWARE);
} else {
dev_err(&s->client->dev,
- "%s: firmware file '%s' not found\n",
- KBUILD_MODNAME, fw_file);
+ "firmware file '%s' not found\n",
+ fw_file);
goto err;
}
}
- dev_info(&s->client->dev, "%s: downloading firmware from file '%s'\n",
- KBUILD_MODNAME, fw_file);
+ dev_info(&s->client->dev, "downloading firmware from file '%s'\n",
+ fw_file);
for (remaining = fw->size; remaining > 0; remaining -= i2c_wr_max) {
len = remaining;
@@ -446,8 +471,8 @@ static int si2168_init(struct dvb_frontend *fe)
ret = si2168_cmd_execute(s, &cmd);
if (ret) {
dev_err(&s->client->dev,
- "%s: firmware download failed=%d\n",
- KBUILD_MODNAME, ret);
+ "firmware download failed=%d\n",
+ ret);
goto err;
}
}
@@ -462,8 +487,20 @@ static int si2168_init(struct dvb_frontend *fe)
if (ret)
goto err;
- dev_info(&s->client->dev, "%s: found a '%s' in warm state\n",
- KBUILD_MODNAME, si2168_ops.info.name);
+ /* set ts mode */
+ memcpy(cmd.args, "\x14\x00\x01\x10\x10\x00", 6);
+ cmd.args[4] |= s->ts_mode;
+ cmd.wlen = 6;
+ cmd.rlen = 4;
+ ret = si2168_cmd_execute(s, &cmd);
+ if (ret)
+ goto err;
+
+ s->fw_loaded = true;
+
+warm:
+ dev_info(&s->client->dev, "found a '%s' in warm state\n",
+ si2168_ops.info.name);
s->active = true;
@@ -472,7 +509,7 @@ err:
if (fw)
release_firmware(fw);
- dev_dbg(&s->client->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&s->client->dev, "failed=%d\n", ret);
return ret;
}
@@ -482,7 +519,7 @@ static int si2168_sleep(struct dvb_frontend *fe)
int ret;
struct si2168_cmd cmd;
- dev_dbg(&s->client->dev, "%s:\n", __func__);
+ dev_dbg(&s->client->dev, "\n");
s->active = false;
@@ -495,7 +532,7 @@ static int si2168_sleep(struct dvb_frontend *fe)
return 0;
err:
- dev_dbg(&s->client->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&s->client->dev, "failed=%d\n", ret);
return ret;
}
@@ -528,8 +565,7 @@ static int si2168_select(struct i2c_adapter *adap, void *mux_priv, u32 chan)
/* open tuner I2C gate */
ret = __i2c_transfer(s->client->adapter, &gate_open_msg, 1);
if (ret != 1) {
- dev_warn(&s->client->dev, "%s: i2c write failed=%d\n",
- KBUILD_MODNAME, ret);
+ dev_warn(&s->client->dev, "i2c write failed=%d\n", ret);
if (ret >= 0)
ret = -EREMOTEIO;
} else {
@@ -553,8 +589,7 @@ static int si2168_deselect(struct i2c_adapter *adap, void *mux_priv, u32 chan)
/* close tuner I2C gate */
ret = __i2c_transfer(s->client->adapter, &gate_close_msg, 1);
if (ret != 1) {
- dev_warn(&s->client->dev, "%s: i2c write failed=%d\n",
- KBUILD_MODNAME, ret);
+ dev_warn(&s->client->dev, "i2c write failed=%d\n", ret);
if (ret >= 0)
ret = -EREMOTEIO;
} else {
@@ -587,7 +622,8 @@ static const struct dvb_frontend_ops si2168_ops = {
FE_CAN_GUARD_INTERVAL_AUTO |
FE_CAN_HIERARCHY_AUTO |
FE_CAN_MUTE_TS |
- FE_CAN_2G_MODULATION
+ FE_CAN_2G_MODULATION |
+ FE_CAN_MULTISTREAM
},
.get_tune_settings = si2168_get_tune_settings,
@@ -607,12 +643,12 @@ static int si2168_probe(struct i2c_client *client,
struct si2168 *s;
int ret;
- dev_dbg(&client->dev, "%s:\n", __func__);
+ dev_dbg(&client->dev, "\n");
s = kzalloc(sizeof(struct si2168), GFP_KERNEL);
if (!s) {
ret = -ENOMEM;
- dev_err(&client->dev, "%s: kzalloc() failed\n", KBUILD_MODNAME);
+ dev_err(&client->dev, "kzalloc() failed\n");
goto err;
}
@@ -633,16 +669,17 @@ static int si2168_probe(struct i2c_client *client,
*config->i2c_adapter = s->adapter;
*config->fe = &s->fe;
+ s->ts_mode = config->ts_mode;
+ s->fw_loaded = false;
i2c_set_clientdata(client, s);
dev_info(&s->client->dev,
- "%s: Silicon Labs Si2168 successfully attached\n",
- KBUILD_MODNAME);
+ "Silicon Labs Si2168 successfully attached\n");
return 0;
err:
kfree(s);
- dev_dbg(&client->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
@@ -650,7 +687,7 @@ static int si2168_remove(struct i2c_client *client)
{
struct si2168 *s = i2c_get_clientdata(client);
- dev_dbg(&client->dev, "%s:\n", __func__);
+ dev_dbg(&client->dev, "\n");
i2c_del_mux_adapter(s->adapter);
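With FE_CAN_MULTISTREAM now advertised and PLP selection added to set_frontend(), userspace can pick a DVB-T2 PLP through the standard DTV_STREAM_ID property. A minimal user-space illustration (device path, frequency, bandwidth and PLP id are placeholders):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/dvb/frontend.h>

static int tune_dvbt2_plp(const char *fe_path, __u32 freq_hz, __u32 plp_id)
{
	struct dtv_property props[] = {
		{ .cmd = DTV_DELIVERY_SYSTEM, .u.data = SYS_DVBT2 },
		{ .cmd = DTV_FREQUENCY,       .u.data = freq_hz },
		{ .cmd = DTV_BANDWIDTH_HZ,    .u.data = 8000000 },
		/* pass NO_STREAM_ID_FILTER instead to disable PLP filtering */
		{ .cmd = DTV_STREAM_ID,       .u.data = plp_id },
		{ .cmd = DTV_TUNE },
	};
	struct dtv_properties cmdseq = {
		.num = sizeof(props) / sizeof(props[0]),
		.props = props,
	};
	int fd = open(fe_path, O_RDWR);

	if (fd < 0)
		return -1;

	if (ioctl(fd, FE_SET_PROPERTY, &cmdseq) < 0) {
		close(fd);
		return -1;
	}

	return fd;	/* caller keeps the frontend open while tuned */
}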
diff --git a/drivers/media/dvb-frontends/si2168.h b/drivers/media/dvb-frontends/si2168.h
index 3c5b5ab01796..e086d6719451 100644
--- a/drivers/media/dvb-frontends/si2168.h
+++ b/drivers/media/dvb-frontends/si2168.h
@@ -34,6 +34,12 @@ struct si2168_config {
* returned by driver
*/
struct i2c_adapter **i2c_adapter;
+
+ /* TS mode */
+ u8 ts_mode;
};
+#define SI2168_TS_PARALLEL 0x06
+#define SI2168_TS_SERIAL 0x03
+
#endif
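For illustration only: one plausible way a bridge driver could request parallel TS output through the new ts_mode field. The attach mechanics (I2C address, bus, surrounding bridge code) are hypothetical and board specific:

#include <linux/i2c.h>
#include "si2168.h"

static struct i2c_client *my_bridge_attach_si2168(struct i2c_adapter *i2c,
						  struct dvb_frontend **fe,
						  struct i2c_adapter **tuner_i2c)
{
	struct si2168_config si2168_config = {
		.i2c_adapter = tuner_i2c,	/* filled in by si2168 at probe */
		.fe          = fe,		/* filled in by si2168 at probe */
		.ts_mode     = SI2168_TS_PARALLEL,
	};
	struct i2c_board_info info = {
		.type          = "si2168",
		.addr          = 0x64,		/* board specific */
		.platform_data = &si2168_config,
	};

	return i2c_new_device(i2c, &info);
}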
diff --git a/drivers/media/dvb-frontends/si2168_priv.h b/drivers/media/dvb-frontends/si2168_priv.h
index ebbf502ec313..e13983ed4be1 100644
--- a/drivers/media/dvb-frontends/si2168_priv.h
+++ b/drivers/media/dvb-frontends/si2168_priv.h
@@ -36,6 +36,8 @@ struct si2168 {
fe_delivery_system_t delivery_system;
fe_status_t fe_status;
bool active;
+ bool fw_loaded;
+ u8 ts_mode;
};
/* firmware command struct */
diff --git a/drivers/media/dvb-frontends/si21xx.c b/drivers/media/dvb-frontends/si21xx.c
index 73b47cc6a13b..16850e2bf02f 100644
--- a/drivers/media/dvb-frontends/si21xx.c
+++ b/drivers/media/dvb-frontends/si21xx.c
@@ -236,6 +236,9 @@ static int si21_writeregs(struct si21xx_state *state, u8 reg1,
.len = len + 1
};
+ if (len > sizeof(buf) - 1)
+ return -EINVAL;
+
msg.buf[0] = reg1;
memcpy(msg.buf + 1, data, len);
diff --git a/drivers/media/dvb-frontends/sp2.c b/drivers/media/dvb-frontends/sp2.c
new file mode 100644
index 000000000000..9b684d5c8f91
--- /dev/null
+++ b/drivers/media/dvb-frontends/sp2.c
@@ -0,0 +1,441 @@
+/*
+ * CIMaX SP2/SP2HF (Atmel T90FJR) CI driver
+ *
+ * Copyright (C) 2014 Olli Salonen <olli.salonen@iki.fi>
+ *
+ * Heavily based on CIMax2(R) SP2 driver in conjunction with NetUp Dual
+ * DVB-S2 CI card (cimax2) with following copyrights:
+ *
+ * Copyright (C) 2009 NetUP Inc.
+ * Copyright (C) 2009 Igor M. Liplianin <liplianin@netup.ru>
+ * Copyright (C) 2009 Abylay Ospan <aospan@netup.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sp2_priv.h"
+
+static int sp2_read_i2c(struct sp2 *s, u8 reg, u8 *buf, int len)
+{
+ int ret;
+ struct i2c_client *client = s->client;
+ struct i2c_adapter *adap = client->adapter;
+ struct i2c_msg msg[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .buf = &reg,
+ .len = 1
+ }, {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .buf = buf,
+ .len = len
+ }
+ };
+
+ ret = i2c_transfer(adap, msg, 2);
+
+ if (ret != 2) {
+ dev_err(&client->dev, "i2c read error, reg = 0x%02x, status = %d\n",
+ reg, ret);
+ if (ret < 0)
+ return ret;
+ else
+ return -EIO;
+ }
+
+ dev_dbg(&s->client->dev, "addr=0x%04x, reg = 0x%02x, data = %02x\n",
+ client->addr, reg, buf[0]);
+
+ return 0;
+}
+
+static int sp2_write_i2c(struct sp2 *s, u8 reg, u8 *buf, int len)
+{
+ int ret;
+ u8 buffer[35];
+ struct i2c_client *client = s->client;
+ struct i2c_adapter *adap = client->adapter;
+ struct i2c_msg msg = {
+ .addr = client->addr,
+ .flags = 0,
+ .buf = &buffer[0],
+ .len = len + 1
+ };
+
+ if ((len + 1) > sizeof(buffer)) {
+ dev_err(&client->dev, "i2c wr reg=%02x: len=%d is too big!\n",
+ reg, len);
+ return -EINVAL;
+ }
+
+ buffer[0] = reg;
+ memcpy(&buffer[1], buf, len);
+
+ ret = i2c_transfer(adap, &msg, 1);
+
+ if (ret != 1) {
+ dev_err(&client->dev, "i2c write error, reg = 0x%02x, status = %d\n",
+ reg, ret);
+ if (ret < 0)
+ return ret;
+ else
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int sp2_ci_op_cam(struct dvb_ca_en50221 *en50221, int slot, u8 acs,
+ u8 read, int addr, u8 data)
+{
+ struct sp2 *s = en50221->data;
+ u8 store;
+ int mem, ret;
+ int (*ci_op_cam)(void*, u8, int, u8, int*) = s->ci_control;
+
+ dev_dbg(&s->client->dev, "slot=%d, acs=0x%02x, addr=0x%04x, data = 0x%02x",
+ slot, acs, addr, data);
+
+ if (slot != 0)
+ return -EINVAL;
+
+ /*
+ * change module access type between IO space and attribute memory
+ * when needed
+ */
+ if (s->module_access_type != acs) {
+ ret = sp2_read_i2c(s, 0x00, &store, 1);
+
+ if (ret)
+ return ret;
+
+ store &= ~(SP2_MOD_CTL_ACS1 | SP2_MOD_CTL_ACS0);
+ store |= acs;
+
+ ret = sp2_write_i2c(s, 0x00, &store, 1);
+ if (ret)
+ return ret;
+ }
+
+ s->module_access_type = acs;
+
+ /* implementation of ci_op_cam is device specific */
+ if (ci_op_cam) {
+ ret = ci_op_cam(s->priv, read, addr, data, &mem);
+ } else {
+ dev_err(&s->client->dev, "callback not defined");
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ if (read) {
+ dev_dbg(&s->client->dev, "cam read, addr=0x%04x, data = 0x%04x",
+ addr, mem);
+ return mem;
+ } else {
+ return 0;
+ }
+}
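/*
 * Illustration only, not part of this driver: s->ci_control is supplied by
 * the bridge driver; any implementation just has to follow the
 * (priv, read, addr, data, *mem) contract used above, e.g.:
 *
 *	static int my_bridge_ci_op_cam(void *priv, u8 read, int addr,
 *				       u8 data, int *mem)
 *	{
 *		struct my_bridge *dev = priv;
 *
 *		if (read)
 *			*mem = my_bridge_cam_read(dev, addr);
 *		else
 *			my_bridge_cam_write(dev, addr, data);
 *
 *		return 0;
 *	}
 *
 * where my_bridge, my_bridge_cam_read() and my_bridge_cam_write() are the
 * bridge's own (hypothetical) types and CAM access primitives.
 */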
+
+int sp2_ci_read_attribute_mem(struct dvb_ca_en50221 *en50221,
+ int slot, int addr)
+{
+ return sp2_ci_op_cam(en50221, slot, SP2_CI_ATTR_ACS,
+ SP2_CI_RD, addr, 0);
+}
+
+int sp2_ci_write_attribute_mem(struct dvb_ca_en50221 *en50221,
+ int slot, int addr, u8 data)
+{
+ return sp2_ci_op_cam(en50221, slot, SP2_CI_ATTR_ACS,
+ SP2_CI_WR, addr, data);
+}
+
+int sp2_ci_read_cam_control(struct dvb_ca_en50221 *en50221,
+ int slot, u8 addr)
+{
+ return sp2_ci_op_cam(en50221, slot, SP2_CI_IO_ACS,
+ SP2_CI_RD, addr, 0);
+}
+
+int sp2_ci_write_cam_control(struct dvb_ca_en50221 *en50221,
+ int slot, u8 addr, u8 data)
+{
+ return sp2_ci_op_cam(en50221, slot, SP2_CI_IO_ACS,
+ SP2_CI_WR, addr, data);
+}
+
+int sp2_ci_slot_reset(struct dvb_ca_en50221 *en50221, int slot)
+{
+ struct sp2 *s = en50221->data;
+ u8 buf;
+ int ret;
+
+ dev_dbg(&s->client->dev, "slot: %d\n", slot);
+
+ if (slot != 0)
+ return -EINVAL;
+
+ /* RST on */
+ buf = SP2_MOD_CTL_RST;
+ ret = sp2_write_i2c(s, 0x00, &buf, 1);
+
+ if (ret)
+ return ret;
+
+ usleep_range(500, 600);
+
+ /* RST off */
+ buf = 0x00;
+ ret = sp2_write_i2c(s, 0x00, &buf, 1);
+
+ if (ret)
+ return ret;
+
+ msleep(1000);
+
+ return 0;
+}
+
+int sp2_ci_slot_shutdown(struct dvb_ca_en50221 *en50221, int slot)
+{
+ struct sp2 *s = en50221->data;
+
+ dev_dbg(&s->client->dev, "slot:%d\n", slot);
+
+ /* not implemented */
+ return 0;
+}
+
+int sp2_ci_slot_ts_enable(struct dvb_ca_en50221 *en50221, int slot)
+{
+ struct sp2 *s = en50221->data;
+ u8 buf;
+ int ret;
+
+ dev_dbg(&s->client->dev, "slot:%d\n", slot);
+
+ if (slot != 0)
+ return -EINVAL;
+
+ ret = sp2_read_i2c(s, 0x00, &buf, 1);
+ if (ret)
+ return ret;
+
+ /* disable bypass and enable TS */
+ buf |= (SP2_MOD_CTL_TSOEN | SP2_MOD_CTL_TSIEN);
+ return sp2_write_i2c(s, 0, &buf, 1);
+}
+
+int sp2_ci_poll_slot_status(struct dvb_ca_en50221 *en50221,
+ int slot, int open)
+{
+ struct sp2 *s = en50221->data;
+ u8 buf[2];
+ int ret;
+
+ dev_dbg(&s->client->dev, "slot:%d open:%d\n", slot, open);
+
+ /*
+ * CAM module INSERT/REMOVE processing. Slow operation because of i2c
+ * transfers. Throttle read to one per sec.
+ */
+ if (time_after(jiffies, s->next_status_checked_time)) {
+ ret = sp2_read_i2c(s, 0x00, buf, 1);
+ s->next_status_checked_time = jiffies + msecs_to_jiffies(1000);
+
+ if (ret)
+ return 0;
+
+ if (buf[0] & SP2_MOD_CTL_DET)
+ s->status = DVB_CA_EN50221_POLL_CAM_PRESENT |
+ DVB_CA_EN50221_POLL_CAM_READY;
+ else
+ s->status = 0;
+ }
+
+ return s->status;
+}
+
+int sp2_init(struct sp2 *s)
+{
+ int ret = 0;
+ u8 buf;
+ u8 cimax_init[34] = {
+ 0x00, /* module A control*/
+ 0x00, /* auto select mask high A */
+ 0x00, /* auto select mask low A */
+ 0x00, /* auto select pattern high A */
+ 0x00, /* auto select pattern low A */
+ 0x44, /* memory access time A, 600 ns */
+ 0x00, /* invert input A */
+ 0x00, /* RFU */
+ 0x00, /* RFU */
+ 0x00, /* module B control*/
+ 0x00, /* auto select mask high B */
+ 0x00, /* auto select mask low B */
+ 0x00, /* auto select pattern high B */
+ 0x00, /* auto select pattern low B */
+ 0x44, /* memory access time B, 600 ns */
+ 0x00, /* invert input B */
+ 0x00, /* RFU */
+ 0x00, /* RFU */
+ 0x00, /* auto select mask high Ext */
+ 0x00, /* auto select mask low Ext */
+ 0x00, /* auto select pattern high Ext */
+ 0x00, /* auto select pattern low Ext */
+ 0x00, /* RFU */
+ 0x02, /* destination - module A */
+ 0x01, /* power control reg, VCC power on */
+ 0x00, /* RFU */
+ 0x00, /* int status read only */
+ 0x00, /* Interrupt Mask Register */
+ 0x05, /* EXTINT=active-high, INT=push-pull */
+ 0x00, /* USCG1 */
+ 0x04, /* ack active low */
+ 0x00, /* LOCK = 0 */
+ 0x22, /* unknown */
+ 0x00, /* synchronization? */
+ };
+
+ dev_dbg(&s->client->dev, "\n");
+
+ s->ca.owner = THIS_MODULE;
+ s->ca.read_attribute_mem = sp2_ci_read_attribute_mem;
+ s->ca.write_attribute_mem = sp2_ci_write_attribute_mem;
+ s->ca.read_cam_control = sp2_ci_read_cam_control;
+ s->ca.write_cam_control = sp2_ci_write_cam_control;
+ s->ca.slot_reset = sp2_ci_slot_reset;
+ s->ca.slot_shutdown = sp2_ci_slot_shutdown;
+ s->ca.slot_ts_enable = sp2_ci_slot_ts_enable;
+ s->ca.poll_slot_status = sp2_ci_poll_slot_status;
+ s->ca.data = s;
+ s->module_access_type = 0;
+
+ /* initialize all regs */
+ ret = sp2_write_i2c(s, 0x00, &cimax_init[0], 34);
+ if (ret)
+ goto err;
+
+ /* lock registers */
+ buf = 1;
+ ret = sp2_write_i2c(s, 0x1f, &buf, 1);
+ if (ret)
+ goto err;
+
+ /* power on slots */
+ ret = sp2_write_i2c(s, 0x18, &buf, 1);
+ if (ret)
+ goto err;
+
+ ret = dvb_ca_en50221_init(s->dvb_adap, &s->ca, 0, 1);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ dev_dbg(&s->client->dev, "init failed=%d\n", ret);
+ return ret;
+}
+
+int sp2_exit(struct i2c_client *client)
+{
+ struct sp2 *s;
+
+ if (client == NULL)
+ return 0;
+
+ dev_dbg(&client->dev, "\n");
+
+ s = i2c_get_clientdata(client);
+ if (s == NULL)
+ return 0;
+
+ if (s->ca.data == NULL)
+ return 0;
+
+ dvb_ca_en50221_release(&s->ca);
+
+ return 0;
+}
+
+static int sp2_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct sp2_config *cfg = client->dev.platform_data;
+ struct sp2 *s;
+ int ret;
+
+ dev_dbg(&client->dev, "\n");
+
+ s = kzalloc(sizeof(struct sp2), GFP_KERNEL);
+ if (!s) {
+ ret = -ENOMEM;
+ dev_err(&client->dev, "kzalloc() failed\n");
+ goto err;
+ }
+
+ s->client = client;
+ s->dvb_adap = cfg->dvb_adap;
+ s->priv = cfg->priv;
+ s->ci_control = cfg->ci_control;
+
+ i2c_set_clientdata(client, s);
+
+ ret = sp2_init(s);
+ if (ret)
+ goto err;
+
+ dev_info(&s->client->dev, "CIMaX SP2 successfully attached\n");
+ return 0;
+err:
+ dev_dbg(&client->dev, "init failed=%d\n", ret);
+ kfree(s);
+
+ return ret;
+}
+
+static int sp2_remove(struct i2c_client *client)
+{
+ struct sp2 *s = i2c_get_clientdata(client);
+
+ dev_dbg(&client->dev, "\n");
+
+ sp2_exit(client);
+ if (s != NULL)
+ kfree(s);
+
+ return 0;
+}
+
+static const struct i2c_device_id sp2_id[] = {
+ {"sp2", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, sp2_id);
+
+static struct i2c_driver sp2_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "sp2",
+ },
+ .probe = sp2_probe,
+ .remove = sp2_remove,
+ .id_table = sp2_id,
+};
+
+module_i2c_driver(sp2_driver);
+
+MODULE_DESCRIPTION("CIMaX SP2/HF CI driver");
+MODULE_AUTHOR("Olli Salonen <olli.salonen@iki.fi>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/sp2.h b/drivers/media/dvb-frontends/sp2.h
new file mode 100644
index 000000000000..6cceea022d49
--- /dev/null
+++ b/drivers/media/dvb-frontends/sp2.h
@@ -0,0 +1,53 @@
+/*
+ * CIMaX SP2/HF CI driver
+ *
+ * Copyright (C) 2014 Olli Salonen <olli.salonen@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef SP2_H
+#define SP2_H
+
+#include <linux/kconfig.h>
+#include "dvb_ca_en50221.h"
+
+/*
+ * I2C address
+ * 0x40 (port 0)
+ * 0x41 (port 1)
+ */
+struct sp2_config {
+ /* dvb_adapter to attach the ci to */
+ struct dvb_adapter *dvb_adap;
+
+ /* function ci_control handles the device specific ci ops */
+ void *ci_control;
+
+ /* priv is passed back to function ci_control */
+ void *priv;
+};
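+
+/*
+ * For reference, sp2.c invokes the ci_control callback as
+ *
+ *   int (*ci_control)(void *priv, u8 read, int addr, u8 data, int *mem);
+ *
+ * i.e. a device specific CAM read/write handler; for reads the result is
+ * returned through *mem.
+ */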
+
+extern int sp2_ci_read_attribute_mem(struct dvb_ca_en50221 *en50221,
+ int slot, int addr);
+extern int sp2_ci_write_attribute_mem(struct dvb_ca_en50221 *en50221,
+ int slot, int addr, u8 data);
+extern int sp2_ci_read_cam_control(struct dvb_ca_en50221 *en50221,
+ int slot, u8 addr);
+extern int sp2_ci_write_cam_control(struct dvb_ca_en50221 *en50221,
+ int slot, u8 addr, u8 data);
+extern int sp2_ci_slot_reset(struct dvb_ca_en50221 *en50221, int slot);
+extern int sp2_ci_slot_shutdown(struct dvb_ca_en50221 *en50221, int slot);
+extern int sp2_ci_slot_ts_enable(struct dvb_ca_en50221 *en50221, int slot);
+extern int sp2_ci_poll_slot_status(struct dvb_ca_en50221 *en50221,
+ int slot, int open);
+
+#endif
diff --git a/drivers/media/dvb-frontends/sp2_priv.h b/drivers/media/dvb-frontends/sp2_priv.h
new file mode 100644
index 000000000000..37fef7bcd63f
--- /dev/null
+++ b/drivers/media/dvb-frontends/sp2_priv.h
@@ -0,0 +1,50 @@
+/*
+ * CIMaX SP2/HF CI driver
+ *
+ * Copyright (C) 2014 Olli Salonen <olli.salonen@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef SP2_PRIV_H
+#define SP2_PRIV_H
+
+#include "sp2.h"
+#include "dvb_frontend.h"
+
+/* state struct */
+struct sp2 {
+ int status;
+ struct i2c_client *client;
+ struct dvb_adapter *dvb_adap;
+ struct dvb_ca_en50221 ca;
+ int module_access_type;
+ unsigned long next_status_checked_time;
+ void *priv;
+ void *ci_control;
+};
+
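+/*
+ * CAM access types (written into the ACS1/ACS0 bits of the module control
+ * register by sp2_ci_op_cam()) and the read/write flags passed on to the
+ * device specific ci_control callback.
+ */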
+#define SP2_CI_ATTR_ACS 0x00
+#define SP2_CI_IO_ACS 0x04
+#define SP2_CI_WR 0
+#define SP2_CI_RD 1
+
+/* Module control register (0x00 module A, 0x09 module B) bits */
+#define SP2_MOD_CTL_DET 0x01
+#define SP2_MOD_CTL_AUTO 0x02
+#define SP2_MOD_CTL_ACS0 0x04
+#define SP2_MOD_CTL_ACS1 0x08
+#define SP2_MOD_CTL_HAD 0x10
+#define SP2_MOD_CTL_TSIEN 0x20
+#define SP2_MOD_CTL_TSOEN 0x40
+#define SP2_MOD_CTL_RST 0x80
+
+#endif
diff --git a/drivers/media/dvb-frontends/sp8870.c b/drivers/media/dvb-frontends/sp8870.c
index 2aa8ef76eba2..57dc2abaa87b 100644
--- a/drivers/media/dvb-frontends/sp8870.c
+++ b/drivers/media/dvb-frontends/sp8870.c
@@ -394,8 +394,7 @@ static int sp8870_read_ber (struct dvb_frontend* fe, u32 * ber)
if (ret < 0)
return -EIO;
- tmp = ret << 6;
-
+ tmp = ret << 6;
if (tmp >= 0x3FFF0)
tmp = ~0;
diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c
index 59b6e661acc0..b31ff265ff24 100644
--- a/drivers/media/dvb-frontends/stv0367.c
+++ b/drivers/media/dvb-frontends/stv0367.c
@@ -59,7 +59,6 @@ struct stv0367cab_state {
int locked; /* channel found */
u32 freq_khz; /* found frequency (in kHz) */
u32 symbol_rate; /* found symbol rate (in Bds) */
- enum stv0367cab_mod modulation; /* modulation */
fe_spectral_inversion_t spect_inv; /* Spectrum Inversion */
};
@@ -554,7 +553,7 @@ static struct st_register def0367ter[STV0367TER_NBREGS] = {
#define RF_LOOKUP_TABLE_SIZE 31
#define RF_LOOKUP_TABLE2_SIZE 16
/* RF Level (for RF AGC->AGC1) Lookup Table, depends on the board and tuner.*/
-s32 stv0367cab_RF_LookUp1[RF_LOOKUP_TABLE_SIZE][RF_LOOKUP_TABLE_SIZE] = {
+static const s32 stv0367cab_RF_LookUp1[RF_LOOKUP_TABLE_SIZE][RF_LOOKUP_TABLE_SIZE] = {
{/*AGC1*/
48, 50, 51, 53, 54, 56, 57, 58, 60, 61, 62, 63,
64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75,
@@ -566,7 +565,7 @@ s32 stv0367cab_RF_LookUp1[RF_LOOKUP_TABLE_SIZE][RF_LOOKUP_TABLE_SIZE] = {
}
};
/* RF Level (for IF AGC->AGC2) Lookup Table, depends on the board and tuner.*/
-s32 stv0367cab_RF_LookUp2[RF_LOOKUP_TABLE2_SIZE][RF_LOOKUP_TABLE2_SIZE] = {
+static const s32 stv0367cab_RF_LookUp2[RF_LOOKUP_TABLE2_SIZE][RF_LOOKUP_TABLE2_SIZE] = {
{/*AGC2*/
28, 29, 31, 32, 34, 35, 36, 37,
38, 39, 40, 41, 42, 43, 44, 45,
@@ -1935,8 +1934,6 @@ static int stv0367ter_get_frontend(struct dvb_frontend *fe)
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct stv0367_state *state = fe->demodulator_priv;
struct stv0367ter_state *ter_state = state->ter_state;
-
- int error = 0;
enum stv0367_ter_mode mode;
int constell = 0,/* snr = 0,*/ Data = 0;
@@ -2020,7 +2017,7 @@ static int stv0367ter_get_frontend(struct dvb_frontend *fe)
p->guard_interval = stv0367_readbits(state, F367TER_SYR_GUARD);
- return error;
+ return 0;
}
static int stv0367ter_read_snr(struct dvb_frontend *fe, u16 *snr)
@@ -2999,7 +2996,6 @@ enum stv0367_cab_signal_type stv0367cab_algo(struct stv0367_state *state,
if (QAMFEC_Lock) {
signalType = FE_CAB_DATAOK;
- cab_state->modulation = p->modulation;
cab_state->spect_inv = stv0367_readbits(state,
F367CAB_QUAD_INV);
#if 0
@@ -3165,7 +3161,7 @@ static int stv0367cab_get_frontend(struct dvb_frontend *fe)
case FE_CAB_MOD_QAM128:
p->modulation = QAM_128;
break;
- case QAM_256:
+ case FE_CAB_MOD_QAM256:
p->modulation = QAM_256;
break;
default:
diff --git a/drivers/media/dvb-frontends/stv0900_core.c b/drivers/media/dvb-frontends/stv0900_core.c
index e5a87b57d855..2c88abfab531 100644
--- a/drivers/media/dvb-frontends/stv0900_core.c
+++ b/drivers/media/dvb-frontends/stv0900_core.c
@@ -1270,7 +1270,6 @@ enum fe_stv0900_error stv0900_st_dvbs2_single(struct stv0900_internal *intp,
enum fe_stv0900_demod_mode LDPC_Mode,
enum fe_stv0900_demod_num demod)
{
- enum fe_stv0900_error error = STV0900_NO_ERROR;
s32 reg_ind;
dprintk("%s\n", __func__);
@@ -1337,7 +1336,7 @@ enum fe_stv0900_error stv0900_st_dvbs2_single(struct stv0900_internal *intp,
break;
}
- return error;
+ return STV0900_NO_ERROR;
}
static enum fe_stv0900_error stv0900_init_internal(struct dvb_frontend *fe,
@@ -1555,8 +1554,6 @@ static int stv0900_status(struct stv0900_internal *intp,
static int stv0900_set_mis(struct stv0900_internal *intp,
enum fe_stv0900_demod_num demod, int mis)
{
- enum fe_stv0900_error error = STV0900_NO_ERROR;
-
dprintk("%s\n", __func__);
if (mis < 0 || mis > 255) {
@@ -1569,7 +1566,7 @@ static int stv0900_set_mis(struct stv0900_internal *intp,
stv0900_write_reg(intp, ISIBITENA, 0xff);
}
- return error;
+ return STV0900_NO_ERROR;
}
diff --git a/drivers/media/dvb-frontends/stv0900_sw.c b/drivers/media/dvb-frontends/stv0900_sw.c
index 4ce1d260b3eb..a0a7b1664c53 100644
--- a/drivers/media/dvb-frontends/stv0900_sw.c
+++ b/drivers/media/dvb-frontends/stv0900_sw.c
@@ -1733,9 +1733,10 @@ static void stv0900_set_search_standard(struct stv0900_internal *intp,
break;
case STV0900_SEARCH_DSS:
dprintk("Search Standard = DSS\n");
- case STV0900_SEARCH_DVBS2:
break;
+ case STV0900_SEARCH_DVBS2:
dprintk("Search Standard = DVBS2\n");
+ break;
case STV0900_AUTO_SEARCH:
default:
dprintk("Search Standard = AUTO\n");
diff --git a/drivers/media/dvb-frontends/tc90522.c b/drivers/media/dvb-frontends/tc90522.c
new file mode 100644
index 000000000000..d9905fb52f84
--- /dev/null
+++ b/drivers/media/dvb-frontends/tc90522.c
@@ -0,0 +1,840 @@
+/*
+ * Toshiba TC90522 Demodulator
+ *
+ * Copyright (C) 2014 Akihiro Tsukada <tskd08@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * NOTICE:
+ * This driver is incomplete and lacks init/config of the chips,
+ * as the necessary info is not disclosed.
+ * It assumes that the users of this driver (such as the PCI bridge
+ * driver of a DTV receiver card) properly init and configure the chip
+ * via I2C *before* calling this driver's init() function.
+ *
+ * Currently, the PT3 driver is the only user of this driver, and it
+ * carries the init/config code in its firmware.
+ * Thus some parts of this code might depend on PT3 specific config.
+ */
+
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/dvb/frontend.h>
+#include "dvb_math.h"
+#include "tc90522.h"
+
+#define TC90522_I2C_THRU_REG 0xfe
+
+#define TC90522_MODULE_IDX(addr) (((u8)(addr) & 0x02U) >> 1)
+
+struct tc90522_state {
+ struct tc90522_config cfg;
+ struct dvb_frontend fe;
+ struct i2c_client *i2c_client;
+ struct i2c_adapter tuner_i2c;
+
+ bool lna;
+};
+
+struct reg_val {
+ u8 reg;
+ u8 val;
+};
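+
+/*
+ * reg_write() below sends each entry as a single 2-byte I2C write and
+ * therefore relies on struct reg_val being exactly two consecutive bytes
+ * (no padding between reg and val).
+ */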
+
+static int
+reg_write(struct tc90522_state *state, const struct reg_val *regs, int num)
+{
+ int i, ret;
+ struct i2c_msg msg;
+
+ ret = 0;
+ msg.addr = state->i2c_client->addr;
+ msg.flags = 0;
+ msg.len = 2;
+ for (i = 0; i < num; i++) {
+ msg.buf = (u8 *)&regs[i];
+ ret = i2c_transfer(state->i2c_client->adapter, &msg, 1);
+ if (ret == 0)
+ ret = -EIO;
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+static int reg_read(struct tc90522_state *state, u8 reg, u8 *val, u8 len)
+{
+ struct i2c_msg msgs[2] = {
+ {
+ .addr = state->i2c_client->addr,
+ .flags = 0,
+ .buf = &reg,
+ .len = 1,
+ },
+ {
+ .addr = state->i2c_client->addr,
+ .flags = I2C_M_RD,
+ .buf = val,
+ .len = len,
+ },
+ };
+ int ret;
+
+ ret = i2c_transfer(state->i2c_client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret == ARRAY_SIZE(msgs))
+ ret = 0;
+ else if (ret >= 0)
+ ret = -EIO;
+ return ret;
+}
+
+static struct tc90522_state *cfg_to_state(struct tc90522_config *c)
+{
+ return container_of(c, struct tc90522_state, cfg);
+}
+
+
+static int tc90522s_set_tsid(struct dvb_frontend *fe)
+{
+ struct reg_val set_tsid[] = {
+ { 0x8f, 0x00 },
+ { 0x90, 0x00 }
+ };
+
+ set_tsid[0].val = (fe->dtv_property_cache.stream_id & 0xff00) >> 8;
+ set_tsid[1].val = fe->dtv_property_cache.stream_id & 0xff;
+ return reg_write(fe->demodulator_priv, set_tsid, ARRAY_SIZE(set_tsid));
+}
+
+static int tc90522t_set_layers(struct dvb_frontend *fe)
+{
+ struct reg_val rv;
+ u8 laysel;
+
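+ /*
+ * Register 0x71 appears to hold per-layer "disable" bits, hence the
+ * inversion of isdbt_layer_enabled; the swap of bits 0 and 2 below
+ * reorders layers A/B/C into the bit layout the register expects.
+ */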
+ laysel = ~fe->dtv_property_cache.isdbt_layer_enabled & 0x07;
+ laysel = (laysel & 0x01) << 2 | (laysel & 0x02) | (laysel & 0x04) >> 2;
+ rv.reg = 0x71;
+ rv.val = laysel;
+ return reg_write(fe->demodulator_priv, &rv, 1);
+}
+
+/* frontend ops */
+
+static int tc90522s_read_status(struct dvb_frontend *fe, fe_status_t *status)
+{
+ struct tc90522_state *state;
+ int ret;
+ u8 reg;
+
+ state = fe->demodulator_priv;
+ ret = reg_read(state, 0xc3, &reg, 1);
+ if (ret < 0)
+ return ret;
+
+ *status = 0;
+ if (reg & 0x80) /* input level under min ? */
+ return 0;
+ *status |= FE_HAS_SIGNAL;
+
+ if (reg & 0x60) /* carrier? */
+ return 0;
+ *status |= FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC;
+
+ if (reg & 0x10)
+ return 0;
+ if (reg_read(state, 0xc5, &reg, 1) < 0 || !(reg & 0x03))
+ return 0;
+ *status |= FE_HAS_LOCK;
+ return 0;
+}
+
+static int tc90522t_read_status(struct dvb_frontend *fe, fe_status_t *status)
+{
+ struct tc90522_state *state;
+ int ret;
+ u8 reg;
+
+ state = fe->demodulator_priv;
+ ret = reg_read(state, 0x96, &reg, 1);
+ if (ret < 0)
+ return ret;
+
+ *status = 0;
+ if (reg & 0xe0) {
+ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI
+ | FE_HAS_SYNC | FE_HAS_LOCK;
+ return 0;
+ }
+
+ ret = reg_read(state, 0x80, &reg, 1);
+ if (ret < 0)
+ return ret;
+
+ if (reg & 0xf0)
+ return 0;
+ *status |= FE_HAS_SIGNAL | FE_HAS_CARRIER;
+
+ if (reg & 0x0c)
+ return 0;
+ *status |= FE_HAS_SYNC | FE_HAS_VITERBI;
+
+ if (reg & 0x02)
+ return 0;
+ *status |= FE_HAS_LOCK;
+ return 0;
+}
+
+static const fe_code_rate_t fec_conv_sat[] = {
+ FEC_NONE, /* unused */
+ FEC_1_2, /* for BPSK */
+ FEC_1_2, FEC_2_3, FEC_3_4, FEC_5_6, FEC_7_8, /* for QPSK */
+ FEC_2_3, /* for 8PSK. (trellis code) */
+};
+
+static int tc90522s_get_frontend(struct dvb_frontend *fe)
+{
+ struct tc90522_state *state;
+ struct dtv_frontend_properties *c;
+ struct dtv_fe_stats *stats;
+ int ret, i;
+ int layers;
+ u8 val[10];
+ u32 cndat;
+
+ state = fe->demodulator_priv;
+ c = &fe->dtv_property_cache;
+ c->delivery_system = SYS_ISDBS;
+
+ layers = 0;
+ ret = reg_read(state, 0xe8, val, 3);
+ if (ret == 0) {
+ int slots;
+ u8 v;
+
+ /* high/single layer */
+ v = (val[0] & 0x70) >> 4;
+ c->modulation = (v == 7) ? PSK_8 : QPSK;
+ c->fec_inner = fec_conv_sat[v];
+ c->layer[0].fec = c->fec_inner;
+ c->layer[0].modulation = c->modulation;
+ c->layer[0].segment_count = val[1] & 0x3f; /* slots */
+
+ /* low layer */
+ v = (val[0] & 0x07);
+ c->layer[1].fec = fec_conv_sat[v];
+ if (v == 0) /* no low layer */
+ c->layer[1].segment_count = 0;
+ else
+ c->layer[1].segment_count = val[2] & 0x3f; /* slots */
+ /* actually, BPSK if v==1, but not defined in fe_modulation_t */
+ c->layer[1].modulation = QPSK;
+ layers = (v > 0) ? 2 : 1;
+
+ slots = c->layer[0].segment_count + c->layer[1].segment_count;
+ c->symbol_rate = 28860000 * slots / 48;
+ }
+
+ /* statistics */
+
+ stats = &c->strength;
+ stats->len = 0;
+ /* let the connected tuner set RSSI property cache */
+ if (fe->ops.tuner_ops.get_rf_strength) {
+ u16 dummy;
+
+ fe->ops.tuner_ops.get_rf_strength(fe, &dummy);
+ }
+
+ stats = &c->cnr;
+ stats->len = 1;
+ stats->stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ cndat = 0;
+ ret = reg_read(state, 0xbc, val, 2);
+ if (ret == 0)
+ cndat = val[0] << 8 | val[1];
+ if (cndat >= 3000) {
+ u32 p, p4;
+ s64 cn;
+
+ cndat -= 3000; /* cndat: 4.12 fixed point float */
+ /*
+ * cnr[mdB] = -1634.6 * P^5 + 14341 * P^4 - 50259 * P^3
+ * + 88977 * P^2 - 89565 * P + 58857
+ * (P = sqrt(cndat) / 64)
+ */
+ /* p := sqrt(cndat) << 8 = P << 14, 2.14 fixed point float */
+ /* cn = cnr << 3 */
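+ /*
+ * Scaling sketch for the terms below: with cndat = P^2 << 12 and
+ * p = P << 14, the P^5 term is p4 * p = cndat^2 * p = P^5 << 38, so
+ * dividing by 10 and shifting right by 35 leaves -1634.6 * P^5 << 3,
+ * i.e. the term expressed in cn = cnr << 3 units. The other terms
+ * follow the same pattern.
+ */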
+ p = int_sqrt(cndat << 16);
+ p4 = cndat * cndat;
+ cn = div64_s64(-16346LL * p4 * p, 10) >> 35;
+ cn += (14341LL * p4) >> 21;
+ cn -= (50259LL * cndat * p) >> 23;
+ cn += (88977LL * cndat) >> 9;
+ cn -= (89565LL * p) >> 11;
+ cn += 58857 << 3;
+ stats->stat[0].svalue = cn >> 3;
+ stats->stat[0].scale = FE_SCALE_DECIBEL;
+ }
+
+ /* per-layer post viterbi BER (or PER? config dependent?) */
+ stats = &c->post_bit_error;
+ memset(stats, 0, sizeof(*stats));
+ stats->len = layers;
+ ret = reg_read(state, 0xeb, val, 10);
+ if (ret < 0)
+ for (i = 0; i < layers; i++)
+ stats->stat[i].scale = FE_SCALE_NOT_AVAILABLE;
+ else {
+ for (i = 0; i < layers; i++) {
+ stats->stat[i].scale = FE_SCALE_COUNTER;
+ stats->stat[i].uvalue = val[i * 5] << 16
+ | val[i * 5 + 1] << 8 | val[i * 5 + 2];
+ }
+ }
+ stats = &c->post_bit_count;
+ memset(stats, 0, sizeof(*stats));
+ stats->len = layers;
+ if (ret < 0)
+ for (i = 0; i < layers; i++)
+ stats->stat[i].scale = FE_SCALE_NOT_AVAILABLE;
+ else {
+ for (i = 0; i < layers; i++) {
+ stats->stat[i].scale = FE_SCALE_COUNTER;
+ stats->stat[i].uvalue =
+ val[i * 5 + 3] << 8 | val[i * 5 + 4];
+ stats->stat[i].uvalue *= 204 * 8;
+ }
+ }
+
+ return 0;
+}
+
+
+static const fe_transmit_mode_t tm_conv[] = {
+ TRANSMISSION_MODE_2K,
+ TRANSMISSION_MODE_4K,
+ TRANSMISSION_MODE_8K,
+ 0
+};
+
+static const fe_code_rate_t fec_conv_ter[] = {
+ FEC_1_2, FEC_2_3, FEC_3_4, FEC_5_6, FEC_7_8, 0, 0, 0
+};
+
+static const fe_modulation_t mod_conv[] = {
+ DQPSK, QPSK, QAM_16, QAM_64, 0, 0, 0, 0
+};
+
+static int tc90522t_get_frontend(struct dvb_frontend *fe)
+{
+ struct tc90522_state *state;
+ struct dtv_frontend_properties *c;
+ struct dtv_fe_stats *stats;
+ int ret, i;
+ int layers;
+ u8 val[15], mode;
+ u32 cndat;
+
+ state = fe->demodulator_priv;
+ c = &fe->dtv_property_cache;
+ c->delivery_system = SYS_ISDBT;
+ c->bandwidth_hz = 6000000;
+ mode = 1;
+ ret = reg_read(state, 0xb0, val, 1);
+ if (ret == 0) {
+ mode = (val[0] & 0xc0) >> 6;
+ c->transmission_mode = tm_conv[mode];
+ c->guard_interval = (val[0] & 0x30) >> 4;
+ }
+
+ ret = reg_read(state, 0xb2, val, 6);
+ layers = 0;
+ if (ret == 0) {
+ u8 v;
+
+ c->isdbt_partial_reception = val[0] & 0x01;
+ c->isdbt_sb_mode = (val[0] & 0xc0) == 0x01;
+
+ /* layer A */
+ v = (val[2] & 0x78) >> 3;
+ if (v == 0x0f)
+ c->layer[0].segment_count = 0;
+ else {
+ layers++;
+ c->layer[0].segment_count = v;
+ c->layer[0].fec = fec_conv_ter[(val[1] & 0x1c) >> 2];
+ c->layer[0].modulation = mod_conv[(val[1] & 0xe0) >> 5];
+ v = (val[1] & 0x03) << 1 | (val[2] & 0x80) >> 7;
+ c->layer[0].interleaving = v;
+ }
+
+ /* layer B */
+ v = (val[3] & 0x03) << 1 | (val[4] & 0xc0) >> 6;
+ if (v == 0x0f)
+ c->layer[1].segment_count = 0;
+ else {
+ layers++;
+ c->layer[1].segment_count = v;
+ c->layer[1].fec = fec_conv_ter[(val[3] & 0xe0) >> 5];
+ c->layer[1].modulation = mod_conv[(val[2] & 0x07)];
+ c->layer[1].interleaving = (val[3] & 0x1c) >> 2;
+ }
+
+ /* layer C */
+ v = (val[5] & 0x1e) >> 1;
+ if (v == 0x0f)
+ c->layer[2].segment_count = 0;
+ else {
+ layers++;
+ c->layer[2].segment_count = v;
+ c->layer[2].fec = fec_conv_ter[(val[4] & 0x07)];
+ c->layer[2].modulation = mod_conv[(val[4] & 0x38) >> 3];
+ c->layer[2].interleaving = (val[5] & 0xe0) >> 5;
+ }
+ }
+
+ /* statistics */
+
+ stats = &c->strength;
+ stats->len = 0;
+ /* let the connected tuner set RSSI property cache */
+ if (fe->ops.tuner_ops.get_rf_strength) {
+ u16 dummy;
+
+ fe->ops.tuner_ops.get_rf_strength(fe, &dummy);
+ }
+
+ stats = &c->cnr;
+ stats->len = 1;
+ stats->stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ cndat = 0;
+ ret = reg_read(state, 0x8b, val, 3);
+ if (ret == 0)
+ cndat = val[0] << 16 | val[1] << 8 | val[2];
+ if (cndat != 0) {
+ u32 p, tmp;
+ s64 cn;
+
+ /*
+ * cnr[mdB] = 0.024 P^4 - 1.6 P^3 + 39.8 P^2 + 549.1 P + 3096.5
+ * (P = 10log10(5505024/cndat))
+ */
+ /* cn = cnr << 3 (61.3 fixed point) */
+ /* p = 10log10(5505024/cndat) << 24 (8.24 fixed point float)*/
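+ /*
+ * Scaling sketch: e.g. for the P^2 term, tmp = p >> 8 = P << 16, so
+ * tmp * tmp = P^2 << 32 and div64_s64(3184 * tmp * tmp, 10) >> 32
+ * equals 318.4 * P^2 = (39.8 * P^2) << 3, again in cn = cnr << 3
+ * units; the constant 24772 is 3096.5 << 3.
+ */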
+ p = intlog10(5505024) - intlog10(cndat);
+ p *= 10;
+
+ cn = 24772;
+ cn += div64_s64(43827LL * p, 10) >> 24;
+ tmp = p >> 8;
+ cn += div64_s64(3184LL * tmp * tmp, 10) >> 32;
+ tmp = p >> 13;
+ cn -= div64_s64(128LL * tmp * tmp * tmp, 10) >> 33;
+ tmp = p >> 18;
+ cn += div64_s64(192LL * tmp * tmp * tmp * tmp, 1000) >> 24;
+
+ stats->stat[0].svalue = cn >> 3;
+ stats->stat[0].scale = FE_SCALE_DECIBEL;
+ }
+
+ /* per-layer post viterbi BER (or PER? config dependent?) */
+ stats = &c->post_bit_error;
+ memset(stats, 0, sizeof(*stats));
+ stats->len = layers;
+ ret = reg_read(state, 0x9d, val, 15);
+ if (ret < 0)
+ for (i = 0; i < layers; i++)
+ stats->stat[i].scale = FE_SCALE_NOT_AVAILABLE;
+ else {
+ for (i = 0; i < layers; i++) {
+ stats->stat[i].scale = FE_SCALE_COUNTER;
+ stats->stat[i].uvalue = val[i * 3] << 16
+ | val[i * 3 + 1] << 8 | val[i * 3 + 2];
+ }
+ }
+ stats = &c->post_bit_count;
+ memset(stats, 0, sizeof(*stats));
+ stats->len = layers;
+ if (ret < 0)
+ for (i = 0; i < layers; i++)
+ stats->stat[i].scale = FE_SCALE_NOT_AVAILABLE;
+ else {
+ for (i = 0; i < layers; i++) {
+ stats->stat[i].scale = FE_SCALE_COUNTER;
+ stats->stat[i].uvalue =
+ val[9 + i * 2] << 8 | val[9 + i * 2 + 1];
+ stats->stat[i].uvalue *= 204 * 8;
+ }
+ }
+
+ return 0;
+}
+
+static const struct reg_val reset_sat = { 0x03, 0x01 };
+static const struct reg_val reset_ter = { 0x01, 0x40 };
+
+static int tc90522_set_frontend(struct dvb_frontend *fe)
+{
+ struct tc90522_state *state;
+ int ret;
+
+ state = fe->demodulator_priv;
+
+ if (fe->ops.tuner_ops.set_params)
+ ret = fe->ops.tuner_ops.set_params(fe);
+ else
+ ret = -ENODEV;
+ if (ret < 0)
+ goto failed;
+
+ if (fe->ops.delsys[0] == SYS_ISDBS) {
+ ret = tc90522s_set_tsid(fe);
+ if (ret < 0)
+ goto failed;
+ ret = reg_write(state, &reset_sat, 1);
+ } else {
+ ret = tc90522t_set_layers(fe);
+ if (ret < 0)
+ goto failed;
+ ret = reg_write(state, &reset_ter, 1);
+ }
+ if (ret < 0)
+ goto failed;
+
+ return 0;
+
+failed:
+ dev_warn(&state->tuner_i2c.dev, "(%s) failed. [adap%d-fe%d]\n",
+ __func__, fe->dvb->num, fe->id);
+ return ret;
+}
+
+static int tc90522_get_tune_settings(struct dvb_frontend *fe,
+ struct dvb_frontend_tune_settings *settings)
+{
+ if (fe->ops.delsys[0] == SYS_ISDBS) {
+ settings->min_delay_ms = 250;
+ settings->step_size = 1000;
+ settings->max_drift = settings->step_size * 2;
+ } else {
+ settings->min_delay_ms = 400;
+ settings->step_size = 142857;
+ settings->max_drift = settings->step_size;
+ }
+ return 0;
+}
+
+static int tc90522_set_if_agc(struct dvb_frontend *fe, bool on)
+{
+ struct reg_val agc_sat[] = {
+ { 0x0a, 0x00 },
+ { 0x10, 0x30 },
+ { 0x11, 0x00 },
+ { 0x03, 0x01 },
+ };
+ struct reg_val agc_ter[] = {
+ { 0x25, 0x00 },
+ { 0x23, 0x4c },
+ { 0x01, 0x40 },
+ };
+ struct tc90522_state *state;
+ struct reg_val *rv;
+ int num;
+
+ state = fe->demodulator_priv;
+ if (fe->ops.delsys[0] == SYS_ISDBS) {
+ agc_sat[0].val = on ? 0xff : 0x00;
+ agc_sat[1].val |= 0x80;
+ agc_sat[1].val |= on ? 0x01 : 0x00;
+ agc_sat[2].val |= on ? 0x40 : 0x00;
+ rv = agc_sat;
+ num = ARRAY_SIZE(agc_sat);
+ } else {
+ agc_ter[0].val = on ? 0x40 : 0x00;
+ agc_ter[1].val |= on ? 0x00 : 0x01;
+ rv = agc_ter;
+ num = ARRAY_SIZE(agc_ter);
+ }
+ return reg_write(state, rv, num);
+}
+
+static const struct reg_val sleep_sat = { 0x17, 0x01 };
+static const struct reg_val sleep_ter = { 0x03, 0x90 };
+
+static int tc90522_sleep(struct dvb_frontend *fe)
+{
+ struct tc90522_state *state;
+ int ret;
+
+ state = fe->demodulator_priv;
+ if (fe->ops.delsys[0] == SYS_ISDBS)
+ ret = reg_write(state, &sleep_sat, 1);
+ else {
+ ret = reg_write(state, &sleep_ter, 1);
+ if (ret == 0 && fe->ops.set_lna &&
+ fe->dtv_property_cache.lna == LNA_AUTO) {
+ fe->dtv_property_cache.lna = 0;
+ ret = fe->ops.set_lna(fe);
+ fe->dtv_property_cache.lna = LNA_AUTO;
+ }
+ }
+ if (ret < 0)
+ dev_warn(&state->tuner_i2c.dev,
+ "(%s) failed. [adap%d-fe%d]\n",
+ __func__, fe->dvb->num, fe->id);
+ return ret;
+}
+
+static const struct reg_val wakeup_sat = { 0x17, 0x00 };
+static const struct reg_val wakeup_ter = { 0x03, 0x80 };
+
+static int tc90522_init(struct dvb_frontend *fe)
+{
+ struct tc90522_state *state;
+ int ret;
+
+ /*
+ * Because the init sequence is not public,
+ * the parent device/driver is expected to have initialized the
+ * device already; just wake it up here.
+ */
+
+ state = fe->demodulator_priv;
+ if (fe->ops.delsys[0] == SYS_ISDBS)
+ ret = reg_write(state, &wakeup_sat, 1);
+ else {
+ ret = reg_write(state, &wakeup_ter, 1);
+ if (ret == 0 && fe->ops.set_lna &&
+ fe->dtv_property_cache.lna == LNA_AUTO) {
+ fe->dtv_property_cache.lna = 1;
+ ret = fe->ops.set_lna(fe);
+ fe->dtv_property_cache.lna = LNA_AUTO;
+ }
+ }
+ if (ret < 0) {
+ dev_warn(&state->tuner_i2c.dev,
+ "(%s) failed. [adap%d-fe%d]\n",
+ __func__, fe->dvb->num, fe->id);
+ return ret;
+ }
+
+ /* prefer 'all-layers' to 'none' as a default */
+ if (fe->dtv_property_cache.isdbt_layer_enabled == 0)
+ fe->dtv_property_cache.isdbt_layer_enabled = 7;
+ return tc90522_set_if_agc(fe, true);
+}
+
+
+/*
+ * tuner I2C adapter functions
+ */
+
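+/*
+ * Messages addressed to the tuner are tunnelled through the demod:
+ * each original message is rewritten to target the demod's own address,
+ * prefixed with { TC90522_I2C_THRU_REG, tuner_addr << 1 | rd_bit }, and
+ * for reads the actual read is issued as a separate message that follows
+ * the prefix write.
+ */
+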
+static int
+tc90522_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+ struct tc90522_state *state;
+ struct i2c_msg *new_msgs;
+ int i, j;
+ int ret, rd_num;
+ u8 wbuf[256];
+ u8 *p, *bufend;
+
+ if (num <= 0)
+ return -EINVAL;
+
+ rd_num = 0;
+ for (i = 0; i < num; i++)
+ if (msgs[i].flags & I2C_M_RD)
+ rd_num++;
+ new_msgs = kmalloc(sizeof(*new_msgs) * (num + rd_num), GFP_KERNEL);
+ if (!new_msgs)
+ return -ENOMEM;
+
+ state = i2c_get_adapdata(adap);
+ p = wbuf;
+ bufend = wbuf + sizeof(wbuf);
+ for (i = 0, j = 0; i < num; i++, j++) {
+ new_msgs[j].addr = state->i2c_client->addr;
+ new_msgs[j].flags = msgs[i].flags;
+
+ if (msgs[i].flags & I2C_M_RD) {
+ new_msgs[j].flags &= ~I2C_M_RD;
+ if (p + 2 > bufend)
+ break;
+ p[0] = TC90522_I2C_THRU_REG;
+ p[1] = msgs[i].addr << 1 | 0x01;
+ new_msgs[j].buf = p;
+ new_msgs[j].len = 2;
+ p += 2;
+ j++;
+ new_msgs[j].addr = state->i2c_client->addr;
+ new_msgs[j].flags = msgs[i].flags;
+ new_msgs[j].buf = msgs[i].buf;
+ new_msgs[j].len = msgs[i].len;
+ continue;
+ }
+
+ if (p + msgs[i].len + 2 > bufend)
+ break;
+ p[0] = TC90522_I2C_THRU_REG;
+ p[1] = msgs[i].addr << 1;
+ memcpy(p + 2, msgs[i].buf, msgs[i].len);
+ new_msgs[j].buf = p;
+ new_msgs[j].len = msgs[i].len + 2;
+ p += new_msgs[j].len;
+ }
+
+ if (i < num)
+ ret = -ENOMEM;
+ else
+ ret = i2c_transfer(state->i2c_client->adapter, new_msgs, j);
+ if (ret >= 0 && ret < j)
+ ret = -EIO;
+ kfree(new_msgs);
+ return (ret == j) ? num : ret;
+}
+
+static u32 tc90522_functionality(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C;
+}
+
+static const struct i2c_algorithm tc90522_tuner_i2c_algo = {
+ .master_xfer = &tc90522_master_xfer,
+ .functionality = &tc90522_functionality,
+};
+
+
+/*
+ * I2C driver functions
+ */
+
+static const struct dvb_frontend_ops tc90522_ops_sat = {
+ .delsys = { SYS_ISDBS },
+ .info = {
+ .name = "Toshiba TC90522 ISDB-S module",
+ .frequency_min = 950000,
+ .frequency_max = 2150000,
+ .caps = FE_CAN_INVERSION_AUTO | FE_CAN_FEC_AUTO |
+ FE_CAN_QAM_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO |
+ FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_HIERARCHY_AUTO,
+ },
+
+ .init = tc90522_init,
+ .sleep = tc90522_sleep,
+ .set_frontend = tc90522_set_frontend,
+ .get_tune_settings = tc90522_get_tune_settings,
+
+ .get_frontend = tc90522s_get_frontend,
+ .read_status = tc90522s_read_status,
+};
+
+static const struct dvb_frontend_ops tc90522_ops_ter = {
+ .delsys = { SYS_ISDBT },
+ .info = {
+ .name = "Toshiba TC90522 ISDB-T module",
+ .frequency_min = 470000000,
+ .frequency_max = 770000000,
+ .frequency_stepsize = 142857,
+ .caps = FE_CAN_INVERSION_AUTO |
+ FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
+ FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
+ FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 |
+ FE_CAN_QAM_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO |
+ FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_RECOVER |
+ FE_CAN_HIERARCHY_AUTO,
+ },
+
+ .init = tc90522_init,
+ .sleep = tc90522_sleep,
+ .set_frontend = tc90522_set_frontend,
+ .get_tune_settings = tc90522_get_tune_settings,
+
+ .get_frontend = tc90522t_get_frontend,
+ .read_status = tc90522t_read_status,
+};
+
+
+static int tc90522_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct tc90522_state *state;
+ struct tc90522_config *cfg;
+ const struct dvb_frontend_ops *ops;
+ struct i2c_adapter *adap;
+ int ret;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+ state->i2c_client = client;
+
+ cfg = client->dev.platform_data;
+ memcpy(&state->cfg, cfg, sizeof(state->cfg));
+ cfg->fe = state->cfg.fe = &state->fe;
+ ops = id->driver_data == 0 ? &tc90522_ops_sat : &tc90522_ops_ter;
+ memcpy(&state->fe.ops, ops, sizeof(*ops));
+ state->fe.demodulator_priv = state;
+
+ adap = &state->tuner_i2c;
+ adap->owner = THIS_MODULE;
+ adap->algo = &tc90522_tuner_i2c_algo;
+ adap->dev.parent = &client->dev;
+ strlcpy(adap->name, "tc90522_sub", sizeof(adap->name));
+ i2c_set_adapdata(adap, state);
+ ret = i2c_add_adapter(adap);
+ if (ret < 0)
+ goto err;
+ cfg->tuner_i2c = state->cfg.tuner_i2c = adap;
+
+ i2c_set_clientdata(client, &state->cfg);
+ dev_info(&client->dev, "Toshiba TC90522 attached.\n");
+ return 0;
+
+err:
+ kfree(state);
+ return ret;
+}
+
+static int tc90522_remove(struct i2c_client *client)
+{
+ struct tc90522_state *state;
+
+ state = cfg_to_state(i2c_get_clientdata(client));
+ i2c_del_adapter(&state->tuner_i2c);
+ kfree(state);
+ return 0;
+}
+
+
+static const struct i2c_device_id tc90522_id[] = {
+ { TC90522_I2C_DEV_SAT, 0 },
+ { TC90522_I2C_DEV_TER, 1 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, tc90522_id);
+
+static struct i2c_driver tc90522_driver = {
+ .driver = {
+ .name = "tc90522",
+ },
+ .probe = tc90522_probe,
+ .remove = tc90522_remove,
+ .id_table = tc90522_id,
+};
+
+module_i2c_driver(tc90522_driver);
+
+MODULE_DESCRIPTION("Toshiba TC90522 frontend");
+MODULE_AUTHOR("Akihiro TSUKADA");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/tc90522.h b/drivers/media/dvb-frontends/tc90522.h
new file mode 100644
index 000000000000..b1cbddfa6ee6
--- /dev/null
+++ b/drivers/media/dvb-frontends/tc90522.h
@@ -0,0 +1,42 @@
+/*
+ * Toshiba TC90522 Demodulator
+ *
+ * Copyright (C) 2014 Akihiro Tsukada <tskd08@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * The demod has 4 inputs (2x ISDB-T and 2x ISDB-S)
+ * and provides an independent sub module for each input.
+ * As the sub modules work in parallel and have separate i2c addresses,
+ * this driver treats each sub module as its own demod device.
+ */
+
+#ifndef TC90522_H
+#define TC90522_H
+
+#include <linux/i2c.h>
+#include "dvb_frontend.h"
+
+/* I2C device types */
+#define TC90522_I2C_DEV_SAT "tc90522sat"
+#define TC90522_I2C_DEV_TER "tc90522ter"
+
+struct tc90522_config {
+ /* [OUT] frontend returned by driver */
+ struct dvb_frontend *fe;
+
+ /* [OUT] tuner I2C adapter returned by driver */
+ struct i2c_adapter *tuner_i2c;
+};
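+
+/*
+ * Rough usage sketch for a parent (bridge) driver, based on how this
+ * driver's probe() consumes its platform data; the names bridge_i2c and
+ * demod_addr below are placeholders, not part of this API:
+ *
+ *   struct tc90522_config cfg = {};
+ *   struct i2c_board_info info = {
+ *           I2C_BOARD_INFO(TC90522_I2C_DEV_SAT, demod_addr),
+ *           .platform_data = &cfg,
+ *   };
+ *   struct i2c_client *cl = i2c_new_device(bridge_i2c, &info);
+ *
+ * On success, cfg.fe and cfg.tuner_i2c have been filled in by probe().
+ */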
+
+#endif /* TC90522_H */
diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
index 9619be5d4827..4a19b85995f1 100644
--- a/drivers/media/dvb-frontends/tda10071.c
+++ b/drivers/media/dvb-frontends/tda10071.c
@@ -1037,7 +1037,7 @@ static int tda10071_init(struct dvb_frontend *fe)
ret = -EFAULT;
goto error;
} else {
- priv->warm = 1;
+ priv->warm = true;
}
cmd.args[0] = CMD_GET_FW_VERSION;
diff --git a/drivers/media/dvb-frontends/zl10039.c b/drivers/media/dvb-frontends/zl10039.c
index 91b6b2e9b792..ee09ec26c553 100644
--- a/drivers/media/dvb-frontends/zl10039.c
+++ b/drivers/media/dvb-frontends/zl10039.c
@@ -111,7 +111,7 @@ static int zl10039_write(struct zl10039_state *state,
if (1 + count > sizeof(buf)) {
printk(KERN_WARNING
- "%s: i2c wr reg=%04x: len=%zd is too big!\n",
+ "%s: i2c wr reg=%04x: len=%zu is too big!\n",
KBUILD_MODNAME, reg, count);
return -EINVAL;
}
diff --git a/drivers/media/firewire/firedtv-avc.c b/drivers/media/firewire/firedtv-avc.c
index d1a1a1324ef8..251a556112a9 100644
--- a/drivers/media/firewire/firedtv-avc.c
+++ b/drivers/media/firewire/firedtv-avc.c
@@ -1157,6 +1157,10 @@ int avc_ca_pmt(struct firedtv *fdtv, char *msg, int length)
if (pmt_cmd_id != 1 && pmt_cmd_id != 4)
dev_err(fdtv->device,
"invalid pmt_cmd_id %d\n", pmt_cmd_id);
+ if (program_info_length > sizeof(c->operand) - 4 - write_pos) {
+ ret = -EINVAL;
+ goto out;
+ }
memcpy(&c->operand[write_pos], &msg[read_pos],
program_info_length);
@@ -1180,6 +1184,12 @@ int avc_ca_pmt(struct firedtv *fdtv, char *msg, int length)
dev_err(fdtv->device, "invalid pmt_cmd_id %d "
"at stream level\n", pmt_cmd_id);
+ if (es_info_length > sizeof(c->operand) - 4 -
+ write_pos) {
+ ret = -EINVAL;
+ goto out;
+ }
+
memcpy(&c->operand[write_pos], &msg[read_pos],
es_info_length);
read_pos += es_info_length;
diff --git a/drivers/media/i2c/adv7343_regs.h b/drivers/media/i2c/adv7343_regs.h
index 446606764346..2f04ce4b9118 100644
--- a/drivers/media/i2c/adv7343_regs.h
+++ b/drivers/media/i2c/adv7343_regs.h
@@ -13,7 +13,7 @@
* GNU General Public License for more details.
*/
-#ifndef ADV7343_REG_H
+#ifndef ADV7343_REGS_H
#define ADV7343_REGS_H
struct adv7343_std_info {
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index de88b980a837..47795ff71688 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -1593,7 +1593,7 @@ static int adv7604_query_dv_timings(struct v4l2_subdev *sd,
bt->height += hdmi_read16(sd, 0x0b, 0xfff);
bt->il_vfrontporch = hdmi_read16(sd, 0x2c, 0x1fff) / 2;
bt->il_vsync = hdmi_read16(sd, 0x30, 0x1fff) / 2;
- bt->vbackporch = hdmi_read16(sd, 0x34, 0x1fff) / 2;
+ bt->il_vbackporch = hdmi_read16(sd, 0x34, 0x1fff) / 2;
}
adv7604_fill_optional_dv_timings_fields(sd, timings);
} else {
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 0d554919cdd5..48b628bc6714 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -1435,6 +1435,8 @@ static int adv7842_query_dv_timings(struct v4l2_subdev *sd,
v4l2_dbg(1, debug, sd, "%s:\n", __func__);
+ memset(timings, 0, sizeof(struct v4l2_dv_timings));
+
/* SDP block */
if (state->mode == ADV7842_MODE_SDP)
return -ENODATA;
@@ -1483,7 +1485,7 @@ static int adv7842_query_dv_timings(struct v4l2_subdev *sd,
hdmi_read(sd, 0x2d)) / 2;
bt->il_vsync = ((hdmi_read(sd, 0x30) & 0x1f) * 256 +
hdmi_read(sd, 0x31)) / 2;
- bt->vbackporch = ((hdmi_read(sd, 0x34) & 0x1f) * 256 +
+ bt->il_vbackporch = ((hdmi_read(sd, 0x34) & 0x1f) * 256 +
hdmi_read(sd, 0x35)) / 2;
}
adv7842_fill_optional_dv_timings_fields(sd, timings);
diff --git a/drivers/media/i2c/lm3560.c b/drivers/media/i2c/lm3560.c
index c23de593c17d..d9ece4b2d047 100644
--- a/drivers/media/i2c/lm3560.c
+++ b/drivers/media/i2c/lm3560.c
@@ -100,14 +100,14 @@ static int lm3560_enable_ctrl(struct lm3560_flash *flash,
int rval;
if (led_no == LM3560_LED0) {
- if (on == true)
+ if (on)
rval = regmap_update_bits(flash->regmap,
REG_ENABLE, 0x08, 0x08);
else
rval = regmap_update_bits(flash->regmap,
REG_ENABLE, 0x08, 0x00);
} else {
- if (on == true)
+ if (on)
rval = regmap_update_bits(flash->regmap,
REG_ENABLE, 0x10, 0x10);
else
diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
index cdd7c1b7259b..dd3db2458a4f 100644
--- a/drivers/media/i2c/ov7670.c
+++ b/drivers/media/i2c/ov7670.c
@@ -19,6 +19,7 @@
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-mediabus.h>
+#include <media/v4l2-image-sizes.h>
#include <media/ov7670.h>
MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
@@ -30,19 +31,6 @@ module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Debug level (0-1)");
/*
- * Basic window sizes. These probably belong somewhere more globally
- * useful.
- */
-#define VGA_WIDTH 640
-#define VGA_HEIGHT 480
-#define QVGA_WIDTH 320
-#define QVGA_HEIGHT 240
-#define CIF_WIDTH 352
-#define CIF_HEIGHT 288
-#define QCIF_WIDTH 176
-#define QCIF_HEIGHT 144
-
-/*
* The 7670 sits on i2c with ID 0x42
*/
#define OV7670_I2C_ADDR 0x42
diff --git a/drivers/media/i2c/s5k5baf.c b/drivers/media/i2c/s5k5baf.c
index 564f05f2c9ef..0e461a6fd065 100644
--- a/drivers/media/i2c/s5k5baf.c
+++ b/drivers/media/i2c/s5k5baf.c
@@ -816,7 +816,7 @@ static void s5k5baf_hw_find_min_fiv(struct s5k5baf *state)
"error setting frame interval: %d\n", err);
state->error = -EINVAL;
}
- };
+ }
v4l2_err(&state->sd, "cannot find correct frame interval\n");
state->error = -ERANGE;
}
diff --git a/drivers/media/i2c/saa6752hs.c b/drivers/media/i2c/saa6752hs.c
index 04e9e55018a5..4024ea6f1371 100644
--- a/drivers/media/i2c/saa6752hs.c
+++ b/drivers/media/i2c/saa6752hs.c
@@ -660,7 +660,7 @@ static const struct v4l2_subdev_ops saa6752hs_ops = {
static int saa6752hs_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct saa6752hs_state *h = kzalloc(sizeof(*h), GFP_KERNEL);
+ struct saa6752hs_state *h;
struct v4l2_subdev *sd;
struct v4l2_ctrl_handler *hdl;
u8 addr = 0x13;
@@ -668,6 +668,8 @@ static int saa6752hs_probe(struct i2c_client *client,
v4l_info(client, "chip found @ 0x%x (%s)\n",
client->addr << 1, client->adapter->name);
+
+ h = devm_kzalloc(&client->dev, sizeof(*h), GFP_KERNEL);
if (h == NULL)
return -ENOMEM;
sd = &h->sd;
@@ -752,7 +754,6 @@ static int saa6752hs_probe(struct i2c_client *client,
int err = hdl->error;
v4l2_ctrl_handler_free(hdl);
- kfree(h);
return err;
}
v4l2_ctrl_cluster(3, &h->video_bitrate_mode);
@@ -767,7 +768,6 @@ static int saa6752hs_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&to_state(sd)->hdl);
- kfree(to_state(sd));
return 0;
}
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index 62acb10630f9..932ed9be9ff3 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -31,8 +31,9 @@
#include <linux/device.h>
#include <linux/gpio.h>
#include <linux/module.h>
-#include <linux/slab.h>
#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/smiapp.h>
#include <linux/v4l2-mediabus.h>
#include <media/v4l2-device.h>
@@ -297,8 +298,9 @@ static int smiapp_pll_update(struct smiapp_sensor *sensor)
if (rval < 0)
return rval;
- *sensor->pixel_rate_parray->p_cur.p_s64 = pll->vt_pix_clk_freq_hz;
- *sensor->pixel_rate_csi->p_cur.p_s64 = pll->pixel_rate_csi;
+ __v4l2_ctrl_s_ctrl_int64(sensor->pixel_rate_parray,
+ pll->vt_pix_clk_freq_hz);
+ __v4l2_ctrl_s_ctrl_int64(sensor->pixel_rate_csi, pll->pixel_rate_csi);
return 0;
}
@@ -319,13 +321,7 @@ static void __smiapp_update_exposure_limits(struct smiapp_sensor *sensor)
+ sensor->vblank->val
- sensor->limits[SMIAPP_LIMIT_COARSE_INTEGRATION_TIME_MAX_MARGIN];
- ctrl->maximum = max;
- if (ctrl->default_value > max)
- ctrl->default_value = max;
- if (ctrl->val > max)
- ctrl->val = max;
- if (ctrl->cur.val > max)
- ctrl->cur.val = max;
+ __v4l2_ctrl_modify_range(ctrl, ctrl->minimum, max, ctrl->step, max);
}
/*
@@ -404,6 +400,14 @@ static void smiapp_update_mbus_formats(struct smiapp_sensor *sensor)
pixel_order_str[pixel_order]);
}
+static const char * const smiapp_test_patterns[] = {
+ "Disabled",
+ "Solid Colour",
+ "Eight Vertical Colour Bars",
+ "Colour Bars With Fade to Grey",
+ "Pseudorandom Sequence (PN9)",
+};
+
static int smiapp_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct smiapp_sensor *sensor =
@@ -477,6 +481,39 @@ static int smiapp_set_ctrl(struct v4l2_ctrl *ctrl)
return smiapp_pll_update(sensor);
+ case V4L2_CID_TEST_PATTERN: {
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(sensor->test_data); i++)
+ v4l2_ctrl_activate(
+ sensor->test_data[i],
+ ctrl->val ==
+ V4L2_SMIAPP_TEST_PATTERN_MODE_SOLID_COLOUR);
+
+ return smiapp_write(
+ sensor, SMIAPP_REG_U16_TEST_PATTERN_MODE, ctrl->val);
+ }
+
+ case V4L2_CID_TEST_PATTERN_RED:
+ return smiapp_write(
+ sensor, SMIAPP_REG_U16_TEST_DATA_RED, ctrl->val);
+
+ case V4L2_CID_TEST_PATTERN_GREENR:
+ return smiapp_write(
+ sensor, SMIAPP_REG_U16_TEST_DATA_GREENR, ctrl->val);
+
+ case V4L2_CID_TEST_PATTERN_BLUE:
+ return smiapp_write(
+ sensor, SMIAPP_REG_U16_TEST_DATA_BLUE, ctrl->val);
+
+ case V4L2_CID_TEST_PATTERN_GREENB:
+ return smiapp_write(
+ sensor, SMIAPP_REG_U16_TEST_DATA_GREENB, ctrl->val);
+
+ case V4L2_CID_PIXEL_RATE:
+ /* For v4l2_ctrl_s_ctrl_int64() used internally. */
+ return 0;
+
default:
return -EINVAL;
}
@@ -489,10 +526,10 @@ static const struct v4l2_ctrl_ops smiapp_ctrl_ops = {
static int smiapp_init_controls(struct smiapp_sensor *sensor)
{
struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
- unsigned int max;
+ unsigned int max, i;
int rval;
- rval = v4l2_ctrl_handler_init(&sensor->pixel_array->ctrl_handler, 7);
+ rval = v4l2_ctrl_handler_init(&sensor->pixel_array->ctrl_handler, 12);
if (rval)
return rval;
sensor->pixel_array->ctrl_handler.lock = &sensor->mutex;
@@ -535,6 +572,20 @@ static int smiapp_init_controls(struct smiapp_sensor *sensor)
&sensor->pixel_array->ctrl_handler, &smiapp_ctrl_ops,
V4L2_CID_PIXEL_RATE, 1, INT_MAX, 1, 1);
+ v4l2_ctrl_new_std_menu_items(&sensor->pixel_array->ctrl_handler,
+ &smiapp_ctrl_ops, V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(smiapp_test_patterns) - 1,
+ 0, 0, smiapp_test_patterns);
+
+ for (i = 0; i < ARRAY_SIZE(sensor->test_data); i++) {
+ int max_value = (1 << sensor->csi_format->width) - 1;
+ sensor->test_data[i] =
+ v4l2_ctrl_new_std(
+ &sensor->pixel_array->ctrl_handler,
+ &smiapp_ctrl_ops, V4L2_CID_TEST_PATTERN_RED + i,
+ 0, max_value, 1, max_value);
+ }
+
if (sensor->pixel_array->ctrl_handler.error) {
dev_err(&client->dev,
"pixel array controls initialization failed (%d)\n",
@@ -782,36 +833,25 @@ static void smiapp_update_blanking(struct smiapp_sensor *sensor)
{
struct v4l2_ctrl *vblank = sensor->vblank;
struct v4l2_ctrl *hblank = sensor->hblank;
+ int min, max;
- vblank->minimum =
- max_t(int,
- sensor->limits[SMIAPP_LIMIT_MIN_FRAME_BLANKING_LINES],
- sensor->limits[SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES_BIN] -
- sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].height);
- vblank->maximum =
- sensor->limits[SMIAPP_LIMIT_MAX_FRAME_LENGTH_LINES_BIN] -
+ min = max_t(int,
+ sensor->limits[SMIAPP_LIMIT_MIN_FRAME_BLANKING_LINES],
+ sensor->limits[SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES_BIN] -
+ sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].height);
+ max = sensor->limits[SMIAPP_LIMIT_MAX_FRAME_LENGTH_LINES_BIN] -
sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].height;
- vblank->val = clamp_t(int, vblank->val,
- vblank->minimum, vblank->maximum);
- vblank->default_value = vblank->minimum;
- vblank->val = vblank->val;
- vblank->cur.val = vblank->val;
-
- hblank->minimum =
- max_t(int,
- sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK_BIN] -
- sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].width,
- sensor->limits[SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK_BIN]);
- hblank->maximum =
- sensor->limits[SMIAPP_LIMIT_MAX_LINE_LENGTH_PCK_BIN] -
+ __v4l2_ctrl_modify_range(vblank, min, max, vblank->step, min);
+
+ min = max_t(int,
+ sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK_BIN] -
+ sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].width,
+ sensor->limits[SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK_BIN]);
+ max = sensor->limits[SMIAPP_LIMIT_MAX_LINE_LENGTH_PCK_BIN] -
sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].width;
- hblank->val = clamp_t(int, hblank->val,
- hblank->minimum, hblank->maximum);
- hblank->default_value = hblank->minimum;
- hblank->val = hblank->val;
- hblank->cur.val = hblank->val;
+ __v4l2_ctrl_modify_range(hblank, min, max, hblank->step, min);
__smiapp_update_exposure_limits(sensor);
}
@@ -1272,7 +1312,7 @@ static void smiapp_power_off(struct smiapp_sensor *sensor)
clk_disable_unprepare(sensor->ext_clk);
usleep_range(5000, 5000);
regulator_disable(sensor->vana);
- sensor->streaming = 0;
+ sensor->streaming = false;
}
static int smiapp_set_power(struct v4l2_subdev *subdev, int on)
@@ -1462,13 +1502,13 @@ static int smiapp_set_stream(struct v4l2_subdev *subdev, int enable)
return 0;
if (enable) {
- sensor->streaming = 1;
+ sensor->streaming = true;
rval = smiapp_start_streaming(sensor);
if (rval < 0)
- sensor->streaming = 0;
+ sensor->streaming = false;
} else {
rval = smiapp_stop_streaming(sensor);
- sensor->streaming = 0;
+ sensor->streaming = false;
}
return rval;
@@ -1664,17 +1704,34 @@ static int smiapp_set_format(struct v4l2_subdev *subdev,
if (fmt->pad == ssd->source_pad) {
u32 code = fmt->format.code;
int rval = __smiapp_get_format(subdev, fh, fmt);
+ bool range_changed = false;
+ unsigned int i;
if (!rval && subdev == &sensor->src->sd) {
const struct smiapp_csi_data_format *csi_format =
smiapp_validate_csi_data_format(sensor, code);
- if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ if (csi_format->width !=
+ sensor->csi_format->width)
+ range_changed = true;
+
sensor->csi_format = csi_format;
+ }
+
fmt->format.code = csi_format->code;
}
mutex_unlock(&sensor->mutex);
- return rval;
+ if (rval || !range_changed)
+ return rval;
+
+ for (i = 0; i < ARRAY_SIZE(sensor->test_data); i++)
+ v4l2_ctrl_modify_range(
+ sensor->test_data[i],
+ 0, (1 << sensor->csi_format->width) - 1, 1, 0);
+
+ return 0;
}
/* Sink pad. Width and height are changeable here. */
diff --git a/drivers/media/i2c/smiapp/smiapp.h b/drivers/media/i2c/smiapp/smiapp.h
index 7cc5aae662fd..874b49ffd88f 100644
--- a/drivers/media/i2c/smiapp/smiapp.h
+++ b/drivers/media/i2c/smiapp/smiapp.h
@@ -54,6 +54,8 @@
(1000 + (SMIAPP_RESET_DELAY_CLOCKS * 1000 \
+ (clk) / 1000 - 1) / ((clk) / 1000))
+#define SMIAPP_COLOUR_COMPONENTS 4
+
#include "smiapp-limits.h"
struct smiapp_quirk;
@@ -241,6 +243,8 @@ struct smiapp_sensor {
/* src controls */
struct v4l2_ctrl *link_freq;
struct v4l2_ctrl *pixel_rate_csi;
+ /* test pattern colour components */
+ struct v4l2_ctrl *test_data[SMIAPP_COLOUR_COMPONENTS];
};
#define to_smiapp_subdev(_sd) \
diff --git a/drivers/media/i2c/soc_camera/mt9t112.c b/drivers/media/i2c/soc_camera/mt9t112.c
index 46f431a13782..996d7b4007a5 100644
--- a/drivers/media/i2c/soc_camera/mt9t112.c
+++ b/drivers/media/i2c/soc_camera/mt9t112.c
@@ -29,6 +29,7 @@
#include <media/soc_camera.h>
#include <media/v4l2-clk.h>
#include <media/v4l2-common.h>
+#include <media/v4l2-image-sizes.h>
/* you can check PLL/clock info */
/* #define EXT_CLOCK 24000000 */
@@ -42,9 +43,6 @@
#define MAX_WIDTH 2048
#define MAX_HEIGHT 1536
-#define VGA_WIDTH 640
-#define VGA_HEIGHT 480
-
/*
* macro of read/write
*/
diff --git a/drivers/media/i2c/soc_camera/ov772x.c b/drivers/media/i2c/soc_camera/ov772x.c
index 7f2b3c8926af..970a04e1e56e 100644
--- a/drivers/media/i2c/soc_camera/ov772x.c
+++ b/drivers/media/i2c/soc_camera/ov772x.c
@@ -29,6 +29,7 @@
#include <media/v4l2-clk.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-subdev.h>
+#include <media/v4l2-image-sizes.h>
/*
* register offset
@@ -360,10 +361,6 @@
#define SCAL0_ACTRL 0x08 /* Auto scaling factor control */
#define SCAL1_2_ACTRL 0x04 /* Auto scaling factor control */
-#define VGA_WIDTH 640
-#define VGA_HEIGHT 480
-#define QVGA_WIDTH 320
-#define QVGA_HEIGHT 240
#define OV772X_MAX_WIDTH VGA_WIDTH
#define OV772X_MAX_HEIGHT VGA_HEIGHT
diff --git a/drivers/media/i2c/soc_camera/ov9740.c b/drivers/media/i2c/soc_camera/ov9740.c
index ea76863dfdb4..ee9eb635d540 100644
--- a/drivers/media/i2c/soc_camera/ov9740.c
+++ b/drivers/media/i2c/soc_camera/ov9740.c
@@ -564,13 +564,13 @@ static int ov9740_set_res(struct i2c_client *client, u32 width, u32 height)
u32 y_start;
u32 x_end;
u32 y_end;
- bool scaling = 0;
+ bool scaling = false;
u32 scale_input_x;
u32 scale_input_y;
int ret;
if ((width != OV9740_MAX_WIDTH) || (height != OV9740_MAX_HEIGHT))
- scaling = 1;
+ scaling = true;
/*
* Try to use as much of the sensor area as possible when supporting
diff --git a/drivers/media/i2c/tda7432.c b/drivers/media/i2c/tda7432.c
index 72af644fa051..cf93021a6500 100644
--- a/drivers/media/i2c/tda7432.c
+++ b/drivers/media/i2c/tda7432.c
@@ -293,7 +293,7 @@ static int tda7432_s_ctrl(struct v4l2_ctrl *ctrl)
if (t->mute->val) {
lf |= TDA7432_MUTE;
lr |= TDA7432_MUTE;
- lf |= TDA7432_MUTE;
+ rf |= TDA7432_MUTE;
rr |= TDA7432_MUTE;
}
/* Mute & update balance*/
diff --git a/drivers/media/i2c/tvp7002.c b/drivers/media/i2c/tvp7002.c
index 11f2387e1dab..51bac762638b 100644
--- a/drivers/media/i2c/tvp7002.c
+++ b/drivers/media/i2c/tvp7002.c
@@ -775,25 +775,20 @@ static int tvp7002_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned index,
static int tvp7002_s_stream(struct v4l2_subdev *sd, int enable)
{
struct tvp7002 *device = to_tvp7002(sd);
- int error = 0;
+ int error;
if (device->streaming == enable)
return 0;
- if (enable) {
- /* Set output state on (low impedance means stream on) */
- error = tvp7002_write(sd, TVP7002_MISC_CTL_2, 0x00);
- device->streaming = enable;
- } else {
- /* Set output state off (high impedance means stream off) */
- error = tvp7002_write(sd, TVP7002_MISC_CTL_2, 0x03);
- if (error)
- v4l2_dbg(1, debug, sd, "Unable to stop streaming\n");
-
- device->streaming = enable;
+ /* low impedance: on, high impedance: off */
+ error = tvp7002_write(sd, TVP7002_MISC_CTL_2, enable ? 0x00 : 0x03);
+ if (error) {
+ v4l2_dbg(1, debug, sd, "Fail to set streaming\n");
+ return error;
}
- return error;
+ device->streaming = enable;
+ return 0;
}
/*
diff --git a/drivers/media/i2c/vs6624.c b/drivers/media/i2c/vs6624.c
index 23f4f65fccd7..373f2df52492 100644
--- a/drivers/media/i2c/vs6624.c
+++ b/drivers/media/i2c/vs6624.c
@@ -30,22 +30,10 @@
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-mediabus.h>
+#include <media/v4l2-image-sizes.h>
#include "vs6624_regs.h"
-#define VGA_WIDTH 640
-#define VGA_HEIGHT 480
-#define QVGA_WIDTH 320
-#define QVGA_HEIGHT 240
-#define QQVGA_WIDTH 160
-#define QQVGA_HEIGHT 120
-#define CIF_WIDTH 352
-#define CIF_HEIGHT 288
-#define QCIF_WIDTH 176
-#define QCIF_HEIGHT 144
-#define QQCIF_WIDTH 88
-#define QQCIF_HEIGHT 72
-
#define MAX_FRAME_RATE 30
struct vs6624 {
diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
index 73a432934bd8..7b39440192d6 100644
--- a/drivers/media/media-device.c
+++ b/drivers/media/media-device.c
@@ -103,10 +103,8 @@ static long media_device_enum_entities(struct media_device *mdev,
return -EINVAL;
u_ent.id = ent->id;
- if (ent->name) {
- strncpy(u_ent.name, ent->name, sizeof(u_ent.name));
- u_ent.name[sizeof(u_ent.name) - 1] = '\0';
- }
+ if (ent->name)
+ strlcpy(u_ent.name, ent->name, sizeof(u_ent.name));
u_ent.type = ent->type;
u_ent.revision = ent->revision;
u_ent.flags = ent->flags;
diff --git a/drivers/media/media-devnode.c b/drivers/media/media-devnode.c
index 7acd19c881de..ebf9626e5ae5 100644
--- a/drivers/media/media-devnode.c
+++ b/drivers/media/media-devnode.c
@@ -192,7 +192,6 @@ static int media_open(struct inode *inode, struct file *filp)
static int media_release(struct inode *inode, struct file *filp)
{
struct media_devnode *mdev = media_devnode_data(filp);
- int ret = 0;
if (mdev->fops->release)
mdev->fops->release(filp);
@@ -201,7 +200,7 @@ static int media_release(struct inode *inode, struct file *filp)
return value is ignored. */
put_device(&mdev->dev);
filp->private_data = NULL;
- return ret;
+ return 0;
}
static const struct file_operations media_devnode_fops = {
diff --git a/drivers/media/parport/pms.c b/drivers/media/parport/pms.c
index 9bc105b3db1b..e6b497528cea 100644
--- a/drivers/media/parport/pms.c
+++ b/drivers/media/parport/pms.c
@@ -629,11 +629,15 @@ static int pms_capture(struct pms *dev, char __user *buf, int rgb555, int count)
{
int y;
int dw = 2 * dev->width;
- char tmp[dw + 32]; /* using a temp buffer is faster than direct */
+ char *tmp; /* using a temp buffer is faster than direct */
int cnt = 0;
int len = 0;
unsigned char r8 = 0x5; /* value for reg8 */
+ tmp = kmalloc(dw + 32, GFP_KERNEL);
+ if (!tmp)
+ return 0;
+
if (rgb555)
r8 |= 0x20; /* else use untranslated rgb = 565 */
mvv_write(dev, 0x08, r8); /* capture rgb555/565, init DRAM, PC enable */
@@ -664,6 +668,7 @@ static int pms_capture(struct pms *dev, char __user *buf, int rgb555, int count)
len += dt;
}
}
+ kfree(tmp);
return len;
}
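The pms_capture() change above replaces a variable-length array on the kernel stack with kmalloc()/kfree(); in the driver the function reports a byte count, so its allocation-failure path simply returns 0. A hedged sketch of the same pattern with invented names and conventional error handling; the capture loop is elided.

/* Sketch of the VLA-to-heap conversion; not the pms driver code. */
#include <linux/slab.h>

static int demo_capture(int width)
{
	int dw = 2 * width;
	char *tmp;
	int len = 0;

	/* 'char tmp[dw + 32]' would consume a caller-controlled amount of
	 * kernel stack; a heap allocation keeps stack usage bounded. */
	tmp = kmalloc(dw + 32, GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	/* ... capture into tmp and accumulate len here ... */

	kfree(tmp);
	return len;
}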
diff --git a/drivers/media/pci/Kconfig b/drivers/media/pci/Kconfig
index 5c16c9c2203e..f8cec8e8cf82 100644
--- a/drivers/media/pci/Kconfig
+++ b/drivers/media/pci/Kconfig
@@ -20,6 +20,7 @@ source "drivers/media/pci/ivtv/Kconfig"
source "drivers/media/pci/zoran/Kconfig"
source "drivers/media/pci/saa7146/Kconfig"
source "drivers/media/pci/solo6x10/Kconfig"
+source "drivers/media/pci/tw68/Kconfig"
endif
if MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT
@@ -41,6 +42,7 @@ source "drivers/media/pci/b2c2/Kconfig"
source "drivers/media/pci/pluto2/Kconfig"
source "drivers/media/pci/dm1105/Kconfig"
source "drivers/media/pci/pt1/Kconfig"
+source "drivers/media/pci/pt3/Kconfig"
source "drivers/media/pci/mantis/Kconfig"
source "drivers/media/pci/ngene/Kconfig"
source "drivers/media/pci/ddbridge/Kconfig"
diff --git a/drivers/media/pci/Makefile b/drivers/media/pci/Makefile
index e5b53fb569ef..a12926e4b51f 100644
--- a/drivers/media/pci/Makefile
+++ b/drivers/media/pci/Makefile
@@ -7,10 +7,10 @@ obj-y += ttpci/ \
pluto2/ \
dm1105/ \
pt1/ \
+ pt3/ \
mantis/ \
ngene/ \
ddbridge/ \
- b2c2/ \
saa7146/
obj-$(CONFIG_VIDEO_IVTV) += ivtv/
@@ -22,6 +22,7 @@ obj-$(CONFIG_VIDEO_CX88) += cx88/
obj-$(CONFIG_VIDEO_BT848) += bt8xx/
obj-$(CONFIG_VIDEO_SAA7134) += saa7134/
obj-$(CONFIG_VIDEO_SAA7164) += saa7164/
+obj-$(CONFIG_VIDEO_TW68) += tw68/
obj-$(CONFIG_VIDEO_MEYE) += meye/
obj-$(CONFIG_STA2X11_VIP) += sta2x11/
obj-$(CONFIG_VIDEO_SOLO6X10) += solo6x10/
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index 970e542d3a51..4a8176c09fc9 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -1531,7 +1531,6 @@ bttv_switch_overlay(struct bttv *btv, struct bttv_fh *fh,
{
struct bttv_buffer *old;
unsigned long flags;
- int retval = 0;
dprintk("switch_overlay: enter [new=%p]\n", new);
if (new)
@@ -1551,7 +1550,7 @@ bttv_switch_overlay(struct bttv *btv, struct bttv_fh *fh,
if (NULL == new)
free_btres_lock(btv,fh,RESOURCE_OVERLAY);
dprintk("switch_overlay: done\n");
- return retval;
+ return 0;
}
/* ----------------------------------------------------------------------- */
@@ -3856,7 +3855,7 @@ static irqreturn_t bttv_irq(int irq, void *dev_id)
btwrite(btread(BT848_INT_MASK) & (-1 ^ BT848_INT_GPINT),
BT848_INT_MASK);
- };
+ }
bttv_print_irqbits(stat,astat);
diff --git a/drivers/media/pci/bt8xx/dst_ca.c b/drivers/media/pci/bt8xx/dst_ca.c
index 0e788fca992c..c22c4ae06844 100644
--- a/drivers/media/pci/bt8xx/dst_ca.c
+++ b/drivers/media/pci/bt8xx/dst_ca.c
@@ -674,11 +674,9 @@ static int dst_ca_release(struct inode *inode, struct file *file)
static ssize_t dst_ca_read(struct file *file, char __user *buffer, size_t length, loff_t *offset)
{
- ssize_t bytes_read = 0;
-
dprintk(verbose, DST_CA_DEBUG, 1, " Device read.");
- return bytes_read;
+ return 0;
}
static ssize_t dst_ca_write(struct file *file, const char __user *buffer, size_t length, loff_t *offset)
diff --git a/drivers/media/pci/cx18/cx18-alsa-pcm.c b/drivers/media/pci/cx18/cx18-alsa-pcm.c
index 180077c49123..ffb6acdc575f 100644
--- a/drivers/media/pci/cx18/cx18-alsa-pcm.c
+++ b/drivers/media/pci/cx18/cx18-alsa-pcm.c
@@ -80,7 +80,7 @@ void cx18_alsa_announce_pcm_data(struct snd_cx18_card *cxsc, u8 *pcm_data,
int period_elapsed = 0;
int length;
- dprintk("cx18 alsa announce ptr=%p data=%p num_bytes=%zd\n", cxsc,
+ dprintk("cx18 alsa announce ptr=%p data=%p num_bytes=%zu\n", cxsc,
pcm_data, num_bytes);
substream = cxsc->capture_pcm_substream;
diff --git a/drivers/media/pci/cx18/cx18-firmware.c b/drivers/media/pci/cx18/cx18-firmware.c
index a1c1cec05f98..c6c83445f8bf 100644
--- a/drivers/media/pci/cx18/cx18-firmware.c
+++ b/drivers/media/pci/cx18/cx18-firmware.c
@@ -130,7 +130,7 @@ static int load_cpu_fw_direct(const char *fn, u8 __iomem *mem, struct cx18 *cx)
}
}
if (!test_bit(CX18_F_I_LOADED_FW, &cx->i_flags))
- CX18_INFO("loaded %s firmware (%zd bytes)\n", fn, fw->size);
+ CX18_INFO("loaded %s firmware (%zu bytes)\n", fn, fw->size);
size = fw->size;
release_firmware(fw);
cx18_setup_page(cx, SCB_OFFSET);
@@ -164,7 +164,7 @@ static int load_apu_fw_direct(const char *fn, u8 __iomem *dst, struct cx18 *cx,
apu_version = (vers[0] << 24) | (vers[4] << 16) | vers[32];
while (offset + sizeof(seghdr) < fw->size) {
- const u32 *shptr = src + offset / 4;
+ const __le32 *shptr = (__force __le32 *)src + offset / 4;
seghdr.sync1 = le32_to_cpu(shptr[0]);
seghdr.sync2 = le32_to_cpu(shptr[1]);
@@ -202,7 +202,7 @@ static int load_apu_fw_direct(const char *fn, u8 __iomem *dst, struct cx18 *cx,
offset += seghdr.size;
}
if (!test_bit(CX18_F_I_LOADED_FW, &cx->i_flags))
- CX18_INFO("loaded %s firmware V%08x (%zd bytes)\n",
+ CX18_INFO("loaded %s firmware V%08x (%zu bytes)\n",
fn, apu_version, fw->size);
size = fw->size;
release_firmware(fw);
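The load_apu_fw_direct() hunk retypes the segment-header pointer as __le32 so that sparse can check that every word passes through le32_to_cpu() before being used as a CPU-native value; the __force cast is only needed because the source buffer is plain memory. A small sketch of the annotation, assuming a hypothetical two-word header layout rather than the cx18 format.

/* Hypothetical little-endian header; illustration only. */
#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_seghdr {
	u32 sync;
	u32 addr;
};

static void demo_parse_hdr(const void *blob, struct demo_seghdr *hdr)
{
	/* The on-disk words are little-endian, so the pointer is __le32;
	 * sparse then warns if a word is used without le32_to_cpu(). */
	const __le32 *p = blob;

	hdr->sync = le32_to_cpu(p[0]);
	hdr->addr = le32_to_cpu(p[1]);
}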
diff --git a/drivers/media/pci/cx18/cx18-queue.c b/drivers/media/pci/cx18/cx18-queue.c
index 8884537bd62f..2a247d264b87 100644
--- a/drivers/media/pci/cx18/cx18-queue.c
+++ b/drivers/media/pci/cx18/cx18-queue.c
@@ -364,7 +364,7 @@ int cx18_stream_alloc(struct cx18_stream *s)
((char __iomem *)cx->scb->cpu_mdl));
CX18_ERR("Too many buffers, cannot fit in SCB area\n");
- CX18_ERR("Max buffers = %zd\n",
+ CX18_ERR("Max buffers = %zu\n",
bufsz / sizeof(struct cx18_mdl_ent));
return -ENOMEM;
}
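The %zd to %zu changes in the cx18 hunks above fix the printk() specifier for size_t values such as fw->size: %zu is the unsigned size_t form, while %zd belongs to the signed ssize_t. A one-function illustration, not driver code.

#include <linux/kernel.h>

static void demo_report(size_t nbytes, ssize_t ret)
{
	pr_info("loaded %zu bytes\n", nbytes);	/* size_t  -> %zu */
	pr_info("read returned %zd\n", ret);	/* ssize_t -> %zd */
}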
diff --git a/drivers/media/pci/cx23885/Kconfig b/drivers/media/pci/cx23885/Kconfig
index e12c006e6e2d..f613314b360b 100644
--- a/drivers/media/pci/cx23885/Kconfig
+++ b/drivers/media/pci/cx23885/Kconfig
@@ -3,12 +3,11 @@ config VIDEO_CX23885
depends on DVB_CORE && VIDEO_DEV && PCI && I2C && INPUT && SND
select SND_PCM
select I2C_ALGOBIT
- select VIDEO_BTCX
select VIDEO_TUNER
select VIDEO_TVEEPROM
depends on RC_CORE
- select VIDEOBUF_DVB
- select VIDEOBUF_DMA_SG
+ select VIDEOBUF2_DVB
+ select VIDEOBUF2_DMA_SG
select VIDEO_CX25840
select VIDEO_CX2341X
select DVB_DIB7000P if MEDIA_SUBDRV_AUTOSELECT
@@ -32,12 +31,16 @@ config VIDEO_CX23885
select DVB_A8293 if MEDIA_SUBDRV_AUTOSELECT
select DVB_MB86A20S if MEDIA_SUBDRV_AUTOSELECT
select DVB_SI2165 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_SI2168 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_M88DS3103 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_MT2063 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_MT2131 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_XC2028 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_TDA8290 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_TDA18271 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_XC5000 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_SI2157 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_M88TS2022 if MEDIA_SUBDRV_AUTOSELECT
select DVB_TUNER_DIB0070 if MEDIA_SUBDRV_AUTOSELECT
---help---
This is a video4linux driver for Conexant 23885 based
diff --git a/drivers/media/pci/cx23885/Makefile b/drivers/media/pci/cx23885/Makefile
index 2a2cafb8cf5b..a2cbdcf15a8c 100644
--- a/drivers/media/pci/cx23885/Makefile
+++ b/drivers/media/pci/cx23885/Makefile
@@ -8,7 +8,6 @@ obj-$(CONFIG_VIDEO_CX23885) += cx23885.o
obj-$(CONFIG_MEDIA_ALTERA_CI) += altera-ci.o
ccflags-y += -Idrivers/media/i2c
-ccflags-y += -Idrivers/media/common
ccflags-y += -Idrivers/media/tuners
ccflags-y += -Idrivers/media/dvb-core
ccflags-y += -Idrivers/media/dvb-frontends
diff --git a/drivers/media/pci/cx23885/altera-ci.c b/drivers/media/pci/cx23885/altera-ci.c
index 2926f7fadccd..2bbbf545b042 100644
--- a/drivers/media/pci/cx23885/altera-ci.c
+++ b/drivers/media/pci/cx23885/altera-ci.c
@@ -16,10 +16,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
@@ -52,8 +48,8 @@
* | DATA7| DATA6| DATA5| DATA4| DATA3| DATA2| DATA1| DATA0|
* +-------+-------+-------+-------+-------+-------+-------+-------+
*/
-#include <media/videobuf-dma-sg.h>
-#include <media/videobuf-dvb.h>
+#include <dvb_demux.h>
+#include <dvb_frontend.h>
#include "altera-ci.h"
#include "dvb_ca_en50221.h"
diff --git a/drivers/media/pci/cx23885/altera-ci.h b/drivers/media/pci/cx23885/altera-ci.h
index 4998c96caebe..5028f0cf83f4 100644
--- a/drivers/media/pci/cx23885/altera-ci.h
+++ b/drivers/media/pci/cx23885/altera-ci.h
@@ -16,10 +16,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __ALTERA_CI_H
#define __ALTERA_CI_H
diff --git a/drivers/media/pci/cx23885/cimax2.c b/drivers/media/pci/cx23885/cimax2.c
index 16fa7ea4d4aa..631e4f24aea6 100644
--- a/drivers/media/pci/cx23885/cimax2.c
+++ b/drivers/media/pci/cx23885/cimax2.c
@@ -17,10 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "cx23885.h"
diff --git a/drivers/media/pci/cx23885/cimax2.h b/drivers/media/pci/cx23885/cimax2.h
index 518744a4c8a5..565e958f6f8d 100644
--- a/drivers/media/pci/cx23885/cimax2.h
+++ b/drivers/media/pci/cx23885/cimax2.h
@@ -17,10 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef CIMAX2_H
diff --git a/drivers/media/pci/cx23885/cx23885-417.c b/drivers/media/pci/cx23885/cx23885-417.c
index bf89fc88692e..3948db386fb5 100644
--- a/drivers/media/pci/cx23885/cx23885-417.c
+++ b/drivers/media/pci/cx23885/cx23885-417.c
@@ -18,10 +18,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
@@ -865,6 +861,11 @@ static int cx23885_api_cmd(struct cx23885_dev *dev,
return err;
}
+static int cx23885_api_func(void *priv, u32 cmd, int in, int out, u32 data[CX2341X_MBOX_MAX_DATA])
+{
+ return cx23885_mbox_func(priv, cmd, in, out, data);
+}
+
static int cx23885_find_mailbox(struct cx23885_dev *dev)
{
u32 signature[4] = {
@@ -941,7 +942,7 @@ static int cx23885_load_firmware(struct cx23885_dev *dev)
if (firmware->size != CX23885_FIRM_IMAGE_SIZE) {
printk(KERN_ERR "ERROR: Firmware size mismatch "
- "(have %zd, expected %d)\n",
+ "(have %zu, expected %d)\n",
firmware->size, CX23885_FIRM_IMAGE_SIZE);
release_firmware(firmware);
return -1;
@@ -1033,12 +1034,12 @@ static void cx23885_codec_settings(struct cx23885_dev *dev)
cx23885_api_cmd(dev, CX2341X_ENC_SET_FRAME_SIZE, 2, 0,
dev->ts1.height, dev->ts1.width);
- dev->mpeg_params.width = dev->ts1.width;
- dev->mpeg_params.height = dev->ts1.height;
- dev->mpeg_params.is_50hz =
+ dev->cxhdl.width = dev->ts1.width;
+ dev->cxhdl.height = dev->ts1.height;
+ dev->cxhdl.is_50hz =
(dev->encodernorm.id & V4L2_STD_625_50) != 0;
- cx2341x_update(dev, cx23885_mbox_func, NULL, &dev->mpeg_params);
+ cx2341x_handler_setup(&dev->cxhdl);
cx23885_api_cmd(dev, CX2341X_ENC_MISC, 2, 0, 3, 1);
cx23885_api_cmd(dev, CX2341X_ENC_MISC, 2, 0, 4, 1);
@@ -1137,85 +1138,107 @@ static int cx23885_initialize_codec(struct cx23885_dev *dev, int startencoder)
/* ------------------------------------------------------------------ */
-static int bb_buf_setup(struct videobuf_queue *q,
- unsigned int *count, unsigned int *size)
+static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], void *alloc_ctxs[])
{
- struct cx23885_fh *fh = q->priv_data;
-
- fh->dev->ts1.ts_packet_size = mpeglinesize;
- fh->dev->ts1.ts_packet_count = mpeglines;
-
- *size = fh->dev->ts1.ts_packet_size * fh->dev->ts1.ts_packet_count;
- *count = mpegbufs;
+ struct cx23885_dev *dev = q->drv_priv;
+ dev->ts1.ts_packet_size = mpeglinesize;
+ dev->ts1.ts_packet_count = mpeglines;
+ *num_planes = 1;
+ sizes[0] = mpeglinesize * mpeglines;
+ *num_buffers = mpegbufs;
return 0;
}
-static int bb_buf_prepare(struct videobuf_queue *q,
- struct videobuf_buffer *vb, enum v4l2_field field)
+static int buffer_prepare(struct vb2_buffer *vb)
{
- struct cx23885_fh *fh = q->priv_data;
- return cx23885_buf_prepare(q, &fh->dev->ts1,
- (struct cx23885_buffer *)vb,
- field);
+ struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
+ struct cx23885_buffer *buf =
+ container_of(vb, struct cx23885_buffer, vb);
+
+ return cx23885_buf_prepare(buf, &dev->ts1);
}
-static void bb_buf_queue(struct videobuf_queue *q,
- struct videobuf_buffer *vb)
+static void buffer_finish(struct vb2_buffer *vb)
{
- struct cx23885_fh *fh = q->priv_data;
- cx23885_buf_queue(&fh->dev->ts1, (struct cx23885_buffer *)vb);
+ struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
+ struct cx23885_buffer *buf = container_of(vb,
+ struct cx23885_buffer, vb);
+ struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
+
+ cx23885_free_buffer(dev, buf);
+
+ dma_unmap_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
}
-static void bb_buf_release(struct videobuf_queue *q,
- struct videobuf_buffer *vb)
+static void buffer_queue(struct vb2_buffer *vb)
{
- cx23885_free_buffer(q, (struct cx23885_buffer *)vb);
-}
+ struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
+ struct cx23885_buffer *buf = container_of(vb,
+ struct cx23885_buffer, vb);
-static struct videobuf_queue_ops cx23885_qops = {
- .buf_setup = bb_buf_setup,
- .buf_prepare = bb_buf_prepare,
- .buf_queue = bb_buf_queue,
- .buf_release = bb_buf_release,
-};
+ cx23885_buf_queue(&dev->ts1, buf);
+}
-/* ------------------------------------------------------------------ */
+static int cx23885_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct cx23885_dev *dev = q->drv_priv;
+ struct cx23885_dmaqueue *dmaq = &dev->ts1.mpegq;
+ unsigned long flags;
+ int ret;
+
+ ret = cx23885_initialize_codec(dev, 1);
+ if (ret == 0) {
+ struct cx23885_buffer *buf = list_entry(dmaq->active.next,
+ struct cx23885_buffer, queue);
+
+ cx23885_start_dma(&dev->ts1, dmaq, buf);
+ return 0;
+ }
+ spin_lock_irqsave(&dev->slock, flags);
+ while (!list_empty(&dmaq->active)) {
+ struct cx23885_buffer *buf = list_entry(dmaq->active.next,
+ struct cx23885_buffer, queue);
-static const u32 *ctrl_classes[] = {
- cx2341x_mpeg_ctrls,
- NULL
-};
+ list_del(&buf->queue);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+ }
+ spin_unlock_irqrestore(&dev->slock, flags);
+ return ret;
+}
-static int cx23885_queryctrl(struct cx23885_dev *dev,
- struct v4l2_queryctrl *qctrl)
+static void cx23885_stop_streaming(struct vb2_queue *q)
{
- qctrl->id = v4l2_ctrl_next(ctrl_classes, qctrl->id);
- if (qctrl->id == 0)
- return -EINVAL;
+ struct cx23885_dev *dev = q->drv_priv;
- /* MPEG V4L2 controls */
- if (cx2341x_ctrl_query(&dev->mpeg_params, qctrl))
- qctrl->flags |= V4L2_CTRL_FLAG_DISABLED;
+ /* stop mpeg capture */
+ cx23885_api_cmd(dev, CX2341X_ENC_STOP_CAPTURE, 3, 0,
+ CX23885_END_NOW, CX23885_MPEG_CAPTURE,
+ CX23885_RAW_BITS_NONE);
- return 0;
+ msleep(500);
+ cx23885_417_check_encoder(dev);
+ cx23885_cancel_buffers(&dev->ts1);
}
-static int cx23885_querymenu(struct cx23885_dev *dev,
- struct v4l2_querymenu *qmenu)
-{
- struct v4l2_queryctrl qctrl;
+static struct vb2_ops cx23885_qops = {
+ .queue_setup = queue_setup,
+ .buf_prepare = buffer_prepare,
+ .buf_finish = buffer_finish,
+ .buf_queue = buffer_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = cx23885_start_streaming,
+ .stop_streaming = cx23885_stop_streaming,
+};
- qctrl.id = qmenu->id;
- cx23885_queryctrl(dev, &qctrl);
- return v4l2_ctrl_query_menu(qmenu, &qctrl,
- cx2341x_ctrl_get_menu(&dev->mpeg_params, qmenu->id));
-}
+/* ------------------------------------------------------------------ */
static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *id)
{
- struct cx23885_fh *fh = file->private_data;
- struct cx23885_dev *dev = fh->dev;
+ struct cx23885_dev *dev = video_drvdata(file);
*id = dev->tvnorm;
return 0;
@@ -1223,29 +1246,26 @@ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *id)
static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id id)
{
- struct cx23885_fh *fh = file->private_data;
- struct cx23885_dev *dev = fh->dev;
+ struct cx23885_dev *dev = video_drvdata(file);
unsigned int i;
+ int ret;
for (i = 0; i < ARRAY_SIZE(cx23885_tvnorms); i++)
if (id & cx23885_tvnorms[i].id)
break;
if (i == ARRAY_SIZE(cx23885_tvnorms))
return -EINVAL;
- dev->encodernorm = cx23885_tvnorms[i];
- /* Have the drier core notify the subdevices */
- mutex_lock(&dev->lock);
- cx23885_set_tvnorm(dev, id);
- mutex_unlock(&dev->lock);
-
- return 0;
+ ret = cx23885_set_tvnorm(dev, id);
+ if (!ret)
+ dev->encodernorm = cx23885_tvnorms[i];
+ return ret;
}
static int vidioc_enum_input(struct file *file, void *priv,
struct v4l2_input *i)
{
- struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev;
+ struct cx23885_dev *dev = video_drvdata(file);
dprintk(1, "%s()\n", __func__);
return cx23885_enum_input(dev, i);
}
@@ -1263,8 +1283,7 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
static int vidioc_g_tuner(struct file *file, void *priv,
struct v4l2_tuner *t)
{
- struct cx23885_fh *fh = file->private_data;
- struct cx23885_dev *dev = fh->dev;
+ struct cx23885_dev *dev = video_drvdata(file);
if (dev->tuner_type == TUNER_ABSENT)
return -EINVAL;
@@ -1281,8 +1300,7 @@ static int vidioc_g_tuner(struct file *file, void *priv,
static int vidioc_s_tuner(struct file *file, void *priv,
const struct v4l2_tuner *t)
{
- struct cx23885_fh *fh = file->private_data;
- struct cx23885_dev *dev = fh->dev;
+ struct cx23885_dev *dev = video_drvdata(file);
if (dev->tuner_type == TUNER_ABSENT)
return -EINVAL;
@@ -1296,8 +1314,7 @@ static int vidioc_s_tuner(struct file *file, void *priv,
static int vidioc_g_frequency(struct file *file, void *priv,
struct v4l2_frequency *f)
{
- struct cx23885_fh *fh = file->private_data;
- struct cx23885_dev *dev = fh->dev;
+ struct cx23885_dev *dev = video_drvdata(file);
if (dev->tuner_type == TUNER_ABSENT)
return -EINVAL;
@@ -1315,27 +1332,10 @@ static int vidioc_s_frequency(struct file *file, void *priv,
return cx23885_set_frequency(file, priv, f);
}
-static int vidioc_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctl)
-{
- struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev;
-
- return cx23885_get_control(dev, ctl);
-}
-
-static int vidioc_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctl)
-{
- struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev;
-
- return cx23885_set_control(dev, ctl);
-}
-
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct cx23885_fh *fh = file->private_data;
- struct cx23885_dev *dev = fh->dev;
+ struct cx23885_dev *dev = video_drvdata(file);
struct cx23885_tsport *tsport = &dev->ts1;
strlcpy(cap->driver, dev->name, sizeof(cap->driver));
@@ -1368,8 +1368,7 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx23885_fh *fh = file->private_data;
- struct cx23885_dev *dev = fh->dev;
+ struct cx23885_dev *dev = video_drvdata(file);
f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
f->fmt.pix.bytesperline = 0;
@@ -1378,285 +1377,63 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
f->fmt.pix.colorspace = 0;
f->fmt.pix.width = dev->ts1.width;
f->fmt.pix.height = dev->ts1.height;
- f->fmt.pix.field = fh->mpegq.field;
- dprintk(1, "VIDIOC_G_FMT: w: %d, h: %d, f: %d\n",
- dev->ts1.width, dev->ts1.height, fh->mpegq.field);
+ f->fmt.pix.field = V4L2_FIELD_INTERLACED;
+ dprintk(1, "VIDIOC_G_FMT: w: %d, h: %d\n",
+ dev->ts1.width, dev->ts1.height);
return 0;
}
static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx23885_fh *fh = file->private_data;
- struct cx23885_dev *dev = fh->dev;
+ struct cx23885_dev *dev = video_drvdata(file);
f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
f->fmt.pix.bytesperline = 0;
f->fmt.pix.sizeimage =
dev->ts1.ts_packet_size * dev->ts1.ts_packet_count;
f->fmt.pix.colorspace = 0;
- dprintk(1, "VIDIOC_TRY_FMT: w: %d, h: %d, f: %d\n",
- dev->ts1.width, dev->ts1.height, fh->mpegq.field);
+ f->fmt.pix.field = V4L2_FIELD_INTERLACED;
+ dprintk(1, "VIDIOC_TRY_FMT: w: %d, h: %d\n",
+ dev->ts1.width, dev->ts1.height);
return 0;
}
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx23885_fh *fh = file->private_data;
- struct cx23885_dev *dev = fh->dev;
+ struct cx23885_dev *dev = video_drvdata(file);
f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
f->fmt.pix.bytesperline = 0;
f->fmt.pix.sizeimage =
dev->ts1.ts_packet_size * dev->ts1.ts_packet_count;
f->fmt.pix.colorspace = 0;
+ f->fmt.pix.field = V4L2_FIELD_INTERLACED;
dprintk(1, "VIDIOC_S_FMT: w: %d, h: %d, f: %d\n",
f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.field);
return 0;
}
-static int vidioc_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *p)
-{
- struct cx23885_fh *fh = file->private_data;
-
- return videobuf_reqbufs(&fh->mpegq, p);
-}
-
-static int vidioc_querybuf(struct file *file, void *priv,
- struct v4l2_buffer *p)
-{
- struct cx23885_fh *fh = file->private_data;
-
- return videobuf_querybuf(&fh->mpegq, p);
-}
-
-static int vidioc_qbuf(struct file *file, void *priv,
- struct v4l2_buffer *p)
-{
- struct cx23885_fh *fh = file->private_data;
-
- return videobuf_qbuf(&fh->mpegq, p);
-}
-
-static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
-{
- struct cx23885_fh *fh = priv;
-
- return videobuf_dqbuf(&fh->mpegq, b, file->f_flags & O_NONBLOCK);
-}
-
-
-static int vidioc_streamon(struct file *file, void *priv,
- enum v4l2_buf_type i)
-{
- struct cx23885_fh *fh = file->private_data;
-
- return videobuf_streamon(&fh->mpegq);
-}
-
-static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
-{
- struct cx23885_fh *fh = file->private_data;
-
- return videobuf_streamoff(&fh->mpegq);
-}
-
-static int vidioc_g_ext_ctrls(struct file *file, void *priv,
- struct v4l2_ext_controls *f)
-{
- struct cx23885_fh *fh = priv;
- struct cx23885_dev *dev = fh->dev;
-
- if (f->ctrl_class != V4L2_CTRL_CLASS_MPEG)
- return -EINVAL;
- return cx2341x_ext_ctrls(&dev->mpeg_params, 0, f, VIDIOC_G_EXT_CTRLS);
-}
-
-static int vidioc_s_ext_ctrls(struct file *file, void *priv,
- struct v4l2_ext_controls *f)
-{
- struct cx23885_fh *fh = priv;
- struct cx23885_dev *dev = fh->dev;
- struct cx2341x_mpeg_params p;
- int err;
-
- if (f->ctrl_class != V4L2_CTRL_CLASS_MPEG)
- return -EINVAL;
-
- p = dev->mpeg_params;
- err = cx2341x_ext_ctrls(&p, 0, f, VIDIOC_S_EXT_CTRLS);
-
- if (err == 0) {
- err = cx2341x_update(dev, cx23885_mbox_func,
- &dev->mpeg_params, &p);
- dev->mpeg_params = p;
- }
- return err;
-}
-
-static int vidioc_try_ext_ctrls(struct file *file, void *priv,
- struct v4l2_ext_controls *f)
-{
- struct cx23885_fh *fh = priv;
- struct cx23885_dev *dev = fh->dev;
- struct cx2341x_mpeg_params p;
- int err;
-
- if (f->ctrl_class != V4L2_CTRL_CLASS_MPEG)
- return -EINVAL;
-
- p = dev->mpeg_params;
- err = cx2341x_ext_ctrls(&p, 0, f, VIDIOC_TRY_EXT_CTRLS);
- return err;
-}
-
static int vidioc_log_status(struct file *file, void *priv)
{
- struct cx23885_fh *fh = priv;
- struct cx23885_dev *dev = fh->dev;
+ struct cx23885_dev *dev = video_drvdata(file);
char name[32 + 2];
snprintf(name, sizeof(name), "%s/2", dev->name);
- printk(KERN_INFO
- "%s/2: ============ START LOG STATUS ============\n",
- dev->name);
call_all(dev, core, log_status);
- cx2341x_log_status(&dev->mpeg_params, name);
- printk(KERN_INFO
- "%s/2: ============= END LOG STATUS =============\n",
- dev->name);
+ v4l2_ctrl_handler_log_status(&dev->cxhdl.hdl, name);
return 0;
}
-static int vidioc_querymenu(struct file *file, void *priv,
- struct v4l2_querymenu *a)
-{
- struct cx23885_fh *fh = priv;
- struct cx23885_dev *dev = fh->dev;
-
- return cx23885_querymenu(dev, a);
-}
-
-static int vidioc_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *c)
-{
- struct cx23885_fh *fh = priv;
- struct cx23885_dev *dev = fh->dev;
-
- return cx23885_queryctrl(dev, c);
-}
-
-static int mpeg_open(struct file *file)
-{
- struct cx23885_dev *dev = video_drvdata(file);
- struct cx23885_fh *fh;
-
- dprintk(2, "%s()\n", __func__);
-
- /* allocate + initialize per filehandle data */
- fh = kzalloc(sizeof(*fh), GFP_KERNEL);
- if (!fh)
- return -ENOMEM;
-
- file->private_data = fh;
- fh->dev = dev;
-
- videobuf_queue_sg_init(&fh->mpegq, &cx23885_qops,
- &dev->pci->dev, &dev->ts1.slock,
- V4L2_BUF_TYPE_VIDEO_CAPTURE,
- V4L2_FIELD_INTERLACED,
- sizeof(struct cx23885_buffer),
- fh, NULL);
- return 0;
-}
-
-static int mpeg_release(struct file *file)
-{
- struct cx23885_fh *fh = file->private_data;
- struct cx23885_dev *dev = fh->dev;
-
- dprintk(2, "%s()\n", __func__);
-
- /* FIXME: Review this crap */
- /* Shut device down on last close */
- if (atomic_cmpxchg(&fh->v4l_reading, 1, 0) == 1) {
- if (atomic_dec_return(&dev->v4l_reader_count) == 0) {
- /* stop mpeg capture */
- cx23885_api_cmd(fh->dev, CX2341X_ENC_STOP_CAPTURE, 3, 0,
- CX23885_END_NOW, CX23885_MPEG_CAPTURE,
- CX23885_RAW_BITS_NONE);
-
- msleep(500);
- cx23885_417_check_encoder(dev);
-
- cx23885_cancel_buffers(&fh->dev->ts1);
- }
- }
-
- if (fh->mpegq.streaming)
- videobuf_streamoff(&fh->mpegq);
- if (fh->mpegq.reading)
- videobuf_read_stop(&fh->mpegq);
-
- videobuf_mmap_free(&fh->mpegq);
- file->private_data = NULL;
- kfree(fh);
-
- return 0;
-}
-
-static ssize_t mpeg_read(struct file *file, char __user *data,
- size_t count, loff_t *ppos)
-{
- struct cx23885_fh *fh = file->private_data;
- struct cx23885_dev *dev = fh->dev;
-
- dprintk(2, "%s()\n", __func__);
-
- /* Deal w/ A/V decoder * and mpeg encoder sync issues. */
- /* Start mpeg encoder on first read. */
- if (atomic_cmpxchg(&fh->v4l_reading, 0, 1) == 0) {
- if (atomic_inc_return(&dev->v4l_reader_count) == 1) {
- if (cx23885_initialize_codec(dev, 1) < 0)
- return -EINVAL;
- }
- }
-
- return videobuf_read_stream(&fh->mpegq, data, count, ppos, 0,
- file->f_flags & O_NONBLOCK);
-}
-
-static unsigned int mpeg_poll(struct file *file,
- struct poll_table_struct *wait)
-{
- struct cx23885_fh *fh = file->private_data;
- struct cx23885_dev *dev = fh->dev;
-
- dprintk(2, "%s\n", __func__);
-
- return videobuf_poll_stream(file, &fh->mpegq, wait);
-}
-
-static int mpeg_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct cx23885_fh *fh = file->private_data;
- struct cx23885_dev *dev = fh->dev;
-
- dprintk(2, "%s()\n", __func__);
-
- return videobuf_mmap_mapper(&fh->mpegq, vma);
-}
-
static struct v4l2_file_operations mpeg_fops = {
.owner = THIS_MODULE,
- .open = mpeg_open,
- .release = mpeg_release,
- .read = mpeg_read,
- .poll = mpeg_poll,
- .mmap = mpeg_mmap,
- .ioctl = video_ioctl2,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
};
static const struct v4l2_ioctl_ops mpeg_ioctl_ops = {
@@ -1669,25 +1446,19 @@ static const struct v4l2_ioctl_ops mpeg_ioctl_ops = {
.vidioc_s_tuner = vidioc_s_tuner,
.vidioc_g_frequency = vidioc_g_frequency,
.vidioc_s_frequency = vidioc_s_frequency,
- .vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_g_ctrl = vidioc_g_ctrl,
.vidioc_querycap = vidioc_querycap,
.vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
.vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
- .vidioc_dqbuf = vidioc_dqbuf,
- .vidioc_streamon = vidioc_streamon,
- .vidioc_streamoff = vidioc_streamoff,
- .vidioc_g_ext_ctrls = vidioc_g_ext_ctrls,
- .vidioc_s_ext_ctrls = vidioc_s_ext_ctrls,
- .vidioc_try_ext_ctrls = vidioc_try_ext_ctrls,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_log_status = vidioc_log_status,
- .vidioc_querymenu = vidioc_querymenu,
- .vidioc_queryctrl = vidioc_queryctrl,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.vidioc_g_chip_info = cx23885_g_chip_info,
.vidioc_g_register = cx23885_g_register,
@@ -1711,6 +1482,7 @@ void cx23885_417_unregister(struct cx23885_dev *dev)
video_unregister_device(dev->v4l_device);
else
video_device_release(dev->v4l_device);
+ v4l2_ctrl_handler_free(&dev->cxhdl.hdl);
dev->v4l_device = NULL;
}
}
@@ -1742,6 +1514,7 @@ int cx23885_417_register(struct cx23885_dev *dev)
/* FIXME: Port1 hardcoded here */
int err = -ENODEV;
struct cx23885_tsport *tsport = &dev->ts1;
+ struct vb2_queue *q;
dprintk(1, "%s()\n", __func__);
@@ -1757,14 +1530,36 @@ int cx23885_417_register(struct cx23885_dev *dev)
tsport->height = 576;
tsport->width = 720;
- cx2341x_fill_defaults(&dev->mpeg_params);
-
- dev->mpeg_params.port = CX2341X_PORT_SERIAL;
+ dev->cxhdl.port = CX2341X_PORT_SERIAL;
+ err = cx2341x_handler_init(&dev->cxhdl, 50);
+ if (err)
+ return err;
+ dev->cxhdl.priv = dev;
+ dev->cxhdl.func = cx23885_api_func;
+ cx2341x_handler_set_50hz(&dev->cxhdl, tsport->height == 576);
+ v4l2_ctrl_add_handler(&dev->ctrl_handler, &dev->cxhdl.hdl, NULL);
/* Allocate and initialize V4L video device */
dev->v4l_device = cx23885_video_dev_alloc(tsport,
dev->pci, &cx23885_mpeg_template, "mpeg");
+ q = &dev->vb2_mpegq;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
+ q->gfp_flags = GFP_DMA32;
+ q->min_buffers_needed = 2;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct cx23885_buffer);
+ q->ops = &cx23885_qops;
+ q->mem_ops = &vb2_dma_sg_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &dev->lock;
+
+ err = vb2_queue_init(q);
+ if (err < 0)
+ return err;
video_set_drvdata(dev->v4l_device, dev);
+ dev->v4l_device->lock = &dev->lock;
+ dev->v4l_device->queue = q;
err = video_register_device(dev->v4l_device,
VFL_TYPE_GRABBER, -1);
if (err < 0) {
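The cx23885-417 conversion above drops the per-filehandle videobuf queue and attaches a single vb2_queue to the video device, so reqbufs/qbuf/streamon and the file operations can be served by the stock vb2_ioctl_* and vb2_fop_* helpers. Below is a hedged, minimal sketch of such a queue setup; demo_dev and demo_qops are placeholders, not the cx23885 structures.

/* Minimal vb2 capture-queue sketch with placeholder driver types. */
#include <linux/mutex.h>
#include <linux/videodev2.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-sg.h>

struct demo_dev {
	struct mutex lock;		/* also set as vdev->lock in a real driver */
	struct vb2_queue queue;
};

extern const struct vb2_ops demo_qops;	/* your queue_setup, buf_prepare, ... */

static int demo_vb2_init(struct demo_dev *dev)
{
	struct vb2_queue *q = &dev->queue;

	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
	q->drv_priv = dev;		/* handed back as q->drv_priv in the ops */
	q->buf_struct_size = sizeof(struct vb2_buffer);
	q->ops = &demo_qops;
	q->mem_ops = &vb2_dma_sg_memops;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->lock = &dev->lock;		/* serializes the vb2 file operations */

	return vb2_queue_init(q);
}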
diff --git a/drivers/media/pci/cx23885/cx23885-alsa.c b/drivers/media/pci/cx23885/cx23885-alsa.c
index 554798dcedd0..ae7c2e89ad1c 100644
--- a/drivers/media/pci/cx23885/cx23885-alsa.c
+++ b/drivers/media/pci/cx23885/cx23885-alsa.c
@@ -15,10 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
@@ -84,6 +80,82 @@ MODULE_PARM_DESC(audio_debug, "enable debug messages [analog audio]");
#define AUD_INT_MCHG_IRQ (1 << 21)
#define GP_COUNT_CONTROL_RESET 0x3
+static int cx23885_alsa_dma_init(struct cx23885_audio_dev *chip, int nr_pages)
+{
+ struct cx23885_audio_buffer *buf = chip->buf;
+ struct page *pg;
+ int i;
+
+ buf->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT);
+ if (NULL == buf->vaddr) {
+ dprintk(1, "vmalloc_32(%d pages) failed\n", nr_pages);
+ return -ENOMEM;
+ }
+
+ dprintk(1, "vmalloc is at addr 0x%08lx, size=%d\n",
+ (unsigned long)buf->vaddr,
+ nr_pages << PAGE_SHIFT);
+
+ memset(buf->vaddr, 0, nr_pages << PAGE_SHIFT);
+ buf->nr_pages = nr_pages;
+
+ buf->sglist = vzalloc(buf->nr_pages * sizeof(*buf->sglist));
+ if (NULL == buf->sglist)
+ goto vzalloc_err;
+
+ sg_init_table(buf->sglist, buf->nr_pages);
+ for (i = 0; i < buf->nr_pages; i++) {
+ pg = vmalloc_to_page(buf->vaddr + i * PAGE_SIZE);
+ if (NULL == pg)
+ goto vmalloc_to_page_err;
+ sg_set_page(&buf->sglist[i], pg, PAGE_SIZE, 0);
+ }
+ return 0;
+
+vmalloc_to_page_err:
+ vfree(buf->sglist);
+ buf->sglist = NULL;
+vzalloc_err:
+ vfree(buf->vaddr);
+ buf->vaddr = NULL;
+ return -ENOMEM;
+}
+
+static int cx23885_alsa_dma_map(struct cx23885_audio_dev *dev)
+{
+ struct cx23885_audio_buffer *buf = dev->buf;
+
+ buf->sglen = dma_map_sg(&dev->pci->dev, buf->sglist,
+ buf->nr_pages, PCI_DMA_FROMDEVICE);
+
+ if (0 == buf->sglen) {
+ pr_warn("%s: cx23885_alsa_map_sg failed\n", __func__);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int cx23885_alsa_dma_unmap(struct cx23885_audio_dev *dev)
+{
+ struct cx23885_audio_buffer *buf = dev->buf;
+
+ if (!buf->sglen)
+ return 0;
+
+ dma_unmap_sg(&dev->pci->dev, buf->sglist, buf->sglen, PCI_DMA_FROMDEVICE);
+ buf->sglen = 0;
+ return 0;
+}
+
+static int cx23885_alsa_dma_free(struct cx23885_audio_buffer *buf)
+{
+ vfree(buf->sglist);
+ buf->sglist = NULL;
+ vfree(buf->vaddr);
+ buf->vaddr = NULL;
+ return 0;
+}
+
/*
* BOARD Specific: Sets audio DMA
*/
@@ -198,15 +270,18 @@ int cx23885_audio_irq(struct cx23885_dev *dev, u32 status, u32 mask)
static int dsp_buffer_free(struct cx23885_audio_dev *chip)
{
+ struct cx23885_riscmem *risc;
+
BUG_ON(!chip->dma_size);
dprintk(2, "Freeing buffer\n");
- videobuf_dma_unmap(&chip->pci->dev, chip->dma_risc);
- videobuf_dma_free(chip->dma_risc);
- btcx_riscmem_free(chip->pci, &chip->buf->risc);
+ cx23885_alsa_dma_unmap(chip);
+ cx23885_alsa_dma_free(chip->buf);
+ risc = &chip->buf->risc;
+ pci_free_consistent(chip->pci, risc->size, risc->cpu, risc->dma);
kfree(chip->buf);
- chip->dma_risc = NULL;
+ chip->buf = NULL;
chip->dma_size = 0;
return 0;
@@ -289,6 +364,7 @@ static int snd_cx23885_close(struct snd_pcm_substream *substream)
return 0;
}
+
/*
* hw_params callback
*/
@@ -296,8 +372,6 @@ static int snd_cx23885_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
struct cx23885_audio_dev *chip = snd_pcm_substream_chip(substream);
- struct videobuf_dmabuf *dma;
-
struct cx23885_audio_buffer *buf;
int ret;
@@ -318,19 +392,18 @@ static int snd_cx23885_hw_params(struct snd_pcm_substream *substream,
return -ENOMEM;
buf->bpl = chip->period_size;
+ chip->buf = buf;
- dma = &buf->dma;
- videobuf_dma_init(dma);
- ret = videobuf_dma_init_kernel(dma, PCI_DMA_FROMDEVICE,
+ ret = cx23885_alsa_dma_init(chip,
(PAGE_ALIGN(chip->dma_size) >> PAGE_SHIFT));
if (ret < 0)
goto error;
- ret = videobuf_dma_map(&chip->pci->dev, dma);
+ ret = cx23885_alsa_dma_map(chip);
if (ret < 0)
goto error;
- ret = cx23885_risc_databuffer(chip->pci, &buf->risc, dma->sglist,
+ ret = cx23885_risc_databuffer(chip->pci, &buf->risc, buf->sglist,
chip->period_size, chip->num_periods, 1);
if (ret < 0)
goto error;
@@ -340,10 +413,7 @@ static int snd_cx23885_hw_params(struct snd_pcm_substream *substream,
buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */
- chip->buf = buf;
- chip->dma_risc = dma;
-
- substream->runtime->dma_area = chip->dma_risc->vaddr;
+ substream->runtime->dma_area = chip->buf->vaddr;
substream->runtime->dma_bytes = chip->dma_size;
substream->runtime->dma_addr = 0;
@@ -351,6 +421,7 @@ static int snd_cx23885_hw_params(struct snd_pcm_substream *substream,
error:
kfree(buf);
+ chip->buf = NULL;
return ret;
}
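cx23885-alsa.c now carries its own vmalloc_32() buffer, scatterlist construction and dma_map_sg()/dma_unmap_sg() calls instead of the old videobuf_dma_* helpers. The sketch below isolates just the scatterlist-building step; error unwinding is trimmed and the function name is invented.

/* Build a scatterlist over a vmalloc'ed buffer; illustration only. */
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/scatterlist.h>

static struct scatterlist *demo_build_sglist(void *vaddr, int nr_pages)
{
	struct scatterlist *sgl;
	int i;

	sgl = vzalloc(nr_pages * sizeof(*sgl));
	if (!sgl)
		return NULL;

	sg_init_table(sgl, nr_pages);
	for (i = 0; i < nr_pages; i++) {
		/* vmalloc memory is only virtually contiguous, so every
		 * page gets its own scatterlist entry before dma_map_sg(). */
		struct page *pg = vmalloc_to_page(vaddr + i * PAGE_SIZE);

		sg_set_page(&sgl[i], pg, PAGE_SIZE, 0);
	}
	return sgl;
}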
diff --git a/drivers/media/pci/cx23885/cx23885-av.c b/drivers/media/pci/cx23885/cx23885-av.c
index c443b7ac5adf..877dad89107e 100644
--- a/drivers/media/pci/cx23885/cx23885-av.c
+++ b/drivers/media/pci/cx23885/cx23885-av.c
@@ -14,11 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*/
#include "cx23885.h"
diff --git a/drivers/media/pci/cx23885/cx23885-av.h b/drivers/media/pci/cx23885/cx23885-av.h
index d2915c3e53a2..97f232f8efb9 100644
--- a/drivers/media/pci/cx23885/cx23885-av.h
+++ b/drivers/media/pci/cx23885/cx23885-av.h
@@ -14,11 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*/
#ifndef _CX23885_AV_H_
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
index c2b608007190..88c257d1161b 100644
--- a/drivers/media/pci/cx23885/cx23885-cards.c
+++ b/drivers/media/pci/cx23885/cx23885-cards.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
@@ -679,6 +675,11 @@ struct cx23885_board cx23885_boards[] = {
.amux = CX25840_AUDIO7,
} },
},
+ [CX23885_BOARD_DVBSKY_T9580] = {
+ .name = "DVBSky T9580",
+ .portb = CX23885_MPEG_DVB,
+ .portc = CX23885_MPEG_DVB,
+ },
};
const unsigned int cx23885_bcount = ARRAY_SIZE(cx23885_boards);
@@ -934,6 +935,10 @@ struct cx23885_subid cx23885_subids[] = {
.subvendor = 0x18ac,
.subdevice = 0xdb98,
.card = CX23885_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL_EXP2,
+ }, {
+ .subvendor = 0x4254,
+ .subdevice = 0x9580,
+ .card = CX23885_BOARD_DVBSKY_T9580,
},
};
const unsigned int cx23885_idcount = ARRAY_SIZE(cx23885_subids);
@@ -1528,6 +1533,14 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
cx_set(GP0_IO, 0x00040004);
mdelay(60);
break;
+ case CX23885_BOARD_DVBSKY_T9580:
+ /* enable GPIO3-18 pins */
+ cx_write(MC417_CTL, 0x00000037);
+ cx23885_gpio_enable(dev, GPIO_2 | GPIO_11, 1);
+ cx23885_gpio_clear(dev, GPIO_2 | GPIO_11);
+ mdelay(100);
+ cx23885_gpio_set(dev, GPIO_2 | GPIO_11);
+ break;
}
}
@@ -1851,6 +1864,14 @@ void cx23885_card_setup(struct cx23885_dev *dev)
ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */
ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
break;
+ case CX23885_BOARD_DVBSKY_T9580:
+ ts1->gen_ctrl_val = 0x5; /* Parallel */
+ ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
+ ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
+ ts2->gen_ctrl_val = 0x8; /* Serial bus */
+ ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */
+ ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
+ break;
case CX23885_BOARD_HAUPPAUGE_HVR1250:
case CX23885_BOARD_HAUPPAUGE_HVR1500:
case CX23885_BOARD_HAUPPAUGE_HVR1500Q:
@@ -1913,6 +1934,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_AVERMEDIA_HC81R:
case CX23885_BOARD_TBS_6980:
case CX23885_BOARD_TBS_6981:
+ case CX23885_BOARD_DVBSKY_T9580:
dev->sd_cx25840 = v4l2_i2c_new_subdev(&dev->v4l2_dev,
&dev->i2c_bus[2].i2c_adap,
"cx25840", 0x88 >> 1, NULL);
@@ -1970,5 +1992,3 @@ void cx23885_card_setup(struct cx23885_dev *dev)
}
}
}
-
-/* ------------------------------------------------------------------ */
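The cx23885-cards.c hunks register the DVBSky T9580 by adding a board profile, a PCI subsystem-ID match and its GPIO bring-up. Schematically the ID-to-board mapping works like the toy lookup below; the table contents are invented apart from the 0x4254:0x9580 pair quoted from the hunk.

/* Toy version of a subid -> board lookup; not the cx23885 tables. */
#include <linux/kernel.h>
#include <linux/types.h>

struct demo_subid {
	u16 subvendor;
	u16 subdevice;
	unsigned int card;
};

static const struct demo_subid demo_subids[] = {
	{ .subvendor = 0x4254, .subdevice = 0x9580, .card = 1 },
};

static unsigned int demo_card_for(u16 subvendor, u16 subdevice)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(demo_subids); i++)
		if (demo_subids[i].subvendor == subvendor &&
		    demo_subids[i].subdevice == subdevice)
			return demo_subids[i].card;
	return 0;	/* unknown: fall back to a generic profile */
}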
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index edcd79db1e4e..331eddac7222 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
@@ -420,39 +416,23 @@ static int cx23885_risc_decode(u32 risc)
return incr[risc >> 28] ? incr[risc >> 28] : 1;
}
-void cx23885_wakeup(struct cx23885_tsport *port,
+static void cx23885_wakeup(struct cx23885_tsport *port,
struct cx23885_dmaqueue *q, u32 count)
{
struct cx23885_dev *dev = port->dev;
struct cx23885_buffer *buf;
- int bc;
-
- for (bc = 0;; bc++) {
- if (list_empty(&q->active))
- break;
- buf = list_entry(q->active.next,
- struct cx23885_buffer, vb.queue);
-
- /* count comes from the hw and is is 16bit wide --
- * this trick handles wrap-arounds correctly for
- * up to 32767 buffers in flight... */
- if ((s16) (count - buf->count) < 0)
- break;
- v4l2_get_timestamp(&buf->vb.ts);
- dprintk(2, "[%p/%d] wakeup reg=%d buf=%d\n", buf, buf->vb.i,
- count, buf->count);
- buf->vb.state = VIDEOBUF_DONE;
- list_del(&buf->vb.queue);
- wake_up(&buf->vb.done);
- }
if (list_empty(&q->active))
- del_timer(&q->timeout);
- else
- mod_timer(&q->timeout, jiffies + BUFFER_TIMEOUT);
- if (bc != 1)
- printk(KERN_WARNING "%s: %d buffers handled (should be 1)\n",
- __func__, bc);
+ return;
+ buf = list_entry(q->active.next,
+ struct cx23885_buffer, queue);
+
+ v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
+ buf->vb.v4l2_buf.sequence = q->count++;
+ dprintk(1, "[%p/%d] wakeup reg=%d buf=%d\n", buf, buf->vb.v4l2_buf.index,
+ count, q->count);
+ list_del(&buf->queue);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
}
int cx23885_sram_channel_setup(struct cx23885_dev *dev,
@@ -482,8 +462,8 @@ int cx23885_sram_channel_setup(struct cx23885_dev *dev,
lines = 6;
BUG_ON(lines < 2);
- cx_write(8 + 0, RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
- cx_write(8 + 4, 8);
+ cx_write(8 + 0, RISC_JUMP | RISC_CNT_RESET);
+ cx_write(8 + 4, 12);
cx_write(8 + 8, 0);
/* write CDT */
@@ -590,7 +570,7 @@ void cx23885_sram_channel_dump(struct cx23885_dev *dev,
}
static void cx23885_risc_disasm(struct cx23885_tsport *port,
- struct btcx_riscmem *risc)
+ struct cx23885_riscmem *risc)
{
struct cx23885_dev *dev = port->dev;
unsigned int i, j, n;
@@ -699,10 +679,6 @@ static int get_resources(struct cx23885_dev *dev)
return -EBUSY;
}
-static void cx23885_timeout(unsigned long data);
-int cx23885_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
- u32 reg, u32 mask, u32 value);
-
static int cx23885_init_tsport(struct cx23885_dev *dev,
struct cx23885_tsport *port, int portno)
{
@@ -719,11 +695,6 @@ static int cx23885_init_tsport(struct cx23885_dev *dev,
port->nr = portno;
INIT_LIST_HEAD(&port->mpegq.active);
- INIT_LIST_HEAD(&port->mpegq.queued);
- port->mpegq.timeout.function = cx23885_timeout;
- port->mpegq.timeout.data = (unsigned long)port;
- init_timer(&port->mpegq.timeout);
-
mutex_init(&port->frontends.lock);
INIT_LIST_HEAD(&port->frontends.felist);
port->frontends.active_fe_id = 0;
@@ -776,9 +747,6 @@ static int cx23885_init_tsport(struct cx23885_dev *dev,
BUG();
}
- cx23885_risc_stopper(dev->pci, &port->mpegq.stopper,
- port->reg_dma_ctl, port->dma_ctl_val, 0x00);
-
return 0;
}
@@ -1089,11 +1057,18 @@ static void cx23885_dev_unregister(struct cx23885_dev *dev)
static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
unsigned int offset, u32 sync_line,
unsigned int bpl, unsigned int padding,
- unsigned int lines, unsigned int lpi)
+ unsigned int lines, unsigned int lpi, bool jump)
{
struct scatterlist *sg;
unsigned int line, todo, sol;
+
+ if (jump) {
+ *(rp++) = cpu_to_le32(RISC_JUMP);
+ *(rp++) = cpu_to_le32(0);
+ *(rp++) = cpu_to_le32(0); /* bits 63-32 */
+ }
+
/* sync instruction */
if (sync_line != NO_SYNC_LINE)
*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);
@@ -1146,14 +1121,13 @@ static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
return rp;
}
-int cx23885_risc_buffer(struct pci_dev *pci, struct btcx_riscmem *risc,
+int cx23885_risc_buffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
struct scatterlist *sglist, unsigned int top_offset,
unsigned int bottom_offset, unsigned int bpl,
unsigned int padding, unsigned int lines)
{
u32 instructions, fields;
__le32 *rp;
- int rc;
fields = 0;
if (UNSET != top_offset)
@@ -1168,19 +1142,20 @@ int cx23885_risc_buffer(struct pci_dev *pci, struct btcx_riscmem *risc,
/* write and jump need and extra dword */
instructions = fields * (1 + ((bpl + padding) * lines)
/ PAGE_SIZE + lines);
- instructions += 2;
- rc = btcx_riscmem_alloc(pci, risc, instructions*12);
- if (rc < 0)
- return rc;
+ instructions += 5;
+ risc->size = instructions * 12;
+ risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
+ if (risc->cpu == NULL)
+ return -ENOMEM;
/* write risc instructions */
rp = risc->cpu;
if (UNSET != top_offset)
rp = cx23885_risc_field(rp, sglist, top_offset, 0,
- bpl, padding, lines, 0);
+ bpl, padding, lines, 0, true);
if (UNSET != bottom_offset)
rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
- bpl, padding, lines, 0);
+ bpl, padding, lines, 0, UNSET == top_offset);
/* save pointer to jmp instruction address */
risc->jmp = rp;
@@ -1189,14 +1164,13 @@ int cx23885_risc_buffer(struct pci_dev *pci, struct btcx_riscmem *risc,
}
int cx23885_risc_databuffer(struct pci_dev *pci,
- struct btcx_riscmem *risc,
+ struct cx23885_riscmem *risc,
struct scatterlist *sglist,
unsigned int bpl,
unsigned int lines, unsigned int lpi)
{
u32 instructions;
__le32 *rp;
- int rc;
/* estimate risc mem: worst case is one write per page border +
one write per scan line + syncs + jump (all 2 dwords). Here
@@ -1204,16 +1178,17 @@ int cx23885_risc_databuffer(struct pci_dev *pci,
than PAGE_SIZE */
/* Jump and write need an extra dword */
instructions = 1 + (bpl * lines) / PAGE_SIZE + lines;
- instructions += 1;
+ instructions += 4;
- rc = btcx_riscmem_alloc(pci, risc, instructions*12);
- if (rc < 0)
- return rc;
+ risc->size = instructions * 12;
+ risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
+ if (risc->cpu == NULL)
+ return -ENOMEM;
/* write risc instructions */
rp = risc->cpu;
rp = cx23885_risc_field(rp, sglist, 0, NO_SYNC_LINE,
- bpl, 0, lines, lpi);
+ bpl, 0, lines, lpi, lpi == 0);
/* save pointer to jmp instruction address */
risc->jmp = rp;
@@ -1221,14 +1196,13 @@ int cx23885_risc_databuffer(struct pci_dev *pci,
return 0;
}
-int cx23885_risc_vbibuffer(struct pci_dev *pci, struct btcx_riscmem *risc,
+int cx23885_risc_vbibuffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
struct scatterlist *sglist, unsigned int top_offset,
unsigned int bottom_offset, unsigned int bpl,
unsigned int padding, unsigned int lines)
{
u32 instructions, fields;
__le32 *rp;
- int rc;
fields = 0;
if (UNSET != top_offset)
@@ -1243,22 +1217,23 @@ int cx23885_risc_vbibuffer(struct pci_dev *pci, struct btcx_riscmem *risc,
/* write and jump need and extra dword */
instructions = fields * (1 + ((bpl + padding) * lines)
/ PAGE_SIZE + lines);
- instructions += 2;
- rc = btcx_riscmem_alloc(pci, risc, instructions*12);
- if (rc < 0)
- return rc;
+ instructions += 5;
+ risc->size = instructions * 12;
+ risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
+ if (risc->cpu == NULL)
+ return -ENOMEM;
/* write risc instructions */
rp = risc->cpu;
/* Sync to line 6, so US CC line 21 will appear in line '12'
* in the userland vbi payload */
if (UNSET != top_offset)
- rp = cx23885_risc_field(rp, sglist, top_offset, 6,
- bpl, padding, lines, 0);
+ rp = cx23885_risc_field(rp, sglist, top_offset, 0,
+ bpl, padding, lines, 0, true);
if (UNSET != bottom_offset)
- rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x207,
- bpl, padding, lines, 0);
+ rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
+ bpl, padding, lines, 0, UNSET == top_offset);
@@ -1269,38 +1244,12 @@ int cx23885_risc_vbibuffer(struct pci_dev *pci, struct btcx_riscmem *risc,
}
-int cx23885_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
- u32 reg, u32 mask, u32 value)
+void cx23885_free_buffer(struct cx23885_dev *dev, struct cx23885_buffer *buf)
{
- __le32 *rp;
- int rc;
-
- rc = btcx_riscmem_alloc(pci, risc, 4*16);
- if (rc < 0)
- return rc;
-
- /* write risc instructions */
- rp = risc->cpu;
- *(rp++) = cpu_to_le32(RISC_WRITECR | RISC_IRQ2);
- *(rp++) = cpu_to_le32(reg);
- *(rp++) = cpu_to_le32(value);
- *(rp++) = cpu_to_le32(mask);
- *(rp++) = cpu_to_le32(RISC_JUMP);
- *(rp++) = cpu_to_le32(risc->dma);
- *(rp++) = cpu_to_le32(0); /* bits 63-32 */
- return 0;
-}
-
-void cx23885_free_buffer(struct videobuf_queue *q, struct cx23885_buffer *buf)
-{
- struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);
+ struct cx23885_riscmem *risc = &buf->risc;
BUG_ON(in_interrupt());
- videobuf_waiton(q, &buf->vb, 0, 0);
- videobuf_dma_unmap(q->dev, dma);
- videobuf_dma_free(dma);
- btcx_riscmem_free(to_pci_dev(q->dev), &buf->risc);
- buf->vb.state = VIDEOBUF_NEEDS_INIT;
+ pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
}
static void cx23885_tsport_reg_dump(struct cx23885_tsport *port)
@@ -1355,7 +1304,7 @@ static void cx23885_tsport_reg_dump(struct cx23885_tsport *port)
port->reg_ts_int_msk, cx_read(port->reg_ts_int_msk));
}
-static int cx23885_start_dma(struct cx23885_tsport *port,
+int cx23885_start_dma(struct cx23885_tsport *port,
struct cx23885_dmaqueue *q,
struct cx23885_buffer *buf)
{
@@ -1363,7 +1312,7 @@ static int cx23885_start_dma(struct cx23885_tsport *port,
u32 reg;
dprintk(1, "%s() w: %d, h: %d, f: %d\n", __func__,
- buf->vb.width, buf->vb.height, buf->vb.field);
+ dev->width, dev->height, dev->field);
/* Stop the fifo and risc engine for this port */
cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
@@ -1379,7 +1328,7 @@ static int cx23885_start_dma(struct cx23885_tsport *port,
}
/* write TS length to chip */
- cx_write(port->reg_lngth, buf->vb.width);
+ cx_write(port->reg_lngth, port->ts_packet_size);
if ((!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) &&
(!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB))) {
@@ -1408,7 +1357,7 @@ static int cx23885_start_dma(struct cx23885_tsport *port,
/* NOTE: this is 2 (reserved) for portb, does it matter? */
/* reset counter to zero */
cx_write(port->reg_gpcnt_ctl, 3);
- q->count = 1;
+ q->count = 0;
/* Set VIDB pins to input */
if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
@@ -1497,134 +1446,83 @@ static int cx23885_stop_dma(struct cx23885_tsport *port)
return 0;
}
-int cx23885_restart_queue(struct cx23885_tsport *port,
- struct cx23885_dmaqueue *q)
-{
- struct cx23885_dev *dev = port->dev;
- struct cx23885_buffer *buf;
-
- dprintk(5, "%s()\n", __func__);
- if (list_empty(&q->active)) {
- struct cx23885_buffer *prev;
- prev = NULL;
-
- dprintk(5, "%s() queue is empty\n", __func__);
-
- for (;;) {
- if (list_empty(&q->queued))
- return 0;
- buf = list_entry(q->queued.next, struct cx23885_buffer,
- vb.queue);
- if (NULL == prev) {
- list_move_tail(&buf->vb.queue, &q->active);
- cx23885_start_dma(port, q, buf);
- buf->vb.state = VIDEOBUF_ACTIVE;
- buf->count = q->count++;
- mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
- dprintk(5, "[%p/%d] restart_queue - f/active\n",
- buf, buf->vb.i);
-
- } else if (prev->vb.width == buf->vb.width &&
- prev->vb.height == buf->vb.height &&
- prev->fmt == buf->fmt) {
- list_move_tail(&buf->vb.queue, &q->active);
- buf->vb.state = VIDEOBUF_ACTIVE;
- buf->count = q->count++;
- prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
- /* 64 bit bits 63-32 */
- prev->risc.jmp[2] = cpu_to_le32(0);
- dprintk(5, "[%p/%d] restart_queue - m/active\n",
- buf, buf->vb.i);
- } else {
- return 0;
- }
- prev = buf;
- }
- return 0;
- }
-
- buf = list_entry(q->active.next, struct cx23885_buffer, vb.queue);
- dprintk(2, "restart_queue [%p/%d]: restart dma\n",
- buf, buf->vb.i);
- cx23885_start_dma(port, q, buf);
- list_for_each_entry(buf, &q->active, vb.queue)
- buf->count = q->count++;
- mod_timer(&q->timeout, jiffies + BUFFER_TIMEOUT);
- return 0;
-}
-
/* ------------------------------------------------------------------ */
-int cx23885_buf_prepare(struct videobuf_queue *q, struct cx23885_tsport *port,
- struct cx23885_buffer *buf, enum v4l2_field field)
+int cx23885_buf_prepare(struct cx23885_buffer *buf, struct cx23885_tsport *port)
{
struct cx23885_dev *dev = port->dev;
int size = port->ts_packet_size * port->ts_packet_count;
+ struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb, 0);
int rc;
dprintk(1, "%s: %p\n", __func__, buf);
- if (0 != buf->vb.baddr && buf->vb.bsize < size)
+ if (vb2_plane_size(&buf->vb, 0) < size)
return -EINVAL;
+ vb2_set_plane_payload(&buf->vb, 0, size);
- if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
- buf->vb.width = port->ts_packet_size;
- buf->vb.height = port->ts_packet_count;
- buf->vb.size = size;
- buf->vb.field = field /*V4L2_FIELD_TOP*/;
-
- rc = videobuf_iolock(q, &buf->vb, NULL);
- if (0 != rc)
- goto fail;
- cx23885_risc_databuffer(dev->pci, &buf->risc,
- videobuf_to_dma(&buf->vb)->sglist,
- buf->vb.width, buf->vb.height, 0);
- }
- buf->vb.state = VIDEOBUF_PREPARED;
- return 0;
+ rc = dma_map_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
+ if (!rc)
+ return -EIO;
- fail:
- cx23885_free_buffer(q, buf);
- return rc;
+ cx23885_risc_databuffer(dev->pci, &buf->risc,
+ sgt->sgl,
+ port->ts_packet_size, port->ts_packet_count, 0);
+ return 0;
}
+/*
+ * The risc program for each buffer works as follows: it starts with a simple
+ * 'JUMP to addr + 12', which is effectively a NOP. Then the code to DMA the
+ * buffer follows and at the end we have a JUMP back to the start + 12 (skipping
+ * the initial JUMP).
+ *
+ * This is the risc program of the first buffer to be queued if the active list
+ * is empty and it just keeps DMAing this buffer without generating any
+ * interrupts.
+ *
+ * If a new buffer is added then the initial JUMP in the code for that buffer
+ * will generate an interrupt which signals that the previous buffer has been
+ * DMAed successfully and that it can be returned to userspace.
+ *
+ * It also sets the final jump of the previous buffer to the start of the new
+ * buffer, thus chaining the new buffer into the DMA chain. This is a single
+ * atomic u32 write, so there is no race condition.
+ *
+ * The end-result of all this that you only get an interrupt when a buffer
+ * is ready, so the control flow is very easy.
+ */
void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
{
struct cx23885_buffer *prev;
struct cx23885_dev *dev = port->dev;
struct cx23885_dmaqueue *cx88q = &port->mpegq;
+ unsigned long flags;
- /* add jump to stopper */
- buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
- buf->risc.jmp[1] = cpu_to_le32(cx88q->stopper.dma);
+ buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
+ buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
+ buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 12);
buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */
+ spin_lock_irqsave(&dev->slock, flags);
if (list_empty(&cx88q->active)) {
- dprintk(1, "queue is empty - first active\n");
- list_add_tail(&buf->vb.queue, &cx88q->active);
- cx23885_start_dma(port, cx88q, buf);
- buf->vb.state = VIDEOBUF_ACTIVE;
- buf->count = cx88q->count++;
- mod_timer(&cx88q->timeout, jiffies + BUFFER_TIMEOUT);
+ list_add_tail(&buf->queue, &cx88q->active);
dprintk(1, "[%p/%d] %s - first active\n",
- buf, buf->vb.i, __func__);
+ buf, buf->vb.v4l2_buf.index, __func__);
} else {
- dprintk(1, "queue is not empty - append to active\n");
+ buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
prev = list_entry(cx88q->active.prev, struct cx23885_buffer,
- vb.queue);
- list_add_tail(&buf->vb.queue, &cx88q->active);
- buf->vb.state = VIDEOBUF_ACTIVE;
- buf->count = cx88q->count++;
+ queue);
+ list_add_tail(&buf->queue, &cx88q->active);
prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
- prev->risc.jmp[2] = cpu_to_le32(0); /* 64 bit bits 63-32 */
dprintk(1, "[%p/%d] %s - append to active\n",
- buf, buf->vb.i, __func__);
+ buf, buf->vb.v4l2_buf.index, __func__);
}
+ spin_unlock_irqrestore(&dev->slock, flags);
}
/* ----------------------------------------------------------- */
-static void do_cancel_buffers(struct cx23885_tsport *port, char *reason,
- int restart)
+static void do_cancel_buffers(struct cx23885_tsport *port, char *reason)
{
struct cx23885_dev *dev = port->dev;
struct cx23885_dmaqueue *q = &port->mpegq;
@@ -1634,16 +1532,11 @@ static void do_cancel_buffers(struct cx23885_tsport *port, char *reason,
spin_lock_irqsave(&port->slock, flags);
while (!list_empty(&q->active)) {
buf = list_entry(q->active.next, struct cx23885_buffer,
- vb.queue);
- list_del(&buf->vb.queue);
- buf->vb.state = VIDEOBUF_ERROR;
- wake_up(&buf->vb.done);
+ queue);
+ list_del(&buf->queue);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
- buf, buf->vb.i, reason, (unsigned long)buf->risc.dma);
- }
- if (restart) {
- dprintk(1, "restarting queue\n");
- cx23885_restart_queue(port, q);
+ buf, buf->vb.v4l2_buf.index, reason, (unsigned long)buf->risc.dma);
}
spin_unlock_irqrestore(&port->slock, flags);
}
@@ -1651,27 +1544,10 @@ static void do_cancel_buffers(struct cx23885_tsport *port, char *reason,
void cx23885_cancel_buffers(struct cx23885_tsport *port)
{
struct cx23885_dev *dev = port->dev;
- struct cx23885_dmaqueue *q = &port->mpegq;
-
- dprintk(1, "%s()\n", __func__);
- del_timer_sync(&q->timeout);
- cx23885_stop_dma(port);
- do_cancel_buffers(port, "cancel", 0);
-}
-
-static void cx23885_timeout(unsigned long data)
-{
- struct cx23885_tsport *port = (struct cx23885_tsport *)data;
- struct cx23885_dev *dev = port->dev;
dprintk(1, "%s()\n", __func__);
-
- if (debug > 5)
- cx23885_sram_channel_dump(dev,
- &dev->sram_channels[port->sram_chno]);
-
cx23885_stop_dma(port);
- do_cancel_buffers(port, "timeout", 1);
+ do_cancel_buffers(port, "cancel");
}
int cx23885_irq_417(struct cx23885_dev *dev, u32 status)
@@ -1721,11 +1597,6 @@ int cx23885_irq_417(struct cx23885_dev *dev, u32 status)
spin_lock(&port->slock);
cx23885_wakeup(port, &port->mpegq, count);
spin_unlock(&port->slock);
- } else if (status & VID_B_MSK_RISCI2) {
- dprintk(7, " VID_B_MSK_RISCI2\n");
- spin_lock(&port->slock);
- cx23885_restart_queue(port, &port->mpegq);
- spin_unlock(&port->slock);
}
if (status) {
cx_write(port->reg_ts_int_stat, status);
@@ -1777,14 +1648,6 @@ static int cx23885_irq_ts(struct cx23885_tsport *port, u32 status)
cx23885_wakeup(port, &port->mpegq, count);
spin_unlock(&port->slock);
- } else if (status & VID_BC_MSK_RISCI2) {
-
- dprintk(7, " (RISCI2 0x%08x)\n", VID_BC_MSK_RISCI2);
-
- spin_lock(&port->slock);
- cx23885_restart_queue(port, &port->mpegq);
- spin_unlock(&port->slock);
-
}
if (status) {
cx_write(port->reg_ts_int_stat, status);
@@ -2087,6 +1950,7 @@ static int cx23885_initdev(struct pci_dev *pci_dev,
const struct pci_device_id *pci_id)
{
struct cx23885_dev *dev;
+ struct v4l2_ctrl_handler *hdl;
int err;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
@@ -2097,6 +1961,14 @@ static int cx23885_initdev(struct pci_dev *pci_dev,
if (err < 0)
goto fail_free;
+ hdl = &dev->ctrl_handler;
+ v4l2_ctrl_handler_init(hdl, 6);
+ if (hdl->error) {
+ err = hdl->error;
+ goto fail_ctrl;
+ }
+ dev->v4l2_dev.ctrl_handler = hdl;
+
/* Prepare to handle notifications from subdevices */
cx23885_v4l2_dev_notify_init(dev);
@@ -2104,12 +1976,12 @@ static int cx23885_initdev(struct pci_dev *pci_dev,
dev->pci = pci_dev;
if (pci_enable_device(pci_dev)) {
err = -EIO;
- goto fail_unreg;
+ goto fail_ctrl;
}
if (cx23885_dev_setup(dev) < 0) {
err = -EINVAL;
- goto fail_unreg;
+ goto fail_ctrl;
}
/* print pci info */
@@ -2157,7 +2029,8 @@ static int cx23885_initdev(struct pci_dev *pci_dev,
fail_irq:
cx23885_dev_unregister(dev);
-fail_unreg:
+fail_ctrl:
+ v4l2_ctrl_handler_free(hdl);
v4l2_device_unregister(&dev->v4l2_dev);
fail_free:
kfree(dev);
@@ -2180,6 +2053,7 @@ static void cx23885_finidev(struct pci_dev *pci_dev)
free_irq(pci_dev->irq, dev);
cx23885_dev_unregister(dev);
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
v4l2_device_unregister(v4l2_dev);
kfree(dev);
}
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index 968fecc32f9c..13734b8c7917 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
@@ -73,6 +69,10 @@
#include "a8293.h"
#include "mb86a20s.h"
#include "si2165.h"
+#include "si2168.h"
+#include "si2157.h"
+#include "m88ds3103.h"
+#include "m88ts2022.h"
static unsigned int debug;
@@ -91,59 +91,95 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
/* ------------------------------------------------------------------ */
-static int dvb_buf_setup(struct videobuf_queue *q,
- unsigned int *count, unsigned int *size)
+static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], void *alloc_ctxs[])
{
- struct cx23885_tsport *port = q->priv_data;
+ struct cx23885_tsport *port = q->drv_priv;
port->ts_packet_size = 188 * 4;
port->ts_packet_count = 32;
-
- *size = port->ts_packet_size * port->ts_packet_count;
- *count = 32;
+ *num_planes = 1;
+ sizes[0] = port->ts_packet_size * port->ts_packet_count;
+ *num_buffers = 32;
return 0;
}
-static int dvb_buf_prepare(struct videobuf_queue *q,
- struct videobuf_buffer *vb, enum v4l2_field field)
+
+static int buffer_prepare(struct vb2_buffer *vb)
{
- struct cx23885_tsport *port = q->priv_data;
- return cx23885_buf_prepare(q, port, (struct cx23885_buffer *)vb, field);
+ struct cx23885_tsport *port = vb->vb2_queue->drv_priv;
+ struct cx23885_buffer *buf =
+ container_of(vb, struct cx23885_buffer, vb);
+
+ return cx23885_buf_prepare(buf, port);
}
-static void dvb_buf_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
+static void buffer_finish(struct vb2_buffer *vb)
{
- struct cx23885_tsport *port = q->priv_data;
- cx23885_buf_queue(port, (struct cx23885_buffer *)vb);
+ struct cx23885_tsport *port = vb->vb2_queue->drv_priv;
+ struct cx23885_dev *dev = port->dev;
+ struct cx23885_buffer *buf = container_of(vb,
+ struct cx23885_buffer, vb);
+ struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
+
+ cx23885_free_buffer(dev, buf);
+
+ dma_unmap_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
}
-static void dvb_buf_release(struct videobuf_queue *q,
- struct videobuf_buffer *vb)
+static void buffer_queue(struct vb2_buffer *vb)
{
- cx23885_free_buffer(q, (struct cx23885_buffer *)vb);
+ struct cx23885_tsport *port = vb->vb2_queue->drv_priv;
+ struct cx23885_buffer *buf = container_of(vb,
+ struct cx23885_buffer, vb);
+
+ cx23885_buf_queue(port, buf);
}
static void cx23885_dvb_gate_ctrl(struct cx23885_tsport *port, int open)
{
- struct videobuf_dvb_frontends *f;
- struct videobuf_dvb_frontend *fe;
+ struct vb2_dvb_frontends *f;
+ struct vb2_dvb_frontend *fe;
f = &port->frontends;
if (f->gate <= 1) /* undefined or fe0 */
- fe = videobuf_dvb_get_frontend(f, 1);
+ fe = vb2_dvb_get_frontend(f, 1);
else
- fe = videobuf_dvb_get_frontend(f, f->gate);
+ fe = vb2_dvb_get_frontend(f, f->gate);
if (fe && fe->dvb.frontend && fe->dvb.frontend->ops.i2c_gate_ctrl)
fe->dvb.frontend->ops.i2c_gate_ctrl(fe->dvb.frontend, open);
}
-static struct videobuf_queue_ops dvb_qops = {
- .buf_setup = dvb_buf_setup,
- .buf_prepare = dvb_buf_prepare,
- .buf_queue = dvb_buf_queue,
- .buf_release = dvb_buf_release,
+static int cx23885_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct cx23885_tsport *port = q->drv_priv;
+ struct cx23885_dmaqueue *dmaq = &port->mpegq;
+ struct cx23885_buffer *buf = list_entry(dmaq->active.next,
+ struct cx23885_buffer, queue);
+
+ cx23885_start_dma(port, dmaq, buf);
+ return 0;
+}
+
+static void cx23885_stop_streaming(struct vb2_queue *q)
+{
+ struct cx23885_tsport *port = q->drv_priv;
+
+ cx23885_cancel_buffers(port);
+}
+
+static struct vb2_ops dvb_qops = {
+ .queue_setup = queue_setup,
+ .buf_prepare = buffer_prepare,
+ .buf_finish = buffer_finish,
+ .buf_queue = buffer_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = cx23885_start_streaming,
+ .stop_streaming = cx23885_stop_streaming,
};
static struct s5h1409_config hauppauge_generic_config = {
@@ -551,6 +587,35 @@ static int p8000_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
return 0;
}
+static int dvbsky_t9580_set_voltage(struct dvb_frontend *fe,
+ fe_sec_voltage_t voltage)
+{
+ struct cx23885_tsport *port = fe->dvb->priv;
+ struct cx23885_dev *dev = port->dev;
+
+ cx23885_gpio_enable(dev, GPIO_0 | GPIO_1, 1);
+
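+ /*
+ * Inferred from the cases below: GPIO_1 gates the LNB supply on/off,
+ * GPIO_0 selects between 13V (clear) and 18V (set).
+ */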
+ switch (voltage) {
+ case SEC_VOLTAGE_13:
+ cx23885_gpio_set(dev, GPIO_1);
+ cx23885_gpio_clear(dev, GPIO_0);
+ break;
+ case SEC_VOLTAGE_18:
+ cx23885_gpio_set(dev, GPIO_1);
+ cx23885_gpio_set(dev, GPIO_0);
+ break;
+ case SEC_VOLTAGE_OFF:
+ cx23885_gpio_clear(dev, GPIO_1);
+ cx23885_gpio_clear(dev, GPIO_0);
+ break;
+ }
+
+ /* call the frontend set_voltage function */
+ port->fe_set_voltage(fe, voltage);
+
+ return 0;
+}
+
static int cx23885_dvb_set_frontend(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
@@ -715,6 +780,19 @@ static const struct si2165_config hauppauge_hvr4400_si2165_config = {
.ref_freq_Hz = 16000000,
};
+static const struct m88ds3103_config dvbsky_t9580_m88ds3103_config = {
+ .i2c_addr = 0x68,
+ .clock = 27000000,
+ .i2c_wr_max = 33,
+ .clock_out = 0,
+ .ts_mode = M88DS3103_TS_PARALLEL,
+ .ts_clk = 16000,
+ .ts_clk_pol = 1,
+ .lnb_en_pol = 1,
+ .lnb_hv_pol = 0,
+ .agc = 0x99,
+};
+
static int netup_altera_fpga_rw(void *device, int flag, int data, int read)
{
struct cx23885_dev *dev = (struct cx23885_dev *)device;
@@ -863,16 +941,23 @@ static int dvb_register(struct cx23885_tsport *port)
struct dib7000p_ops dib7000p_ops;
struct cx23885_dev *dev = port->dev;
struct cx23885_i2c *i2c_bus = NULL, *i2c_bus2 = NULL;
- struct videobuf_dvb_frontend *fe0, *fe1 = NULL;
+ struct vb2_dvb_frontend *fe0, *fe1 = NULL;
+ struct si2168_config si2168_config;
+ struct si2157_config si2157_config;
+ struct m88ts2022_config m88ts2022_config;
+ struct i2c_board_info info;
+ struct i2c_adapter *adapter;
+ struct i2c_client *client_demod;
+ struct i2c_client *client_tuner;
int mfe_shared = 0; /* bus not shared by default */
int ret;
/* Get the first frontend */
- fe0 = videobuf_dvb_get_frontend(&port->frontends, 1);
+ fe0 = vb2_dvb_get_frontend(&port->frontends, 1);
if (!fe0)
return -EINVAL;
- /* init struct videobuf_dvb */
+ /* init struct vb2_dvb */
fe0->dvb.name = dev->name;
/* multi-frontend gate control is undefined or defaults to fe0 */
@@ -1392,7 +1477,7 @@ static int dvb_register(struct cx23885_tsport *port)
fe0->dvb.frontend->ops.tuner_ops.init(fe0->dvb.frontend);
}
/* MFE frontend 2 */
- fe1 = videobuf_dvb_get_frontend(&port->frontends, 2);
+ fe1 = vb2_dvb_get_frontend(&port->frontends, 2);
if (fe1 == NULL)
goto frontend_detach;
/* DVB-C init */
@@ -1491,7 +1576,7 @@ static int dvb_register(struct cx23885_tsport *port)
&hauppauge_hvr4400_si2165_config,
&i2c_bus->i2c_adap);
if (fe0->dvb.frontend != NULL) {
- fe0->dvb.frontend->ops.i2c_gate_ctrl = 0;
+ fe0->dvb.frontend->ops.i2c_gate_ctrl = NULL;
if (!dvb_attach(tda18271_attach,
fe0->dvb.frontend,
0x60, &i2c_bus2->i2c_adap,
@@ -1501,6 +1586,97 @@ static int dvb_register(struct cx23885_tsport *port)
break;
}
break;
+ case CX23885_BOARD_DVBSKY_T9580:
+ i2c_bus = &dev->i2c_bus[0];
+ i2c_bus2 = &dev->i2c_bus[1];
+ switch (port->nr) {
+ /* port b - satellite */
+ case 1:
+ /* attach frontend */
+ fe0->dvb.frontend = dvb_attach(m88ds3103_attach,
+ &dvbsky_t9580_m88ds3103_config,
+ &i2c_bus2->i2c_adap, &adapter);
+ if (fe0->dvb.frontend == NULL)
+ break;
+
+ /* attach tuner */
+ m88ts2022_config.fe = fe0->dvb.frontend;
+ m88ts2022_config.clock = 27000000;
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "m88ts2022", I2C_NAME_SIZE);
+ info.addr = 0x60;
+ info.platform_data = &m88ts2022_config;
+ request_module(info.type);
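+ /*
+ * create the I2C client and pin its driver module so it cannot be
+ * unloaded while the frontend is still using it
+ */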
+ client_tuner = i2c_new_device(adapter, &info);
+ if (client_tuner == NULL ||
+ client_tuner->dev.driver == NULL)
+ goto frontend_detach;
+ if (!try_module_get(client_tuner->dev.driver->owner)) {
+ i2c_unregister_device(client_tuner);
+ goto frontend_detach;
+ }
+
+ /* delegate signal strength measurement to tuner */
+ fe0->dvb.frontend->ops.read_signal_strength =
+ fe0->dvb.frontend->ops.tuner_ops.get_rf_strength;
+
+ /*
+ * for setting the voltage we need to set GPIOs on
+ * the card.
+ */
+ port->fe_set_voltage =
+ fe0->dvb.frontend->ops.set_voltage;
+ fe0->dvb.frontend->ops.set_voltage =
+ dvbsky_t9580_set_voltage;
+
+ port->i2c_client_tuner = client_tuner;
+
+ break;
+ /* port c - terrestrial/cable */
+ case 2:
+ /* attach frontend */
+ si2168_config.i2c_adapter = &adapter;
+ si2168_config.fe = &fe0->dvb.frontend;
+ si2168_config.ts_mode = SI2168_TS_SERIAL;
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "si2168", I2C_NAME_SIZE);
+ info.addr = 0x64;
+ info.platform_data = &si2168_config;
+ request_module(info.type);
+ client_demod = i2c_new_device(&i2c_bus->i2c_adap, &info);
+ if (client_demod == NULL ||
+ client_demod->dev.driver == NULL)
+ goto frontend_detach;
+ if (!try_module_get(client_demod->dev.driver->owner)) {
+ i2c_unregister_device(client_demod);
+ goto frontend_detach;
+ }
+ port->i2c_client_demod = client_demod;
+
+ /* attach tuner */
+ si2157_config.fe = fe0->dvb.frontend;
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "si2157", I2C_NAME_SIZE);
+ info.addr = 0x60;
+ info.platform_data = &si2157_config;
+ request_module(info.type);
+ client_tuner = i2c_new_device(adapter, &info);
+ if (client_tuner == NULL ||
+ client_tuner->dev.driver == NULL) {
+ module_put(client_demod->dev.driver->owner);
+ i2c_unregister_device(client_demod);
+ goto frontend_detach;
+ }
+ if (!try_module_get(client_tuner->dev.driver->owner)) {
+ i2c_unregister_device(client_tuner);
+ module_put(client_demod->dev.driver->owner);
+ i2c_unregister_device(client_demod);
+ goto frontend_detach;
+ }
+ port->i2c_client_tuner = client_tuner;
+ break;
+ }
+ break;
default:
printk(KERN_INFO "%s: The frontend of your DVB/ATSC card "
" isn't supported yet\n",
@@ -1532,7 +1708,7 @@ static int dvb_register(struct cx23885_tsport *port)
fe0->dvb.frontend->ops.analog_ops.standby(fe0->dvb.frontend);
/* register everything */
- ret = videobuf_dvb_register_bus(&port->frontends, THIS_MODULE, port,
+ ret = vb2_dvb_register_bus(&port->frontends, THIS_MODULE, port,
&dev->pci->dev, adapter_nr, mfe_shared);
if (ret)
goto frontend_detach;
@@ -1575,20 +1751,36 @@ static int dvb_register(struct cx23885_tsport *port)
memcpy(port->frontends.adapter.proposed_mac, eeprom + 0xa0, 6);
break;
}
+ case CX23885_BOARD_DVBSKY_T9580: {
+ u8 eeprom[256]; /* 24C02 i2c eeprom */
+
+ if (port->nr > 2)
+ break;
+
+ /* Read entire EEPROM */
+ dev->i2c_bus[0].i2c_client.addr = 0xa0 >> 1;
+ tveeprom_read(&dev->i2c_bus[0].i2c_client, eeprom,
+ sizeof(eeprom));
+ printk(KERN_INFO "DVBSky T9580 port %d MAC address: %pM\n",
+ port->nr, eeprom + 0xc0 + (port->nr-1) * 8);
+ memcpy(port->frontends.adapter.proposed_mac, eeprom + 0xc0 +
+ (port->nr-1) * 8, 6);
+ break;
+ }
}
return ret;
frontend_detach:
port->gate_ctrl = NULL;
- videobuf_dvb_dealloc_frontends(&port->frontends);
+ vb2_dvb_dealloc_frontends(&port->frontends);
return -EINVAL;
}
int cx23885_dvb_register(struct cx23885_tsport *port)
{
- struct videobuf_dvb_frontend *fe0;
+ struct vb2_dvb_frontend *fe0;
struct cx23885_dev *dev = port->dev;
int err, i;
@@ -1605,13 +1797,15 @@ int cx23885_dvb_register(struct cx23885_tsport *port)
port->num_frontends);
for (i = 1; i <= port->num_frontends; i++) {
- if (videobuf_dvb_alloc_frontend(
+ struct vb2_queue *q;
+
+ if (vb2_dvb_alloc_frontend(
&port->frontends, i) == NULL) {
printk(KERN_ERR "%s() failed to alloc\n", __func__);
return -ENOMEM;
}
- fe0 = videobuf_dvb_get_frontend(&port->frontends, i);
+ fe0 = vb2_dvb_get_frontend(&port->frontends, i);
if (!fe0)
err = -EINVAL;
@@ -1627,10 +1821,21 @@ int cx23885_dvb_register(struct cx23885_tsport *port)
/* dvb stuff */
/* We have to init the queue for each frontend on a port. */
printk(KERN_INFO "%s: cx23885 based dvb card\n", dev->name);
- videobuf_queue_sg_init(&fe0->dvb.dvbq, &dvb_qops,
- &dev->pci->dev, &port->slock,
- V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_TOP,
- sizeof(struct cx23885_buffer), port, NULL);
+ q = &fe0->dvb.dvbq;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
+ q->gfp_flags = GFP_DMA32;
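+ /* vb2 calls start_streaming() only once this many buffers are queued */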
+ q->min_buffers_needed = 2;
+ q->drv_priv = port;
+ q->buf_struct_size = sizeof(struct cx23885_buffer);
+ q->ops = &dvb_qops;
+ q->mem_ops = &vb2_dma_sg_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &dev->lock;
+
+ err = vb2_queue_init(q);
+ if (err < 0)
+ return err;
}
err = dvb_register(port);
if (err != 0)
@@ -1642,18 +1847,27 @@ int cx23885_dvb_register(struct cx23885_tsport *port)
int cx23885_dvb_unregister(struct cx23885_tsport *port)
{
- struct videobuf_dvb_frontend *fe0;
-
- /* FIXME: in an error condition where the we have
- * an expected number of frontends (attach problem)
- * then this might not clean up correctly, if 1
- * is invalid.
- * This comment only applies to future boards IF they
- * implement MFE support.
- */
- fe0 = videobuf_dvb_get_frontend(&port->frontends, 1);
+ struct vb2_dvb_frontend *fe0;
+ struct i2c_client *client;
+
+ /* remove I2C client for tuner */
+ client = port->i2c_client_tuner;
+ if (client) {
+ module_put(client->dev.driver->owner);
+ i2c_unregister_device(client);
+ }
+
+ /* remove I2C client for demodulator */
+ client = port->i2c_client_demod;
+ if (client) {
+ module_put(client->dev.driver->owner);
+ i2c_unregister_device(client);
+ }
+
+ fe0 = vb2_dvb_get_frontend(&port->frontends, 1);
+
if (fe0 && fe0->dvb.frontend)
- videobuf_dvb_unregister_bus(&port->frontends);
+ vb2_dvb_unregister_bus(&port->frontends);
switch (port->dev->board) {
case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
@@ -1668,4 +1882,3 @@ int cx23885_dvb_unregister(struct cx23885_tsport *port)
return 0;
}
-
diff --git a/drivers/media/pci/cx23885/cx23885-f300.c b/drivers/media/pci/cx23885/cx23885-f300.c
index 5444cc526008..6f817d8732da 100644
--- a/drivers/media/pci/cx23885/cx23885-f300.c
+++ b/drivers/media/pci/cx23885/cx23885-f300.c
@@ -22,10 +22,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "cx23885.h"
diff --git a/drivers/media/pci/cx23885/cx23885-i2c.c b/drivers/media/pci/cx23885/cx23885-i2c.c
index 4887314339cb..fd71306af6e2 100644
--- a/drivers/media/pci/cx23885/cx23885-i2c.c
+++ b/drivers/media/pci/cx23885/cx23885-i2c.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
@@ -386,11 +382,3 @@ void cx23885_av_clk(struct cx23885_dev *dev, int enable)
i2c_xfer(&dev->i2c_bus[2].i2c_adap, &msg, 1);
}
-
-/* ----------------------------------------------------------------------- */
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/pci/cx23885/cx23885-input.c b/drivers/media/pci/cx23885/cx23885-input.c
index 1940c18e186c..9d37fe661691 100644
--- a/drivers/media/pci/cx23885/cx23885-input.c
+++ b/drivers/media/pci/cx23885/cx23885-input.c
@@ -28,11 +28,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*/
#include <linux/slab.h>
diff --git a/drivers/media/pci/cx23885/cx23885-input.h b/drivers/media/pci/cx23885/cx23885-input.h
index 87dc44e69977..6199c7e86e83 100644
--- a/drivers/media/pci/cx23885/cx23885-input.h
+++ b/drivers/media/pci/cx23885/cx23885-input.h
@@ -14,11 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*/
#ifndef _CX23885_INPUT_H_
diff --git a/drivers/media/pci/cx23885/cx23885-ioctl.c b/drivers/media/pci/cx23885/cx23885-ioctl.c
index 271d69d1ca8c..d2cdd40f79f5 100644
--- a/drivers/media/pci/cx23885/cx23885-ioctl.c
+++ b/drivers/media/pci/cx23885/cx23885-ioctl.c
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "cx23885.h"
@@ -28,7 +24,7 @@
int cx23885_g_chip_info(struct file *file, void *fh,
struct v4l2_dbg_chip_info *chip)
{
- struct cx23885_dev *dev = ((struct cx23885_fh *)fh)->dev;
+ struct cx23885_dev *dev = video_drvdata(file);
if (chip->match.addr > 1)
return -EINVAL;
@@ -64,7 +60,7 @@ static int cx23417_g_register(struct cx23885_dev *dev,
int cx23885_g_register(struct file *file, void *fh,
struct v4l2_dbg_register *reg)
{
- struct cx23885_dev *dev = ((struct cx23885_fh *)fh)->dev;
+ struct cx23885_dev *dev = video_drvdata(file);
if (reg->match.addr > 1)
return -EINVAL;
@@ -96,7 +92,7 @@ static int cx23417_s_register(struct cx23885_dev *dev,
int cx23885_s_register(struct file *file, void *fh,
const struct v4l2_dbg_register *reg)
{
- struct cx23885_dev *dev = ((struct cx23885_fh *)fh)->dev;
+ struct cx23885_dev *dev = video_drvdata(file);
if (reg->match.addr > 1)
return -EINVAL;
diff --git a/drivers/media/pci/cx23885/cx23885-ioctl.h b/drivers/media/pci/cx23885/cx23885-ioctl.h
index 92d9f0774366..cc5dbb6c1afc 100644
--- a/drivers/media/pci/cx23885/cx23885-ioctl.h
+++ b/drivers/media/pci/cx23885/cx23885-ioctl.h
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _CX23885_IOCTL_H_
diff --git a/drivers/media/pci/cx23885/cx23885-ir.c b/drivers/media/pci/cx23885/cx23885-ir.c
index bfef19359291..89dc4cc3e1ce 100644
--- a/drivers/media/pci/cx23885/cx23885-ir.c
+++ b/drivers/media/pci/cx23885/cx23885-ir.c
@@ -14,11 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*/
#include <media/v4l2-device.h>
diff --git a/drivers/media/pci/cx23885/cx23885-ir.h b/drivers/media/pci/cx23885/cx23885-ir.h
index 0c9d8bda9e28..8e93d1f10ae0 100644
--- a/drivers/media/pci/cx23885/cx23885-ir.h
+++ b/drivers/media/pci/cx23885/cx23885-ir.h
@@ -14,11 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*/
#ifndef _CX23885_IR_H_
diff --git a/drivers/media/pci/cx23885/cx23885-reg.h b/drivers/media/pci/cx23885/cx23885-reg.h
index a99936e0cbc2..2d3cbafe2402 100644
--- a/drivers/media/pci/cx23885/cx23885-reg.h
+++ b/drivers/media/pci/cx23885/cx23885-reg.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _CX23885_REG_H_
diff --git a/drivers/media/pci/cx23885/cx23885-vbi.c b/drivers/media/pci/cx23885/cx23885-vbi.c
index a1154f035bc1..a7c6ef8f3ea3 100644
--- a/drivers/media/pci/cx23885/cx23885-vbi.c
+++ b/drivers/media/pci/cx23885/cx23885-vbi.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
@@ -42,33 +38,32 @@ MODULE_PARM_DESC(vbi_debug, "enable debug messages [vbi]");
/* ------------------------------------------------------------------ */
#define VBI_LINE_LENGTH 1440
-#define NTSC_VBI_START_LINE 10 /* line 10 - 21 */
-#define NTSC_VBI_END_LINE 21
-#define NTSC_VBI_LINES (NTSC_VBI_END_LINE - NTSC_VBI_START_LINE + 1)
+#define VBI_NTSC_LINE_COUNT 12
+#define VBI_PAL_LINE_COUNT 18
int cx23885_vbi_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx23885_fh *fh = priv;
- struct cx23885_dev *dev = fh->dev;
+ struct cx23885_dev *dev = video_drvdata(file);
+ f->fmt.vbi.sampling_rate = 27000000;
+ f->fmt.vbi.samples_per_line = VBI_LINE_LENGTH;
+ f->fmt.vbi.sample_format = V4L2_PIX_FMT_GREY;
+ f->fmt.vbi.offset = 0;
+ f->fmt.vbi.flags = 0;
if (dev->tvnorm & V4L2_STD_525_60) {
/* ntsc */
- f->fmt.vbi.samples_per_line = VBI_LINE_LENGTH;
- f->fmt.vbi.sampling_rate = 27000000;
- f->fmt.vbi.sample_format = V4L2_PIX_FMT_GREY;
- f->fmt.vbi.offset = 0;
- f->fmt.vbi.flags = 0;
- f->fmt.vbi.start[0] = 10;
- f->fmt.vbi.count[0] = 17;
- f->fmt.vbi.start[1] = 263 + 10 + 1;
- f->fmt.vbi.count[1] = 17;
+ f->fmt.vbi.start[0] = V4L2_VBI_ITU_525_F1_START + 9;
+ f->fmt.vbi.start[1] = V4L2_VBI_ITU_525_F2_START + 9;
+ f->fmt.vbi.count[0] = VBI_NTSC_LINE_COUNT;
+ f->fmt.vbi.count[1] = VBI_NTSC_LINE_COUNT;
} else if (dev->tvnorm & V4L2_STD_625_50) {
/* pal */
- f->fmt.vbi.sampling_rate = 35468950;
- f->fmt.vbi.start[0] = 7 - 1;
- f->fmt.vbi.start[1] = 319 - 1;
+ f->fmt.vbi.start[0] = V4L2_VBI_ITU_625_F1_START + 5;
+ f->fmt.vbi.start[1] = V4L2_VBI_ITU_625_F2_START + 5;
+ f->fmt.vbi.count[0] = VBI_PAL_LINE_COUNT;
+ f->fmt.vbi.count[1] = VBI_PAL_LINE_COUNT;
}
return 0;
@@ -94,15 +89,6 @@ int cx23885_vbi_irq(struct cx23885_dev *dev, u32 status)
handled++;
}
- if (status & VID_BC_MSK_VBI_RISCI2) {
- dprintk(1, "%s() VID_BC_MSK_VBI_RISCI2\n", __func__);
- dprintk(2, "stopper vbi\n");
- spin_lock(&dev->slock);
- cx23885_restart_vbi_queue(dev, &dev->vbiq);
- spin_unlock(&dev->slock);
- handled++;
- }
-
return handled;
}
@@ -114,13 +100,13 @@ static int cx23885_start_vbi_dma(struct cx23885_dev *dev,
/* setup fifo + format */
cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02],
- buf->vb.width, buf->risc.dma);
+ VBI_LINE_LENGTH, buf->risc.dma);
/* reset counter */
cx_write(VID_A_GPCNT_CTL, 3);
cx_write(VID_A_VBI_CTRL, 3);
cx_write(VBI_A_GPCNT_CTL, 3);
- q->count = 1;
+ q->count = 0;
/* enable irq */
cx23885_irq_add_enable(dev, 0x01);
@@ -133,163 +119,153 @@ static int cx23885_start_vbi_dma(struct cx23885_dev *dev,
return 0;
}
+/* ------------------------------------------------------------------ */
-int cx23885_restart_vbi_queue(struct cx23885_dev *dev,
- struct cx23885_dmaqueue *q)
+static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], void *alloc_ctxs[])
{
- struct cx23885_buffer *buf;
- struct list_head *item;
-
- if (list_empty(&q->active))
- return 0;
-
- buf = list_entry(q->active.next, struct cx23885_buffer, vb.queue);
- dprintk(2, "restart_queue [%p/%d]: restart dma\n",
- buf, buf->vb.i);
- cx23885_start_vbi_dma(dev, q, buf);
- list_for_each(item, &q->active) {
- buf = list_entry(item, struct cx23885_buffer, vb.queue);
- buf->count = q->count++;
- }
- mod_timer(&q->timeout, jiffies + (BUFFER_TIMEOUT / 30));
+ struct cx23885_dev *dev = q->drv_priv;
+ unsigned lines = VBI_PAL_LINE_COUNT;
+
+ if (dev->tvnorm & V4L2_STD_525_60)
+ lines = VBI_NTSC_LINE_COUNT;
+ *num_planes = 1;
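+ /* both fields are captured, hence the factor of two */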
+ sizes[0] = lines * VBI_LINE_LENGTH * 2;
return 0;
}
-void cx23885_vbi_timeout(unsigned long data)
+static int buffer_prepare(struct vb2_buffer *vb)
{
- struct cx23885_dev *dev = (struct cx23885_dev *)data;
- struct cx23885_dmaqueue *q = &dev->vbiq;
- struct cx23885_buffer *buf;
- unsigned long flags;
+ struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
+ struct cx23885_buffer *buf = container_of(vb,
+ struct cx23885_buffer, vb);
+ struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
+ unsigned lines = VBI_PAL_LINE_COUNT;
+ int ret;
- /* Stop the VBI engine */
- cx_clear(VID_A_DMA_CTL, 0x22);
+ if (dev->tvnorm & V4L2_STD_525_60)
+ lines = VBI_NTSC_LINE_COUNT;
- spin_lock_irqsave(&dev->slock, flags);
- while (!list_empty(&q->active)) {
- buf = list_entry(q->active.next, struct cx23885_buffer,
- vb.queue);
- list_del(&buf->vb.queue);
- buf->vb.state = VIDEOBUF_ERROR;
- wake_up(&buf->vb.done);
- printk("%s/0: [%p/%d] timeout - dma=0x%08lx\n", dev->name,
- buf, buf->vb.i, (unsigned long)buf->risc.dma);
- }
- cx23885_restart_vbi_queue(dev, q);
- spin_unlock_irqrestore(&dev->slock, flags);
-}
+ if (vb2_plane_size(vb, 0) < lines * VBI_LINE_LENGTH * 2)
+ return -EINVAL;
+ vb2_set_plane_payload(vb, 0, lines * VBI_LINE_LENGTH * 2);
-/* ------------------------------------------------------------------ */
-#define VBI_LINE_LENGTH 1440
-#define VBI_LINE_COUNT 17
+ ret = dma_map_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
+ if (!ret)
+ return -EIO;
-static int
-vbi_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size)
-{
- *size = VBI_LINE_COUNT * VBI_LINE_LENGTH * 2;
- if (0 == *count)
- *count = vbibufs;
- if (*count < 2)
- *count = 2;
- if (*count > 32)
- *count = 32;
+ cx23885_risc_vbibuffer(dev->pci, &buf->risc,
+ sgt->sgl,
+ 0, VBI_LINE_LENGTH * lines,
+ VBI_LINE_LENGTH, 0,
+ lines);
return 0;
}
-static int
-vbi_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
- enum v4l2_field field)
+static void buffer_finish(struct vb2_buffer *vb)
{
- struct cx23885_fh *fh = q->priv_data;
- struct cx23885_dev *dev = fh->dev;
+ struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
struct cx23885_buffer *buf = container_of(vb,
struct cx23885_buffer, vb);
- struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);
- unsigned int size;
- int rc;
-
- size = VBI_LINE_COUNT * VBI_LINE_LENGTH * 2;
- if (0 != buf->vb.baddr && buf->vb.bsize < size)
- return -EINVAL;
+ struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
- if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
- buf->vb.width = VBI_LINE_LENGTH;
- buf->vb.height = VBI_LINE_COUNT;
- buf->vb.size = size;
- buf->vb.field = V4L2_FIELD_SEQ_TB;
-
- rc = videobuf_iolock(q, &buf->vb, NULL);
- if (0 != rc)
- goto fail;
- cx23885_risc_vbibuffer(dev->pci, &buf->risc,
- dma->sglist,
- 0, buf->vb.width * buf->vb.height,
- buf->vb.width, 0,
- buf->vb.height);
- }
- buf->vb.state = VIDEOBUF_PREPARED;
- return 0;
+ cx23885_free_buffer(vb->vb2_queue->drv_priv, buf);
- fail:
- cx23885_free_buffer(q, buf);
- return rc;
+ dma_unmap_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
}
-static void
-vbi_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
+/*
+ * The risc program for each buffer works as follows: it starts with a simple
+ * 'JUMP to addr + 12', which is effectively a NOP. Then the code to DMA the
+ * buffer follows and at the end we have a JUMP back to the start + 12 (skipping
+ * the initial JUMP).
+ *
+ * This is the risc program of the first buffer to be queued when the active
+ * list is empty: it just keeps DMAing this buffer without generating any
+ * interrupts.
+ *
+ * If a new buffer is added then the initial JUMP in the code for that buffer
+ * will generate an interrupt which signals that the previous buffer has been
+ * DMAed successfully and that it can be returned to userspace.
+ *
+ * It also sets the final jump of the previous buffer to the start of the new
+ * buffer, thus chaining the new buffer into the DMA chain. This is a single
+ * atomic u32 write, so there is no race condition.
+ *
+ * The end result of all this is that you only get an interrupt when a buffer
+ * is ready, so the control flow is very simple.
+ */
+static void buffer_queue(struct vb2_buffer *vb)
{
- struct cx23885_buffer *buf =
- container_of(vb, struct cx23885_buffer, vb);
- struct cx23885_buffer *prev;
- struct cx23885_fh *fh = vq->priv_data;
- struct cx23885_dev *dev = fh->dev;
- struct cx23885_dmaqueue *q = &dev->vbiq;
-
- /* add jump to stopper */
- buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
- buf->risc.jmp[1] = cpu_to_le32(q->stopper.dma);
+ struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
+ struct cx23885_buffer *buf = container_of(vb, struct cx23885_buffer, vb);
+ struct cx23885_buffer *prev;
+ struct cx23885_dmaqueue *q = &dev->vbiq;
+ unsigned long flags;
+
+ buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
+ buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
+ buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 12);
buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */
if (list_empty(&q->active)) {
- list_add_tail(&buf->vb.queue, &q->active);
- cx23885_start_vbi_dma(dev, q, buf);
- buf->vb.state = VIDEOBUF_ACTIVE;
- buf->count = q->count++;
- mod_timer(&q->timeout, jiffies + (BUFFER_TIMEOUT / 30));
+ spin_lock_irqsave(&dev->slock, flags);
+ list_add_tail(&buf->queue, &q->active);
+ spin_unlock_irqrestore(&dev->slock, flags);
dprintk(2, "[%p/%d] vbi_queue - first active\n",
- buf, buf->vb.i);
+ buf, buf->vb.v4l2_buf.index);
} else {
+ buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
prev = list_entry(q->active.prev, struct cx23885_buffer,
- vb.queue);
- list_add_tail(&buf->vb.queue, &q->active);
- buf->vb.state = VIDEOBUF_ACTIVE;
- buf->count = q->count++;
+ queue);
+ spin_lock_irqsave(&dev->slock, flags);
+ list_add_tail(&buf->queue, &q->active);
+ spin_unlock_irqrestore(&dev->slock, flags);
prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
- prev->risc.jmp[2] = cpu_to_le32(0); /* Bits 63-32 */
dprintk(2, "[%p/%d] buffer_queue - append to active\n",
- buf, buf->vb.i);
+ buf, buf->vb.v4l2_buf.index);
}
}
-static void vbi_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
+static int cx23885_start_streaming(struct vb2_queue *q, unsigned int count)
{
- struct cx23885_buffer *buf =
- container_of(vb, struct cx23885_buffer, vb);
+ struct cx23885_dev *dev = q->drv_priv;
+ struct cx23885_dmaqueue *dmaq = &dev->vbiq;
+ struct cx23885_buffer *buf = list_entry(dmaq->active.next,
+ struct cx23885_buffer, queue);
- cx23885_free_buffer(q, buf);
+ cx23885_start_vbi_dma(dev, dmaq, buf);
+ return 0;
}
-struct videobuf_queue_ops cx23885_vbi_qops = {
- .buf_setup = vbi_setup,
- .buf_prepare = vbi_prepare,
- .buf_queue = vbi_queue,
- .buf_release = vbi_release,
-};
+static void cx23885_stop_streaming(struct vb2_queue *q)
+{
+ struct cx23885_dev *dev = q->drv_priv;
+ struct cx23885_dmaqueue *dmaq = &dev->vbiq;
+ unsigned long flags;
-/* ------------------------------------------------------------------ */
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
+ cx_clear(VID_A_DMA_CTL, 0x22); /* FIFO and RISC enable */
+ spin_lock_irqsave(&dev->slock, flags);
+ while (!list_empty(&dmaq->active)) {
+ struct cx23885_buffer *buf = list_entry(dmaq->active.next,
+ struct cx23885_buffer, queue);
+
+ list_del(&buf->queue);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+ spin_unlock_irqrestore(&dev->slock, flags);
+}
+
+
+struct vb2_ops cx23885_vbi_qops = {
+ .queue_setup = queue_setup,
+ .buf_prepare = buffer_prepare,
+ .buf_finish = buffer_finish,
+ .buf_queue = buffer_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = cx23885_start_streaming,
+ .stop_streaming = cx23885_stop_streaming,
+};
diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
index 91e4cb457296..682a4f95df6b 100644
--- a/drivers/media/pci/cx23885/cx23885-video.c
+++ b/drivers/media/pci/cx23885/cx23885-video.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
@@ -35,6 +31,7 @@
#include "cx23885-video.h"
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
#include "cx23885-ioctl.h"
#include "tuner-xc2028.h"
@@ -48,15 +45,12 @@ MODULE_LICENSE("GPL");
static unsigned int video_nr[] = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
static unsigned int vbi_nr[] = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
-static unsigned int radio_nr[] = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
module_param_array(video_nr, int, NULL, 0444);
module_param_array(vbi_nr, int, NULL, 0444);
-module_param_array(radio_nr, int, NULL, 0444);
MODULE_PARM_DESC(video_nr, "video device numbers");
MODULE_PARM_DESC(vbi_nr, "vbi device numbers");
-MODULE_PARM_DESC(radio_nr, "radio device numbers");
static unsigned int video_debug;
module_param(video_debug, int, 0644);
@@ -79,77 +73,14 @@ MODULE_PARM_DESC(vid_limit, "capture memory limit in megabytes");
/* static data */
#define FORMAT_FLAGS_PACKED 0x01
-#if 0
-static struct cx23885_fmt formats[] = {
- {
- .name = "8 bpp, gray",
- .fourcc = V4L2_PIX_FMT_GREY,
- .depth = 8,
- .flags = FORMAT_FLAGS_PACKED,
- }, {
- .name = "15 bpp RGB, le",
- .fourcc = V4L2_PIX_FMT_RGB555,
- .depth = 16,
- .flags = FORMAT_FLAGS_PACKED,
- }, {
- .name = "15 bpp RGB, be",
- .fourcc = V4L2_PIX_FMT_RGB555X,
- .depth = 16,
- .flags = FORMAT_FLAGS_PACKED,
- }, {
- .name = "16 bpp RGB, le",
- .fourcc = V4L2_PIX_FMT_RGB565,
- .depth = 16,
- .flags = FORMAT_FLAGS_PACKED,
- }, {
- .name = "16 bpp RGB, be",
- .fourcc = V4L2_PIX_FMT_RGB565X,
- .depth = 16,
- .flags = FORMAT_FLAGS_PACKED,
- }, {
- .name = "24 bpp RGB, le",
- .fourcc = V4L2_PIX_FMT_BGR24,
- .depth = 24,
- .flags = FORMAT_FLAGS_PACKED,
- }, {
- .name = "32 bpp RGB, le",
- .fourcc = V4L2_PIX_FMT_BGR32,
- .depth = 32,
- .flags = FORMAT_FLAGS_PACKED,
- }, {
- .name = "32 bpp RGB, be",
- .fourcc = V4L2_PIX_FMT_RGB32,
- .depth = 32,
- .flags = FORMAT_FLAGS_PACKED,
- }, {
- .name = "4:2:2, packed, YUYV",
- .fourcc = V4L2_PIX_FMT_YUYV,
- .depth = 16,
- .flags = FORMAT_FLAGS_PACKED,
- }, {
- .name = "4:2:2, packed, UYVY",
- .fourcc = V4L2_PIX_FMT_UYVY,
- .depth = 16,
- .flags = FORMAT_FLAGS_PACKED,
- },
-};
-#else
static struct cx23885_fmt formats[] = {
{
-#if 0
- .name = "4:2:2, packed, UYVY",
- .fourcc = V4L2_PIX_FMT_UYVY,
- .depth = 16,
- .flags = FORMAT_FLAGS_PACKED,
- }, {
-#endif
.name = "4:2:2, packed, YUYV",
.fourcc = V4L2_PIX_FMT_YUYV,
.depth = 16,
.flags = FORMAT_FLAGS_PACKED,
}
};
-#endif
static struct cx23885_fmt *format_by_fourcc(unsigned int fourcc)
{
@@ -158,163 +89,27 @@ static struct cx23885_fmt *format_by_fourcc(unsigned int fourcc)
for (i = 0; i < ARRAY_SIZE(formats); i++)
if (formats[i].fourcc == fourcc)
return formats+i;
-
- printk(KERN_ERR "%s(%c%c%c%c) NOT FOUND\n", __func__,
- (fourcc & 0xff),
- ((fourcc >> 8) & 0xff),
- ((fourcc >> 16) & 0xff),
- ((fourcc >> 24) & 0xff)
- );
return NULL;
}
/* ------------------------------------------------------------------- */
-static const struct v4l2_queryctrl no_ctl = {
- .name = "42",
- .flags = V4L2_CTRL_FLAG_DISABLED,
-};
-
-static struct cx23885_ctrl cx23885_ctls[] = {
- /* --- video --- */
- {
- .v = {
- .id = V4L2_CID_BRIGHTNESS,
- .name = "Brightness",
- .minimum = 0x00,
- .maximum = 0xff,
- .step = 1,
- .default_value = 0x7f,
- .type = V4L2_CTRL_TYPE_INTEGER,
- },
- .off = 128,
- .reg = LUMA_CTRL,
- .mask = 0x00ff,
- .shift = 0,
- }, {
- .v = {
- .id = V4L2_CID_CONTRAST,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 0x7f,
- .step = 1,
- .default_value = 0x3f,
- .type = V4L2_CTRL_TYPE_INTEGER,
- },
- .off = 0,
- .reg = LUMA_CTRL,
- .mask = 0xff00,
- .shift = 8,
- }, {
- .v = {
- .id = V4L2_CID_HUE,
- .name = "Hue",
- .minimum = -127,
- .maximum = 128,
- .step = 1,
- .default_value = 0x0,
- .type = V4L2_CTRL_TYPE_INTEGER,
- },
- .off = 128,
- .reg = CHROMA_CTRL,
- .mask = 0xff0000,
- .shift = 16,
- }, {
- /* strictly, this only describes only U saturation.
- * V saturation is handled specially through code.
- */
- .v = {
- .id = V4L2_CID_SATURATION,
- .name = "Saturation",
- .minimum = 0,
- .maximum = 0x7f,
- .step = 1,
- .default_value = 0x3f,
- .type = V4L2_CTRL_TYPE_INTEGER,
- },
- .off = 0,
- .reg = CHROMA_CTRL,
- .mask = 0x00ff,
- .shift = 0,
- }, {
- /* --- audio --- */
- .v = {
- .id = V4L2_CID_AUDIO_MUTE,
- .name = "Mute",
- .minimum = 0,
- .maximum = 1,
- .default_value = 1,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- },
- .reg = PATH1_CTL1,
- .mask = (0x1f << 24),
- .shift = 24,
- }, {
- .v = {
- .id = V4L2_CID_AUDIO_VOLUME,
- .name = "Volume",
- .minimum = 0,
- .maximum = 65535,
- .step = 65535 / 100,
- .default_value = 65535,
- .type = V4L2_CTRL_TYPE_INTEGER,
- },
- .reg = PATH1_VOL_CTL,
- .mask = 0xff,
- .shift = 0,
- }
-};
-static const int CX23885_CTLS = ARRAY_SIZE(cx23885_ctls);
-
-/* Must be sorted from low to high control ID! */
-static const u32 cx23885_user_ctrls[] = {
- V4L2_CID_USER_CLASS,
- V4L2_CID_BRIGHTNESS,
- V4L2_CID_CONTRAST,
- V4L2_CID_SATURATION,
- V4L2_CID_HUE,
- V4L2_CID_AUDIO_VOLUME,
- V4L2_CID_AUDIO_MUTE,
- 0
-};
-
-static const u32 *ctrl_classes[] = {
- cx23885_user_ctrls,
- NULL
-};
-
void cx23885_video_wakeup(struct cx23885_dev *dev,
struct cx23885_dmaqueue *q, u32 count)
{
struct cx23885_buffer *buf;
- int bc;
-
- for (bc = 0;; bc++) {
- if (list_empty(&q->active))
- break;
- buf = list_entry(q->active.next,
- struct cx23885_buffer, vb.queue);
-
- /* count comes from the hw and is is 16bit wide --
- * this trick handles wrap-arounds correctly for
- * up to 32767 buffers in flight... */
- if ((s16) (count - buf->count) < 0)
- break;
-
- v4l2_get_timestamp(&buf->vb.ts);
- dprintk(2, "[%p/%d] wakeup reg=%d buf=%d\n", buf, buf->vb.i,
- count, buf->count);
- buf->vb.state = VIDEOBUF_DONE;
- list_del(&buf->vb.queue);
- wake_up(&buf->vb.done);
- }
+
if (list_empty(&q->active))
- del_timer(&q->timeout);
- else
- mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
- if (bc != 1)
- printk(KERN_ERR "%s: %d buffers handled (should be 1)\n",
- __func__, bc);
+ return;
+ buf = list_entry(q->active.next,
+ struct cx23885_buffer, queue);
+
+ buf->vb.v4l2_buf.sequence = q->count++;
+ v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
+ dprintk(2, "[%p/%d] wakeup reg=%d buf=%d\n", buf, buf->vb.v4l2_buf.index,
+ count, q->count);
+ list_del(&buf->queue);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
}
int cx23885_set_tvnorm(struct cx23885_dev *dev, v4l2_std_id norm)
@@ -324,6 +119,12 @@ int cx23885_set_tvnorm(struct cx23885_dev *dev, v4l2_std_id norm)
(unsigned int)norm,
v4l2_norm_to_name(norm));
+ if (dev->tvnorm != norm) {
+ if (vb2_is_busy(&dev->vb2_vidq) || vb2_is_busy(&dev->vb2_vbiq) ||
+ vb2_is_busy(&dev->vb2_mpegq))
+ return -EBUSY;
+ }
+
dev->tvnorm = norm;
call_all(dev, video, s_std, norm);
@@ -345,79 +146,13 @@ static struct video_device *cx23885_vdev_init(struct cx23885_dev *dev,
*vfd = *template;
vfd->v4l2_dev = &dev->v4l2_dev;
vfd->release = video_device_release;
+ vfd->lock = &dev->lock;
snprintf(vfd->name, sizeof(vfd->name), "%s (%s)",
cx23885_boards[dev->board].name, type);
video_set_drvdata(vfd, dev);
return vfd;
}
-static int cx23885_ctrl_query(struct v4l2_queryctrl *qctrl)
-{
- int i;
-
- if (qctrl->id < V4L2_CID_BASE ||
- qctrl->id >= V4L2_CID_LASTP1)
- return -EINVAL;
- for (i = 0; i < CX23885_CTLS; i++)
- if (cx23885_ctls[i].v.id == qctrl->id)
- break;
- if (i == CX23885_CTLS) {
- *qctrl = no_ctl;
- return 0;
- }
- *qctrl = cx23885_ctls[i].v;
- return 0;
-}
-
-/* ------------------------------------------------------------------- */
-/* resource management */
-
-static int res_get(struct cx23885_dev *dev, struct cx23885_fh *fh,
- unsigned int bit)
-{
- dprintk(1, "%s()\n", __func__);
- if (fh->resources & bit)
- /* have it already allocated */
- return 1;
-
- /* is it free? */
- mutex_lock(&dev->lock);
- if (dev->resources & bit) {
- /* no, someone else uses it */
- mutex_unlock(&dev->lock);
- return 0;
- }
- /* it's free, grab it */
- fh->resources |= bit;
- dev->resources |= bit;
- dprintk(1, "res: get %d\n", bit);
- mutex_unlock(&dev->lock);
- return 1;
-}
-
-static int res_check(struct cx23885_fh *fh, unsigned int bit)
-{
- return fh->resources & bit;
-}
-
-static int res_locked(struct cx23885_dev *dev, unsigned int bit)
-{
- return dev->resources & bit;
-}
-
-static void res_free(struct cx23885_dev *dev, struct cx23885_fh *fh,
- unsigned int bits)
-{
- BUG_ON((fh->resources & bits) != bits);
- dprintk(1, "%s()\n", __func__);
-
- mutex_lock(&dev->lock);
- fh->resources &= ~bits;
- dev->resources &= ~bits;
- dprintk(1, "res: put %d\n", bits);
- mutex_unlock(&dev->lock);
-}
-
int cx23885_flatiron_write(struct cx23885_dev *dev, u8 reg, u8 data)
{
/* 8 bit registers, 8 bit values */
@@ -567,7 +302,7 @@ static int cx23885_start_video_dma(struct cx23885_dev *dev,
/* reset counter */
cx_write(VID_A_GPCNT_CTL, 3);
- q->count = 1;
+ q->count = 0;
/* enable irq */
cx23885_irq_add_enable(dev, 0x01);
@@ -580,479 +315,206 @@ static int cx23885_start_video_dma(struct cx23885_dev *dev,
return 0;
}
-
-static int cx23885_restart_video_queue(struct cx23885_dev *dev,
- struct cx23885_dmaqueue *q)
-{
- struct cx23885_buffer *buf, *prev;
- struct list_head *item;
- dprintk(1, "%s()\n", __func__);
-
- if (!list_empty(&q->active)) {
- buf = list_entry(q->active.next, struct cx23885_buffer,
- vb.queue);
- dprintk(2, "restart_queue [%p/%d]: restart dma\n",
- buf, buf->vb.i);
- cx23885_start_video_dma(dev, q, buf);
- list_for_each(item, &q->active) {
- buf = list_entry(item, struct cx23885_buffer,
- vb.queue);
- buf->count = q->count++;
- }
- mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
- return 0;
- }
-
- prev = NULL;
- for (;;) {
- if (list_empty(&q->queued))
- return 0;
- buf = list_entry(q->queued.next, struct cx23885_buffer,
- vb.queue);
- if (NULL == prev) {
- list_move_tail(&buf->vb.queue, &q->active);
- cx23885_start_video_dma(dev, q, buf);
- buf->vb.state = VIDEOBUF_ACTIVE;
- buf->count = q->count++;
- mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
- dprintk(2, "[%p/%d] restart_queue - first active\n",
- buf, buf->vb.i);
-
- } else if (prev->vb.width == buf->vb.width &&
- prev->vb.height == buf->vb.height &&
- prev->fmt == buf->fmt) {
- list_move_tail(&buf->vb.queue, &q->active);
- buf->vb.state = VIDEOBUF_ACTIVE;
- buf->count = q->count++;
- prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
- prev->risc.jmp[2] = cpu_to_le32(0); /* Bits 63 - 32 */
- dprintk(2, "[%p/%d] restart_queue - move to active\n",
- buf, buf->vb.i);
- } else {
- return 0;
- }
- prev = buf;
- }
-}
-
-static int buffer_setup(struct videobuf_queue *q, unsigned int *count,
- unsigned int *size)
+static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], void *alloc_ctxs[])
{
- struct cx23885_fh *fh = q->priv_data;
+ struct cx23885_dev *dev = q->drv_priv;
- *size = fh->fmt->depth*fh->width*fh->height >> 3;
- if (0 == *count)
- *count = 32;
- if (*size * *count > vid_limit * 1024 * 1024)
- *count = (vid_limit * 1024 * 1024) / *size;
+ *num_planes = 1;
+ sizes[0] = (dev->fmt->depth * dev->width * dev->height) >> 3;
return 0;
}
-static int buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
- enum v4l2_field field)
+static int buffer_prepare(struct vb2_buffer *vb)
{
- struct cx23885_fh *fh = q->priv_data;
- struct cx23885_dev *dev = fh->dev;
+ struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
struct cx23885_buffer *buf =
container_of(vb, struct cx23885_buffer, vb);
- int rc, init_buffer = 0;
u32 line0_offset, line1_offset;
- struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);
+ struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
int field_tff;
+ int ret;
- BUG_ON(NULL == fh->fmt);
- if (fh->width < 48 || fh->width > norm_maxw(dev->tvnorm) ||
- fh->height < 32 || fh->height > norm_maxh(dev->tvnorm))
- return -EINVAL;
- buf->vb.size = (fh->width * fh->height * fh->fmt->depth) >> 3;
- if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
+ buf->bpl = (dev->width * dev->fmt->depth) >> 3;
+
+ if (vb2_plane_size(vb, 0) < dev->height * buf->bpl)
return -EINVAL;
+ vb2_set_plane_payload(vb, 0, dev->height * buf->bpl);
- if (buf->fmt != fh->fmt ||
- buf->vb.width != fh->width ||
- buf->vb.height != fh->height ||
- buf->vb.field != field) {
- buf->fmt = fh->fmt;
- buf->vb.width = fh->width;
- buf->vb.height = fh->height;
- buf->vb.field = field;
- init_buffer = 1;
- }
+ ret = dma_map_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
+ if (!ret)
+ return -EIO;
- if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
- init_buffer = 1;
- rc = videobuf_iolock(q, &buf->vb, NULL);
- if (0 != rc)
- goto fail;
- }
+ switch (dev->field) {
+ case V4L2_FIELD_TOP:
+ cx23885_risc_buffer(dev->pci, &buf->risc,
+ sgt->sgl, 0, UNSET,
+ buf->bpl, 0, dev->height);
+ break;
+ case V4L2_FIELD_BOTTOM:
+ cx23885_risc_buffer(dev->pci, &buf->risc,
+ sgt->sgl, UNSET, 0,
+ buf->bpl, 0, dev->height);
+ break;
+ case V4L2_FIELD_INTERLACED:
+ if (dev->tvnorm & V4L2_STD_525_60)
+ /* NTSC or */
+ field_tff = 1;
+ else
+ field_tff = 0;
+
+ if (cx23885_boards[dev->board].force_bff)
+ /* PAL / SECAM OR 888 in NTSC MODE */
+ field_tff = 0;
- if (init_buffer) {
- buf->bpl = buf->vb.width * buf->fmt->depth >> 3;
- switch (buf->vb.field) {
- case V4L2_FIELD_TOP:
- cx23885_risc_buffer(dev->pci, &buf->risc,
- dma->sglist, 0, UNSET,
- buf->bpl, 0, buf->vb.height);
- break;
- case V4L2_FIELD_BOTTOM:
- cx23885_risc_buffer(dev->pci, &buf->risc,
- dma->sglist, UNSET, 0,
- buf->bpl, 0, buf->vb.height);
- break;
- case V4L2_FIELD_INTERLACED:
- if (dev->tvnorm & V4L2_STD_NTSC)
- /* NTSC or */
- field_tff = 1;
- else
- field_tff = 0;
-
- if (cx23885_boards[dev->board].force_bff)
- /* PAL / SECAM OR 888 in NTSC MODE */
- field_tff = 0;
-
- if (field_tff) {
- /* cx25840 transmits NTSC bottom field first */
- dprintk(1, "%s() Creating TFF/NTSC risc\n",
+ if (field_tff) {
+ /* cx25840 transmits NTSC bottom field first */
+ dprintk(1, "%s() Creating TFF/NTSC risc\n",
__func__);
- line0_offset = buf->bpl;
- line1_offset = 0;
- } else {
- /* All other formats are top field first */
- dprintk(1, "%s() Creating BFF/PAL/SECAM risc\n",
+ line0_offset = buf->bpl;
+ line1_offset = 0;
+ } else {
+ /* All other formats are top field first */
+ dprintk(1, "%s() Creating BFF/PAL/SECAM risc\n",
__func__);
- line0_offset = 0;
- line1_offset = buf->bpl;
- }
- cx23885_risc_buffer(dev->pci, &buf->risc,
- dma->sglist, line0_offset,
- line1_offset,
- buf->bpl, buf->bpl,
- buf->vb.height >> 1);
- break;
- case V4L2_FIELD_SEQ_TB:
- cx23885_risc_buffer(dev->pci, &buf->risc,
- dma->sglist,
- 0, buf->bpl * (buf->vb.height >> 1),
- buf->bpl, 0,
- buf->vb.height >> 1);
- break;
- case V4L2_FIELD_SEQ_BT:
- cx23885_risc_buffer(dev->pci, &buf->risc,
- dma->sglist,
- buf->bpl * (buf->vb.height >> 1), 0,
- buf->bpl, 0,
- buf->vb.height >> 1);
- break;
- default:
- BUG();
+ line0_offset = 0;
+ line1_offset = buf->bpl;
}
+ cx23885_risc_buffer(dev->pci, &buf->risc,
+ sgt->sgl, line0_offset,
+ line1_offset,
+ buf->bpl, buf->bpl,
+ dev->height >> 1);
+ break;
+ case V4L2_FIELD_SEQ_TB:
+ cx23885_risc_buffer(dev->pci, &buf->risc,
+ sgt->sgl,
+ 0, buf->bpl * (dev->height >> 1),
+ buf->bpl, 0,
+ dev->height >> 1);
+ break;
+ case V4L2_FIELD_SEQ_BT:
+ cx23885_risc_buffer(dev->pci, &buf->risc,
+ sgt->sgl,
+ buf->bpl * (dev->height >> 1), 0,
+ buf->bpl, 0,
+ dev->height >> 1);
+ break;
+ default:
+ BUG();
}
- dprintk(2, "[%p/%d] buffer_prep - %dx%d %dbpp \"%s\" - dma=0x%08lx\n",
- buf, buf->vb.i,
- fh->width, fh->height, fh->fmt->depth, fh->fmt->name,
+ dprintk(2, "[%p/%d] buffer_init - %dx%d %dbpp \"%s\" - dma=0x%08lx\n",
+ buf, buf->vb.v4l2_buf.index,
+ dev->width, dev->height, dev->fmt->depth, dev->fmt->name,
(unsigned long)buf->risc.dma);
-
- buf->vb.state = VIDEOBUF_PREPARED;
return 0;
+}
+
+static void buffer_finish(struct vb2_buffer *vb)
+{
+ struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
+ struct cx23885_buffer *buf = container_of(vb,
+ struct cx23885_buffer, vb);
+ struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
+
+ cx23885_free_buffer(vb->vb2_queue->drv_priv, buf);
- fail:
- cx23885_free_buffer(q, buf);
- return rc;
+ dma_unmap_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
}
-static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
+/*
+ * The risc program for each buffer works as follows: it starts with a simple
+ * 'JUMP to addr + 12', which is effectively a NOP. Then the code to DMA the
+ * buffer follows and at the end we have a JUMP back to the start + 12 (skipping
+ * the initial JUMP).
+ *
+ * This is the risc program of the first buffer to be queued when the active
+ * list is empty: it just keeps DMAing this buffer without generating any
+ * interrupts.
+ *
+ * If a new buffer is added then the initial JUMP in the code for that buffer
+ * will generate an interrupt which signals that the previous buffer has been
+ * DMAed successfully and that it can be returned to userspace.
+ *
+ * It also sets the final jump of the previous buffer to the start of the new
+ * buffer, thus chaining the new buffer into the DMA chain. This is a single
+ * atomic u32 write, so there is no race condition.
+ *
+ * The end result of all this is that you only get an interrupt when a buffer
+ * is ready, so the control flow is very simple.
+ */
+static void buffer_queue(struct vb2_buffer *vb)
{
+ struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
struct cx23885_buffer *buf = container_of(vb,
struct cx23885_buffer, vb);
struct cx23885_buffer *prev;
- struct cx23885_fh *fh = vq->priv_data;
- struct cx23885_dev *dev = fh->dev;
struct cx23885_dmaqueue *q = &dev->vidq;
+ unsigned long flags;
- /* add jump to stopper */
- buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
- buf->risc.jmp[1] = cpu_to_le32(q->stopper.dma);
+ /* add jump to start */
+ buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
+ buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
+ buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 12);
buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */
- if (!list_empty(&q->queued)) {
- list_add_tail(&buf->vb.queue, &q->queued);
- buf->vb.state = VIDEOBUF_QUEUED;
- dprintk(2, "[%p/%d] buffer_queue - append to queued\n",
- buf, buf->vb.i);
-
- } else if (list_empty(&q->active)) {
- list_add_tail(&buf->vb.queue, &q->active);
- cx23885_start_video_dma(dev, q, buf);
- buf->vb.state = VIDEOBUF_ACTIVE;
- buf->count = q->count++;
- mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
+ spin_lock_irqsave(&dev->slock, flags);
+ if (list_empty(&q->active)) {
+ list_add_tail(&buf->queue, &q->active);
dprintk(2, "[%p/%d] buffer_queue - first active\n",
- buf, buf->vb.i);
-
+ buf, buf->vb.v4l2_buf.index);
} else {
+ buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
prev = list_entry(q->active.prev, struct cx23885_buffer,
- vb.queue);
- if (prev->vb.width == buf->vb.width &&
- prev->vb.height == buf->vb.height &&
- prev->fmt == buf->fmt) {
- list_add_tail(&buf->vb.queue, &q->active);
- buf->vb.state = VIDEOBUF_ACTIVE;
- buf->count = q->count++;
- prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
- /* 64 bit bits 63-32 */
- prev->risc.jmp[2] = cpu_to_le32(0);
- dprintk(2, "[%p/%d] buffer_queue - append to active\n",
- buf, buf->vb.i);
-
- } else {
- list_add_tail(&buf->vb.queue, &q->queued);
- buf->vb.state = VIDEOBUF_QUEUED;
- dprintk(2, "[%p/%d] buffer_queue - first queued\n",
- buf, buf->vb.i);
- }
- }
-}
-
-static void buffer_release(struct videobuf_queue *q,
- struct videobuf_buffer *vb)
-{
- struct cx23885_buffer *buf = container_of(vb,
- struct cx23885_buffer, vb);
-
- cx23885_free_buffer(q, buf);
-}
-
-static struct videobuf_queue_ops cx23885_video_qops = {
- .buf_setup = buffer_setup,
- .buf_prepare = buffer_prepare,
- .buf_queue = buffer_queue,
- .buf_release = buffer_release,
-};
-
-static struct videobuf_queue *get_queue(struct cx23885_fh *fh)
-{
- switch (fh->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- return &fh->vidq;
- case V4L2_BUF_TYPE_VBI_CAPTURE:
- return &fh->vbiq;
- default:
- BUG();
- return NULL;
- }
-}
-
-static int get_resource(struct cx23885_fh *fh)
-{
- switch (fh->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- return RESOURCE_VIDEO;
- case V4L2_BUF_TYPE_VBI_CAPTURE:
- return RESOURCE_VBI;
- default:
- BUG();
- return 0;
+ queue);
+ list_add_tail(&buf->queue, &q->active);
+ prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
+ dprintk(2, "[%p/%d] buffer_queue - append to active\n",
+ buf, buf->vb.v4l2_buf.index);
}
+ spin_unlock_irqrestore(&dev->slock, flags);
}
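To make the chaining described in the comment and buffer_queue() above easier to follow, here is a minimal sketch of the per-buffer risc program layout and of the stores that link a new buffer in. The instruction mnemonics are schematic; the cpu[]/jmp[]/dma fields and the +12 offset are the ones used by this patch, and since the IRQ bit is set before the buffer becomes reachable, only the final jmp[1] update needs to be atomic.

/*
 * risc program of one buffer (byte offsets relative to risc.dma):
 *
 *   +0   JUMP -> dma + 12      initial jump; a plain NOP for the first
 *                              buffer, carries RISC_IRQ1 once chained
 *   +12  write/sync opcodes    DMA one frame
 *   ...
 *   tail JUMP -> dma + 12      loops over the same buffer until the
 *                              target below is rewritten
 */

/* chaining a new buffer behind the current tail of the active list: */
buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);	/* raise IRQ when reached */
prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);	/* single atomic u32 store */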
-static int video_open(struct file *file)
+static int cx23885_start_streaming(struct vb2_queue *q, unsigned int count)
{
- struct video_device *vdev = video_devdata(file);
- struct cx23885_dev *dev = video_drvdata(file);
- struct cx23885_fh *fh;
- enum v4l2_buf_type type = 0;
- int radio = 0;
-
- switch (vdev->vfl_type) {
- case VFL_TYPE_GRABBER:
- type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- break;
- case VFL_TYPE_VBI:
- type = V4L2_BUF_TYPE_VBI_CAPTURE;
- break;
- case VFL_TYPE_RADIO:
- radio = 1;
- break;
- }
-
- dprintk(1, "open dev=%s radio=%d type=%s\n",
- video_device_node_name(vdev), radio, v4l2_type_names[type]);
-
- /* allocate + initialize per filehandle data */
- fh = kzalloc(sizeof(*fh), GFP_KERNEL);
- if (NULL == fh)
- return -ENOMEM;
-
- file->private_data = fh;
- fh->dev = dev;
- fh->radio = radio;
- fh->type = type;
- fh->width = 320;
- fh->height = 240;
- fh->fmt = format_by_fourcc(V4L2_PIX_FMT_YUYV);
-
- videobuf_queue_sg_init(&fh->vidq, &cx23885_video_qops,
- &dev->pci->dev, &dev->slock,
- V4L2_BUF_TYPE_VIDEO_CAPTURE,
- V4L2_FIELD_INTERLACED,
- sizeof(struct cx23885_buffer),
- fh, NULL);
-
- videobuf_queue_sg_init(&fh->vbiq, &cx23885_vbi_qops,
- &dev->pci->dev, &dev->slock,
- V4L2_BUF_TYPE_VBI_CAPTURE,
- V4L2_FIELD_SEQ_TB,
- sizeof(struct cx23885_buffer),
- fh, NULL);
-
-
- dprintk(1, "post videobuf_queue_init()\n");
+ struct cx23885_dev *dev = q->drv_priv;
+ struct cx23885_dmaqueue *dmaq = &dev->vidq;
+ struct cx23885_buffer *buf = list_entry(dmaq->active.next,
+ struct cx23885_buffer, queue);
+ cx23885_start_video_dma(dev, dmaq, buf);
return 0;
}
-static ssize_t video_read(struct file *file, char __user *data,
- size_t count, loff_t *ppos)
+static void cx23885_stop_streaming(struct vb2_queue *q)
{
- struct cx23885_fh *fh = file->private_data;
-
- switch (fh->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (res_locked(fh->dev, RESOURCE_VIDEO))
- return -EBUSY;
- return videobuf_read_one(&fh->vidq, data, count, ppos,
- file->f_flags & O_NONBLOCK);
- case V4L2_BUF_TYPE_VBI_CAPTURE:
- if (!res_get(fh->dev, fh, RESOURCE_VBI))
- return -EBUSY;
- return videobuf_read_stream(&fh->vbiq, data, count, ppos, 1,
- file->f_flags & O_NONBLOCK);
- default:
- BUG();
- return 0;
- }
-}
-
-static unsigned int video_poll(struct file *file,
- struct poll_table_struct *wait)
-{
- struct cx23885_fh *fh = file->private_data;
- struct cx23885_buffer *buf;
- unsigned int rc = POLLERR;
-
- if (V4L2_BUF_TYPE_VBI_CAPTURE == fh->type) {
- if (!res_get(fh->dev, fh, RESOURCE_VBI))
- return POLLERR;
- return videobuf_poll_stream(file, &fh->vbiq, wait);
- }
-
- mutex_lock(&fh->vidq.vb_lock);
- if (res_check(fh, RESOURCE_VIDEO)) {
- /* streaming capture */
- if (list_empty(&fh->vidq.stream))
- goto done;
- buf = list_entry(fh->vidq.stream.next,
- struct cx23885_buffer, vb.stream);
- } else {
- /* read() capture */
- buf = (struct cx23885_buffer *)fh->vidq.read_buf;
- if (NULL == buf)
- goto done;
- }
- poll_wait(file, &buf->vb.done, wait);
- if (buf->vb.state == VIDEOBUF_DONE ||
- buf->vb.state == VIDEOBUF_ERROR)
- rc = POLLIN|POLLRDNORM;
- else
- rc = 0;
-done:
- mutex_unlock(&fh->vidq.vb_lock);
- return rc;
-}
-
-static int video_release(struct file *file)
-{
- struct cx23885_fh *fh = file->private_data;
- struct cx23885_dev *dev = fh->dev;
-
- /* turn off overlay */
- if (res_check(fh, RESOURCE_OVERLAY)) {
- /* FIXME */
- res_free(dev, fh, RESOURCE_OVERLAY);
- }
+ struct cx23885_dev *dev = q->drv_priv;
+ struct cx23885_dmaqueue *dmaq = &dev->vidq;
+ unsigned long flags;
- /* stop video capture */
- if (res_check(fh, RESOURCE_VIDEO)) {
- videobuf_queue_cancel(&fh->vidq);
- res_free(dev, fh, RESOURCE_VIDEO);
- }
- if (fh->vidq.read_buf) {
- buffer_release(&fh->vidq, fh->vidq.read_buf);
- kfree(fh->vidq.read_buf);
- }
+ cx_clear(VID_A_DMA_CTL, 0x11);
+ spin_lock_irqsave(&dev->slock, flags);
+ while (!list_empty(&dmaq->active)) {
+ struct cx23885_buffer *buf = list_entry(dmaq->active.next,
+ struct cx23885_buffer, queue);
- /* stop vbi capture */
- if (res_check(fh, RESOURCE_VBI)) {
- if (fh->vbiq.streaming)
- videobuf_streamoff(&fh->vbiq);
- if (fh->vbiq.reading)
- videobuf_read_stop(&fh->vbiq);
- res_free(dev, fh, RESOURCE_VBI);
+ list_del(&buf->queue);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
}
-
- videobuf_mmap_free(&fh->vidq);
- videobuf_mmap_free(&fh->vbiq);
-
- file->private_data = NULL;
- kfree(fh);
-
- /* We are not putting the tuner to sleep here on exit, because
- * we want to use the mpeg encoder in another session to capture
- * tuner video. Closing this will result in no video to the encoder.
- */
-
- return 0;
-}
-
-static int video_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct cx23885_fh *fh = file->private_data;
-
- return videobuf_mmap_mapper(get_queue(fh), vma);
-}
-
-/* ------------------------------------------------------------------ */
-/* VIDEO CTRL IOCTLS */
-
-int cx23885_get_control(struct cx23885_dev *dev,
- struct v4l2_control *ctl)
-{
- dprintk(1, "%s() calling cx25840(VIDIOC_G_CTRL)\n", __func__);
- call_all(dev, core, g_ctrl, ctl);
- return 0;
-}
-
-int cx23885_set_control(struct cx23885_dev *dev,
- struct v4l2_control *ctl)
-{
- dprintk(1, "%s() calling cx25840(VIDIOC_S_CTRL)\n", __func__);
- call_all(dev, core, s_ctrl, ctl);
-
- return 0;
+ spin_unlock_irqrestore(&dev->slock, flags);
}
-static void init_controls(struct cx23885_dev *dev)
-{
- struct v4l2_control ctrl;
- int i;
-
- for (i = 0; i < CX23885_CTLS; i++) {
- ctrl.id = cx23885_ctls[i].v.id;
- ctrl.value = cx23885_ctls[i].v.default_value;
-
- cx23885_set_control(dev, &ctrl);
- }
-}
+static struct vb2_ops cx23885_video_qops = {
+ .queue_setup = queue_setup,
+ .buf_prepare = buffer_prepare,
+ .buf_finish = buffer_finish,
+ .buf_queue = buffer_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = cx23885_start_streaming,
+ .stop_streaming = cx23885_stop_streaming,
+};
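For orientation, the rough order in which the videobuf2 core drives these callbacks (a summary of the generic vb2 flow at the time of this conversion, not something this patch defines):

/*
 * queue_setup()      VIDIOC_REQBUFS: report buffer count and plane sizes
 * buf_prepare()      per queued buffer: validate size, build the risc program
 * buf_queue()        hand the prepared buffer to the driver (add to dmaq)
 * start_streaming()  VIDIOC_STREAMON, once min_buffers_needed buffers are
 *                    queued: kick the first DMA transfer
 * stop_streaming()   VIDIOC_STREAMOFF: stop DMA and complete every pending
 *                    buffer with vb2_buffer_done(..., VB2_BUF_STATE_ERROR)
 * buf_finish()       before a buffer is handed back to userspace; here it
 *                    frees the risc program and unmaps the SG list
 * wait_prepare()/    drop and reacquire the queue lock around blocking
 * wait_finish()      waits (the vb2_ops_wait_* helpers use q->lock)
 */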
/* ------------------------------------------------------------------ */
/* VIDEO IOCTLS */
@@ -1060,16 +522,17 @@ static void init_controls(struct cx23885_dev *dev)
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx23885_fh *fh = priv;
+ struct cx23885_dev *dev = video_drvdata(file);
- f->fmt.pix.width = fh->width;
- f->fmt.pix.height = fh->height;
- f->fmt.pix.field = fh->vidq.field;
- f->fmt.pix.pixelformat = fh->fmt->fourcc;
+ f->fmt.pix.width = dev->width;
+ f->fmt.pix.height = dev->height;
+ f->fmt.pix.field = dev->field;
+ f->fmt.pix.pixelformat = dev->fmt->fourcc;
f->fmt.pix.bytesperline =
- (f->fmt.pix.width * fh->fmt->depth) >> 3;
+ (f->fmt.pix.width * dev->fmt->depth) >> 3;
f->fmt.pix.sizeimage =
f->fmt.pix.height * f->fmt.pix.bytesperline;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
return 0;
}
@@ -1077,7 +540,7 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev;
+ struct cx23885_dev *dev = video_drvdata(file);
struct cx23885_fmt *fmt;
enum v4l2_field field;
unsigned int maxw, maxh;
@@ -1102,9 +565,12 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
maxh = maxh / 2;
break;
case V4L2_FIELD_INTERLACED:
+ case V4L2_FIELD_SEQ_TB:
+ case V4L2_FIELD_SEQ_BT:
break;
default:
- return -EINVAL;
+ field = V4L2_FIELD_INTERLACED;
+ break;
}
f->fmt.pix.field = field;
@@ -1114,6 +580,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
(f->fmt.pix.width * fmt->depth) >> 3;
f->fmt.pix.sizeimage =
f->fmt.pix.height * f->fmt.pix.bytesperline;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
return 0;
}
@@ -1121,8 +588,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx23885_fh *fh = priv;
- struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev;
+ struct cx23885_dev *dev = video_drvdata(file);
struct v4l2_mbus_framefmt mbus_fmt;
int err;
@@ -1131,34 +597,44 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
if (0 != err)
return err;
- fh->fmt = format_by_fourcc(f->fmt.pix.pixelformat);
- fh->width = f->fmt.pix.width;
- fh->height = f->fmt.pix.height;
- fh->vidq.field = f->fmt.pix.field;
+
+ if (vb2_is_busy(&dev->vb2_vidq) || vb2_is_busy(&dev->vb2_vbiq) ||
+ vb2_is_busy(&dev->vb2_mpegq))
+ return -EBUSY;
+
+ dev->fmt = format_by_fourcc(f->fmt.pix.pixelformat);
+ dev->width = f->fmt.pix.width;
+ dev->height = f->fmt.pix.height;
+ dev->field = f->fmt.pix.field;
dprintk(2, "%s() width=%d height=%d field=%d\n", __func__,
- fh->width, fh->height, fh->vidq.field);
+ dev->width, dev->height, dev->field);
v4l2_fill_mbus_format(&mbus_fmt, &f->fmt.pix, V4L2_MBUS_FMT_FIXED);
call_all(dev, video, s_mbus_fmt, &mbus_fmt);
v4l2_fill_pix_format(&f->fmt.pix, &mbus_fmt);
+ /* s_mbus_fmt overwrites f->fmt.pix.field, restore it */
+ f->fmt.pix.field = dev->field;
return 0;
}
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev;
+ struct cx23885_dev *dev = video_drvdata(file);
+ struct video_device *vdev = video_devdata(file);
strcpy(cap->driver, "cx23885");
strlcpy(cap->card, cx23885_boards[dev->board].name,
sizeof(cap->card));
sprintf(cap->bus_info, "PCIe:%s", pci_name(dev->pci));
- cap->capabilities =
- V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_READWRITE |
- V4L2_CAP_STREAMING |
- V4L2_CAP_VBI_CAPTURE;
+ cap->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING | V4L2_CAP_AUDIO;
if (dev->tuner_type != TUNER_ABSENT)
- cap->capabilities |= V4L2_CAP_TUNER;
+ cap->device_caps |= V4L2_CAP_TUNER;
+ if (vdev->vfl_type == VFL_TYPE_VBI)
+ cap->device_caps |= V4L2_CAP_VBI_CAPTURE;
+ else
+ cap->device_caps |= V4L2_CAP_VIDEO_CAPTURE;
+ cap->capabilities = cap->device_caps | V4L2_CAP_VBI_CAPTURE |
+ V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -1175,85 +651,9 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
-static int vidioc_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *p)
-{
- struct cx23885_fh *fh = priv;
- return videobuf_reqbufs(get_queue(fh), p);
-}
-
-static int vidioc_querybuf(struct file *file, void *priv,
- struct v4l2_buffer *p)
-{
- struct cx23885_fh *fh = priv;
- return videobuf_querybuf(get_queue(fh), p);
-}
-
-static int vidioc_qbuf(struct file *file, void *priv,
- struct v4l2_buffer *p)
-{
- struct cx23885_fh *fh = priv;
- return videobuf_qbuf(get_queue(fh), p);
-}
-
-static int vidioc_dqbuf(struct file *file, void *priv,
- struct v4l2_buffer *p)
-{
- struct cx23885_fh *fh = priv;
- return videobuf_dqbuf(get_queue(fh), p,
- file->f_flags & O_NONBLOCK);
-}
-
-static int vidioc_streamon(struct file *file, void *priv,
- enum v4l2_buf_type i)
-{
- struct cx23885_fh *fh = priv;
- struct cx23885_dev *dev = fh->dev;
- dprintk(1, "%s()\n", __func__);
-
- if ((fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
- (fh->type != V4L2_BUF_TYPE_VBI_CAPTURE))
- return -EINVAL;
- if (unlikely(i != fh->type))
- return -EINVAL;
-
- if (unlikely(!res_get(dev, fh, get_resource(fh))))
- return -EBUSY;
-
- /* Don't start VBI streaming unless vida streaming
- * has already started.
- */
- if ((fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) &&
- ((cx_read(VID_A_DMA_CTL) & 0x11) == 0))
- return -EINVAL;
-
- return videobuf_streamon(get_queue(fh));
-}
-
-static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
-{
- struct cx23885_fh *fh = priv;
- struct cx23885_dev *dev = fh->dev;
- int err, res;
- dprintk(1, "%s()\n", __func__);
-
- if ((fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
- (fh->type != V4L2_BUF_TYPE_VBI_CAPTURE))
- return -EINVAL;
- if (i != fh->type)
- return -EINVAL;
-
- res = get_resource(fh);
- err = videobuf_streamoff(get_queue(fh));
- if (err < 0)
- return err;
- res_free(dev, fh, res);
- return 0;
-}
-
static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *id)
{
- struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev;
+ struct cx23885_dev *dev = video_drvdata(file);
dprintk(1, "%s()\n", __func__);
*id = dev->tvnorm;
@@ -1262,14 +662,10 @@ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *id)
static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id tvnorms)
{
- struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev;
+ struct cx23885_dev *dev = video_drvdata(file);
dprintk(1, "%s()\n", __func__);
- mutex_lock(&dev->lock);
- cx23885_set_tvnorm(dev, tvnorms);
- mutex_unlock(&dev->lock);
-
- return 0;
+ return cx23885_set_tvnorm(dev, tvnorms);
}
int cx23885_enum_input(struct cx23885_dev *dev, struct v4l2_input *i)
@@ -1299,16 +695,16 @@ int cx23885_enum_input(struct cx23885_dev *dev, struct v4l2_input *i)
i->index = n;
i->type = V4L2_INPUT_TYPE_CAMERA;
strcpy(i->name, iname[INPUT(n)->type]);
+ i->std = CX23885_NORMS;
if ((CX23885_VMUX_TELEVISION == INPUT(n)->type) ||
(CX23885_VMUX_CABLE == INPUT(n)->type)) {
i->type = V4L2_INPUT_TYPE_TUNER;
- i->std = CX23885_NORMS;
+ i->audioset = 4;
+ } else {
+ /* Two selectable audio inputs for non-tv inputs */
+ i->audioset = 3;
}
- /* Two selectable audio inputs for non-tv inputs */
- if (INPUT(n)->type != CX23885_VMUX_TELEVISION)
- i->audioset = 0x3;
-
if (dev->input == n) {
/* enum'd input matches our configured input.
* Ask the video decoder to process the call
@@ -1324,14 +720,14 @@ int cx23885_enum_input(struct cx23885_dev *dev, struct v4l2_input *i)
static int vidioc_enum_input(struct file *file, void *priv,
struct v4l2_input *i)
{
- struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev;
+ struct cx23885_dev *dev = video_drvdata(file);
dprintk(1, "%s()\n", __func__);
return cx23885_enum_input(dev, i);
}
int cx23885_get_input(struct file *file, void *priv, unsigned int *i)
{
- struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev;
+ struct cx23885_dev *dev = video_drvdata(file);
*i = dev->input;
dprintk(1, "%s() returns %d\n", __func__, *i);
@@ -1345,7 +741,7 @@ static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
int cx23885_set_input(struct file *file, void *priv, unsigned int i)
{
- struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev;
+ struct cx23885_dev *dev = video_drvdata(file);
dprintk(1, "%s(%d)\n", __func__, i);
@@ -1357,13 +753,11 @@ int cx23885_set_input(struct file *file, void *priv, unsigned int i)
if (INPUT(i)->type == 0)
return -EINVAL;
- mutex_lock(&dev->lock);
cx23885_video_mux(dev, i);
/* By default establish the default audio input for the card also */
/* Caller is free to use VIDIOC_S_AUDIO to override afterwards */
cx23885_audio_mux(dev, i);
- mutex_unlock(&dev->lock);
return 0;
}
@@ -1374,39 +768,32 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
static int vidioc_log_status(struct file *file, void *priv)
{
- struct cx23885_fh *fh = priv;
- struct cx23885_dev *dev = fh->dev;
+ struct cx23885_dev *dev = video_drvdata(file);
- printk(KERN_INFO
- "%s/0: ============ START LOG STATUS ============\n",
- dev->name);
call_all(dev, core, log_status);
- printk(KERN_INFO
- "%s/0: ============= END LOG STATUS =============\n",
- dev->name);
return 0;
}
static int cx23885_query_audinput(struct file *file, void *priv,
struct v4l2_audio *i)
{
- struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev;
+ struct cx23885_dev *dev = video_drvdata(file);
static const char *iname[] = {
[0] = "Baseband L/R 1",
[1] = "Baseband L/R 2",
+ [2] = "TV",
};
unsigned int n;
dprintk(1, "%s()\n", __func__);
n = i->index;
- if (n >= 2)
+ if (n >= 3)
return -EINVAL;
memset(i, 0, sizeof(*i));
i->index = n;
strcpy(i->name, iname[n]);
- i->capability = V4L2_AUDCAP_STEREO;
- i->mode = V4L2_AUDMODE_AVL;
+ i->capability = V4L2_AUDCAP_STEREO;
return 0;
}
@@ -1420,9 +807,13 @@ static int vidioc_enum_audinput(struct file *file, void *priv,
static int vidioc_g_audinput(struct file *file, void *priv,
struct v4l2_audio *i)
{
- struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev;
+ struct cx23885_dev *dev = video_drvdata(file);
- i->index = dev->audinput;
+ if ((CX23885_VMUX_TELEVISION == INPUT(dev->input)->type) ||
+ (CX23885_VMUX_CABLE == INPUT(dev->input)->type))
+ i->index = 2;
+ else
+ i->index = dev->audinput;
dprintk(1, "%s(input=%d)\n", __func__, i->index);
return cx23885_query_audinput(file, priv, i);
@@ -1431,8 +822,13 @@ static int vidioc_g_audinput(struct file *file, void *priv,
static int vidioc_s_audinput(struct file *file, void *priv,
const struct v4l2_audio *i)
{
- struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev;
- if (i->index >= 2)
+ struct cx23885_dev *dev = video_drvdata(file);
+
+ if ((CX23885_VMUX_TELEVISION == INPUT(dev->input)->type) ||
+ (CX23885_VMUX_CABLE == INPUT(dev->input)->type)) {
+ return i->index != 2 ? -EINVAL : 0;
+ }
+ if (i->index > 1)
return -EINVAL;
dprintk(1, "%s(%d)\n", __func__, i->index);
@@ -1445,35 +841,10 @@ static int vidioc_s_audinput(struct file *file, void *priv,
return 0;
}
-static int vidioc_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *qctrl)
-{
- qctrl->id = v4l2_ctrl_next(ctrl_classes, qctrl->id);
- if (unlikely(qctrl->id == 0))
- return -EINVAL;
- return cx23885_ctrl_query(qctrl);
-}
-
-static int vidioc_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctl)
-{
- struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev;
-
- return cx23885_get_control(dev, ctl);
-}
-
-static int vidioc_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctl)
-{
- struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev;
-
- return cx23885_set_control(dev, ctl);
-}
-
static int vidioc_g_tuner(struct file *file, void *priv,
struct v4l2_tuner *t)
{
- struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev;
+ struct cx23885_dev *dev = video_drvdata(file);
if (dev->tuner_type == TUNER_ABSENT)
return -EINVAL;
@@ -1489,7 +860,7 @@ static int vidioc_g_tuner(struct file *file, void *priv,
static int vidioc_s_tuner(struct file *file, void *priv,
const struct v4l2_tuner *t)
{
- struct cx23885_dev *dev = ((struct cx23885_fh *)priv)->dev;
+ struct cx23885_dev *dev = video_drvdata(file);
if (dev->tuner_type == TUNER_ABSENT)
return -EINVAL;
@@ -1504,14 +875,12 @@ static int vidioc_s_tuner(struct file *file, void *priv,
static int vidioc_g_frequency(struct file *file, void *priv,
struct v4l2_frequency *f)
{
- struct cx23885_fh *fh = priv;
- struct cx23885_dev *dev = fh->dev;
+ struct cx23885_dev *dev = video_drvdata(file);
if (dev->tuner_type == TUNER_ABSENT)
return -EINVAL;
- /* f->type = fh->radio ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; */
- f->type = fh->radio ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
+ f->type = V4L2_TUNER_ANALOG_TV;
f->frequency = dev->freq;
call_all(dev, tuner, g_frequency, f);
@@ -1521,20 +890,23 @@ static int vidioc_g_frequency(struct file *file, void *priv,
static int cx23885_set_freq(struct cx23885_dev *dev, const struct v4l2_frequency *f)
{
- struct v4l2_control ctrl;
+ struct v4l2_ctrl *mute;
+ int old_mute_val = 1;
if (dev->tuner_type == TUNER_ABSENT)
return -EINVAL;
if (unlikely(f->tuner != 0))
return -EINVAL;
- mutex_lock(&dev->lock);
dev->freq = f->frequency;
/* I need to mute audio here */
- ctrl.id = V4L2_CID_AUDIO_MUTE;
- ctrl.value = 1;
- cx23885_set_control(dev, &ctrl);
+ mute = v4l2_ctrl_find(&dev->ctrl_handler, V4L2_CID_AUDIO_MUTE);
+ if (mute) {
+ old_mute_val = v4l2_ctrl_g_ctrl(mute);
+ if (!old_mute_val)
+ v4l2_ctrl_s_ctrl(mute, 1);
+ }
call_all(dev, tuner, s_frequency, f);
@@ -1542,10 +914,8 @@ static int cx23885_set_freq(struct cx23885_dev *dev, const struct v4l2_frequency
msleep(100);
/* I need to unmute audio here */
- ctrl.value = 0;
- cx23885_set_control(dev, &ctrl);
-
- mutex_unlock(&dev->lock);
+ if (old_mute_val == 0)
+ v4l2_ctrl_s_ctrl(mute, old_mute_val);
return 0;
}
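The mute/restore sequence above is repeated almost verbatim in cx23885_set_freq_via_ops() below; a hypothetical pair of helpers along these lines could factor it out. The helper names are an editorial illustration, not part of the patch, and they assume the ctrl_handler this patch adds to struct cx23885_dev.

static struct v4l2_ctrl *cx23885_mute_for_retune(struct cx23885_dev *dev,
						 s32 *old_val)
{
	struct v4l2_ctrl *mute =
		v4l2_ctrl_find(&dev->ctrl_handler, V4L2_CID_AUDIO_MUTE);

	if (!mute)
		return NULL;
	*old_val = v4l2_ctrl_g_ctrl(mute);	/* remember the user's setting */
	if (!*old_val)
		v4l2_ctrl_s_ctrl(mute, 1);	/* mute while the tuner settles */
	return mute;
}

static void cx23885_unmute_after_retune(struct v4l2_ctrl *mute, s32 old_val)
{
	/* restore only if the user had audio unmuted */
	if (mute && old_val == 0)
		v4l2_ctrl_s_ctrl(mute, old_val);
}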
@@ -1553,8 +923,9 @@ static int cx23885_set_freq(struct cx23885_dev *dev, const struct v4l2_frequency
static int cx23885_set_freq_via_ops(struct cx23885_dev *dev,
const struct v4l2_frequency *f)
{
- struct v4l2_control ctrl;
- struct videobuf_dvb_frontend *vfe;
+ struct v4l2_ctrl *mute;
+ int old_mute_val = 1;
+ struct vb2_dvb_frontend *vfe;
struct dvb_frontend *fe;
struct analog_parameters params = {
@@ -1564,21 +935,22 @@ static int cx23885_set_freq_via_ops(struct cx23885_dev *dev,
.frequency = f->frequency
};
- mutex_lock(&dev->lock);
dev->freq = f->frequency;
/* I need to mute audio here */
- ctrl.id = V4L2_CID_AUDIO_MUTE;
- ctrl.value = 1;
- cx23885_set_control(dev, &ctrl);
+ mute = v4l2_ctrl_find(&dev->ctrl_handler, V4L2_CID_AUDIO_MUTE);
+ if (mute) {
+ old_mute_val = v4l2_ctrl_g_ctrl(mute);
+ if (!old_mute_val)
+ v4l2_ctrl_s_ctrl(mute, 1);
+ }
/* If HVR1850 */
dprintk(1, "%s() frequency=%d tuner=%d std=0x%llx\n", __func__,
params.frequency, f->tuner, params.std);
- vfe = videobuf_dvb_get_frontend(&dev->ts2.frontends, 1);
+ vfe = vb2_dvb_get_frontend(&dev->ts2.frontends, 1);
if (!vfe) {
- mutex_unlock(&dev->lock);
return -EINVAL;
}
@@ -1600,10 +972,8 @@ static int cx23885_set_freq_via_ops(struct cx23885_dev *dev,
msleep(100);
/* I need to unmute audio here */
- ctrl.value = 0;
- cx23885_set_control(dev, &ctrl);
-
- mutex_unlock(&dev->lock);
+ if (old_mute_val == 0)
+ v4l2_ctrl_s_ctrl(mute, old_mute_val);
return 0;
}
@@ -1611,8 +981,7 @@ static int cx23885_set_freq_via_ops(struct cx23885_dev *dev,
int cx23885_set_frequency(struct file *file, void *priv,
const struct v4l2_frequency *f)
{
- struct cx23885_fh *fh = priv;
- struct cx23885_dev *dev = fh->dev;
+ struct cx23885_dev *dev = video_drvdata(file);
int ret;
switch (dev->board) {
@@ -1636,28 +1005,6 @@ static int vidioc_s_frequency(struct file *file, void *priv,
/* ----------------------------------------------------------- */
-static void cx23885_vid_timeout(unsigned long data)
-{
- struct cx23885_dev *dev = (struct cx23885_dev *)data;
- struct cx23885_dmaqueue *q = &dev->vidq;
- struct cx23885_buffer *buf;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->slock, flags);
- while (!list_empty(&q->active)) {
- buf = list_entry(q->active.next,
- struct cx23885_buffer, vb.queue);
- list_del(&buf->vb.queue);
- buf->vb.state = VIDEOBUF_ERROR;
- wake_up(&buf->vb.done);
- printk(KERN_ERR "%s: [%p/%d] timeout - dma=0x%08lx\n",
- dev->name, buf, buf->vb.i,
- (unsigned long)buf->risc.dma);
- }
- cx23885_restart_video_queue(dev, q);
- spin_unlock_irqrestore(&dev->slock, flags);
-}
-
int cx23885_video_irq(struct cx23885_dev *dev, u32 status)
{
u32 mask, count;
@@ -1702,13 +1049,6 @@ int cx23885_video_irq(struct cx23885_dev *dev, u32 status)
spin_unlock(&dev->slock);
handled++;
}
- if (status & VID_BC_MSK_RISCI2) {
- dprintk(2, "stopper video\n");
- spin_lock(&dev->slock);
- cx23885_restart_video_queue(dev, &dev->vidq);
- spin_unlock(&dev->slock);
- handled++;
- }
/* Allow the VBI framework to process its payload */
handled += cx23885_vbi_irq(dev, status);
@@ -1721,12 +1061,12 @@ int cx23885_video_irq(struct cx23885_dev *dev, u32 status)
static const struct v4l2_file_operations video_fops = {
.owner = THIS_MODULE,
- .open = video_open,
- .release = video_release,
- .read = video_read,
- .poll = video_poll,
- .mmap = video_mmap,
- .ioctl = video_ioctl2,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
};
static const struct v4l2_ioctl_ops video_ioctl_ops = {
@@ -1738,21 +1078,19 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_g_fmt_vbi_cap = cx23885_vbi_fmt,
.vidioc_try_fmt_vbi_cap = cx23885_vbi_fmt,
.vidioc_s_fmt_vbi_cap = cx23885_vbi_fmt,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
- .vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_s_std = vidioc_s_std,
.vidioc_g_std = vidioc_g_std,
.vidioc_enum_input = vidioc_enum_input,
.vidioc_g_input = vidioc_g_input,
.vidioc_s_input = vidioc_s_input,
.vidioc_log_status = vidioc_log_status,
- .vidioc_queryctrl = vidioc_queryctrl,
- .vidioc_g_ctrl = vidioc_g_ctrl,
- .vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_streamon = vidioc_streamon,
- .vidioc_streamoff = vidioc_streamoff,
.vidioc_g_tuner = vidioc_g_tuner,
.vidioc_s_tuner = vidioc_s_tuner,
.vidioc_g_frequency = vidioc_g_frequency,
@@ -1765,6 +1103,8 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_enumaudio = vidioc_enum_audinput,
.vidioc_g_audio = vidioc_g_audinput,
.vidioc_s_audio = vidioc_s_audinput,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
static struct video_device cx23885_vbi_template;
@@ -1775,14 +1115,6 @@ static struct video_device cx23885_video_template = {
.tvnorms = CX23885_NORMS,
};
-static const struct v4l2_file_operations radio_fops = {
- .owner = THIS_MODULE,
- .open = video_open,
- .release = video_release,
- .ioctl = video_ioctl2,
-};
-
-
void cx23885_video_unregister(struct cx23885_dev *dev)
{
dprintk(1, "%s()\n", __func__);
@@ -1794,7 +1126,6 @@ void cx23885_video_unregister(struct cx23885_dev *dev)
else
video_device_release(dev->vbi_dev);
dev->vbi_dev = NULL;
- btcx_riscmem_free(dev->pci, &dev->vbiq.stopper);
}
if (dev->video_dev) {
if (video_is_registered(dev->video_dev))
@@ -1802,8 +1133,6 @@ void cx23885_video_unregister(struct cx23885_dev *dev)
else
video_device_release(dev->video_dev);
dev->video_dev = NULL;
-
- btcx_riscmem_free(dev->pci, &dev->vidq.stopper);
}
if (dev->audio_dev)
@@ -1812,6 +1141,7 @@ void cx23885_video_unregister(struct cx23885_dev *dev)
int cx23885_video_register(struct cx23885_dev *dev)
{
+ struct vb2_queue *q;
int err;
dprintk(1, "%s()\n", __func__);
@@ -1822,24 +1152,16 @@ int cx23885_video_register(struct cx23885_dev *dev)
strcpy(cx23885_vbi_template.name, "cx23885-vbi");
dev->tvnorm = V4L2_STD_NTSC_M;
+ dev->fmt = format_by_fourcc(V4L2_PIX_FMT_YUYV);
+ dev->field = V4L2_FIELD_INTERLACED;
+ dev->width = 720;
+ dev->height = norm_maxh(dev->tvnorm);
/* init video dma queues */
INIT_LIST_HEAD(&dev->vidq.active);
- INIT_LIST_HEAD(&dev->vidq.queued);
- dev->vidq.timeout.function = cx23885_vid_timeout;
- dev->vidq.timeout.data = (unsigned long)dev;
- init_timer(&dev->vidq.timeout);
- cx23885_risc_stopper(dev->pci, &dev->vidq.stopper,
- VID_A_DMA_CTL, 0x11, 0x00);
/* init vbi dma queues */
INIT_LIST_HEAD(&dev->vbiq.active);
- INIT_LIST_HEAD(&dev->vbiq.queued);
- dev->vbiq.timeout.function = cx23885_vbi_timeout;
- dev->vbiq.timeout.data = (unsigned long)dev;
- init_timer(&dev->vbiq.timeout);
- cx23885_risc_stopper(dev->pci, &dev->vbiq.stopper,
- VID_A_DMA_CTL, 0x22, 0x00);
cx23885_irq_add_enable(dev, 0x01);
@@ -1893,9 +1215,49 @@ int cx23885_video_register(struct cx23885_dev *dev)
}
}
+ /* initial device configuration */
+ mutex_lock(&dev->lock);
+ cx23885_set_tvnorm(dev, dev->tvnorm);
+ cx23885_video_mux(dev, 0);
+ cx23885_audio_mux(dev, 0);
+ mutex_unlock(&dev->lock);
+
+ q = &dev->vb2_vidq;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
+ q->gfp_flags = GFP_DMA32;
+ q->min_buffers_needed = 2;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct cx23885_buffer);
+ q->ops = &cx23885_video_qops;
+ q->mem_ops = &vb2_dma_sg_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &dev->lock;
+
+ err = vb2_queue_init(q);
+ if (err < 0)
+ goto fail_unreg;
+
+ q = &dev->vb2_vbiq;
+ q->type = V4L2_BUF_TYPE_VBI_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
+ q->gfp_flags = GFP_DMA32;
+ q->min_buffers_needed = 2;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct cx23885_buffer);
+ q->ops = &cx23885_vbi_qops;
+ q->mem_ops = &vb2_dma_sg_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &dev->lock;
+
+ err = vb2_queue_init(q);
+ if (err < 0)
+ goto fail_unreg;
+
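The two queue-initialisation blocks above differ only in the buffer type and the ops table; a hypothetical helper like the following sketch could remove the duplication. It is an editorial suggestion, not part of the patch; the field values simply mirror the ones set above.

static int cx23885_init_vb2_queue(struct vb2_queue *q, struct cx23885_dev *dev,
				  enum v4l2_buf_type type,
				  const struct vb2_ops *ops)
{
	q->type = type;
	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
	q->gfp_flags = GFP_DMA32;
	q->min_buffers_needed = 2;
	q->drv_priv = dev;
	q->buf_struct_size = sizeof(struct cx23885_buffer);
	q->ops = ops;
	q->mem_ops = &vb2_dma_sg_memops;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->lock = &dev->lock;

	return vb2_queue_init(q);
}

It would be called as cx23885_init_vb2_queue(&dev->vb2_vidq, dev, V4L2_BUF_TYPE_VIDEO_CAPTURE, &cx23885_video_qops), and likewise for the VBI queue.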
/* register Video device */
dev->video_dev = cx23885_vdev_init(dev, dev->pci,
&cx23885_video_template, "video");
+ dev->video_dev->queue = &dev->vb2_vidq;
err = video_register_device(dev->video_dev, VFL_TYPE_GRABBER,
video_nr[dev->nr]);
if (err < 0) {
@@ -1909,6 +1271,7 @@ int cx23885_video_register(struct cx23885_dev *dev)
/* register VBI device */
dev->vbi_dev = cx23885_vdev_init(dev, dev->pci,
&cx23885_vbi_template, "vbi");
+ dev->vbi_dev->queue = &dev->vb2_vbiq;
err = video_register_device(dev->vbi_dev, VFL_TYPE_VBI,
vbi_nr[dev->nr]);
if (err < 0) {
@@ -1922,18 +1285,9 @@ int cx23885_video_register(struct cx23885_dev *dev)
/* Register ALSA audio device */
dev->audio_dev = cx23885_audio_register(dev);
- /* initial device configuration */
- mutex_lock(&dev->lock);
- cx23885_set_tvnorm(dev, dev->tvnorm);
- init_controls(dev);
- cx23885_video_mux(dev, 0);
- cx23885_audio_mux(dev, 0);
- mutex_unlock(&dev->lock);
-
return 0;
fail_unreg:
cx23885_video_unregister(dev);
return err;
}
-
diff --git a/drivers/media/pci/cx23885/cx23885-video.h b/drivers/media/pci/cx23885/cx23885-video.h
index c961a2b0de0f..291e8f3189f0 100644
--- a/drivers/media/pci/cx23885/cx23885-video.h
+++ b/drivers/media/pci/cx23885/cx23885-video.h
@@ -12,11 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*/
#ifndef _CX23885_VIDEO_H_
diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h
index 0e086c03da67..6c35e6115969 100644
--- a/drivers/media/pci/cx23885/cx23885.h
+++ b/drivers/media/pci/cx23885/cx23885.h
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/pci.h>
@@ -25,19 +21,20 @@
#include <linux/slab.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ctrls.h>
#include <media/tuner.h>
#include <media/tveeprom.h>
-#include <media/videobuf-dma-sg.h>
-#include <media/videobuf-dvb.h>
+#include <media/videobuf2-dma-sg.h>
+#include <media/videobuf2-dvb.h>
#include <media/rc-core.h>
-#include "btcx-risc.h"
#include "cx23885-reg.h"
#include "media/cx2341x.h"
#include <linux/mutex.h>
-#define CX23885_VERSION "0.0.3"
+#define CX23885_VERSION "0.0.4"
#define UNSET (-1U)
@@ -46,9 +43,6 @@
/* Max number of inputs by card */
#define MAX_CX23885_INPUT 8
#define INPUT(nr) (&cx23885_boards[dev->board].input[nr])
-#define RESOURCE_OVERLAY 1
-#define RESOURCE_VIDEO 2
-#define RESOURCE_VBI 4
#define BUFFER_TIMEOUT (HZ) /* 0.5 seconds */
@@ -98,6 +92,7 @@
#define CX23885_BOARD_LEADTEK_WINFAST_PXPVR2200 42
#define CX23885_BOARD_HAUPPAUGE_IMPACTVCBE 43
#define CX23885_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL_EXP2 44
+#define CX23885_BOARD_DVBSKY_T9580 45
#define GPIO_0 0x00000001
#define GPIO_1 0x00000002
@@ -131,14 +126,6 @@ struct cx23885_fmt {
u32 cxformat;
};
-struct cx23885_ctrl {
- struct v4l2_queryctrl v;
- u32 off;
- u32 reg;
- u32 mask;
- u32 shift;
-};
-
struct cx23885_tvnorm {
char *name;
v4l2_std_id id;
@@ -146,30 +133,6 @@ struct cx23885_tvnorm {
u32 cxoformat;
};
-struct cx23885_fh {
- struct cx23885_dev *dev;
- enum v4l2_buf_type type;
- int radio;
- u32 resources;
-
- /* video overlay */
- struct v4l2_window win;
- struct v4l2_clip *clips;
- unsigned int nclips;
-
- /* video capture */
- struct cx23885_fmt *fmt;
- unsigned int width, height;
-
- /* vbi capture */
- struct videobuf_queue vidq;
- struct videobuf_queue vbiq;
-
- /* MPEG Encoder specifics ONLY */
- struct videobuf_queue mpegq;
- atomic_t v4l_reading;
-};
-
enum cx23885_itype {
CX23885_VMUX_COMPOSITE1 = 1,
CX23885_VMUX_COMPOSITE2,
@@ -189,14 +152,22 @@ enum cx23885_src_sel_type {
CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO
};
+struct cx23885_riscmem {
+ unsigned int size;
+ __le32 *cpu;
+ __le32 *jmp;
+ dma_addr_t dma;
+};
+
/* buffer for one video frame */
struct cx23885_buffer {
/* common v4l buffer stuff -- must be first */
- struct videobuf_buffer vb;
+ struct vb2_buffer vb;
+ struct list_head queue;
/* cx23885 specific */
unsigned int bpl;
- struct btcx_riscmem risc;
+ struct cx23885_riscmem risc;
struct cx23885_fmt *fmt;
u32 count;
};
@@ -268,9 +239,6 @@ struct cx23885_i2c {
struct cx23885_dmaqueue {
struct list_head active;
- struct list_head queued;
- struct timer_list timeout;
- struct btcx_riscmem stopper;
u32 count;
};
@@ -280,7 +248,7 @@ struct cx23885_tsport {
int nr;
int sram_chno;
- struct videobuf_dvb_frontends frontends;
+ struct vb2_dvb_frontends frontends;
/* dma queues */
struct cx23885_dmaqueue mpegq;
@@ -326,7 +294,12 @@ struct cx23885_tsport {
/* Workaround for a temp dvb_frontend that the tuner can be attached to */
struct dvb_frontend analog_fe;
+ struct i2c_client *i2c_client_demod;
+ struct i2c_client *i2c_client_tuner;
+
int (*set_frontend)(struct dvb_frontend *fe);
+ int (*fe_set_voltage)(struct dvb_frontend *fe,
+ fe_sec_voltage_t voltage);
};
struct cx23885_kernel_ir {
@@ -339,8 +312,11 @@ struct cx23885_kernel_ir {
struct cx23885_audio_buffer {
unsigned int bpl;
- struct btcx_riscmem risc;
- struct videobuf_dmabuf dma;
+ struct cx23885_riscmem risc;
+ void *vaddr;
+ struct scatterlist *sglist;
+ int sglen;
+ int nr_pages;
};
struct cx23885_audio_dev {
@@ -358,8 +334,6 @@ struct cx23885_audio_dev {
unsigned int period_size;
unsigned int num_periods;
- struct videobuf_dmabuf *dma_risc;
-
struct cx23885_audio_buffer *buf;
struct snd_pcm_substream *substream;
@@ -368,6 +342,7 @@ struct cx23885_audio_dev {
struct cx23885_dev {
atomic_t refcount;
struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler ctrl_handler;
/* pci stuff */
struct pci_dev *pci;
@@ -407,7 +382,6 @@ struct cx23885_dev {
} bridge;
/* Analog video */
- u32 resources;
unsigned int input;
unsigned int audinput; /* Selectable audio input */
u32 tvaudio;
@@ -417,7 +391,6 @@ struct cx23885_dev {
unsigned int tuner_bus;
unsigned int radio_type;
unsigned char radio_addr;
- unsigned int has_radio;
struct v4l2_subdev *sd_cx25840;
struct work_struct cx25840_work;
@@ -435,17 +408,24 @@ struct cx23885_dev {
u32 freq;
struct video_device *video_dev;
struct video_device *vbi_dev;
- struct video_device *radio_dev;
+
+ /* video capture */
+ struct cx23885_fmt *fmt;
+ unsigned int width, height;
+ unsigned field;
struct cx23885_dmaqueue vidq;
+ struct vb2_queue vb2_vidq;
struct cx23885_dmaqueue vbiq;
+ struct vb2_queue vb2_vbiq;
+
spinlock_t slock;
/* MPEG Encoder ONLY settings */
u32 cx23417_mailbox;
- struct cx2341x_mpeg_params mpeg_params;
+ struct cx2341x_handler cxhdl;
struct video_device *v4l_device;
- atomic_t v4l_reader_count;
+ struct vb2_queue vb2_mpegq;
struct cx23885_tvnorm encodernorm;
/* Analog raw audio */
@@ -521,26 +501,21 @@ extern int cx23885_sram_channel_setup(struct cx23885_dev *dev,
extern void cx23885_sram_channel_dump(struct cx23885_dev *dev,
struct sram_channel *ch);
-extern int cx23885_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
- u32 reg, u32 mask, u32 value);
-
-extern int cx23885_risc_buffer(struct pci_dev *pci, struct btcx_riscmem *risc,
+extern int cx23885_risc_buffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
struct scatterlist *sglist,
unsigned int top_offset, unsigned int bottom_offset,
unsigned int bpl, unsigned int padding, unsigned int lines);
extern int cx23885_risc_vbibuffer(struct pci_dev *pci,
- struct btcx_riscmem *risc, struct scatterlist *sglist,
+ struct cx23885_riscmem *risc, struct scatterlist *sglist,
unsigned int top_offset, unsigned int bottom_offset,
unsigned int bpl, unsigned int padding, unsigned int lines);
+int cx23885_start_dma(struct cx23885_tsport *port,
+ struct cx23885_dmaqueue *q,
+ struct cx23885_buffer *buf);
void cx23885_cancel_buffers(struct cx23885_tsport *port);
-extern int cx23885_restart_queue(struct cx23885_tsport *port,
- struct cx23885_dmaqueue *q);
-
-extern void cx23885_wakeup(struct cx23885_tsport *port,
- struct cx23885_dmaqueue *q, u32 count);
extern void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask);
extern void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask);
@@ -574,13 +549,11 @@ extern void cx23885_card_setup_pre_i2c(struct cx23885_dev *dev);
extern int cx23885_dvb_register(struct cx23885_tsport *port);
extern int cx23885_dvb_unregister(struct cx23885_tsport *port);
-extern int cx23885_buf_prepare(struct videobuf_queue *q,
- struct cx23885_tsport *port,
- struct cx23885_buffer *buf,
- enum v4l2_field field);
+extern int cx23885_buf_prepare(struct cx23885_buffer *buf,
+ struct cx23885_tsport *port);
extern void cx23885_buf_queue(struct cx23885_tsport *port,
struct cx23885_buffer *buf);
-extern void cx23885_free_buffer(struct videobuf_queue *q,
+extern void cx23885_free_buffer(struct cx23885_dev *dev,
struct cx23885_buffer *buf);
/* ----------------------------------------------------------- */
@@ -595,8 +568,6 @@ int cx23885_enum_input(struct cx23885_dev *dev, struct v4l2_input *i);
int cx23885_set_input(struct file *file, void *priv, unsigned int i);
int cx23885_get_input(struct file *file, void *priv, unsigned int *i);
int cx23885_set_frequency(struct file *file, void *priv, const struct v4l2_frequency *f);
-int cx23885_set_control(struct cx23885_dev *dev, struct v4l2_control *ctl);
-int cx23885_get_control(struct cx23885_dev *dev, struct v4l2_control *ctl);
int cx23885_set_tvnorm(struct cx23885_dev *dev, v4l2_std_id norm);
/* ----------------------------------------------------------- */
@@ -604,9 +575,7 @@ int cx23885_set_tvnorm(struct cx23885_dev *dev, v4l2_std_id norm);
extern int cx23885_vbi_fmt(struct file *file, void *priv,
struct v4l2_format *f);
extern void cx23885_vbi_timeout(unsigned long data);
-extern struct videobuf_queue_ops cx23885_vbi_qops;
-extern int cx23885_restart_vbi_queue(struct cx23885_dev *dev,
- struct cx23885_dmaqueue *q);
+extern struct vb2_ops cx23885_vbi_qops;
extern int cx23885_vbi_irq(struct cx23885_dev *dev, u32 status);
/* cx23885-i2c.c */
@@ -638,7 +607,7 @@ extern struct cx23885_audio_dev *cx23885_audio_register(
extern void cx23885_audio_unregister(struct cx23885_dev *dev);
extern int cx23885_audio_irq(struct cx23885_dev *dev, u32 status, u32 mask);
extern int cx23885_risc_databuffer(struct pci_dev *pci,
- struct btcx_riscmem *risc,
+ struct cx23885_riscmem *risc,
struct scatterlist *sglist,
unsigned int bpl,
unsigned int lines,
@@ -649,15 +618,10 @@ extern int cx23885_risc_databuffer(struct pci_dev *pci,
static inline unsigned int norm_maxw(v4l2_std_id norm)
{
- return (norm & (V4L2_STD_MN & ~V4L2_STD_PAL_Nc)) ? 720 : 768;
+ return (norm & V4L2_STD_525_60) ? 720 : 768;
}
static inline unsigned int norm_maxh(v4l2_std_id norm)
{
- return (norm & V4L2_STD_625_50) ? 576 : 480;
-}
-
-static inline unsigned int norm_swidth(v4l2_std_id norm)
-{
- return (norm & (V4L2_STD_MN & ~V4L2_STD_PAL_Nc)) ? 754 : 922;
+ return (norm & V4L2_STD_525_60) ? 480 : 576;
}
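A quick sanity check of the simplified helpers above, using standard macros from videodev2.h: 525-line/60 Hz standards cap at 720x480, 625-line/50 Hz standards at 768x576.

/*
 * norm_maxw(V4L2_STD_NTSC_M) == 720   norm_maxh(V4L2_STD_NTSC_M) == 480
 * norm_maxw(V4L2_STD_PAL_BG) == 768   norm_maxh(V4L2_STD_PAL_BG) == 576
 */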
diff --git a/drivers/media/pci/cx23885/cx23888-ir.c b/drivers/media/pci/cx23885/cx23888-ir.c
index c2ff5fc01157..c1aa888af705 100644
--- a/drivers/media/pci/cx23885/cx23888-ir.c
+++ b/drivers/media/pci/cx23885/cx23888-ir.c
@@ -14,11 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*/
#include <linux/kfifo.h>
diff --git a/drivers/media/pci/cx23885/cx23888-ir.h b/drivers/media/pci/cx23885/cx23888-ir.h
index d2de41caaf1d..ff74a93575d6 100644
--- a/drivers/media/pci/cx23885/cx23888-ir.h
+++ b/drivers/media/pci/cx23885/cx23888-ir.h
@@ -14,11 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*/
#ifndef _CX23888_IR_H_
diff --git a/drivers/media/pci/cx23885/netup-eeprom.c b/drivers/media/pci/cx23885/netup-eeprom.c
index 98a48f500684..b6542ee4385b 100644
--- a/drivers/media/pci/cx23885/netup-eeprom.c
+++ b/drivers/media/pci/cx23885/netup-eeprom.c
@@ -17,10 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#
diff --git a/drivers/media/pci/cx23885/netup-eeprom.h b/drivers/media/pci/cx23885/netup-eeprom.h
index 13926e18feba..90cac5b655d5 100644
--- a/drivers/media/pci/cx23885/netup-eeprom.h
+++ b/drivers/media/pci/cx23885/netup-eeprom.h
@@ -16,10 +16,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef NETUP_EEPROM_H
diff --git a/drivers/media/pci/cx23885/netup-init.c b/drivers/media/pci/cx23885/netup-init.c
index 0044fef7ca24..76d9487aafc8 100644
--- a/drivers/media/pci/cx23885/netup-init.c
+++ b/drivers/media/pci/cx23885/netup-init.c
@@ -17,10 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "cx23885.h"
diff --git a/drivers/media/pci/cx23885/netup-init.h b/drivers/media/pci/cx23885/netup-init.h
index d26ae4b1590e..daaa212adfba 100644
--- a/drivers/media/pci/cx23885/netup-init.h
+++ b/drivers/media/pci/cx23885/netup-init.h
@@ -17,9 +17,5 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
extern void netup_initialize(struct cx23885_dev *dev);
diff --git a/drivers/media/pci/cx25821/cx25821-video-upstream.c b/drivers/media/pci/cx25821/cx25821-video-upstream.c
index 1f43be0b04c8..a664997e1958 100644
--- a/drivers/media/pci/cx25821/cx25821-video-upstream.c
+++ b/drivers/media/pci/cx25821/cx25821-video-upstream.c
@@ -330,8 +330,9 @@ int cx25821_write_frame(struct cx25821_channel *chan,
if (frame_size - curpos < count)
count = frame_size - curpos;
- memcpy((char *)out->_data_buf_virt_addr + frame_offset + curpos,
- data, count);
+ if (copy_from_user((__force char *)out->_data_buf_virt_addr + frame_offset + curpos,
+ data, count))
+ return -EFAULT;
curpos += count;
if (curpos == frame_size) {
out->_frame_count++;
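The hunk above replaces a memcpy() of a user-space pointer with copy_from_user(), which is the required pattern on any write() path; a minimal sketch of the convention follows (the function and buffer names are hypothetical, not from this driver).

#include <linux/fs.h>
#include <linux/uaccess.h>

static ssize_t example_write(struct file *file, const char __user *data,
			     size_t count, loff_t *ppos)
{
	char kbuf[64];

	if (count > sizeof(kbuf))
		count = sizeof(kbuf);
	/* copy_from_user() returns the number of bytes left uncopied */
	if (copy_from_user(kbuf, data, count))
		return -EFAULT;
	return count;
}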
diff --git a/drivers/media/pci/cx88/cx88-cards.c b/drivers/media/pci/cx88/cx88-cards.c
index e18a7ace08b1..851754bf1291 100644
--- a/drivers/media/pci/cx88/cx88-cards.c
+++ b/drivers/media/pci/cx88/cx88-cards.c
@@ -78,19 +78,19 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 0,
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE2,
.vmux = 1,
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE3,
.vmux = 2,
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE4,
.vmux = 3,
- }},
+ } },
},
[CX88_BOARD_HAUPPAUGE] = {
.name = "Hauppauge WinTV 34xxx models",
@@ -99,23 +99,23 @@ static const struct cx88_board cx88_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0xff00, // internal decoder
- },{
+ }, {
.type = CX88_VMUX_DEBUG,
.vmux = 0,
.gpio0 = 0xff01, // mono from tuner chip
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0xff02,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0xff02,
- }},
+ } },
.radio = {
.type = CX88_RADIO,
.gpio0 = 0xff01,
@@ -127,13 +127,13 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
- }},
+ } },
},
[CX88_BOARD_PIXELVIEW] = {
.name = "PixelView",
@@ -141,17 +141,17 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0xff00, // internal decoder
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
- }},
+ } },
.radio = {
.type = CX88_RADIO,
.gpio0 = 0xff10,
@@ -164,19 +164,19 @@ static const struct cx88_board cx88_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT | TDA9887_INTERCARRIER,
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x03ff,
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x03fe,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x03fe,
- }},
+ } },
},
[CX88_BOARD_WINFAST2000XP_EXPERT] = {
.name = "Leadtek Winfast 2000XP Expert",
@@ -185,28 +185,28 @@ static const struct cx88_board cx88_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x00F5e700,
.gpio1 = 0x00003004,
.gpio2 = 0x00F5e700,
.gpio3 = 0x02000000,
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x00F5c700,
.gpio1 = 0x00003004,
.gpio2 = 0x00F5c700,
.gpio3 = 0x02000000,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x00F5c700,
.gpio1 = 0x00003004,
.gpio2 = 0x00F5c700,
.gpio3 = 0x02000000,
- }},
+ } },
.radio = {
.type = CX88_RADIO,
.gpio0 = 0x00F5d700,
@@ -222,19 +222,19 @@ static const struct cx88_board cx88_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio1 = 0xe09f,
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio1 = 0xe05f,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio1 = 0xe05f,
- }},
+ } },
.radio = {
.gpio1 = 0xe0df,
.type = CX88_RADIO,
@@ -249,25 +249,25 @@ static const struct cx88_board cx88_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT | TDA9887_INTERCARRIER_NTSC,
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x000040bf,
.gpio1 = 0x000080c0,
.gpio2 = 0x0000ff40,
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x000040bf,
.gpio1 = 0x000080c0,
.gpio2 = 0x0000ff40,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x000040bf,
.gpio1 = 0x000080c0,
.gpio2 = 0x0000ff40,
- }},
+ } },
.radio = {
.type = CX88_RADIO,
.vmux = 3,
@@ -283,14 +283,14 @@ static const struct cx88_board cx88_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x0035e700,
.gpio1 = 0x00003004,
.gpio2 = 0x0035e700,
.gpio3 = 0x02000000,
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
@@ -298,14 +298,14 @@ static const struct cx88_board cx88_boards[] = {
.gpio1 = 0x00003004,
.gpio2 = 0x0035c700,
.gpio3 = 0x02000000,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x0035c700,
.gpio1 = 0x0035c700,
.gpio2 = 0x02000000,
.gpio3 = 0x02000000,
- }},
+ } },
.radio = {
.type = CX88_RADIO,
.gpio0 = 0x0035d700,
@@ -322,22 +322,22 @@ static const struct cx88_board cx88_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x0000bde2,
.audioroute = 1,
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x0000bde6,
.audioroute = 1,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x0000bde6,
.audioroute = 1,
- }},
+ } },
.radio = {
.type = CX88_RADIO,
.gpio0 = 0x0000bd62,
@@ -351,16 +351,16 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 0,
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE2,
.vmux = 1,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
- }},
+ } },
},
[CX88_BOARD_PROLINK_PLAYTVPVR] = {
.name = "Prolink PlayTV PVR",
@@ -369,19 +369,19 @@ static const struct cx88_board cx88_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0xbff0,
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0xbff3,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0xbff3,
- }},
+ } },
.radio = {
.type = CX88_RADIO,
.gpio0 = 0xbff0,
@@ -394,16 +394,16 @@ static const struct cx88_board cx88_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x0000fde6,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x0000fde6, // 0x0000fda6 L,R RCA audio in?
.audioroute = 1,
- }},
+ } },
.radio = {
.type = CX88_RADIO,
.gpio0 = 0x0000fde2,
@@ -417,22 +417,22 @@ static const struct cx88_board cx88_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x00000fbf,
.gpio2 = 0x0000fc08,
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x00000fbf,
.gpio2 = 0x0000fc68,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x00000fbf,
.gpio2 = 0x0000fc68,
- }},
+ } },
},
[CX88_BOARD_KWORLD_DVB_T] = {
.name = "KWorld/VStream XPert DVB-T",
@@ -440,17 +440,17 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x0700,
.gpio2 = 0x0101,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x0700,
.gpio2 = 0x0101,
- }},
+ } },
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_DVICO_FUSIONHDTV_DVB_T1] = {
@@ -459,15 +459,15 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x000027df,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x000027df,
- }},
+ } },
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_KWORLD_LTV883] = {
@@ -476,23 +476,23 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x07f8,
- },{
+ }, {
.type = CX88_VMUX_DEBUG,
.vmux = 0,
.gpio0 = 0x07f9, // mono from tuner chip
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x000007fa,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x000007fa,
- }},
+ } },
.radio = {
.type = CX88_RADIO,
.gpio0 = 0x000007f8,
@@ -521,23 +521,23 @@ static const struct cx88_board cx88_boards[] = {
0 - normal RF
1 - high RF
*/
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x0f0d,
- },{
+ }, {
.type = CX88_VMUX_CABLE,
.vmux = 0,
.gpio0 = 0x0f05,
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x0f00,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x0f00,
- }},
+ } },
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_HAUPPAUGE_DVB_T1] = {
@@ -546,10 +546,10 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_DVB,
.vmux = 0,
- }},
+ } },
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_CONEXANT_DVB_T1] = {
@@ -558,10 +558,10 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_DVB,
.vmux = 0,
- }},
+ } },
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_PROVIDEO_PV259] = {
@@ -570,11 +570,11 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.audioroute = 1,
- }},
+ } },
.mpeg = CX88_MPEG_BLACKBIRD,
},
[CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PLUS] = {
@@ -583,15 +583,15 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x000027df,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x000027df,
- }},
+ } },
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_DNTV_LIVE_DVB_T] = {
@@ -600,17 +600,17 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x00000700,
.gpio2 = 0x00000101,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x00000700,
.gpio2 = 0x00000101,
- }},
+ } },
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_PCHDTV_HD3000] = {
@@ -632,19 +632,19 @@ static const struct cx88_board cx88_boards[] = {
*
* GPIO[16] = Remote control input
*/
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x00008484,
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x00008400,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x00008400,
- }},
+ } },
.radio = {
.type = CX88_RADIO,
.gpio0 = 0x00008404,
@@ -659,25 +659,25 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0xed1a,
.gpio2 = 0x00ff,
- },{
+ }, {
.type = CX88_VMUX_DEBUG,
.vmux = 0,
.gpio0 = 0xff01,
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0xff02,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0xed92,
.gpio2 = 0x00ff,
- }},
+ } },
.radio = {
.type = CX88_RADIO,
.gpio0 = 0xed96,
@@ -692,22 +692,22 @@ static const struct cx88_board cx88_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x00009d80,
.audioroute = 1,
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x00009d76,
.audioroute = 1,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x00009d76,
.audioroute = 1,
- }},
+ } },
.radio = {
.type = CX88_RADIO,
.gpio0 = 0x00009d00,
@@ -722,19 +722,19 @@ static const struct cx88_board cx88_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 1,
.gpio1 = 0x0000e03f,
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 2,
.gpio1 = 0x0000e07f,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 3,
.gpio1 = 0x0000e07f,
- }}
+ } }
},
[CX88_BOARD_PIXELVIEW_PLAYTV_ULTRA_PRO] = {
.name = "PixelView PlayTV Ultra Pro (Stereo)",
@@ -745,19 +745,19 @@ static const struct cx88_board cx88_boards[] = {
.radio_addr = ADDR_UNSET,
/* Some variants use a tda9874 and so need the tvaudio module. */
.audio_chip = CX88_AUDIO_TVAUDIO,
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0xbf61, /* internal decoder */
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0xbf63,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0xbf63,
- }},
+ } },
.radio = {
.type = CX88_RADIO,
.gpio0 = 0xbf60,
@@ -770,19 +770,19 @@ static const struct cx88_board cx88_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x97ed,
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x97e9,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x97e9,
- }},
+ } },
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_ADSTECH_DVB_T_PCI] = {
@@ -791,32 +791,32 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x0700,
.gpio2 = 0x0101,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x0700,
.gpio2 = 0x0101,
- }},
+ } },
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_TERRATEC_CINERGY_1400_DVB_T1] = {
.name = "TerraTec Cinergy 1400 DVB-T",
.tuner_type = TUNER_ABSENT,
- .input = {{
+ .input = { {
.type = CX88_VMUX_DVB,
.vmux = 0,
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 2,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
- }},
+ } },
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_DVICO_FUSIONHDTV_5_GOLD] = {
@@ -826,19 +826,19 @@ static const struct cx88_board cx88_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x87fd,
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x87f9,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x87f9,
- }},
+ } },
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_AVERMEDIA_ULTRATV_MC_550] = {
@@ -848,22 +848,22 @@ static const struct cx88_board cx88_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
- .input = {{
+ .input = { {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 0,
.gpio0 = 0x0000cd73,
.audioroute = 1,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 1,
.gpio0 = 0x0000cd73,
.audioroute = 1,
- },{
+ }, {
.type = CX88_VMUX_TELEVISION,
.vmux = 3,
.gpio0 = 0x0000cdb3,
.audioroute = 1,
- }},
+ } },
.radio = {
.type = CX88_RADIO,
.vmux = 2,
@@ -876,21 +876,21 @@ static const struct cx88_board cx88_boards[] = {
/* Alexander Wold <awold@bigfoot.com> */
.name = "Kworld V-Stream Xpert DVD",
.tuner_type = UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x03000000,
.gpio1 = 0x01000000,
.gpio2 = 0x02000000,
.gpio3 = 0x00100000,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x03000000,
.gpio1 = 0x01000000,
.gpio2 = 0x02000000,
.gpio3 = 0x00100000,
- }},
+ } },
},
[CX88_BOARD_ATI_HDTVWONDER] = {
.name = "ATI HDTV Wonder",
@@ -898,28 +898,28 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x00000ff7,
.gpio1 = 0x000000ff,
.gpio2 = 0x00000001,
.gpio3 = 0x00000000,
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x00000ffe,
.gpio1 = 0x000000ff,
.gpio2 = 0x00000001,
.gpio3 = 0x00000000,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x00000ffe,
.gpio1 = 0x000000ff,
.gpio2 = 0x00000001,
.gpio3 = 0x00000000,
- }},
+ } },
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_WINFAST_DTV1000] = {
@@ -928,16 +928,16 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_DVB,
.vmux = 0,
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
- }},
+ } },
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_AVERTV_303] = {
@@ -947,28 +947,28 @@ static const struct cx88_board cx88_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x00ff,
.gpio1 = 0xe09f,
.gpio2 = 0x0010,
.gpio3 = 0x0000,
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x00ff,
.gpio1 = 0xe05f,
.gpio2 = 0x0010,
.gpio3 = 0x0000,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x00ff,
.gpio1 = 0xe05f,
.gpio2 = 0x0010,
.gpio3 = 0x0000,
- }},
+ } },
},
[CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1] = {
.name = "Hauppauge Nova-S-Plus DVB-S",
@@ -978,22 +978,22 @@ static const struct cx88_board cx88_boards[] = {
.radio_addr = ADDR_UNSET,
.audio_chip = CX88_AUDIO_WM8775,
.i2sinputcntl = 2,
- .input = {{
+ .input = { {
.type = CX88_VMUX_DVB,
.vmux = 0,
/* 2: Line-In */
.audioroute = 2,
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
/* 2: Line-In */
.audioroute = 2,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
/* 2: Line-In */
.audioroute = 2,
- }},
+ } },
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_HAUPPAUGE_NOVASE2_S1] = {
@@ -1002,10 +1002,10 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_DVB,
.vmux = 0,
- }},
+ } },
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_KWORLD_DVBS_100] = {
@@ -1015,22 +1015,22 @@ static const struct cx88_board cx88_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.audio_chip = CX88_AUDIO_WM8775,
- .input = {{
+ .input = { {
.type = CX88_VMUX_DVB,
.vmux = 0,
/* 2: Line-In */
.audioroute = 2,
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
/* 2: Line-In */
.audioroute = 2,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
/* 2: Line-In */
.audioroute = 2,
- }},
+ } },
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_HAUPPAUGE_HVR1100] = {
@@ -1040,16 +1040,16 @@ static const struct cx88_board cx88_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
- }},
+ } },
/* fixme: Add radio support */
.mpeg = CX88_MPEG_DVB,
},
@@ -1060,13 +1060,13 @@ static const struct cx88_board cx88_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
- }},
+ } },
/* fixme: Add radio support */
.mpeg = CX88_MPEG_DVB,
},
@@ -1078,19 +1078,19 @@ static const struct cx88_board cx88_boards[] = {
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT | TDA9887_PORT1_ACTIVE |
TDA9887_PORT2_ACTIVE,
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0xf80808,
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0xf80808,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0xf80808,
- }},
+ } },
.radio = {
.type = CX88_RADIO,
.gpio0 = 0xf80808,
@@ -1106,17 +1106,17 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x0700,
.gpio2 = 0x0101,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x0700,
.gpio2 = 0x0101,
- }},
+ } },
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL] = {
@@ -1125,15 +1125,15 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x000067df,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x000067df,
- }},
+ } },
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_KWORLD_HARDWARE_MPEG_TV_XPERT] = {
@@ -1142,22 +1142,22 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x3de2,
.gpio2 = 0x00ff,
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x3de6,
.audioroute = 1,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x3de6,
.audioroute = 1,
- }},
+ } },
.radio = {
.type = CX88_RADIO,
.gpio0 = 0x3de6,
@@ -1171,19 +1171,19 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x0000a75f,
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x0000a75b,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x0000a75b,
- }},
+ } },
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_PCHDTV_HD5500] = {
@@ -1193,19 +1193,19 @@ static const struct cx88_board cx88_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x87fd,
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x87f9,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x87f9,
- }},
+ } },
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_KWORLD_MCE200_DELUXE] = {
@@ -1217,11 +1217,11 @@ static const struct cx88_board cx88_boards[] = {
.tda9887_conf = TDA9887_PRESENT,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x0000BDE6
- }},
+ } },
.mpeg = CX88_MPEG_BLACKBIRD,
},
[CX88_BOARD_PIXELVIEW_PLAYTV_P7000] = {
@@ -1233,11 +1233,11 @@ static const struct cx88_board cx88_boards[] = {
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT | TDA9887_PORT1_ACTIVE |
TDA9887_PORT2_ACTIVE,
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x5da6,
- }},
+ } },
.mpeg = CX88_MPEG_BLACKBIRD,
},
[CX88_BOARD_NPGTECH_REALTV_TOP10FM] = {
@@ -1246,19 +1246,19 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x0788,
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x078b,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x078b,
- }},
+ } },
.radio = {
.type = CX88_RADIO,
.gpio0 = 0x074a,
@@ -1271,7 +1271,7 @@ static const struct cx88_board cx88_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x00017304,
@@ -1299,7 +1299,7 @@ static const struct cx88_board cx88_boards[] = {
.gpio1 = 0x0000b207,
.gpio2 = 0x0001d701,
.gpio3 = 0x02000000,
- }},
+ } },
.radio = {
.type = CX88_RADIO,
.gpio0 = 0x00015702,
@@ -1316,35 +1316,35 @@ static const struct cx88_board cx88_boards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x00017300,
.gpio1 = 0x00008207,
.gpio2 = 0x00000000,
.gpio3 = 0x02000000,
- },{
+ }, {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x00018300,
.gpio1 = 0x0000f207,
.gpio2 = 0x00017304,
.gpio3 = 0x02000000,
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x00018301,
.gpio1 = 0x0000f207,
.gpio2 = 0x00017304,
.gpio3 = 0x02000000,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x00018301,
.gpio1 = 0x0000f207,
.gpio2 = 0x00017304,
.gpio3 = 0x02000000,
- }},
+ } },
.radio = {
.type = CX88_RADIO,
.gpio0 = 0x00015702,
@@ -1360,13 +1360,13 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_DVB,
.vmux = 0,
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
- }},
+ } },
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_HAUPPAUGE_HVR3000] = {
@@ -1377,25 +1377,25 @@ static const struct cx88_board cx88_boards[] = {
.radio_addr = ADDR_UNSET,
.tda9887_conf = TDA9887_PRESENT,
.audio_chip = CX88_AUDIO_WM8775,
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x84bf,
/* 1: TV Audio / FM Mono */
.audioroute = 1,
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x84bf,
/* 2: Line-In */
.audioroute = 2,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x84bf,
/* 2: Line-In */
.audioroute = 2,
- }},
+ } },
.radio = {
.type = CX88_RADIO,
.gpio0 = 0x84bf,
@@ -1411,19 +1411,19 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x0709,
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x070b,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x070b,
- }},
+ } },
},
[CX88_BOARD_TE_DTV_250_OEM_SWANN] = {
.name = "Shenzhen Tungsten Ages Tech TE-DTV-250 / Swann OEM",
@@ -1431,28 +1431,28 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x003fffff,
.gpio1 = 0x00e00000,
.gpio2 = 0x003fffff,
.gpio3 = 0x02000000,
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x003fffff,
.gpio1 = 0x00e00000,
.gpio2 = 0x003fffff,
.gpio3 = 0x02000000,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x003fffff,
.gpio1 = 0x00e00000,
.gpio2 = 0x003fffff,
.gpio3 = 0x02000000,
- }},
+ } },
},
[CX88_BOARD_HAUPPAUGE_HVR1300] = {
.name = "Hauppauge WinTV-HVR1300 DVB-T/Hybrid MPEG Encoder",
@@ -1465,25 +1465,25 @@ static const struct cx88_board cx88_boards[] = {
/*
* gpio0 as reported by Mike Crash <mike AT mikecrash.com>
*/
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0xef88,
/* 1: TV Audio / FM Mono */
.audioroute = 1,
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0xef88,
/* 2: Line-In */
.audioroute = 2,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0xef88,
/* 2: Line-In */
.audioroute = 2,
- }},
+ } },
.mpeg = CX88_MPEG_DVB | CX88_MPEG_BLACKBIRD,
.radio = {
.type = CX88_RADIO,
@@ -1510,19 +1510,19 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_DEBUG,
.vmux = 3,
.gpio0 = 0x04ff,
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x07fa,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x07fa,
- }},
+ } },
},
[CX88_BOARD_PINNACLE_PCTV_HD_800i] = {
.name = "Pinnacle PCTV HD 800i",
@@ -1530,24 +1530,24 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x04fb,
.gpio1 = 0x10ff,
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x04fb,
.gpio1 = 0x10ef,
.audioroute = 1,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x04fb,
.gpio1 = 0x10ef,
.audioroute = 1,
- }},
+ } },
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO] = {
@@ -1557,7 +1557,7 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x000027df, /* Unconfirmed */
@@ -1815,19 +1815,19 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x10df,
- },{
+ }, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
.gpio0 = 0x16d9,
- },{
+ }, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
.gpio0 = 0x16d9,
- }},
+ } },
.mpeg = CX88_MPEG_DVB,
},
[CX88_BOARD_PROLINK_PV_8000GT] = {
@@ -1967,7 +1967,7 @@ static const struct cx88_board cx88_boards[] = {
* 3: Line-In Expansion
* 4: FM Stereo
*/
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0xc4bf,
@@ -2001,7 +2001,7 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_DVB,
.vmux = 0,
} },
@@ -2013,7 +2013,7 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_DVB,
.vmux = 0,
} },
@@ -2025,7 +2025,7 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_DVB,
.vmux = 0,
} },
@@ -2037,7 +2037,7 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_DVB,
.vmux = 0,
} },
@@ -2049,7 +2049,7 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_DVB,
.vmux = 0,
} },
@@ -2061,7 +2061,7 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_DVB,
.vmux = 0,
} },
@@ -2073,7 +2073,7 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_DVB,
.vmux = 0,
.gpio0 = 0x8080,
@@ -2086,7 +2086,7 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_DVB,
.vmux = 0,
} },
@@ -2098,7 +2098,7 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_DVB,
.vmux = 0,
} },
@@ -2110,7 +2110,7 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_DVB,
.vmux = 0,
} },
@@ -2170,7 +2170,7 @@ static const struct cx88_board cx88_boards[] = {
* 13: audio source (0=tuner audio,1=line in)
* 14: FM (0=on,1=off ???)
*/
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x0400, /* pin 2 = 0 */
@@ -2211,7 +2211,7 @@ static const struct cx88_board cx88_boards[] = {
* 13: audio source (0=tuner audio,1=line in)
* 14: FM (0=on,1=off ???)
*/
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x0400, /* pin 2 = 0 */
@@ -2229,7 +2229,7 @@ static const struct cx88_board cx88_boards[] = {
.gpio0 = 0x0400, /* pin 2 = 0 */
.gpio1 = 0x6060, /* pin 13 = 1, pin 14 = 1 */
.gpio2 = 0x0000,
- }},
+ } },
.radio = {
.type = CX88_RADIO,
.gpio0 = 0x0400, /* pin 2 = 0 */
@@ -2252,7 +2252,7 @@ static const struct cx88_board cx88_boards[] = {
* 14: 0: FM radio
* 16: 0: RF input is cable
*/
- .input = {{
+ .input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
.gpio0 = 0x0403,
@@ -2280,7 +2280,7 @@ static const struct cx88_board cx88_boards[] = {
.gpio1 = 0xF0F7,
.gpio2 = 0x0101,
.gpio3 = 0x0000,
- }},
+ } },
.radio = {
.type = CX88_RADIO,
.gpio0 = 0x0403,
@@ -2308,7 +2308,7 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .input = {{
+ .input = { {
.type = CX88_VMUX_DVB,
.vmux = 0,
} },
@@ -2324,19 +2324,19 @@ static const struct cx88_subid cx88_subids[] = {
.subvendor = 0x0070,
.subdevice = 0x3400,
.card = CX88_BOARD_HAUPPAUGE,
- },{
+ }, {
.subvendor = 0x0070,
.subdevice = 0x3401,
.card = CX88_BOARD_HAUPPAUGE,
- },{
+ }, {
.subvendor = 0x14c7,
.subdevice = 0x0106,
.card = CX88_BOARD_GDI,
- },{
+ }, {
.subvendor = 0x14c7,
.subdevice = 0x0107, /* with mpeg encoder */
.card = CX88_BOARD_GDI,
- },{
+ }, {
.subvendor = PCI_VENDOR_ID_ATI,
.subdevice = 0x00f8,
.card = CX88_BOARD_ATI_WONDER_PRO,
@@ -2348,176 +2348,176 @@ static const struct cx88_subid cx88_subids[] = {
.subvendor = 0x107d,
.subdevice = 0x6611,
.card = CX88_BOARD_WINFAST2000XP_EXPERT,
- },{
+ }, {
.subvendor = 0x107d,
.subdevice = 0x6613, /* NTSC */
.card = CX88_BOARD_WINFAST2000XP_EXPERT,
- },{
+ }, {
.subvendor = 0x107d,
.subdevice = 0x6620,
.card = CX88_BOARD_WINFAST_DV2000,
- },{
+ }, {
.subvendor = 0x107d,
.subdevice = 0x663b,
.card = CX88_BOARD_LEADTEK_PVR2000,
- },{
+ }, {
.subvendor = 0x107d,
.subdevice = 0x663c,
.card = CX88_BOARD_LEADTEK_PVR2000,
- },{
+ }, {
.subvendor = 0x1461,
.subdevice = 0x000b,
.card = CX88_BOARD_AVERTV_STUDIO_303,
- },{
+ }, {
.subvendor = 0x1462,
.subdevice = 0x8606,
.card = CX88_BOARD_MSI_TVANYWHERE_MASTER,
- },{
+ }, {
.subvendor = 0x10fc,
.subdevice = 0xd003,
.card = CX88_BOARD_IODATA_GVVCP3PCI,
- },{
+ }, {
.subvendor = 0x1043,
.subdevice = 0x4823, /* with mpeg encoder */
.card = CX88_BOARD_ASUS_PVR_416,
- },{
+ }, {
.subvendor = 0x17de,
.subdevice = 0x08a6,
.card = CX88_BOARD_KWORLD_DVB_T,
- },{
+ }, {
.subvendor = 0x18ac,
.subdevice = 0xd810,
.card = CX88_BOARD_DVICO_FUSIONHDTV_3_GOLD_Q,
- },{
+ }, {
.subvendor = 0x18ac,
.subdevice = 0xd820,
.card = CX88_BOARD_DVICO_FUSIONHDTV_3_GOLD_T,
- },{
+ }, {
.subvendor = 0x18ac,
.subdevice = 0xdb00,
.card = CX88_BOARD_DVICO_FUSIONHDTV_DVB_T1,
- },{
+ }, {
.subvendor = 0x0070,
.subdevice = 0x9002,
.card = CX88_BOARD_HAUPPAUGE_DVB_T1,
- },{
+ }, {
.subvendor = 0x14f1,
.subdevice = 0x0187,
.card = CX88_BOARD_CONEXANT_DVB_T1,
- },{
+ }, {
.subvendor = 0x1540,
.subdevice = 0x2580,
.card = CX88_BOARD_PROVIDEO_PV259,
- },{
+ }, {
.subvendor = 0x18ac,
.subdevice = 0xdb10,
.card = CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PLUS,
- },{
+ }, {
.subvendor = 0x1554,
.subdevice = 0x4811,
.card = CX88_BOARD_PIXELVIEW,
- },{
+ }, {
.subvendor = 0x7063,
.subdevice = 0x3000, /* HD-3000 card */
.card = CX88_BOARD_PCHDTV_HD3000,
- },{
+ }, {
.subvendor = 0x17de,
.subdevice = 0xa8a6,
.card = CX88_BOARD_DNTV_LIVE_DVB_T,
- },{
+ }, {
.subvendor = 0x0070,
.subdevice = 0x2801,
.card = CX88_BOARD_HAUPPAUGE_ROSLYN,
- },{
+ }, {
.subvendor = 0x14f1,
.subdevice = 0x0342,
.card = CX88_BOARD_DIGITALLOGIC_MEC,
- },{
+ }, {
.subvendor = 0x10fc,
.subdevice = 0xd035,
.card = CX88_BOARD_IODATA_GVBCTV7E,
- },{
+ }, {
.subvendor = 0x1421,
.subdevice = 0x0334,
.card = CX88_BOARD_ADSTECH_DVB_T_PCI,
- },{
+ }, {
.subvendor = 0x153b,
.subdevice = 0x1166,
.card = CX88_BOARD_TERRATEC_CINERGY_1400_DVB_T1,
- },{
+ }, {
.subvendor = 0x18ac,
.subdevice = 0xd500,
.card = CX88_BOARD_DVICO_FUSIONHDTV_5_GOLD,
- },{
+ }, {
.subvendor = 0x1461,
.subdevice = 0x8011,
.card = CX88_BOARD_AVERMEDIA_ULTRATV_MC_550,
- },{
+ }, {
.subvendor = PCI_VENDOR_ID_ATI,
.subdevice = 0xa101,
.card = CX88_BOARD_ATI_HDTVWONDER,
- },{
+ }, {
.subvendor = 0x107d,
.subdevice = 0x665f,
.card = CX88_BOARD_WINFAST_DTV1000,
- },{
+ }, {
.subvendor = 0x1461,
.subdevice = 0x000a,
.card = CX88_BOARD_AVERTV_303,
- },{
+ }, {
.subvendor = 0x0070,
.subdevice = 0x9200,
.card = CX88_BOARD_HAUPPAUGE_NOVASE2_S1,
- },{
+ }, {
.subvendor = 0x0070,
.subdevice = 0x9201,
.card = CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1,
- },{
+ }, {
.subvendor = 0x0070,
.subdevice = 0x9202,
.card = CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1,
- },{
+ }, {
.subvendor = 0x17de,
.subdevice = 0x08b2,
.card = CX88_BOARD_KWORLD_DVBS_100,
- },{
+ }, {
.subvendor = 0x0070,
.subdevice = 0x9400,
.card = CX88_BOARD_HAUPPAUGE_HVR1100,
- },{
+ }, {
.subvendor = 0x0070,
.subdevice = 0x9402,
.card = CX88_BOARD_HAUPPAUGE_HVR1100,
- },{
+ }, {
.subvendor = 0x0070,
.subdevice = 0x9800,
.card = CX88_BOARD_HAUPPAUGE_HVR1100LP,
- },{
+ }, {
.subvendor = 0x0070,
.subdevice = 0x9802,
.card = CX88_BOARD_HAUPPAUGE_HVR1100LP,
- },{
+ }, {
.subvendor = 0x0070,
.subdevice = 0x9001,
.card = CX88_BOARD_HAUPPAUGE_DVB_T1,
- },{
+ }, {
.subvendor = 0x1822,
.subdevice = 0x0025,
.card = CX88_BOARD_DNTV_LIVE_DVB_T_PRO,
- },{
+ }, {
.subvendor = 0x17de,
.subdevice = 0x08a1,
.card = CX88_BOARD_KWORLD_DVB_T_CX22702,
- },{
+ }, {
.subvendor = 0x18ac,
.subdevice = 0xdb50,
.card = CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL,
- },{
+ }, {
.subvendor = 0x18ac,
.subdevice = 0xdb54,
.card = CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL,
/* Re-branded DViCO: DigitalNow DVB-T Dual */
- },{
+ }, {
.subvendor = 0x18ac,
.subdevice = 0xdb11,
.card = CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PLUS,
@@ -2530,55 +2530,55 @@ static const struct cx88_subid cx88_subids[] = {
.subvendor = 0x17de,
.subdevice = 0x0840,
.card = CX88_BOARD_KWORLD_HARDWARE_MPEG_TV_XPERT,
- },{
+ }, {
.subvendor = 0x1421,
.subdevice = 0x0305,
.card = CX88_BOARD_KWORLD_HARDWARE_MPEG_TV_XPERT,
- },{
+ }, {
.subvendor = 0x18ac,
.subdevice = 0xdb40,
.card = CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_HYBRID,
- },{
+ }, {
.subvendor = 0x18ac,
.subdevice = 0xdb44,
.card = CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_HYBRID,
- },{
+ }, {
.subvendor = 0x7063,
.subdevice = 0x5500,
.card = CX88_BOARD_PCHDTV_HD5500,
- },{
+ }, {
.subvendor = 0x17de,
.subdevice = 0x0841,
.card = CX88_BOARD_KWORLD_MCE200_DELUXE,
- },{
+ }, {
.subvendor = 0x1822,
.subdevice = 0x0019,
.card = CX88_BOARD_DNTV_LIVE_DVB_T_PRO,
- },{
+ }, {
.subvendor = 0x1554,
.subdevice = 0x4813,
.card = CX88_BOARD_PIXELVIEW_PLAYTV_P7000,
- },{
+ }, {
.subvendor = 0x14f1,
.subdevice = 0x0842,
.card = CX88_BOARD_NPGTECH_REALTV_TOP10FM,
- },{
+ }, {
.subvendor = 0x107d,
.subdevice = 0x665e,
.card = CX88_BOARD_WINFAST_DTV2000H,
- },{
+ }, {
.subvendor = 0x107d,
.subdevice = 0x6f2b,
.card = CX88_BOARD_WINFAST_DTV2000H_J,
- },{
+ }, {
.subvendor = 0x18ac,
.subdevice = 0xd800, /* FusionHDTV 3 Gold (original revision) */
.card = CX88_BOARD_DVICO_FUSIONHDTV_3_GOLD_Q,
- },{
+ }, {
.subvendor = 0x14f1,
.subdevice = 0x0084,
.card = CX88_BOARD_GENIATECH_DVBS,
- },{
+ }, {
.subvendor = 0x0070,
.subdevice = 0x1404,
.card = CX88_BOARD_HAUPPAUGE_HVR3000,
@@ -2590,60 +2590,60 @@ static const struct cx88_subid cx88_subids[] = {
.subvendor = 0x18ac,
.subdevice = 0xdccd,
.card = CX88_BOARD_SAMSUNG_SMT_7020,
- },{
+ }, {
.subvendor = 0x1461,
.subdevice = 0xc111, /* AverMedia M150-D */
/* This board is known to work with the ASUS PVR416 config */
.card = CX88_BOARD_ASUS_PVR_416,
- },{
+ }, {
.subvendor = 0xc180,
.subdevice = 0xc980,
.card = CX88_BOARD_TE_DTV_250_OEM_SWANN,
- },{
+ }, {
.subvendor = 0x0070,
.subdevice = 0x9600,
.card = CX88_BOARD_HAUPPAUGE_HVR1300,
- },{
+ }, {
.subvendor = 0x0070,
.subdevice = 0x9601,
.card = CX88_BOARD_HAUPPAUGE_HVR1300,
- },{
+ }, {
.subvendor = 0x0070,
.subdevice = 0x9602,
.card = CX88_BOARD_HAUPPAUGE_HVR1300,
- },{
+ }, {
.subvendor = 0x107d,
.subdevice = 0x6632,
.card = CX88_BOARD_LEADTEK_PVR2000,
- },{
+ }, {
.subvendor = 0x12ab,
.subdevice = 0x2300, /* Club3D Zap TV2100 */
.card = CX88_BOARD_KWORLD_DVB_T_CX22702,
- },{
+ }, {
.subvendor = 0x0070,
.subdevice = 0x9000,
.card = CX88_BOARD_HAUPPAUGE_DVB_T1,
- },{
+ }, {
.subvendor = 0x0070,
.subdevice = 0x1400,
.card = CX88_BOARD_HAUPPAUGE_HVR3000,
- },{
+ }, {
.subvendor = 0x0070,
.subdevice = 0x1401,
.card = CX88_BOARD_HAUPPAUGE_HVR3000,
- },{
+ }, {
.subvendor = 0x0070,
.subdevice = 0x1402,
.card = CX88_BOARD_HAUPPAUGE_HVR3000,
- },{
+ }, {
.subvendor = 0x1421,
.subdevice = 0x0341, /* ADS Tech InstantTV DVB-S */
.card = CX88_BOARD_KWORLD_DVBS_100,
- },{
+ }, {
.subvendor = 0x1421,
.subdevice = 0x0390,
.card = CX88_BOARD_ADSTECH_PTV_390,
- },{
+ }, {
.subvendor = 0x11bd,
.subdevice = 0x0051,
.card = CX88_BOARD_PINNACLE_PCTV_HD_800i,
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index ed8cb9037b6f..ce27e6d4f16e 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -696,7 +696,6 @@ static struct videobuf_queue *get_queue(struct file *file)
return &fh->vbiq;
default:
BUG();
- return NULL;
}
}
@@ -711,7 +710,6 @@ static int get_resource(struct file *file)
return RESOURCE_VBI;
default:
BUG();
- return 0;
}
}
@@ -812,7 +810,6 @@ video_read(struct file *file, char __user *data, size_t count, loff_t *ppos)
file->f_flags & O_NONBLOCK);
default:
BUG();
- return 0;
}
}
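
The cx88-video.c hunks above drop return statements that immediately follow BUG(); since BUG() is not expected to return, the compiler treats anything after it as dead code, so the returns in get_queue(), get_resource() and video_read() were never reached. A short plain-C illustration of the same reasoning (not part of the patch; die() and resource_for() are invented names):

#include <stdio.h>
#include <stdlib.h>

static void die(const char *msg) __attribute__((noreturn));

static void die(const char *msg)
{
        fprintf(stderr, "fatal: %s\n", msg);
        abort();                        /* never returns, much like BUG() */
}

static int resource_for(int type)
{
        switch (type) {
        case 0:
                return 1;               /* e.g. a video resource */
        case 1:
                return 2;               /* e.g. a VBI resource */
        default:
                die("unknown stream type");
                /* unreachable: no trailing return statement required */
        }
}

int main(void)
{
        printf("%d\n", resource_for(0));
        return 0;
}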
diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c
index da8f848be3b8..c82e855a0814 100644
--- a/drivers/media/pci/ddbridge/ddbridge-core.c
+++ b/drivers/media/pci/ddbridge/ddbridge-core.c
@@ -149,7 +149,7 @@ static u32 ddb_i2c_functionality(struct i2c_adapter *adap)
return I2C_FUNC_SMBUS_EMUL;
}
-struct i2c_algorithm ddb_i2c_algo = {
+static struct i2c_algorithm ddb_i2c_algo = {
.master_xfer = ddb_i2c_master_xfer,
.functionality = ddb_i2c_functionality,
};
@@ -266,7 +266,7 @@ static void io_free(struct pci_dev *pdev, u8 **vbuf,
for (i = 0; i < num; i++) {
if (vbuf[i]) {
pci_free_consistent(pdev, size, vbuf[i], pbuf[i]);
- vbuf[i] = 0;
+ vbuf[i] = NULL;
}
}
}
@@ -440,7 +440,7 @@ static u32 ddb_output_free(struct ddb_output *output)
}
static ssize_t ddb_output_write(struct ddb_output *output,
- const u8 *buf, size_t count)
+ const __user u8 *buf, size_t count)
{
struct ddb *dev = output->port->dev;
u32 idx, off, stat = output->stat;
@@ -506,7 +506,7 @@ static u32 ddb_input_avail(struct ddb_input *input)
return 0;
}
-static ssize_t ddb_input_read(struct ddb_input *input, u8 *buf, size_t count)
+static ssize_t ddb_input_read(struct ddb_input *input, __user u8 *buf, size_t count)
{
struct ddb *dev = input->port->dev;
u32 left = count;
@@ -849,7 +849,7 @@ static int dvb_input_attach(struct ddb_input *input)
return ret;
input->attached = 4;
- input->fe = 0;
+ input->fe = NULL;
switch (port->type) {
case DDB_TUNER_DVBS_ST:
if (demod_attach_stv0900(input, 0) < 0)
@@ -895,7 +895,7 @@ static int dvb_input_attach(struct ddb_input *input)
/****************************************************************************/
/****************************************************************************/
-static ssize_t ts_write(struct file *file, const char *buf,
+static ssize_t ts_write(struct file *file, const __user char *buf,
size_t count, loff_t *ppos)
{
struct dvb_device *dvbdev = file->private_data;
@@ -920,7 +920,7 @@ static ssize_t ts_write(struct file *file, const char *buf,
return (left == count) ? -EAGAIN : (count - left);
}
-static ssize_t ts_read(struct file *file, char *buf,
+static ssize_t ts_read(struct file *file, __user char *buf,
size_t count, loff_t *ppos)
{
struct dvb_device *dvbdev = file->private_data;
@@ -975,11 +975,9 @@ static const struct file_operations ci_fops = {
.open = dvb_generic_open,
.release = dvb_generic_release,
.poll = ts_poll,
- .mmap = 0,
};
static struct dvb_device dvbdev_ci = {
- .priv = 0,
.readers = -1,
.writers = -1,
.users = -1,
@@ -1038,7 +1036,7 @@ static void output_tasklet(unsigned long data)
}
-struct cxd2099_cfg cxd_cfg = {
+static struct cxd2099_cfg cxd_cfg = {
.bitrate = 62000,
.adr = 0x40,
.polarity = 1,
@@ -1127,7 +1125,7 @@ static void ddb_ports_detach(struct ddb *dev)
ddb_output_stop(port->output);
dvb_ca_en50221_release(port->en);
kfree(port->en);
- port->en = 0;
+ port->en = NULL;
dvb_unregister_adapter(&port->output->adap);
}
break;
@@ -1413,9 +1411,9 @@ static int flashio(struct ddb *dev, u8 *wbuf, u32 wlen, u8 *rbuf, u32 rlen)
#define DDB_MAGIC 'd'
struct ddb_flashio {
- __u8 *write_buf;
+ __user __u8 *write_buf;
__u32 write_len;
- __u8 *read_buf;
+ __user __u8 *read_buf;
__u32 read_len;
};
@@ -1439,7 +1437,7 @@ static int ddb_open(struct inode *inode, struct file *file)
static long ddb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct ddb *dev = file->private_data;
- void *parg = (void *)arg;
+ __user void *parg = (__user void *)arg;
int res;
switch (cmd) {
@@ -1558,7 +1556,7 @@ static void ddb_remove(struct pci_dev *pdev)
ddb_device_destroy(dev);
ddb_unmap(dev);
- pci_set_drvdata(pdev, 0);
+ pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
}
@@ -1637,7 +1635,7 @@ fail1:
fail:
printk(KERN_ERR "fail\n");
ddb_unmap(dev);
- pci_set_drvdata(pdev, 0);
+ pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
return -1;
}
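
The ddbridge-core.c hunks above tag the user-space buffer pointers (ts_read()/ts_write(), the flashio buffers and the ioctl argument) with __user: such a pointer refers to the caller's address space, sparse warns when it is dereferenced directly, and data only crosses the boundary through copy_from_user()/copy_to_user() or similar helpers. A minimal sketch of that pattern, not taken from this driver, with invented names (demo_args, demo_ioctl_rw):

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct demo_args {
        __u32 reg;
        __u32 val;
};

static long demo_ioctl_rw(void __user *buf)
{
        struct demo_args a;

        if (copy_from_user(&a, buf, sizeof(a)))         /* user -> kernel */
                return -EFAULT;
        a.val ^= 0xffffffff;                            /* stand-in for real work */
        if (copy_to_user(buf, &a, sizeof(a)))           /* kernel -> user */
                return -EFAULT;
        return 0;
}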
diff --git a/drivers/media/pci/ddbridge/ddbridge.h b/drivers/media/pci/ddbridge/ddbridge.h
index 8b1b41d2a52d..be87fbd90456 100644
--- a/drivers/media/pci/ddbridge/ddbridge.h
+++ b/drivers/media/pci/ddbridge/ddbridge.h
@@ -156,7 +156,7 @@ struct ddb_port {
struct ddb {
struct pci_dev *pdev;
- unsigned char *regs;
+ unsigned char __iomem *regs;
struct ddb_port port[DDB_MAX_PORT];
struct ddb_i2c i2c[DDB_MAX_I2C];
struct ddb_input input[DDB_MAX_INPUT];
@@ -173,12 +173,10 @@ struct ddb {
/****************************************************************************/
#define ddbwritel(_val, _adr) writel((_val), \
- (char *) (dev->regs+(_adr)))
-#define ddbreadl(_adr) readl((char *) (dev->regs+(_adr)))
-#define ddbcpyto(_adr, _src, _count) memcpy_toio((char *) \
- (dev->regs+(_adr)), (_src), (_count))
-#define ddbcpyfrom(_dst, _adr, _count) memcpy_fromio((_dst), (char *) \
- (dev->regs+(_adr)), (_count))
+ dev->regs+(_adr))
+#define ddbreadl(_adr) readl(dev->regs+(_adr))
+#define ddbcpyto(_adr, _src, _count) memcpy_toio(dev->regs+(_adr), (_src), (_count))
+#define ddbcpyfrom(_dst, _adr, _count) memcpy_fromio((_dst), dev->regs+(_adr), (_count))
/****************************************************************************/
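
The ddbridge.h hunk above is the matching change on the MMIO side: regs becomes unsigned char __iomem *, so the (char *) casts can be dropped from ddbwritel()/ddbreadl()/ddbcpyto()/ddbcpyfrom(). An __iomem pointer is a cookie from ioremap()/pci_iomap() that must only be used with readl(), writel(), memcpy_toio() and friends, which sparse can then verify. A rough sketch of the convention, with invented names (demo_dev, demo_map_bar0) and made-up register offsets:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/pci.h>

struct demo_dev {
        void __iomem *regs;             /* BAR0 mapping, never dereferenced directly */
};

static int demo_map_bar0(struct demo_dev *d, struct pci_dev *pdev)
{
        d->regs = pci_iomap(pdev, 0, 0);        /* map all of BAR0 */
        if (!d->regs)
                return -ENOMEM;

        writel(0x1, d->regs + 0x10);            /* hypothetical control register */
        if (!(readl(d->regs + 0x14) & 0x1))     /* hypothetical status register */
                return -EIO;
        return 0;
}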
diff --git a/drivers/media/pci/dm1105/dm1105.c b/drivers/media/pci/dm1105/dm1105.c
index e8826c535ccd..ed11716731e9 100644
--- a/drivers/media/pci/dm1105/dm1105.c
+++ b/drivers/media/pci/dm1105/dm1105.c
@@ -614,7 +614,7 @@ static int dm1105_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
static void dm1105_set_dma_addr(struct dm1105_dev *dev)
{
- dm_writel(DM1105_STADR, cpu_to_le32(dev->dma_addr));
+ dm_writel(DM1105_STADR, (__force u32)cpu_to_le32(dev->dma_addr));
}
static int dm1105_dma_map(struct dm1105_dev *dev)
diff --git a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
index 7a9b98bc208b..7bf9cbca4fa6 100644
--- a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
+++ b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
@@ -81,7 +81,7 @@ static void ivtv_alsa_announce_pcm_data(struct snd_ivtv_card *itvsc,
int period_elapsed = 0;
int length;
- dprintk("ivtv alsa announce ptr=%p data=%p num_bytes=%zd\n", itvsc,
+ dprintk("ivtv alsa announce ptr=%p data=%p num_bytes=%zu\n", itvsc,
pcm_data, num_bytes);
substream = itvsc->capture_pcm_substream;
diff --git a/drivers/media/pci/ivtv/ivtv-firmware.c b/drivers/media/pci/ivtv/ivtv-firmware.c
index ed73edd2bcd3..4b0e758a7bce 100644
--- a/drivers/media/pci/ivtv/ivtv-firmware.c
+++ b/drivers/media/pci/ivtv/ivtv-firmware.c
@@ -65,7 +65,7 @@ retry:
the wrong file was sometimes loaded. So we check filesizes to
see if at least the right-sized file was loaded. If not, then we
retry. */
- IVTV_INFO("Retry: file loaded was not %s (expected size %ld, got %zd)\n", fn, size, fw->size);
+ IVTV_INFO("Retry: file loaded was not %s (expected size %ld, got %zu)\n", fn, size, fw->size);
release_firmware(fw);
retries--;
goto retry;
@@ -76,7 +76,7 @@ retry:
dst++;
src++;
}
- IVTV_INFO("Loaded %s firmware (%zd bytes)\n", fn, fw->size);
+ IVTV_INFO("Loaded %s firmware (%zu bytes)\n", fn, fw->size);
release_firmware(fw);
return size;
}
diff --git a/drivers/media/pci/ivtv/ivtv-irq.c b/drivers/media/pci/ivtv/ivtv-irq.c
index 19a7c9b990a3..ab6d5d25aa6f 100644
--- a/drivers/media/pci/ivtv/ivtv-irq.c
+++ b/drivers/media/pci/ivtv/ivtv-irq.c
@@ -192,11 +192,11 @@ static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MA
if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
s->pending_backup = read_dec(offset - IVTV_DECODER_OFFSET);
- write_dec_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset - IVTV_DECODER_OFFSET);
+ write_dec_sync(DMA_MAGIC_COOKIE, offset - IVTV_DECODER_OFFSET);
}
else {
s->pending_backup = read_enc(offset);
- write_enc_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset);
+ write_enc_sync(DMA_MAGIC_COOKIE, offset);
}
s->pending_offset = offset;
}
@@ -275,13 +275,11 @@ static void dma_post(struct ivtv_stream *s)
if (x == 0 && ivtv_use_dma(s)) {
offset = s->dma_last_offset;
- if (u32buf[offset / 4] != DMA_MAGIC_COOKIE)
+ if (le32_to_cpu(u32buf[offset / 4]) != DMA_MAGIC_COOKIE)
{
- for (offset = 0; offset < 64; offset++) {
- if (u32buf[offset] == DMA_MAGIC_COOKIE) {
+ for (offset = 0; offset < 64; offset++)
+ if (le32_to_cpu(u32buf[offset]) == DMA_MAGIC_COOKIE)
break;
- }
- }
offset *= 4;
if (offset == 256) {
IVTV_DEBUG_WARN("%s: Couldn't find start of buffer within the first 256 bytes\n", s->name);
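
The ivtv-irq.c hunk above moves the byte-order handling to the read side: the magic cookie is now written in CPU order (the write_enc_sync()/write_dec_sync() helpers presumably convert on the way out), and the little-endian words read back from the DMA buffer are converted with le32_to_cpu() before comparing. The general convention this follows is to type device-visible 32-bit fields as __le32 and convert exactly once at the boundary; the sketch below uses invented names (demo_desc, demo_fill, demo_check) and an arbitrary magic value.

#include <asm/byteorder.h>
#include <linux/types.h>

#define DEMO_MAGIC 0x12345678

struct demo_desc {
        __le32 magic;                   /* the device reads this as little-endian */
        __le32 length;
};

static void demo_fill(struct demo_desc *d, u32 len)
{
        d->magic  = cpu_to_le32(DEMO_MAGIC);    /* CPU order -> device order */
        d->length = cpu_to_le32(len);
}

static bool demo_check(const struct demo_desc *d)
{
        return le32_to_cpu(d->magic) == DEMO_MAGIC;     /* device -> CPU order */
}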
diff --git a/drivers/media/pci/mantis/hopper_vp3028.c b/drivers/media/pci/mantis/hopper_vp3028.c
index 68a29f8bdf73..1032db6bb789 100644
--- a/drivers/media/pci/mantis/hopper_vp3028.c
+++ b/drivers/media/pci/mantis/hopper_vp3028.c
@@ -34,7 +34,7 @@
#include "mantis_dvb.h"
#include "hopper_vp3028.h"
-struct zl10353_config hopper_vp3028_config = {
+static struct zl10353_config hopper_vp3028_config = {
.demod_address = 0x0f,
};
diff --git a/drivers/media/pci/mantis/mantis_common.h b/drivers/media/pci/mantis/mantis_common.h
index f2410cf0a6bf..8ff448bb792d 100644
--- a/drivers/media/pci/mantis/mantis_common.h
+++ b/drivers/media/pci/mantis/mantis_common.h
@@ -127,7 +127,7 @@ struct mantis_pci {
u32 last_block;
u8 *buf_cpu;
dma_addr_t buf_dma;
- u32 *risc_cpu;
+ __le32 *risc_cpu;
dma_addr_t risc_dma;
struct tasklet_struct tasklet;
diff --git a/drivers/media/pci/mantis/mantis_vp1033.c b/drivers/media/pci/mantis/mantis_vp1033.c
index 115003e8d19d..12a6adb2bd7e 100644
--- a/drivers/media/pci/mantis/mantis_vp1033.c
+++ b/drivers/media/pci/mantis/mantis_vp1033.c
@@ -35,7 +35,7 @@
#include "mantis_vp1033.h"
#include "mantis_reg.h"
-u8 lgtdqcs001f_inittab[] = {
+static u8 lgtdqcs001f_inittab[] = {
0x01, 0x15,
0x02, 0x30,
0x03, 0x00,
@@ -150,7 +150,7 @@ static int lgtdqcs001f_set_symbol_rate(struct dvb_frontend *fe,
return 0;
}
-struct stv0299_config lgtdqcs001f_config = {
+static struct stv0299_config lgtdqcs001f_config = {
.demod_address = 0x68,
.inittab = lgtdqcs001f_inittab,
.mclk = 88000000UL,
diff --git a/drivers/media/pci/mantis/mantis_vp1034.c b/drivers/media/pci/mantis/mantis_vp1034.c
index 430ae84ce528..7c1bd167225c 100644
--- a/drivers/media/pci/mantis/mantis_vp1034.c
+++ b/drivers/media/pci/mantis/mantis_vp1034.c
@@ -36,7 +36,7 @@
#include "mantis_vp1034.h"
#include "mantis_reg.h"
-struct mb86a16_config vp1034_mb86a16_config = {
+static struct mb86a16_config vp1034_mb86a16_config = {
.demod_address = 0x08,
.set_voltage = vp1034_set_voltage,
};
diff --git a/drivers/media/pci/mantis/mantis_vp1041.c b/drivers/media/pci/mantis/mantis_vp1041.c
index 07a20748b707..7082fcbc94a1 100644
--- a/drivers/media/pci/mantis/mantis_vp1041.c
+++ b/drivers/media/pci/mantis/mantis_vp1041.c
@@ -263,7 +263,7 @@ static const struct stb0899_s1_reg vp1041_stb0899_s1_init_3[] = {
{ 0xffff , 0xff },
};
-struct stb0899_config vp1041_stb0899_config = {
+static struct stb0899_config vp1041_stb0899_config = {
.init_dev = vp1041_stb0899_s1_init_1,
.init_s2_demod = stb0899_s2_init_2,
.init_s1_demod = vp1041_stb0899_s1_init_3,
@@ -300,7 +300,7 @@ struct stb0899_config vp1041_stb0899_config = {
.tuner_set_rfsiggain = NULL,
};
-struct stb6100_config vp1041_stb6100_config = {
+static struct stb6100_config vp1041_stb6100_config = {
.tuner_address = 0x60,
.refclock = 27000000,
};
diff --git a/drivers/media/pci/mantis/mantis_vp2033.c b/drivers/media/pci/mantis/mantis_vp2033.c
index 1ca6837fbe46..8d48b5abe04a 100644
--- a/drivers/media/pci/mantis/mantis_vp2033.c
+++ b/drivers/media/pci/mantis/mantis_vp2033.c
@@ -37,12 +37,12 @@
#define MANTIS_MODEL_NAME "VP-2033"
#define MANTIS_DEV_TYPE "DVB-C"
-struct tda1002x_config vp2033_tda1002x_cu1216_config = {
+static struct tda1002x_config vp2033_tda1002x_cu1216_config = {
.demod_address = 0x18 >> 1,
.invert = 1,
};
-struct tda10023_config vp2033_tda10023_cu1216_config = {
+static struct tda10023_config vp2033_tda10023_cu1216_config = {
.demod_address = 0x18 >> 1,
.invert = 1,
};
diff --git a/drivers/media/pci/mantis/mantis_vp2040.c b/drivers/media/pci/mantis/mantis_vp2040.c
index d480741afd78..8dd17d7c0881 100644
--- a/drivers/media/pci/mantis/mantis_vp2040.c
+++ b/drivers/media/pci/mantis/mantis_vp2040.c
@@ -37,12 +37,12 @@
#define MANTIS_MODEL_NAME "VP-2040"
#define MANTIS_DEV_TYPE "DVB-C"
-struct tda1002x_config vp2040_tda1002x_cu1216_config = {
+static struct tda1002x_config vp2040_tda1002x_cu1216_config = {
.demod_address = 0x18 >> 1,
.invert = 1,
};
-struct tda10023_config vp2040_tda10023_cu1216_config = {
+static struct tda10023_config vp2040_tda10023_cu1216_config = {
.demod_address = 0x18 >> 1,
.invert = 1,
};
diff --git a/drivers/media/pci/mantis/mantis_vp3030.c b/drivers/media/pci/mantis/mantis_vp3030.c
index c09308cd3ac6..5c1dd925bdd5 100644
--- a/drivers/media/pci/mantis/mantis_vp3030.c
+++ b/drivers/media/pci/mantis/mantis_vp3030.c
@@ -35,11 +35,11 @@
#include "mantis_dvb.h"
#include "mantis_vp3030.h"
-struct zl10353_config mantis_vp3030_config = {
+static struct zl10353_config mantis_vp3030_config = {
.demod_address = 0x0f,
};
-struct tda665x_config env57h12d5_config = {
+static struct tda665x_config env57h12d5_config = {
.name = "ENV57H12D5 (ET-50DT)",
.addr = 0x60,
.frequency_min = 47000000,
diff --git a/drivers/media/pci/ngene/ngene-cards.c b/drivers/media/pci/ngene/ngene-cards.c
index 9e82d2105d53..039bed3cc919 100644
--- a/drivers/media/pci/ngene/ngene-cards.c
+++ b/drivers/media/pci/ngene/ngene-cards.c
@@ -696,7 +696,7 @@ static struct ngene_info ngene_info_m780 = {
.demod_attach = { NULL, demod_attach_lg330x },
/* Ensure these are NULL else the frame will call them (as funcs) */
- .tuner_attach = { 0, 0, 0, 0 },
+ .tuner_attach = { NULL, NULL, NULL, NULL },
.fe_config = { NULL, &aver_m780 },
.avf = { 0 },
diff --git a/drivers/media/pci/ngene/ngene-core.c b/drivers/media/pci/ngene/ngene-core.c
index 4930b55fd5f4..e29bc3af4baf 100644
--- a/drivers/media/pci/ngene/ngene-core.c
+++ b/drivers/media/pci/ngene/ngene-core.c
@@ -57,15 +57,13 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
#define dprintk if (debug) printk
-#define ngwriteb(dat, adr) writeb((dat), (char *)(dev->iomem + (adr)))
-#define ngwritel(dat, adr) writel((dat), (char *)(dev->iomem + (adr)))
-#define ngwriteb(dat, adr) writeb((dat), (char *)(dev->iomem + (adr)))
+#define ngwriteb(dat, adr) writeb((dat), dev->iomem + (adr))
+#define ngwritel(dat, adr) writel((dat), dev->iomem + (adr))
+#define ngwriteb(dat, adr) writeb((dat), dev->iomem + (adr))
#define ngreadl(adr) readl(dev->iomem + (adr))
#define ngreadb(adr) readb(dev->iomem + (adr))
-#define ngcpyto(adr, src, count) memcpy_toio((char *) \
- (dev->iomem + (adr)), (src), (count))
-#define ngcpyfrom(dst, adr, count) memcpy_fromio((dst), (char *) \
- (dev->iomem + (adr)), (count))
+#define ngcpyto(adr, src, count) memcpy_toio(dev->iomem + (adr), (src), (count))
+#define ngcpyfrom(dst, adr, count) memcpy_fromio((dst), dev->iomem + (adr), (count))
/****************************************************************************/
/* nGene interrupt handler **************************************************/
@@ -1592,7 +1590,7 @@ static void cxd_detach(struct ngene *dev)
dvb_ca_en50221_release(ci->en);
kfree(ci->en);
- ci->en = 0;
+ ci->en = NULL;
}
/***********************************/
diff --git a/drivers/media/pci/ngene/ngene-dvb.c b/drivers/media/pci/ngene/ngene-dvb.c
index fcb16a615aab..59bb2858c8d0 100644
--- a/drivers/media/pci/ngene/ngene-dvb.c
+++ b/drivers/media/pci/ngene/ngene-dvb.c
@@ -47,7 +47,7 @@
/* COMMAND API interface ****************************************************/
/****************************************************************************/
-static ssize_t ts_write(struct file *file, const char *buf,
+static ssize_t ts_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct dvb_device *dvbdev = file->private_data;
@@ -59,12 +59,12 @@ static ssize_t ts_write(struct file *file, const char *buf,
(&dev->tsout_rbuf) >= count) < 0)
return 0;
- dvb_ringbuffer_write(&dev->tsout_rbuf, buf, count);
+ dvb_ringbuffer_write_user(&dev->tsout_rbuf, buf, count);
return count;
}
-static ssize_t ts_read(struct file *file, char *buf,
+static ssize_t ts_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct dvb_device *dvbdev = file->private_data;
@@ -97,7 +97,6 @@ static const struct file_operations ci_fops = {
};
struct dvb_device ngene_dvbdev_ci = {
- .priv = 0,
.readers = -1,
.writers = -1,
.users = -1,
diff --git a/drivers/media/pci/ngene/ngene.h b/drivers/media/pci/ngene/ngene.h
index 22c39ff6bfa0..51e2fbd18b1b 100644
--- a/drivers/media/pci/ngene/ngene.h
+++ b/drivers/media/pci/ngene/ngene.h
@@ -737,7 +737,7 @@ typedef void (tx_cb_t)(struct ngene *, u32);
struct ngene {
int nr;
struct pci_dev *pci_dev;
- unsigned char *iomem;
+ unsigned char __iomem *iomem;
/*struct i2c_adapter i2c_adapter;*/
diff --git a/drivers/media/pci/pt3/Kconfig b/drivers/media/pci/pt3/Kconfig
new file mode 100644
index 000000000000..16c208ae0079
--- /dev/null
+++ b/drivers/media/pci/pt3/Kconfig
@@ -0,0 +1,10 @@
+config DVB_PT3
+ tristate "Earthsoft PT3 cards"
+ depends on DVB_CORE && PCI && I2C
+ select DVB_TC90522 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_QM1D1C0042 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_MXL301RF if MEDIA_SUBDRV_AUTOSELECT
+ help
+ Support for Earthsoft PT3 PCIe cards.
+
+ Say Y or M if you own such a device and want to use it.
diff --git a/drivers/media/pci/pt3/Makefile b/drivers/media/pci/pt3/Makefile
new file mode 100644
index 000000000000..396f146b1c18
--- /dev/null
+++ b/drivers/media/pci/pt3/Makefile
@@ -0,0 +1,8 @@
+
+earth-pt3-objs += pt3.o pt3_i2c.o pt3_dma.o
+
+obj-$(CONFIG_DVB_PT3) += earth-pt3.o
+
+ccflags-y += -Idrivers/media/dvb-core
+ccflags-y += -Idrivers/media/dvb-frontends
+ccflags-y += -Idrivers/media/tuners
diff --git a/drivers/media/pci/pt3/pt3.c b/drivers/media/pci/pt3/pt3.c
new file mode 100644
index 000000000000..1fdeac11501a
--- /dev/null
+++ b/drivers/media/pci/pt3/pt3.c
@@ -0,0 +1,876 @@
+/*
+ * Earthsoft PT3 driver
+ *
+ * Copyright (C) 2014 Akihiro Tsukada <tskd08@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/freezer.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+
+#include "pt3.h"
+
+DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
+
+static bool one_adapter;
+module_param(one_adapter, bool, 0444);
+MODULE_PARM_DESC(one_adapter, "Place FEs together under one adapter.");
+
+static int num_bufs = 4;
+module_param(num_bufs, int, 0444);
+MODULE_PARM_DESC(num_bufs, "Number of DMA buffers (188KiB) per FE.");
+
+
+static const struct i2c_algorithm pt3_i2c_algo = {
+ .master_xfer = &pt3_i2c_master_xfer,
+ .functionality = &pt3_i2c_functionality,
+};
+
+static const struct pt3_adap_config adap_conf[PT3_NUM_FE] = {
+ {
+ .demod_info = {
+ I2C_BOARD_INFO(TC90522_I2C_DEV_SAT, 0x11),
+ },
+ .tuner_info = {
+ I2C_BOARD_INFO("qm1d1c0042", 0x63),
+ },
+ .tuner_cfg.qm1d1c0042 = {
+ .lpf = 1,
+ },
+ .init_freq = 1049480 - 300,
+ },
+ {
+ .demod_info = {
+ I2C_BOARD_INFO(TC90522_I2C_DEV_TER, 0x10),
+ },
+ .tuner_info = {
+ I2C_BOARD_INFO("mxl301rf", 0x62),
+ },
+ .init_freq = 515142857,
+ },
+ {
+ .demod_info = {
+ I2C_BOARD_INFO(TC90522_I2C_DEV_SAT, 0x13),
+ },
+ .tuner_info = {
+ I2C_BOARD_INFO("qm1d1c0042", 0x60),
+ },
+ .tuner_cfg.qm1d1c0042 = {
+ .lpf = 1,
+ },
+ .init_freq = 1049480 + 300,
+ },
+ {
+ .demod_info = {
+ I2C_BOARD_INFO(TC90522_I2C_DEV_TER, 0x12),
+ },
+ .tuner_info = {
+ I2C_BOARD_INFO("mxl301rf", 0x61),
+ },
+ .init_freq = 521142857,
+ },
+};
+
+
+struct reg_val {
+ u8 reg;
+ u8 val;
+};
+
+static int
+pt3_demod_write(struct pt3_adapter *adap, const struct reg_val *data, int num)
+{
+ struct i2c_msg msg;
+ int i, ret;
+
+ ret = 0;
+ msg.addr = adap->i2c_demod->addr;
+ msg.flags = 0;
+ msg.len = 2;
+ for (i = 0; i < num; i++) {
+ msg.buf = (u8 *)&data[i];
+ ret = i2c_transfer(adap->i2c_demod->adapter, &msg, 1);
+ if (ret == 0)
+ ret = -EREMOTE;
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+static inline void pt3_lnb_ctrl(struct pt3_board *pt3, bool on)
+{
+ iowrite32((on ? 0x0f : 0x0c), pt3->regs[0] + REG_SYSTEM_W);
+}
+
+static inline struct pt3_adapter *pt3_find_adapter(struct dvb_frontend *fe)
+{
+ struct pt3_board *pt3;
+ int i;
+
+ if (one_adapter) {
+ pt3 = fe->dvb->priv;
+ for (i = 0; i < PT3_NUM_FE; i++)
+ if (pt3->adaps[i]->fe == fe)
+ return pt3->adaps[i];
+ }
+ return container_of(fe->dvb, struct pt3_adapter, dvb_adap);
+}
+
+/*
+ * All 4 tuners in the PT3 are packaged in one can module (Sharp VA4M6JC2103).
+ * They seem to share the tuner power line and the amp power line, and
+ * adaps[3] controls both of them.
+ */
+static int
+pt3_set_tuner_power(struct pt3_board *pt3, bool tuner_on, bool amp_on)
+{
+ struct reg_val rv = { 0x1e, 0x99 };
+
+ if (tuner_on)
+ rv.val |= 0x40;
+ if (amp_on)
+ rv.val |= 0x04;
+ return pt3_demod_write(pt3->adaps[PT3_NUM_FE - 1], &rv, 1);
+}
+
+static int pt3_set_lna(struct dvb_frontend *fe)
+{
+ struct pt3_adapter *adap;
+ struct pt3_board *pt3;
+ u32 val;
+ int ret;
+
+ /* The LNA is shared between the 2 TERR-tuners */
+
+ adap = pt3_find_adapter(fe);
+ val = fe->dtv_property_cache.lna;
+ if (val == LNA_AUTO || val == adap->cur_lna)
+ return 0;
+
+ pt3 = adap->dvb_adap.priv;
+ if (mutex_lock_interruptible(&pt3->lock))
+ return -ERESTARTSYS;
+ if (val)
+ pt3->lna_on_cnt++;
+ else
+ pt3->lna_on_cnt--;
+
+ if (val && pt3->lna_on_cnt <= 1) {
+ pt3->lna_on_cnt = 1;
+ ret = pt3_set_tuner_power(pt3, true, true);
+ } else if (!val && pt3->lna_on_cnt <= 0) {
+ pt3->lna_on_cnt = 0;
+ ret = pt3_set_tuner_power(pt3, true, false);
+ } else
+ ret = 0;
+ mutex_unlock(&pt3->lock);
+ adap->cur_lna = (val != 0);
+ return ret;
+}
+
+static int pt3_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t volt)
+{
+ struct pt3_adapter *adap;
+ struct pt3_board *pt3;
+ bool on;
+
+ /* LNB power is shared between the 2 SAT-tuners */
+
+ adap = pt3_find_adapter(fe);
+ on = (volt != SEC_VOLTAGE_OFF);
+ if (on == adap->cur_lnb)
+ return 0;
+ adap->cur_lnb = on;
+ pt3 = adap->dvb_adap.priv;
+ if (mutex_lock_interruptible(&pt3->lock))
+ return -ERESTARTSYS;
+ if (on)
+ pt3->lnb_on_cnt++;
+ else
+ pt3->lnb_on_cnt--;
+
+ if (on && pt3->lnb_on_cnt <= 1) {
+ pt3->lnb_on_cnt = 1;
+ pt3_lnb_ctrl(pt3, true);
+ } else if (!on && pt3->lnb_on_cnt <= 0) {
+ pt3->lnb_on_cnt = 0;
+ pt3_lnb_ctrl(pt3, false);
+ }
+ mutex_unlock(&pt3->lock);
+ return 0;
+}
+
+/* register values used in pt3_fe_init() */
+
+static const struct reg_val init0_sat[] = {
+ { 0x03, 0x01 },
+ { 0x1e, 0x10 },
+};
+static const struct reg_val init0_ter[] = {
+ { 0x01, 0x40 },
+ { 0x1c, 0x10 },
+};
+static const struct reg_val cfg_sat[] = {
+ { 0x1c, 0x15 },
+ { 0x1f, 0x04 },
+};
+static const struct reg_val cfg_ter[] = {
+ { 0x1d, 0x01 },
+};
+
+/*
+ * pt3_fe_init: initialize demod sub modules and ISDB-T tuners all at once.
+ *
+ * For the demod IC (TC90522) and the ISDB-T tuners (MxL301RF),
+ * the i2c sequences for initializing them are not public; they are hidden
+ * in a ROM and also include the board-specific configuration.
+ * They are stored in one lump and cannot be taken out or accessed separately,
+ * and thus cannot be moved to the FE/tuner drivers.
+ */
+static int pt3_fe_init(struct pt3_board *pt3)
+{
+ int i, ret;
+ struct dvb_frontend *fe;
+
+ pt3_i2c_reset(pt3);
+ ret = pt3_init_all_demods(pt3);
+ if (ret < 0) {
+ dev_warn(&pt3->pdev->dev, "Failed to init demod chips.");
+ return ret;
+ }
+
+ /* additional config? */
+ for (i = 0; i < PT3_NUM_FE; i++) {
+ fe = pt3->adaps[i]->fe;
+
+ if (fe->ops.delsys[0] == SYS_ISDBS)
+ ret = pt3_demod_write(pt3->adaps[i],
+ init0_sat, ARRAY_SIZE(init0_sat));
+ else
+ ret = pt3_demod_write(pt3->adaps[i],
+ init0_ter, ARRAY_SIZE(init0_ter));
+ if (ret < 0) {
+ dev_warn(&pt3->pdev->dev,
+ "demod[%d] faild in init sequence0.", i);
+ return ret;
+ }
+ ret = fe->ops.init(fe);
+ if (ret < 0)
+ return ret;
+ }
+
+ usleep_range(2000, 4000);
+ ret = pt3_set_tuner_power(pt3, true, false);
+ if (ret < 0) {
+ dev_warn(&pt3->pdev->dev, "Failed to control tuner module.");
+ return ret;
+ }
+
+ /* output pin configuration */
+ for (i = 0; i < PT3_NUM_FE; i++) {
+ fe = pt3->adaps[i]->fe;
+ if (fe->ops.delsys[0] == SYS_ISDBS)
+ ret = pt3_demod_write(pt3->adaps[i],
+ cfg_sat, ARRAY_SIZE(cfg_sat));
+ else
+ ret = pt3_demod_write(pt3->adaps[i],
+ cfg_ter, ARRAY_SIZE(cfg_ter));
+ if (ret < 0) {
+ dev_warn(&pt3->pdev->dev,
+ "demod[%d] faild in init sequence1.", i);
+ return ret;
+ }
+ }
+ usleep_range(4000, 6000);
+
+ for (i = 0; i < PT3_NUM_FE; i++) {
+ fe = pt3->adaps[i]->fe;
+ if (fe->ops.delsys[0] != SYS_ISDBS)
+ continue;
+ /* init and wake-up ISDB-S tuners */
+ ret = fe->ops.tuner_ops.init(fe);
+ if (ret < 0) {
+ dev_warn(&pt3->pdev->dev,
+ "Failed to init SAT-tuner[%d].", i);
+ return ret;
+ }
+ }
+ ret = pt3_init_all_mxl301rf(pt3);
+ if (ret < 0) {
+ dev_warn(&pt3->pdev->dev, "Failed to init TERR-tuners.");
+ return ret;
+ }
+
+ ret = pt3_set_tuner_power(pt3, true, true);
+ if (ret < 0) {
+ dev_warn(&pt3->pdev->dev, "Failed to control tuner module.");
+ return ret;
+ }
+
+ /* Wake up all tuners and make an initial tuning,
+ * in order to avoid interference among the tuners in the module,
+ * according to the doc from the manufacturer.
+ */
+ for (i = 0; i < PT3_NUM_FE; i++) {
+ fe = pt3->adaps[i]->fe;
+ ret = 0;
+ if (fe->ops.delsys[0] == SYS_ISDBT)
+ ret = fe->ops.tuner_ops.init(fe);
+ /* set only when called from pt3_probe(), not resume() */
+ if (ret == 0 && fe->dtv_property_cache.frequency == 0) {
+ fe->dtv_property_cache.frequency =
+ adap_conf[i].init_freq;
+ ret = fe->ops.tuner_ops.set_params(fe);
+ }
+ if (ret < 0) {
+ dev_warn(&pt3->pdev->dev,
+ "Failed in initial tuning of tuner[%d].", i);
+ return ret;
+ }
+ }
+
+ /* and sleep again, waiting to be opened by users. */
+ for (i = 0; i < PT3_NUM_FE; i++) {
+ fe = pt3->adaps[i]->fe;
+ if (fe->ops.tuner_ops.sleep)
+ ret = fe->ops.tuner_ops.sleep(fe);
+ if (ret < 0)
+ break;
+ if (fe->ops.sleep)
+ ret = fe->ops.sleep(fe);
+ if (ret < 0)
+ break;
+ if (fe->ops.delsys[0] == SYS_ISDBS)
+ fe->ops.set_voltage = &pt3_set_voltage;
+ else
+ fe->ops.set_lna = &pt3_set_lna;
+ }
+ if (i < PT3_NUM_FE) {
+ dev_warn(&pt3->pdev->dev, "FE[%d] failed to standby.", i);
+ return ret;
+ }
+ return 0;
+}
+
+
+static int pt3_attach_fe(struct pt3_board *pt3, int i)
+{
+ struct i2c_board_info info;
+ struct tc90522_config cfg;
+ struct i2c_client *cl;
+ struct dvb_adapter *dvb_adap;
+ int ret;
+
+ info = adap_conf[i].demod_info;
+ cfg = adap_conf[i].demod_cfg;
+ cfg.tuner_i2c = NULL;
+ info.platform_data = &cfg;
+
+ ret = -ENODEV;
+ request_module("tc90522");
+ cl = i2c_new_device(&pt3->i2c_adap, &info);
+ if (!cl || !cl->dev.driver)
+ return -ENODEV;
+ pt3->adaps[i]->i2c_demod = cl;
+ if (!try_module_get(cl->dev.driver->owner))
+ goto err_demod_i2c_unregister_device;
+
+ if (!strncmp(cl->name, TC90522_I2C_DEV_SAT, sizeof(cl->name))) {
+ struct qm1d1c0042_config tcfg;
+
+ tcfg = adap_conf[i].tuner_cfg.qm1d1c0042;
+ tcfg.fe = cfg.fe;
+ info = adap_conf[i].tuner_info;
+ info.platform_data = &tcfg;
+ request_module("qm1d1c0042");
+ cl = i2c_new_device(cfg.tuner_i2c, &info);
+ } else {
+ struct mxl301rf_config tcfg;
+
+ tcfg = adap_conf[i].tuner_cfg.mxl301rf;
+ tcfg.fe = cfg.fe;
+ info = adap_conf[i].tuner_info;
+ info.platform_data = &tcfg;
+ request_module("mxl301rf");
+ cl = i2c_new_device(cfg.tuner_i2c, &info);
+ }
+ if (!cl || !cl->dev.driver)
+ goto err_demod_module_put;
+ pt3->adaps[i]->i2c_tuner = cl;
+ if (!try_module_get(cl->dev.driver->owner))
+ goto err_tuner_i2c_unregister_device;
+
+ dvb_adap = &pt3->adaps[one_adapter ? 0 : i]->dvb_adap;
+ ret = dvb_register_frontend(dvb_adap, cfg.fe);
+ if (ret < 0)
+ goto err_tuner_module_put;
+ pt3->adaps[i]->fe = cfg.fe;
+ return 0;
+
+err_tuner_module_put:
+ module_put(pt3->adaps[i]->i2c_tuner->dev.driver->owner);
+err_tuner_i2c_unregister_device:
+ i2c_unregister_device(pt3->adaps[i]->i2c_tuner);
+err_demod_module_put:
+ module_put(pt3->adaps[i]->i2c_demod->dev.driver->owner);
+err_demod_i2c_unregister_device:
+ i2c_unregister_device(pt3->adaps[i]->i2c_demod);
+
+ return ret;
+}
+
+
+static int pt3_fetch_thread(void *data)
+{
+ struct pt3_adapter *adap = data;
+ ktime_t delay;
+ bool was_frozen;
+
+#define PT3_INITIAL_BUF_DROPS 4
+#define PT3_FETCH_DELAY 10
+#define PT3_FETCH_DELAY_DELTA 2
+
+ pt3_init_dmabuf(adap);
+ adap->num_discard = PT3_INITIAL_BUF_DROPS;
+
+ dev_dbg(adap->dvb_adap.device,
+ "PT3: [%s] started.\n", adap->thread->comm);
+ set_freezable();
+ while (!kthread_freezable_should_stop(&was_frozen)) {
+ if (was_frozen)
+ adap->num_discard = PT3_INITIAL_BUF_DROPS;
+
+ pt3_proc_dma(adap);
+
+ delay = ktime_set(0, PT3_FETCH_DELAY * NSEC_PER_MSEC);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ freezable_schedule_hrtimeout_range(&delay,
+ PT3_FETCH_DELAY_DELTA * NSEC_PER_MSEC,
+ HRTIMER_MODE_REL);
+ }
+ dev_dbg(adap->dvb_adap.device,
+ "PT3: [%s] exited.\n", adap->thread->comm);
+ adap->thread = NULL;
+ return 0;
+}
+
+static int pt3_start_streaming(struct pt3_adapter *adap)
+{
+ struct task_struct *thread;
+
+ /* start fetching thread */
+ thread = kthread_run(pt3_fetch_thread, adap, "pt3-ad%i-dmx%i",
+ adap->dvb_adap.num, adap->dmxdev.dvbdev->id);
+ if (IS_ERR(thread)) {
+ int ret = PTR_ERR(thread);
+
+ dev_warn(adap->dvb_adap.device,
+ "PT3 (adap:%d, dmx:%d): failed to start kthread.\n",
+ adap->dvb_adap.num, adap->dmxdev.dvbdev->id);
+ return ret;
+ }
+ adap->thread = thread;
+
+ return pt3_start_dma(adap);
+}
+
+static int pt3_stop_streaming(struct pt3_adapter *adap)
+{
+ int ret;
+
+ ret = pt3_stop_dma(adap);
+ if (ret)
+ dev_warn(adap->dvb_adap.device,
+ "PT3: failed to stop streaming of adap:%d/FE:%d\n",
+ adap->dvb_adap.num, adap->fe->id);
+
+ /* kill the fetching thread */
+ ret = kthread_stop(adap->thread);
+ return ret;
+}
+
+static int pt3_start_feed(struct dvb_demux_feed *feed)
+{
+ struct pt3_adapter *adap;
+
+ if (signal_pending(current))
+ return -EINTR;
+
+ adap = container_of(feed->demux, struct pt3_adapter, demux);
+ adap->num_feeds++;
+ if (adap->thread)
+ return 0;
+ if (adap->num_feeds != 1) {
+ dev_warn(adap->dvb_adap.device,
+ "%s: unmatched start/stop_feed in adap:%i/dmx:%i.\n",
+ __func__, adap->dvb_adap.num, adap->dmxdev.dvbdev->id);
+ adap->num_feeds = 1;
+ }
+
+ return pt3_start_streaming(adap);
+
+}
+
+static int pt3_stop_feed(struct dvb_demux_feed *feed)
+{
+ struct pt3_adapter *adap;
+
+ adap = container_of(feed->demux, struct pt3_adapter, demux);
+
+ adap->num_feeds--;
+ if (adap->num_feeds > 0 || !adap->thread)
+ return 0;
+ adap->num_feeds = 0;
+
+ return pt3_stop_streaming(adap);
+}
+
+
+static int pt3_alloc_adapter(struct pt3_board *pt3, int index)
+{
+ int ret;
+ struct pt3_adapter *adap;
+ struct dvb_adapter *da;
+
+ adap = kzalloc(sizeof(*adap), GFP_KERNEL);
+ if (!adap) {
+ dev_err(&pt3->pdev->dev, "failed to alloc mem for adapter.\n");
+ return -ENOMEM;
+ }
+ pt3->adaps[index] = adap;
+ adap->adap_idx = index;
+
+ if (index == 0 || !one_adapter) {
+ ret = dvb_register_adapter(&adap->dvb_adap, "PT3 DVB",
+ THIS_MODULE, &pt3->pdev->dev, adapter_nr);
+ if (ret < 0) {
+ dev_err(&pt3->pdev->dev,
+ "failed to register adapter dev.\n");
+ goto err_mem;
+ }
+ da = &adap->dvb_adap;
+ } else
+ da = &pt3->adaps[0]->dvb_adap;
+
+ adap->dvb_adap.priv = pt3;
+ adap->demux.dmx.capabilities = DMX_TS_FILTERING | DMX_SECTION_FILTERING;
+ adap->demux.priv = adap;
+ adap->demux.feednum = 256;
+ adap->demux.filternum = 256;
+ adap->demux.start_feed = pt3_start_feed;
+ adap->demux.stop_feed = pt3_stop_feed;
+ ret = dvb_dmx_init(&adap->demux);
+ if (ret < 0) {
+ dev_err(&pt3->pdev->dev, "failed to init dmx dev.\n");
+ goto err_adap;
+ }
+
+ adap->dmxdev.filternum = 256;
+ adap->dmxdev.demux = &adap->demux.dmx;
+ ret = dvb_dmxdev_init(&adap->dmxdev, da);
+ if (ret < 0) {
+ dev_err(&pt3->pdev->dev, "failed to init dmxdev.\n");
+ goto err_demux;
+ }
+
+ ret = pt3_alloc_dmabuf(adap);
+ if (ret) {
+ dev_err(&pt3->pdev->dev, "failed to alloc DMA buffers.\n");
+ goto err_dmabuf;
+ }
+
+ return 0;
+
+err_dmabuf:
+ pt3_free_dmabuf(adap);
+ dvb_dmxdev_release(&adap->dmxdev);
+err_demux:
+ dvb_dmx_release(&adap->demux);
+err_adap:
+ if (index == 0 || !one_adapter)
+ dvb_unregister_adapter(da);
+err_mem:
+ kfree(adap);
+ pt3->adaps[index] = NULL;
+ return ret;
+}
+
+static void pt3_cleanup_adapter(struct pt3_board *pt3, int index)
+{
+ struct pt3_adapter *adap;
+ struct dmx_demux *dmx;
+
+ adap = pt3->adaps[index];
+ if (adap == NULL)
+ return;
+
+ /* stop demux kthread */
+ if (adap->thread)
+ pt3_stop_streaming(adap);
+
+ dmx = &adap->demux.dmx;
+ dmx->close(dmx);
+ if (adap->fe) {
+ adap->fe->callback = NULL;
+ if (adap->fe->frontend_priv)
+ dvb_unregister_frontend(adap->fe);
+ if (adap->i2c_tuner) {
+ module_put(adap->i2c_tuner->dev.driver->owner);
+ i2c_unregister_device(adap->i2c_tuner);
+ }
+ if (adap->i2c_demod) {
+ module_put(adap->i2c_demod->dev.driver->owner);
+ i2c_unregister_device(adap->i2c_demod);
+ }
+ }
+ pt3_free_dmabuf(adap);
+ dvb_dmxdev_release(&adap->dmxdev);
+ dvb_dmx_release(&adap->demux);
+ if (index == 0 || !one_adapter)
+ dvb_unregister_adapter(&adap->dvb_adap);
+ kfree(adap);
+ pt3->adaps[index] = NULL;
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int pt3_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pt3_board *pt3 = pci_get_drvdata(pdev);
+ int i;
+ struct pt3_adapter *adap;
+
+ for (i = 0; i < PT3_NUM_FE; i++) {
+ adap = pt3->adaps[i];
+ if (adap->num_feeds > 0)
+ pt3_stop_dma(adap);
+ dvb_frontend_suspend(adap->fe);
+ pt3_free_dmabuf(adap);
+ }
+
+ pt3_lnb_ctrl(pt3, false);
+ pt3_set_tuner_power(pt3, false, false);
+ return 0;
+}
+
+static int pt3_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pt3_board *pt3 = pci_get_drvdata(pdev);
+ int i, ret;
+ struct pt3_adapter *adap;
+
+ ret = pt3_fe_init(pt3);
+ if (ret)
+ return ret;
+
+ if (pt3->lna_on_cnt > 0)
+ pt3_set_tuner_power(pt3, true, true);
+ if (pt3->lnb_on_cnt > 0)
+ pt3_lnb_ctrl(pt3, true);
+
+ for (i = 0; i < PT3_NUM_FE; i++) {
+ adap = pt3->adaps[i];
+ dvb_frontend_resume(adap->fe);
+ ret = pt3_alloc_dmabuf(adap);
+ if (ret) {
+ dev_err(&pt3->pdev->dev, "failed to alloc DMA bufs.\n");
+ continue;
+ }
+ if (adap->num_feeds > 0)
+ pt3_start_dma(adap);
+ }
+
+ return 0;
+}
+
+#endif /* CONFIG_PM_SLEEP */
+
+
+static void pt3_remove(struct pci_dev *pdev)
+{
+ struct pt3_board *pt3;
+ int i;
+
+ pt3 = pci_get_drvdata(pdev);
+ for (i = PT3_NUM_FE - 1; i >= 0; i--)
+ pt3_cleanup_adapter(pt3, i);
+ i2c_del_adapter(&pt3->i2c_adap);
+ kfree(pt3->i2c_buf);
+ pci_iounmap(pt3->pdev, pt3->regs[0]);
+ pci_iounmap(pt3->pdev, pt3->regs[1]);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ kfree(pt3);
+}
+
+static int pt3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ u8 rev;
+ u32 ver;
+ int i, ret;
+ struct pt3_board *pt3;
+ struct i2c_adapter *i2c;
+
+ if (pci_read_config_byte(pdev, PCI_REVISION_ID, &rev) || rev != 1)
+ return -ENODEV;
+
+ ret = pci_enable_device(pdev);
+ if (ret < 0)
+ return -ENODEV;
+ pci_set_master(pdev);
+
+ ret = pci_request_regions(pdev, DRV_NAME);
+ if (ret < 0)
+ goto err_disable_device;
+
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret == 0)
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ else {
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret == 0)
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ else {
+ dev_err(&pdev->dev, "Failed to set DMA mask.\n");
+ goto err_release_regions;
+ }
+ dev_info(&pdev->dev, "Use 32bit DMA.\n");
+ }
+
+ pt3 = kzalloc(sizeof(*pt3), GFP_KERNEL);
+ if (!pt3) {
+ dev_err(&pdev->dev, "Failed to alloc mem for this dev.\n");
+ ret = -ENOMEM;
+ goto err_release_regions;
+ }
+ pci_set_drvdata(pdev, pt3);
+ pt3->pdev = pdev;
+ mutex_init(&pt3->lock);
+ pt3->regs[0] = pci_ioremap_bar(pdev, 0);
+ pt3->regs[1] = pci_ioremap_bar(pdev, 2);
+ if (pt3->regs[0] == NULL || pt3->regs[1] == NULL) {
+ dev_err(&pdev->dev, "Failed to ioremap.\n");
+ ret = -ENOMEM;
+ goto err_kfree;
+ }
+
+ ver = ioread32(pt3->regs[0] + REG_VERSION);
+ if ((ver >> 16) != 0x0301) {
+ dev_warn(&pdev->dev, "PT%d, I/F-ver.:%d not supported",
+ ver >> 24, (ver & 0x00ff0000) >> 16);
+ ret = -ENODEV;
+ goto err_iounmap;
+ }
+
+ pt3->num_bufs = clamp_val(num_bufs, MIN_DATA_BUFS, MAX_DATA_BUFS);
+
+ pt3->i2c_buf = kmalloc(sizeof(*pt3->i2c_buf), GFP_KERNEL);
+ if (pt3->i2c_buf == NULL) {
+ dev_err(&pdev->dev, "Failed to alloc mem for i2c.\n");
+ ret = -ENOMEM;
+ goto err_iounmap;
+ }
+ i2c = &pt3->i2c_adap;
+ i2c->owner = THIS_MODULE;
+ i2c->algo = &pt3_i2c_algo;
+ i2c->algo_data = NULL;
+ i2c->dev.parent = &pdev->dev;
+ strlcpy(i2c->name, DRV_NAME, sizeof(i2c->name));
+ i2c_set_adapdata(i2c, pt3);
+ ret = i2c_add_adapter(i2c);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to add i2c adapter.\n");
+ goto err_i2cbuf;
+ }
+
+ for (i = 0; i < PT3_NUM_FE; i++) {
+ ret = pt3_alloc_adapter(pt3, i);
+ if (ret < 0)
+ break;
+
+ ret = pt3_attach_fe(pt3, i);
+ if (ret < 0)
+ break;
+ }
+ if (i < PT3_NUM_FE) {
+ dev_err(&pdev->dev, "Failed to create FE%d.\n", i);
+ goto err_cleanup_adapters;
+ }
+
+ ret = pt3_fe_init(pt3);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to init frontends.\n");
+ i = PT3_NUM_FE - 1;
+ goto err_cleanup_adapters;
+ }
+
+ dev_info(&pdev->dev,
+ "successfully init'ed PT%d (fw:0x%02x, I/F:0x%02x).\n",
+ ver >> 24, (ver >> 8) & 0xff, (ver >> 16) & 0xff);
+ return 0;
+
+err_cleanup_adapters:
+ while (i >= 0)
+ pt3_cleanup_adapter(pt3, i--);
+ i2c_del_adapter(i2c);
+err_i2cbuf:
+ kfree(pt3->i2c_buf);
+err_iounmap:
+ if (pt3->regs[0])
+ pci_iounmap(pdev, pt3->regs[0]);
+ if (pt3->regs[1])
+ pci_iounmap(pdev, pt3->regs[1]);
+err_kfree:
+ kfree(pt3);
+err_release_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+ return ret;
+}
+
+static const struct pci_device_id pt3_id_table[] = {
+ { PCI_DEVICE_SUB(0x1172, 0x4c15, 0xee8d, 0x0368) },
+ { },
+};
+MODULE_DEVICE_TABLE(pci, pt3_id_table);
+
+static SIMPLE_DEV_PM_OPS(pt3_pm_ops, pt3_suspend, pt3_resume);
+
+static struct pci_driver pt3_driver = {
+ .name = DRV_NAME,
+ .probe = pt3_probe,
+ .remove = pt3_remove,
+ .id_table = pt3_id_table,
+
+ .driver.pm = &pt3_pm_ops,
+};
+
+module_pci_driver(pt3_driver);
+
+MODULE_DESCRIPTION("Earthsoft PT3 Driver");
+MODULE_AUTHOR("Akihiro TSUKADA");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/pci/pt3/pt3.h b/drivers/media/pci/pt3/pt3.h
new file mode 100644
index 000000000000..1b3f2ad25db3
--- /dev/null
+++ b/drivers/media/pci/pt3/pt3.h
@@ -0,0 +1,186 @@
+/*
+ * Earthsoft PT3 driver
+ *
+ * Copyright (C) 2014 Akihiro Tsukada <tskd08@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef PT3_H
+#define PT3_H
+
+#include <linux/atomic.h>
+#include <linux/types.h>
+
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dmxdev.h"
+
+#include "tc90522.h"
+#include "mxl301rf.h"
+#include "qm1d1c0042.h"
+
+#define DRV_NAME KBUILD_MODNAME
+
+#define PT3_NUM_FE 4
+
+/*
+ * register index of the FPGA chip
+ */
+#define REG_VERSION 0x00
+#define REG_BUS 0x04
+#define REG_SYSTEM_W 0x08
+#define REG_SYSTEM_R 0x0c
+#define REG_I2C_W 0x10
+#define REG_I2C_R 0x14
+#define REG_RAM_W 0x18
+#define REG_RAM_R 0x1c
+#define REG_DMA_BASE 0x40 /* regs for FE[i] = REG_DMA_BASE + 0x18 * i */
+#define OFST_DMA_DESC_L 0x00
+#define OFST_DMA_DESC_H 0x04
+#define OFST_DMA_CTL 0x08
+#define OFST_TS_CTL 0x0c
+#define OFST_STATUS 0x10
+#define OFST_TS_ERR 0x14
+
+/*
+ * internal buffer for I2C
+ */
+#define PT3_I2C_MAX 4091
+struct pt3_i2cbuf {
+ u8 data[PT3_I2C_MAX];
+ u8 tmp;
+ u32 num_cmds;
+};
+
+/*
+ * DMA things
+ */
+#define TS_PACKET_SZ 188
+/* DMA transfers must not cross a 4 GiB boundary, so use one page per transfer */
+#define DATA_XFER_SZ 4096
+#define DATA_BUF_XFERS 47
+/* (num_bufs * DATA_BUF_SZ) % TS_PACKET_SZ must be 0 */
+#define DATA_BUF_SZ (DATA_BUF_XFERS * DATA_XFER_SZ)
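+/* = 47 * 4096 = 192512 = 1024 * TS_PACKET_SZ bytes, i.e. a whole number of TS packets */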
+#define MAX_DATA_BUFS 16
+#define MIN_DATA_BUFS 2
+
+#define DESCS_IN_PAGE (PAGE_SIZE / sizeof(struct xfer_desc))
+#define MAX_NUM_XFERS (MAX_DATA_BUFS * DATA_BUF_XFERS)
+#define MAX_DESC_BUFS DIV_ROUND_UP(MAX_NUM_XFERS, DESCS_IN_PAGE)
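+/* e.g. with 4 KiB pages: 204 descs fit per page, and 16 * 47 = 752 xfers need at most 4 desc pages */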
+
+/* DMA transfer description.
+ * device is passed a pointer to this struct, dma-reads it,
+ * and gets the DMA buffer ring for storing TS data.
+ */
+struct xfer_desc {
+ u32 addr_l; /* bus address of target data buffer */
+ u32 addr_h;
+ u32 size;
+ u32 next_l; /* bus address of the next xfer_desc */
+ u32 next_h;
+};
+
+/* A DMA mapping of a page containing xfer_desc's */
+struct xfer_desc_buffer {
+ dma_addr_t b_addr;
+ struct xfer_desc *descs; /* PAGE_SIZE (xfer_desc[DESCS_IN_PAGE]) */
+};
+
+/* A DMA mapping of a data buffer */
+struct dma_data_buffer {
+ dma_addr_t b_addr;
+ u8 *data; /* size: u8[DATA_BUF_SZ] */
+};
+
+/*
+ * device things
+ */
+struct pt3_adap_config {
+ struct i2c_board_info demod_info;
+ struct tc90522_config demod_cfg;
+
+ struct i2c_board_info tuner_info;
+ union tuner_config {
+ struct qm1d1c0042_config qm1d1c0042;
+ struct mxl301rf_config mxl301rf;
+ } tuner_cfg;
+ u32 init_freq;
+};
+
+struct pt3_adapter {
+ struct dvb_adapter dvb_adap; /* dvb_adap.priv => struct pt3_board */
+ int adap_idx;
+
+ struct dvb_demux demux;
+ struct dmxdev dmxdev;
+ struct dvb_frontend *fe;
+ struct i2c_client *i2c_demod;
+ struct i2c_client *i2c_tuner;
+
+ /* data fetch thread */
+ struct task_struct *thread;
+ int num_feeds;
+
+ bool cur_lna;
+ bool cur_lnb; /* current LNB power status (on/off) */
+
+ /* items below are for DMA */
+ struct dma_data_buffer buffer[MAX_DATA_BUFS];
+ int buf_idx;
+ int buf_ofs;
+ int num_bufs; /* == pt3_board->num_bufs */
+ int num_discard; /* how many access units to discard initially */
+
+ struct xfer_desc_buffer desc_buf[MAX_DESC_BUFS];
+ int num_desc_bufs; /* == num_bufs * DATA_BUF_XFERS / DESCS_IN_PAGE */
+};
+
+
+struct pt3_board {
+ struct pci_dev *pdev;
+ void __iomem *regs[2];
+ /* regs[0]: registers, regs[1]: internal memory, used for I2C */
+
+ struct mutex lock;
+
+ /* LNB power shared among sat-FEs */
+ int lnb_on_cnt; /* LNB power on count */
+
+ /* LNA shared among terr-FEs */
+ int lna_on_cnt; /* booster enabled count */
+
+ int num_bufs; /* number of DMA buffers allocated/mapped per FE */
+
+ struct i2c_adapter i2c_adap;
+ struct pt3_i2cbuf *i2c_buf;
+
+ struct pt3_adapter *adaps[PT3_NUM_FE];
+};
+
+
+/*
+ * prototypes
+ */
+extern int pt3_alloc_dmabuf(struct pt3_adapter *adap);
+extern void pt3_init_dmabuf(struct pt3_adapter *adap);
+extern void pt3_free_dmabuf(struct pt3_adapter *adap);
+extern int pt3_start_dma(struct pt3_adapter *adap);
+extern int pt3_stop_dma(struct pt3_adapter *adap);
+extern int pt3_proc_dma(struct pt3_adapter *adap);
+
+extern int pt3_i2c_master_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num);
+extern u32 pt3_i2c_functionality(struct i2c_adapter *adap);
+extern void pt3_i2c_reset(struct pt3_board *pt3);
+extern int pt3_init_all_demods(struct pt3_board *pt3);
+extern int pt3_init_all_mxl301rf(struct pt3_board *pt3);
+#endif /* PT3_H */
diff --git a/drivers/media/pci/pt3/pt3_dma.c b/drivers/media/pci/pt3/pt3_dma.c
new file mode 100644
index 000000000000..f0ce90437fac
--- /dev/null
+++ b/drivers/media/pci/pt3/pt3_dma.c
@@ -0,0 +1,225 @@
+/*
+ * Earthsoft PT3 driver
+ *
+ * Copyright (C) 2014 Akihiro Tsukada <tskd08@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+
+#include "pt3.h"
+
+#define PT3_ACCESS_UNIT (TS_PACKET_SZ * 128)
+#define PT3_BUF_CANARY (0x74)
+
+static u32 get_dma_base(int idx)
+{
+ int i;
+
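+ /* the DMA register blocks for FE1 and FE2 are laid out in swapped order */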
+ i = (idx == 1 || idx == 2) ? 3 - idx : idx;
+ return REG_DMA_BASE + 0x18 * i;
+}
+
+int pt3_stop_dma(struct pt3_adapter *adap)
+{
+ struct pt3_board *pt3 = adap->dvb_adap.priv;
+ u32 base;
+ u32 stat;
+ int retry;
+
+ base = get_dma_base(adap->adap_idx);
+ stat = ioread32(pt3->regs[0] + base + OFST_STATUS);
+ if (!(stat & 0x01))
+ return 0;
+
+ iowrite32(0x02, pt3->regs[0] + base + OFST_DMA_CTL);
+ for (retry = 0; retry < 5; retry++) {
+ stat = ioread32(pt3->regs[0] + base + OFST_STATUS);
+ if (!(stat & 0x01))
+ return 0;
+ msleep(50);
+ }
+ return -EIO;
+}
+
+int pt3_start_dma(struct pt3_adapter *adap)
+{
+ struct pt3_board *pt3 = adap->dvb_adap.priv;
+ u32 base = get_dma_base(adap->adap_idx);
+
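+ /* stop any running transfer, load the first descriptor address, then start */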
+ iowrite32(0x02, pt3->regs[0] + base + OFST_DMA_CTL);
+ iowrite32(lower_32_bits(adap->desc_buf[0].b_addr),
+ pt3->regs[0] + base + OFST_DMA_DESC_L);
+ iowrite32(upper_32_bits(adap->desc_buf[0].b_addr),
+ pt3->regs[0] + base + OFST_DMA_DESC_H);
+ iowrite32(0x01, pt3->regs[0] + base + OFST_DMA_CTL);
+ return 0;
+}
+
+
+static u8 *next_unit(struct pt3_adapter *adap, int *idx, int *ofs)
+{
+ *ofs += PT3_ACCESS_UNIT;
+ if (*ofs >= DATA_BUF_SZ) {
+ *ofs -= DATA_BUF_SZ;
+ (*idx)++;
+ if (*idx == adap->num_bufs)
+ *idx = 0;
+ }
+ return &adap->buffer[*idx].data[*ofs];
+}
+
+int pt3_proc_dma(struct pt3_adapter *adap)
+{
+ int idx, ofs;
+
+ idx = adap->buf_idx;
+ ofs = adap->buf_ofs;
+
+ if (adap->buffer[idx].data[ofs] == PT3_BUF_CANARY)
+ return 0;
+
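+ /* consume filled access units, re-marking each one with the canary */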
+ while (*next_unit(adap, &idx, &ofs) != PT3_BUF_CANARY) {
+ u8 *p;
+
+ p = &adap->buffer[adap->buf_idx].data[adap->buf_ofs];
+ if (adap->num_discard > 0)
+ adap->num_discard--;
+ else if (adap->buf_ofs + PT3_ACCESS_UNIT > DATA_BUF_SZ) {
+ dvb_dmx_swfilter_packets(&adap->demux, p,
+ (DATA_BUF_SZ - adap->buf_ofs) / TS_PACKET_SZ);
+ dvb_dmx_swfilter_packets(&adap->demux,
+ adap->buffer[idx].data, ofs / TS_PACKET_SZ);
+ } else
+ dvb_dmx_swfilter_packets(&adap->demux, p,
+ PT3_ACCESS_UNIT / TS_PACKET_SZ);
+
+ *p = PT3_BUF_CANARY;
+ adap->buf_idx = idx;
+ adap->buf_ofs = ofs;
+ }
+ return 0;
+}
+
+void pt3_init_dmabuf(struct pt3_adapter *adap)
+{
+ int idx, ofs;
+ u8 *p;
+
+ idx = 0;
+ ofs = 0;
+ p = adap->buffer[0].data;
+ /* mark the whole buffers as "not written yet" */
+ while (idx < adap->num_bufs) {
+ p[ofs] = PT3_BUF_CANARY;
+ ofs += PT3_ACCESS_UNIT;
+ if (ofs >= DATA_BUF_SZ) {
+ ofs -= DATA_BUF_SZ;
+ idx++;
+ p = adap->buffer[idx].data;
+ }
+ }
+ adap->buf_idx = 0;
+ adap->buf_ofs = 0;
+}
+
+void pt3_free_dmabuf(struct pt3_adapter *adap)
+{
+ struct pt3_board *pt3;
+ int i;
+
+ pt3 = adap->dvb_adap.priv;
+ for (i = 0; i < adap->num_bufs; i++)
+ dma_free_coherent(&pt3->pdev->dev, DATA_BUF_SZ,
+ adap->buffer[i].data, adap->buffer[i].b_addr);
+ adap->num_bufs = 0;
+
+ for (i = 0; i < adap->num_desc_bufs; i++)
+ dma_free_coherent(&pt3->pdev->dev, PAGE_SIZE,
+ adap->desc_buf[i].descs, adap->desc_buf[i].b_addr);
+ adap->num_desc_bufs = 0;
+}
+
+
+int pt3_alloc_dmabuf(struct pt3_adapter *adap)
+{
+ struct pt3_board *pt3;
+ void *p;
+ int i, j;
+ int idx, ofs;
+ int num_desc_bufs;
+ dma_addr_t data_addr, desc_addr;
+ struct xfer_desc *d;
+
+ pt3 = adap->dvb_adap.priv;
+ adap->num_bufs = 0;
+ adap->num_desc_bufs = 0;
+ for (i = 0; i < pt3->num_bufs; i++) {
+ p = dma_alloc_coherent(&pt3->pdev->dev, DATA_BUF_SZ,
+ &adap->buffer[i].b_addr, GFP_KERNEL);
+ if (p == NULL)
+ goto failed;
+ adap->buffer[i].data = p;
+ adap->num_bufs++;
+ }
+ pt3_init_dmabuf(adap);
+
+ /* build circularly linked pointers (xfer_desc) to the data buffers */
+ idx = 0;
+ ofs = 0;
+ num_desc_bufs =
+ DIV_ROUND_UP(adap->num_bufs * DATA_BUF_XFERS, DESCS_IN_PAGE);
+ for (i = 0; i < num_desc_bufs; i++) {
+ p = dma_alloc_coherent(&pt3->pdev->dev, PAGE_SIZE,
+ &desc_addr, GFP_KERNEL);
+ if (p == NULL)
+ goto failed;
+ adap->num_desc_bufs++;
+ adap->desc_buf[i].descs = p;
+ adap->desc_buf[i].b_addr = desc_addr;
+
+ if (i > 0) {
+ d = &adap->desc_buf[i - 1].descs[DESCS_IN_PAGE - 1];
+ d->next_l = lower_32_bits(desc_addr);
+ d->next_h = upper_32_bits(desc_addr);
+ }
+ for (j = 0; j < DESCS_IN_PAGE; j++) {
+ data_addr = adap->buffer[idx].b_addr + ofs;
+ d = &adap->desc_buf[i].descs[j];
+ d->addr_l = lower_32_bits(data_addr);
+ d->addr_h = upper_32_bits(data_addr);
+ d->size = DATA_XFER_SZ;
+
+ desc_addr += sizeof(struct xfer_desc);
+ d->next_l = lower_32_bits(desc_addr);
+ d->next_h = upper_32_bits(desc_addr);
+
+ ofs += DATA_XFER_SZ;
+ if (ofs >= DATA_BUF_SZ) {
+ ofs -= DATA_BUF_SZ;
+ idx++;
+ if (idx >= adap->num_bufs) {
+ desc_addr = adap->desc_buf[0].b_addr;
+ d->next_l = lower_32_bits(desc_addr);
+ d->next_h = upper_32_bits(desc_addr);
+ return 0;
+ }
+ }
+ }
+ }
+ return 0;
+
+failed:
+ pt3_free_dmabuf(adap);
+ return -ENOMEM;
+}
diff --git a/drivers/media/pci/pt3/pt3_i2c.c b/drivers/media/pci/pt3/pt3_i2c.c
new file mode 100644
index 000000000000..ec6a8a2e4744
--- /dev/null
+++ b/drivers/media/pci/pt3/pt3_i2c.c
@@ -0,0 +1,240 @@
+/*
+ * Earthsoft PT3 driver
+ *
+ * Copyright (C) 2014 Akihiro Tsukada <tskd08@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/pci.h>
+
+#include "pt3.h"
+
+#define PT3_I2C_BASE 2048
+#define PT3_CMD_ADDR_NORMAL 0
+#define PT3_CMD_ADDR_INIT_DEMOD 4096
+#define PT3_CMD_ADDR_INIT_TUNER (4096 + 2042)
+
+/* masks for I2C status register */
+#define STAT_SEQ_RUNNING 0x1
+#define STAT_SEQ_ERROR 0x6
+#define STAT_NO_SEQ 0x8
+
+#define PT3_I2C_RUN (1 << 16)
+#define PT3_I2C_RESET (1 << 17)
+
+enum ctl_cmd {
+ I_END,
+ I_ADDRESS,
+ I_CLOCK_L,
+ I_CLOCK_H,
+ I_DATA_L,
+ I_DATA_H,
+ I_RESET,
+ I_SLEEP,
+ I_DATA_L_NOP = 0x08,
+ I_DATA_H_NOP = 0x0c,
+ I_DATA_H_READ = 0x0d,
+ I_DATA_H_ACK0 = 0x0e,
+ I_DATA_H_ACK1 = 0x0f,
+};
+
+
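+/* two 4-bit commands are packed into each byte of the command buffer, low nibble first */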
+static void cmdbuf_add(struct pt3_i2cbuf *cbuf, enum ctl_cmd cmd)
+{
+ int buf_idx;
+
+ if ((cbuf->num_cmds % 2) == 0)
+ cbuf->tmp = cmd;
+ else {
+ cbuf->tmp |= cmd << 4;
+ buf_idx = cbuf->num_cmds / 2;
+ if (buf_idx < ARRAY_SIZE(cbuf->data))
+ cbuf->data[buf_idx] = cbuf->tmp;
+ }
+ cbuf->num_cmds++;
+}
+
+static void put_end(struct pt3_i2cbuf *cbuf)
+{
+ cmdbuf_add(cbuf, I_END);
+ if (cbuf->num_cmds % 2)
+ cmdbuf_add(cbuf, I_END);
+}
+
+static void put_start(struct pt3_i2cbuf *cbuf)
+{
+ cmdbuf_add(cbuf, I_DATA_H);
+ cmdbuf_add(cbuf, I_CLOCK_H);
+ cmdbuf_add(cbuf, I_DATA_L);
+ cmdbuf_add(cbuf, I_CLOCK_L);
+}
+
+static void put_byte_write(struct pt3_i2cbuf *cbuf, u8 val)
+{
+ u8 mask;
+
+ for (mask = 0x80; mask > 0; mask >>= 1)
+ cmdbuf_add(cbuf, (val & mask) ? I_DATA_H_NOP : I_DATA_L_NOP);
+ cmdbuf_add(cbuf, I_DATA_H_ACK0);
+}
+
+static void put_byte_read(struct pt3_i2cbuf *cbuf, u32 size)
+{
+ int i, j;
+
+ for (i = 0; i < size; i++) {
+ for (j = 0; j < 8; j++)
+ cmdbuf_add(cbuf, I_DATA_H_READ);
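+ /* drive SDA low (ACK) after each byte except the last, which is left high (NACK) */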
+ cmdbuf_add(cbuf, (i == size - 1) ? I_DATA_H_NOP : I_DATA_L_NOP);
+ }
+}
+
+static void put_stop(struct pt3_i2cbuf *cbuf)
+{
+ cmdbuf_add(cbuf, I_DATA_L);
+ cmdbuf_add(cbuf, I_CLOCK_H);
+ cmdbuf_add(cbuf, I_DATA_H);
+}
+
+
+/* translates msgs to internal commands for bit-banging */
+static void translate(struct pt3_i2cbuf *cbuf, struct i2c_msg *msgs, int num)
+{
+ int i, j;
+ bool rd;
+
+ cbuf->num_cmds = 0;
+ for (i = 0; i < num; i++) {
+ rd = !!(msgs[i].flags & I2C_M_RD);
+ put_start(cbuf);
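+ /* 7-bit slave address followed by the R/W bit */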
+ put_byte_write(cbuf, msgs[i].addr << 1 | rd);
+ if (rd)
+ put_byte_read(cbuf, msgs[i].len);
+ else
+ for (j = 0; j < msgs[i].len; j++)
+ put_byte_write(cbuf, msgs[i].buf[j]);
+ }
+ if (num > 0) {
+ put_stop(cbuf);
+ put_end(cbuf);
+ }
+}
+
+static int wait_i2c_result(struct pt3_board *pt3, u32 *result, int max_wait)
+{
+ int i;
+ u32 v;
+
+ for (i = 0; i < max_wait; i++) {
+ v = ioread32(pt3->regs[0] + REG_I2C_R);
+ if (!(v & STAT_SEQ_RUNNING))
+ break;
+ usleep_range(500, 750);
+ }
+ if (i >= max_wait)
+ return -EIO;
+ if (result)
+ *result = v;
+ return 0;
+}
+
+/* send [pre-]translated i2c msgs stored at addr */
+static int send_i2c_cmd(struct pt3_board *pt3, u32 addr)
+{
+ u32 ret;
+
+ /* make sure that previous transactions had finished */
+ if (wait_i2c_result(pt3, NULL, 50)) {
+ dev_warn(&pt3->pdev->dev, "(%s) prev. transaction stalled\n",
+ __func__);
+ return -EIO;
+ }
+
+ iowrite32(PT3_I2C_RUN | addr, pt3->regs[0] + REG_I2C_W);
+ usleep_range(200, 300);
+ /* wait for the current transaction to finish */
+ if (wait_i2c_result(pt3, &ret, 500) || (ret & STAT_SEQ_ERROR)) {
+ dev_warn(&pt3->pdev->dev, "(%s) failed.\n", __func__);
+ return -EIO;
+ }
+ return 0;
+}
+
+
+/* init commands for each demod are combined into one transaction
+ * and hidden in ROM with the address PT3_CMD_ADDR_INIT_DEMOD.
+ */
+int pt3_init_all_demods(struct pt3_board *pt3)
+{
+ ioread32(pt3->regs[0] + REG_I2C_R);
+ return send_i2c_cmd(pt3, PT3_CMD_ADDR_INIT_DEMOD);
+}
+
+/* init commands for two ISDB-T tuners are hidden in ROM. */
+int pt3_init_all_mxl301rf(struct pt3_board *pt3)
+{
+ usleep_range(1000, 2000);
+ return send_i2c_cmd(pt3, PT3_CMD_ADDR_INIT_TUNER);
+}
+
+void pt3_i2c_reset(struct pt3_board *pt3)
+{
+ iowrite32(PT3_I2C_RESET, pt3->regs[0] + REG_I2C_W);
+}
+
+/*
+ * I2C algorithm
+ */
+int
+pt3_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+ struct pt3_board *pt3;
+ struct pt3_i2cbuf *cbuf;
+ int i;
+ void __iomem *p;
+
+ pt3 = i2c_get_adapdata(adap);
+ cbuf = pt3->i2c_buf;
+
+ for (i = 0; i < num; i++)
+ if (msgs[i].flags & I2C_M_RECV_LEN) {
+ dev_warn(&pt3->pdev->dev,
+ "(%s) I2C_M_RECV_LEN not supported.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ translate(cbuf, msgs, num);
+ memcpy_toio(pt3->regs[1] + PT3_I2C_BASE + PT3_CMD_ADDR_NORMAL / 2,
+ cbuf->data, cbuf->num_cmds);
+
+ if (send_i2c_cmd(pt3, PT3_CMD_ADDR_NORMAL) < 0)
+ return -EIO;
+
+ p = pt3->regs[1] + PT3_I2C_BASE;
+ for (i = 0; i < num; i++)
+ if ((msgs[i].flags & I2C_M_RD) && msgs[i].len > 0) {
+ memcpy_fromio(msgs[i].buf, p, msgs[i].len);
+ p += msgs[i].len;
+ }
+
+ return num;
+}
+
+u32 pt3_i2c_functionality(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C;
+}
diff --git a/drivers/media/pci/saa7134/Kconfig b/drivers/media/pci/saa7134/Kconfig
index 18ae75546302..b44e0d70907e 100644
--- a/drivers/media/pci/saa7134/Kconfig
+++ b/drivers/media/pci/saa7134/Kconfig
@@ -63,3 +63,11 @@ config VIDEO_SAA7134_DVB
To compile this driver as a module, choose M here: the
module will be called saa7134-dvb.
+
+config VIDEO_SAA7134_GO7007
+ tristate "go7007 support for saa7134 based TV cards"
+ depends on VIDEO_SAA7134
+ depends on VIDEO_GO7007
+ ---help---
+ Enables saa7134 driver support for boards with go7007
+ MPEG encoder (WIS Voyager or compatible).
diff --git a/drivers/media/pci/saa7134/Makefile b/drivers/media/pci/saa7134/Makefile
index 58de9b085689..09c43da67588 100644
--- a/drivers/media/pci/saa7134/Makefile
+++ b/drivers/media/pci/saa7134/Makefile
@@ -5,6 +5,7 @@ saa7134-y += saa7134-video.o
saa7134-$(CONFIG_VIDEO_SAA7134_RC) += saa7134-input.o
obj-$(CONFIG_VIDEO_SAA7134) += saa7134.o saa7134-empress.o
+obj-$(CONFIG_VIDEO_SAA7134_GO7007) += saa7134-go7007.o
obj-$(CONFIG_VIDEO_SAA7134_ALSA) += saa7134-alsa.o
@@ -14,3 +15,4 @@ ccflags-y += -I$(srctree)/drivers/media/i2c
ccflags-y += -I$(srctree)/drivers/media/tuners
ccflags-y += -I$(srctree)/drivers/media/dvb-core
ccflags-y += -I$(srctree)/drivers/media/dvb-frontends
+ccflags-y += -I$(srctree)/drivers/media/usb/go7007
diff --git a/drivers/media/pci/saa7134/saa7134-cards.c b/drivers/media/pci/saa7134/saa7134-cards.c
index 6e4bdb90aa92..3ca078057755 100644
--- a/drivers/media/pci/saa7134/saa7134-cards.c
+++ b/drivers/media/pci/saa7134/saa7134-cards.c
@@ -5827,6 +5827,29 @@ struct saa7134_board saa7134_boards[] = {
.gpio = 0x0000800,
},
},
+ [SAA7134_BOARD_WIS_VOYAGER] = {
+ .name = "WIS Voyager or compatible",
+ .audio_clock = 0x00200000,
+ .tuner_type = TUNER_PHILIPS_TDA8290,
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .mpeg = SAA7134_MPEG_GO7007,
+ .inputs = { {
+ .name = name_comp1,
+ .vmux = 0,
+ .amux = LINE2,
+ }, {
+ .name = name_tv,
+ .vmux = 3,
+ .amux = TV,
+ .tv = 1,
+ }, {
+ .name = name_svideo,
+ .vmux = 6,
+ .amux = LINE1,
+ } },
+ },
};
@@ -7080,6 +7103,12 @@ struct pci_device_id saa7134_pci_tbl[] = {
.subdevice = 0x2055, /* AverTV Satellite Hybrid+FM A706 */
.driver_data = SAA7134_BOARD_AVERMEDIA_A706,
}, {
+ .vendor = PCI_VENDOR_ID_PHILIPS,
+ .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
+ .subvendor = 0x1905, /* WIS */
+ .subdevice = 0x7007,
+ .driver_data = SAA7134_BOARD_WIS_VOYAGER,
+ }, {
/* --- boards without eeprom + subsystem ID --- */
.vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7134,
diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
index 9ff03a69ced4..236ed725f933 100644
--- a/drivers/media/pci/saa7134/saa7134-core.c
+++ b/drivers/media/pci/saa7134/saa7134-core.c
@@ -160,6 +160,8 @@ static void request_module_async(struct work_struct *work){
request_module("saa7134-empress");
if (card_is_dvb(dev))
request_module("saa7134-dvb");
+ if (card_is_go7007(dev))
+ request_module("saa7134-go7007");
if (alsa) {
if (dev->pci->device != PCI_DEVICE_ID_PHILIPS_SAA7130)
request_module("saa7134-alsa");
@@ -563,8 +565,12 @@ static irqreturn_t saa7134_irq(int irq, void *dev_id)
saa7134_irq_vbi_done(dev,status);
if ((report & SAA7134_IRQ_REPORT_DONE_RA2) &&
- card_has_mpeg(dev))
- saa7134_irq_ts_done(dev,status);
+ card_has_mpeg(dev)) {
+ if (dev->mops->irq_ts_done != NULL)
+ dev->mops->irq_ts_done(dev, status);
+ else
+ saa7134_irq_ts_done(dev, status);
+ }
if (report & SAA7134_IRQ_REPORT_GPIO16) {
switch (dev->has_remote) {
diff --git a/drivers/media/pci/saa7134/saa7134-go7007.c b/drivers/media/pci/saa7134/saa7134-go7007.c
new file mode 100644
index 000000000000..54e650b4dff1
--- /dev/null
+++ b/drivers/media/pci/saa7134/saa7134-go7007.c
@@ -0,0 +1,531 @@
+/*
+ * Copyright (C) 2005-2006 Micronas USA Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/mm.h>
+#include <linux/usb.h>
+#include <linux/i2c.h>
+#include <asm/byteorder.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+
+#include "saa7134.h"
+#include "saa7134-reg.h"
+#include "go7007-priv.h"
+
+/*#define GO7007_HPI_DEBUG*/
+
+enum hpi_address {
+ HPI_ADDR_VIDEO_BUFFER = 0xe4,
+ HPI_ADDR_INIT_BUFFER = 0xea,
+ HPI_ADDR_INTR_RET_VALUE = 0xee,
+ HPI_ADDR_INTR_RET_DATA = 0xec,
+ HPI_ADDR_INTR_STATUS = 0xf4,
+ HPI_ADDR_INTR_WR_PARAM = 0xf6,
+ HPI_ADDR_INTR_WR_INDEX = 0xf8,
+};
+
+enum gpio_command {
+ GPIO_COMMAND_RESET = 0x00, /* 000b */
+ GPIO_COMMAND_REQ1 = 0x04, /* 001b */
+ GPIO_COMMAND_WRITE = 0x20, /* 010b */
+ GPIO_COMMAND_REQ2 = 0x24, /* 011b */
+ GPIO_COMMAND_READ = 0x80, /* 100b */
+ GPIO_COMMAND_VIDEO = 0x84, /* 101b */
+ GPIO_COMMAND_IDLE = 0xA0, /* 110b */
+ GPIO_COMMAND_ADDR = 0xA4, /* 111b */
+};
+
+struct saa7134_go7007 {
+ struct v4l2_subdev sd;
+ struct saa7134_dev *dev;
+ u8 *top;
+ u8 *bottom;
+ dma_addr_t top_dma;
+ dma_addr_t bottom_dma;
+};
+
+static inline struct saa7134_go7007 *to_state(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct saa7134_go7007, sd);
+}
+
+static const struct go7007_board_info board_voyager = {
+ .flags = 0,
+ .sensor_flags = GO7007_SENSOR_656 |
+ GO7007_SENSOR_VALID_ENABLE |
+ GO7007_SENSOR_TV |
+ GO7007_SENSOR_VBI,
+ .audio_flags = GO7007_AUDIO_I2S_MODE_1 |
+ GO7007_AUDIO_WORD_16,
+ .audio_rate = 48000,
+ .audio_bclk_div = 8,
+ .audio_main_div = 2,
+ .hpi_buffer_cap = 7,
+ .num_inputs = 1,
+ .inputs = {
+ {
+ .name = "SAA7134",
+ },
+ },
+};
+
+/********************* Driver for GPIO HPI interface *********************/
+
+static int gpio_write(struct saa7134_dev *dev, u8 addr, u16 data)
+{
+ saa_writeb(SAA7134_GPIO_GPMODE0, 0xff);
+
+ /* Write HPI address */
+ saa_writeb(SAA7134_GPIO_GPSTATUS0, addr);
+ saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_ADDR);
+ saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_IDLE);
+
+ /* Write low byte */
+ saa_writeb(SAA7134_GPIO_GPSTATUS0, data & 0xff);
+ saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_WRITE);
+ saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_IDLE);
+
+ /* Write high byte */
+ saa_writeb(SAA7134_GPIO_GPSTATUS0, data >> 8);
+ saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_WRITE);
+ saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_IDLE);
+
+ return 0;
+}
+
+static int gpio_read(struct saa7134_dev *dev, u8 addr, u16 *data)
+{
+ saa_writeb(SAA7134_GPIO_GPMODE0, 0xff);
+
+ /* Write HPI address */
+ saa_writeb(SAA7134_GPIO_GPSTATUS0, addr);
+ saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_ADDR);
+ saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_IDLE);
+
+ saa_writeb(SAA7134_GPIO_GPMODE0, 0x00);
+
+ /* Read low byte */
+ saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_READ);
+ saa_clearb(SAA7134_GPIO_GPMODE3, SAA7134_GPIO_GPRESCAN);
+ saa_setb(SAA7134_GPIO_GPMODE3, SAA7134_GPIO_GPRESCAN);
+ *data = saa_readb(SAA7134_GPIO_GPSTATUS0);
+ saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_IDLE);
+
+ /* Read high byte */
+ saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_READ);
+ saa_clearb(SAA7134_GPIO_GPMODE3, SAA7134_GPIO_GPRESCAN);
+ saa_setb(SAA7134_GPIO_GPMODE3, SAA7134_GPIO_GPRESCAN);
+ *data |= saa_readb(SAA7134_GPIO_GPSTATUS0) << 8;
+ saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_IDLE);
+
+ return 0;
+}
+
+static int saa7134_go7007_interface_reset(struct go7007 *go)
+{
+ struct saa7134_go7007 *saa = go->hpi_context;
+ struct saa7134_dev *dev = saa->dev;
+ u16 intr_val, intr_data;
+ int count = 20;
+
+ saa_clearb(SAA7134_TS_PARALLEL, 0x80); /* Disable TS interface */
+ saa_writeb(SAA7134_GPIO_GPMODE2, 0xa4);
+ saa_writeb(SAA7134_GPIO_GPMODE0, 0xff);
+
+ saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_REQ1);
+ saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_RESET);
+ msleep(1);
+ saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_REQ1);
+ saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_REQ2);
+ msleep(10);
+
+ saa_clearb(SAA7134_GPIO_GPMODE3, SAA7134_GPIO_GPRESCAN);
+ saa_setb(SAA7134_GPIO_GPMODE3, SAA7134_GPIO_GPRESCAN);
+
+ saa_readb(SAA7134_GPIO_GPSTATUS2);
+ /*pr_debug("status is %s\n", saa_readb(SAA7134_GPIO_GPSTATUS2) & 0x40 ? "OK" : "not OK"); */
+
+ /* enter command mode...(?) */
+ saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_REQ1);
+ saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_REQ2);
+
+ do {
+ saa_clearb(SAA7134_GPIO_GPMODE3, SAA7134_GPIO_GPRESCAN);
+ saa_setb(SAA7134_GPIO_GPMODE3, SAA7134_GPIO_GPRESCAN);
+ saa_readb(SAA7134_GPIO_GPSTATUS2);
+ /*pr_info("gpio is %08x\n", saa_readl(SAA7134_GPIO_GPSTATUS0 >> 2)); */
+ } while (--count > 0);
+
+ /* Wait for an interrupt to indicate successful hardware reset */
+ if (go7007_read_interrupt(go, &intr_val, &intr_data) < 0 ||
+ (intr_val & ~0x1) != 0x55aa) {
+ pr_err("saa7134-go7007: unable to reset the GO7007\n");
+ return -1;
+ }
+ return 0;
+}
+
+static int saa7134_go7007_write_interrupt(struct go7007 *go, int addr, int data)
+{
+ struct saa7134_go7007 *saa = go->hpi_context;
+ struct saa7134_dev *dev = saa->dev;
+ int i;
+ u16 status_reg;
+
+#ifdef GO7007_HPI_DEBUG
+ pr_debug("saa7134-go7007: WriteInterrupt: %04x %04x\n", addr, data);
+#endif
+
+ for (i = 0; i < 100; ++i) {
+ gpio_read(dev, HPI_ADDR_INTR_STATUS, &status_reg);
+ if (!(status_reg & 0x0010))
+ break;
+ msleep(10);
+ }
+ if (i == 100) {
+ pr_err("saa7134-go7007: device is hung, status reg = 0x%04x\n",
+ status_reg);
+ return -1;
+ }
+ gpio_write(dev, HPI_ADDR_INTR_WR_PARAM, data);
+ gpio_write(dev, HPI_ADDR_INTR_WR_INDEX, addr);
+
+ return 0;
+}
+
+static int saa7134_go7007_read_interrupt(struct go7007 *go)
+{
+ struct saa7134_go7007 *saa = go->hpi_context;
+ struct saa7134_dev *dev = saa->dev;
+
+ /* XXX we need to wait if there is no interrupt available */
+ go->interrupt_available = 1;
+ gpio_read(dev, HPI_ADDR_INTR_RET_VALUE, &go->interrupt_value);
+ gpio_read(dev, HPI_ADDR_INTR_RET_DATA, &go->interrupt_data);
+#ifdef GO7007_HPI_DEBUG
+ pr_debug("saa7134-go7007: ReadInterrupt: %04x %04x\n",
+ go->interrupt_value, go->interrupt_data);
+#endif
+ return 0;
+}
+
+static void saa7134_go7007_irq_ts_done(struct saa7134_dev *dev,
+ unsigned long status)
+{
+ struct go7007 *go = video_get_drvdata(dev->empress_dev);
+ struct saa7134_go7007 *saa = go->hpi_context;
+
+ if (!vb2_is_streaming(&go->vidq))
+ return;
+ if (0 != (status & 0x000f0000))
+ pr_debug("saa7134-go7007: irq: lost %ld\n",
+ (status >> 16) & 0x0f);
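+ /* when bit 20 of the DMA status is set the bottom page is ready, otherwise the top page */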
+ if (status & 0x100000) {
+ dma_sync_single_for_cpu(&dev->pci->dev,
+ saa->bottom_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+ go7007_parse_video_stream(go, saa->bottom, PAGE_SIZE);
+ saa_writel(SAA7134_RS_BA2(5), saa->bottom_dma);
+ } else {
+ dma_sync_single_for_cpu(&dev->pci->dev,
+ saa->top_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+ go7007_parse_video_stream(go, saa->top, PAGE_SIZE);
+ saa_writel(SAA7134_RS_BA1(5), saa->top_dma);
+ }
+}
+
+static int saa7134_go7007_stream_start(struct go7007 *go)
+{
+ struct saa7134_go7007 *saa = go->hpi_context;
+ struct saa7134_dev *dev = saa->dev;
+
+ saa->top_dma = dma_map_page(&dev->pci->dev, virt_to_page(saa->top),
+ 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&dev->pci->dev, saa->top_dma))
+ return -ENOMEM;
+ saa->bottom_dma = dma_map_page(&dev->pci->dev,
+ virt_to_page(saa->bottom),
+ 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&dev->pci->dev, saa->bottom_dma)) {
+ dma_unmap_page(&dev->pci->dev, saa->top_dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ return -ENOMEM;
+ }
+
+ saa_writel(SAA7134_VIDEO_PORT_CTRL0 >> 2, 0xA300B000);
+ saa_writel(SAA7134_VIDEO_PORT_CTRL4 >> 2, 0x40000200);
+
+ /* Set HPI interface for video */
+ saa_writeb(SAA7134_GPIO_GPMODE0, 0xff);
+ saa_writeb(SAA7134_GPIO_GPSTATUS0, HPI_ADDR_VIDEO_BUFFER);
+ saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_ADDR);
+ saa_writeb(SAA7134_GPIO_GPMODE0, 0x00);
+
+ /* Enable TS interface */
+ saa_writeb(SAA7134_TS_PARALLEL, 0xe6);
+
+ /* Reset TS interface */
+ saa_setb(SAA7134_TS_SERIAL1, 0x01);
+ saa_clearb(SAA7134_TS_SERIAL1, 0x01);
+
+ /* Set up transfer block size */
+ saa_writeb(SAA7134_TS_PARALLEL_SERIAL, 128 - 1);
+ saa_writeb(SAA7134_TS_DMA0, (PAGE_SIZE >> 7) - 1);
+ saa_writeb(SAA7134_TS_DMA1, 0);
+ saa_writeb(SAA7134_TS_DMA2, 0);
+
+ /* Enable video streaming mode */
+ saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_VIDEO);
+
+ saa_writel(SAA7134_RS_BA1(5), saa->top_dma);
+ saa_writel(SAA7134_RS_BA2(5), saa->bottom_dma);
+ saa_writel(SAA7134_RS_PITCH(5), 128);
+ saa_writel(SAA7134_RS_CONTROL(5), SAA7134_RS_CONTROL_BURST_MAX);
+
+ /* Enable TS FIFO */
+ saa_setl(SAA7134_MAIN_CTRL, SAA7134_MAIN_CTRL_TE5);
+
+ /* Enable DMA IRQ */
+ saa_setl(SAA7134_IRQ1,
+ SAA7134_IRQ1_INTE_RA2_1 | SAA7134_IRQ1_INTE_RA2_0);
+
+ return 0;
+}
+
+static int saa7134_go7007_stream_stop(struct go7007 *go)
+{
+ struct saa7134_go7007 *saa = go->hpi_context;
+ struct saa7134_dev *dev;
+
+ if (!saa)
+ return -EINVAL;
+ dev = saa->dev;
+ if (!dev)
+ return -EINVAL;
+
+ /* Shut down TS FIFO */
+ saa_clearl(SAA7134_MAIN_CTRL, SAA7134_MAIN_CTRL_TE5);
+
+ /* Disable DMA IRQ */
+ saa_clearl(SAA7134_IRQ1,
+ SAA7134_IRQ1_INTE_RA2_1 | SAA7134_IRQ1_INTE_RA2_0);
+
+ /* Disable TS interface */
+ saa_clearb(SAA7134_TS_PARALLEL, 0x80);
+
+ dma_unmap_page(&dev->pci->dev, saa->top_dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ dma_unmap_page(&dev->pci->dev, saa->bottom_dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+
+ return 0;
+}
+
+static int saa7134_go7007_send_firmware(struct go7007 *go, u8 *data, int len)
+{
+ struct saa7134_go7007 *saa = go->hpi_context;
+ struct saa7134_dev *dev = saa->dev;
+ u16 status_reg;
+ int i;
+
+#ifdef GO7007_HPI_DEBUG
+ pr_debug("saa7134-go7007: DownloadBuffer sending %d bytes\n", len);
+#endif
+
+ while (len > 0) {
+ i = len > 64 ? 64 : len;
+ saa_writeb(SAA7134_GPIO_GPMODE0, 0xff);
+ saa_writeb(SAA7134_GPIO_GPSTATUS0, HPI_ADDR_INIT_BUFFER);
+ saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_ADDR);
+ saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_IDLE);
+ while (i-- > 0) {
+ saa_writeb(SAA7134_GPIO_GPSTATUS0, *data);
+ saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_WRITE);
+ saa_writeb(SAA7134_GPIO_GPSTATUS2, GPIO_COMMAND_IDLE);
+ ++data;
+ --len;
+ }
+ for (i = 0; i < 100; ++i) {
+ gpio_read(dev, HPI_ADDR_INTR_STATUS, &status_reg);
+ if (!(status_reg & 0x0002))
+ break;
+ }
+ if (i == 100) {
+ pr_err("saa7134-go7007: device is hung, status reg = 0x%04x\n",
+ status_reg);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static struct go7007_hpi_ops saa7134_go7007_hpi_ops = {
+ .interface_reset = saa7134_go7007_interface_reset,
+ .write_interrupt = saa7134_go7007_write_interrupt,
+ .read_interrupt = saa7134_go7007_read_interrupt,
+ .stream_start = saa7134_go7007_stream_start,
+ .stream_stop = saa7134_go7007_stream_stop,
+ .send_firmware = saa7134_go7007_send_firmware,
+};
+MODULE_FIRMWARE("go7007/go7007tv.bin");
+
+/* --------------------------------------------------------------------------*/
+
+static int saa7134_go7007_s_std(struct v4l2_subdev *sd, v4l2_std_id norm)
+{
+#if 0
+ struct saa7134_go7007 *saa = to_state(sd);
+ struct saa7134_dev *dev = saa->dev;
+
+ return saa7134_s_std_internal(dev, NULL, norm);
+#else
+ return 0;
+#endif
+}
+
+static const struct v4l2_subdev_video_ops saa7134_go7007_video_ops = {
+ .s_std = saa7134_go7007_s_std,
+};
+
+static const struct v4l2_subdev_ops saa7134_go7007_sd_ops = {
+ .video = &saa7134_go7007_video_ops,
+};
+
+/* --------------------------------------------------------------------------*/
+
+
+/********************* Add/remove functions *********************/
+
+static int saa7134_go7007_init(struct saa7134_dev *dev)
+{
+ struct go7007 *go;
+ struct saa7134_go7007 *saa;
+ struct v4l2_subdev *sd;
+
+ pr_debug("saa7134-go7007: probing new SAA713X board\n");
+
+ go = go7007_alloc(&board_voyager, &dev->pci->dev);
+ if (go == NULL)
+ return -ENOMEM;
+
+ saa = kzalloc(sizeof(struct saa7134_go7007), GFP_KERNEL);
+ if (saa == NULL) {
+ kfree(go);
+ return -ENOMEM;
+ }
+
+ go->board_id = GO7007_BOARDID_PCI_VOYAGER;
+ snprintf(go->bus_info, sizeof(go->bus_info), "PCI:%s", pci_name(dev->pci));
+ strlcpy(go->name, saa7134_boards[dev->board].name, sizeof(go->name));
+ go->hpi_ops = &saa7134_go7007_hpi_ops;
+ go->hpi_context = saa;
+ saa->dev = dev;
+
+ /* Init the subdevice interface */
+ sd = &saa->sd;
+ v4l2_subdev_init(sd, &saa7134_go7007_sd_ops);
+ v4l2_set_subdevdata(sd, saa);
+ strncpy(sd->name, "saa7134-go7007", sizeof(sd->name));
+
+ /* Allocate a couple pages for receiving the compressed stream */
+ saa->top = (u8 *)get_zeroed_page(GFP_KERNEL);
+ if (!saa->top)
+ goto allocfail;
+ saa->bottom = (u8 *)get_zeroed_page(GFP_KERNEL);
+ if (!saa->bottom)
+ goto allocfail;
+
+ /* Boot the GO7007 */
+ if (go7007_boot_encoder(go, go->board_info->flags &
+ GO7007_BOARD_USE_ONBOARD_I2C) < 0)
+ goto allocfail;
+
+ /* Do any final GO7007 initialization, then register the
+ * V4L2 and ALSA interfaces */
+ if (go7007_register_encoder(go, go->board_info->num_i2c_devs) < 0)
+ goto allocfail;
+
+ /* Register the subdevice interface with the go7007 device */
+ if (v4l2_device_register_subdev(&go->v4l2_dev, sd) < 0)
+ pr_info("saa7134-go7007: register subdev failed\n");
+
+ dev->empress_dev = &go->vdev;
+
+ go->status = STATUS_ONLINE;
+ return 0;
+
+allocfail:
+ if (saa->top)
+ free_page((unsigned long)saa->top);
+ if (saa->bottom)
+ free_page((unsigned long)saa->bottom);
+ kfree(saa);
+ kfree(go);
+ return -ENOMEM;
+}
+
+static int saa7134_go7007_fini(struct saa7134_dev *dev)
+{
+ struct go7007 *go;
+ struct saa7134_go7007 *saa;
+
+ if (NULL == dev->empress_dev)
+ return 0;
+
+ go = video_get_drvdata(dev->empress_dev);
+ if (go->audio_enabled)
+ go7007_snd_remove(go);
+
+ saa = go->hpi_context;
+ go->status = STATUS_SHUTDOWN;
+ free_page((unsigned long)saa->top);
+ free_page((unsigned long)saa->bottom);
+ v4l2_device_unregister_subdev(&saa->sd);
+ kfree(saa);
+ video_unregister_device(&go->vdev);
+
+ v4l2_device_put(&go->v4l2_dev);
+ dev->empress_dev = NULL;
+
+ return 0;
+}
+
+static struct saa7134_mpeg_ops saa7134_go7007_ops = {
+ .type = SAA7134_MPEG_GO7007,
+ .init = saa7134_go7007_init,
+ .fini = saa7134_go7007_fini,
+ .irq_ts_done = saa7134_go7007_irq_ts_done,
+};
+
+static int __init saa7134_go7007_mod_init(void)
+{
+ return saa7134_ts_register(&saa7134_go7007_ops);
+}
+
+static void __exit saa7134_go7007_mod_cleanup(void)
+{
+ saa7134_ts_unregister(&saa7134_go7007_ops);
+}
+
+module_init(saa7134_go7007_mod_init);
+module_exit(saa7134_go7007_mod_cleanup);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/pci/saa7134/saa7134-vbi.c b/drivers/media/pci/saa7134/saa7134-vbi.c
index c06dbe17a87f..4f0b1012e4f3 100644
--- a/drivers/media/pci/saa7134/saa7134-vbi.c
+++ b/drivers/media/pci/saa7134/saa7134-vbi.c
@@ -43,7 +43,7 @@ MODULE_PARM_DESC(vbibufs,"number of vbi buffers, range 2-32");
/* ------------------------------------------------------------------ */
-#define VBI_LINE_COUNT 16
+#define VBI_LINE_COUNT 17
#define VBI_LINE_LENGTH 2048
#define VBI_SCALE 0x200
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index 0cfa2ca6a32a..fc4a427cb51f 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -201,7 +201,7 @@ static struct saa7134_format formats[] = {
.video_v_start = 24, \
.video_v_stop = 311, \
.vbi_v_start_0 = 7, \
- .vbi_v_stop_0 = 22, \
+ .vbi_v_stop_0 = 23, \
.vbi_v_start_1 = 319, \
.src_timing = 4
diff --git a/drivers/media/pci/saa7134/saa7134.h b/drivers/media/pci/saa7134/saa7134.h
index e47edd4b57ce..1a82dd07205b 100644
--- a/drivers/media/pci/saa7134/saa7134.h
+++ b/drivers/media/pci/saa7134/saa7134.h
@@ -338,6 +338,7 @@ struct saa7134_card_ir {
#define SAA7134_BOARD_ASUSTeK_PS3_100 190
#define SAA7134_BOARD_HAWELL_HW_9004V1 191
#define SAA7134_BOARD_AVERMEDIA_A706 192
+#define SAA7134_BOARD_WIS_VOYAGER 193
#define SAA7134_MAXBOARDS 32
#define SAA7134_INPUT_MAX 8
@@ -368,6 +369,7 @@ enum saa7134_mpeg_type {
SAA7134_MPEG_UNUSED,
SAA7134_MPEG_EMPRESS,
SAA7134_MPEG_DVB,
+ SAA7134_MPEG_GO7007,
};
enum saa7134_mpeg_ts_type {
@@ -407,6 +409,7 @@ struct saa7134_board {
#define card_has_radio(dev) (NULL != saa7134_boards[dev->board].radio.name)
#define card_is_empress(dev) (SAA7134_MPEG_EMPRESS == saa7134_boards[dev->board].mpeg)
#define card_is_dvb(dev) (SAA7134_MPEG_DVB == saa7134_boards[dev->board].mpeg)
+#define card_is_go7007(dev) (SAA7134_MPEG_GO7007 == saa7134_boards[dev->board].mpeg)
#define card_has_mpeg(dev) (SAA7134_MPEG_UNUSED != saa7134_boards[dev->board].mpeg)
#define card(dev) (saa7134_boards[dev->board])
#define card_in(dev,n) (saa7134_boards[dev->board].inputs[n])
@@ -522,6 +525,8 @@ struct saa7134_mpeg_ops {
int (*init)(struct saa7134_dev *dev);
int (*fini)(struct saa7134_dev *dev);
void (*signal_change)(struct saa7134_dev *dev);
+ void (*irq_ts_done)(struct saa7134_dev *dev,
+ unsigned long status);
};
/* global device status */
diff --git a/drivers/media/pci/saa7164/saa7164-api.c b/drivers/media/pci/saa7164/saa7164-api.c
index e042963d377d..4f3b1dd18ba4 100644
--- a/drivers/media/pci/saa7164/saa7164-api.c
+++ b/drivers/media/pci/saa7164/saa7164-api.c
@@ -680,7 +680,6 @@ static int saa7164_api_set_dif(struct saa7164_port *port, u8 reg, u8 val)
int saa7164_api_configure_dif(struct saa7164_port *port, u32 std)
{
struct saa7164_dev *dev = port->dev;
- int ret = 0;
u8 agc_disable;
dprintk(DBGLVL_API, "%s(nr=%d, 0x%x)\n", __func__, port->nr, std);
@@ -733,7 +732,7 @@ int saa7164_api_configure_dif(struct saa7164_port *port, u32 std)
saa7164_api_set_dif(port, 0x04, 0x00); /* Active (again) */
msleep(100);
- return ret;
+ return 0;
}
/* Ensure the dif is in the correct state for the operating mode
diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c
index 1bf06970ca3e..cc1be8a7a451 100644
--- a/drivers/media/pci/saa7164/saa7164-core.c
+++ b/drivers/media/pci/saa7164/saa7164-core.c
@@ -52,7 +52,7 @@ unsigned int saa_debug;
module_param_named(debug, saa_debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug messages");
-unsigned int fw_debug;
+static unsigned int fw_debug;
module_param(fw_debug, int, 0644);
MODULE_PARM_DESC(fw_debug, "Firmware debug level def:2");
@@ -72,7 +72,7 @@ static unsigned int card[] = {[0 ... (SAA7164_MAXBOARDS - 1)] = UNSET };
module_param_array(card, int, NULL, 0444);
MODULE_PARM_DESC(card, "card type");
-unsigned int print_histogram = 64;
+static unsigned int print_histogram = 64;
module_param(print_histogram, int, 0644);
MODULE_PARM_DESC(print_histogram, "print histogram values once");
@@ -80,7 +80,7 @@ unsigned int crc_checking = 1;
module_param(crc_checking, int, 0644);
MODULE_PARM_DESC(crc_checking, "enable crc sanity checking on buffers");
-unsigned int guard_checking = 1;
+static unsigned int guard_checking = 1;
module_param(guard_checking, int, 0644);
MODULE_PARM_DESC(guard_checking,
"enable dma sanity checking for buffer overruns");
diff --git a/drivers/media/pci/solo6x10/Kconfig b/drivers/media/pci/solo6x10/Kconfig
index d9e06a6bf1eb..0fb91dc7ca73 100644
--- a/drivers/media/pci/solo6x10/Kconfig
+++ b/drivers/media/pci/solo6x10/Kconfig
@@ -1,6 +1,7 @@
config VIDEO_SOLO6X10
tristate "Bluecherry / Softlogic 6x10 capture cards (MPEG-4/H.264)"
depends on PCI && VIDEO_DEV && SND && I2C
+ depends on HAS_DMA
select BITREVERSE
select FONT_SUPPORT
select FONT_8x16
diff --git a/drivers/media/pci/solo6x10/solo6x10-disp.c b/drivers/media/pci/solo6x10/solo6x10-disp.c
index 5ea9cac03968..11c98f0625e4 100644
--- a/drivers/media/pci/solo6x10/solo6x10-disp.c
+++ b/drivers/media/pci/solo6x10/solo6x10-disp.c
@@ -172,7 +172,7 @@ static void solo_vout_config(struct solo_dev *solo_dev)
static int solo_dma_vin_region(struct solo_dev *solo_dev, u32 off,
u16 val, int reg_size)
{
- u16 *buf;
+ __le16 *buf;
const int n = 64, size = n * sizeof(*buf);
int i, ret = 0;
@@ -211,7 +211,7 @@ int solo_set_motion_block(struct solo_dev *solo_dev, u8 ch,
{
const unsigned size = sizeof(u16) * 64;
u32 off = SOLO_MOT_FLAG_AREA + ch * SOLO_MOT_THRESH_SIZE * 2;
- u16 *buf;
+ __le16 *buf;
int x, y;
int ret = 0;
diff --git a/drivers/media/pci/solo6x10/solo6x10-eeprom.c b/drivers/media/pci/solo6x10/solo6x10-eeprom.c
index af40b3aba410..da25ce4a6952 100644
--- a/drivers/media/pci/solo6x10/solo6x10-eeprom.c
+++ b/drivers/media/pci/solo6x10/solo6x10-eeprom.c
@@ -100,7 +100,7 @@ unsigned int solo_eeprom_ewen(struct solo_dev *solo_dev, int w_en)
return retval;
}
-unsigned short solo_eeprom_read(struct solo_dev *solo_dev, int loc)
+__be16 solo_eeprom_read(struct solo_dev *solo_dev, int loc)
{
int read_cmd = loc | (EE_READ_CMD << ADDR_LEN);
unsigned short retval = 0;
@@ -117,11 +117,11 @@ unsigned short solo_eeprom_read(struct solo_dev *solo_dev, int loc)
solo_eeprom_reg_write(solo_dev, ~EE_CS);
- return retval;
+ return (__force __be16)retval;
}
int solo_eeprom_write(struct solo_dev *solo_dev, int loc,
- unsigned short data)
+ __be16 data)
{
int write_cmd = loc | (EE_WRITE_CMD << ADDR_LEN);
unsigned int retval;
@@ -130,7 +130,7 @@ int solo_eeprom_write(struct solo_dev *solo_dev, int loc,
solo_eeprom_cmd(solo_dev, write_cmd);
for (i = 15; i >= 0; i--) {
- unsigned int dataval = (data >> i) & 1;
+ unsigned int dataval = ((__force unsigned)data >> i) & 1;
solo_eeprom_reg_write(solo_dev, EE_ENB);
solo_eeprom_reg_write(solo_dev,
diff --git a/drivers/media/pci/solo6x10/solo6x10.h b/drivers/media/pci/solo6x10/solo6x10.h
index c6154b00fcbd..72017b7f0a75 100644
--- a/drivers/media/pci/solo6x10/solo6x10.h
+++ b/drivers/media/pci/solo6x10/solo6x10.h
@@ -394,9 +394,9 @@ int solo_osd_print(struct solo_enc_dev *solo_enc);
/* EEPROM commands */
unsigned int solo_eeprom_ewen(struct solo_dev *solo_dev, int w_en);
-unsigned short solo_eeprom_read(struct solo_dev *solo_dev, int loc);
+__be16 solo_eeprom_read(struct solo_dev *solo_dev, int loc);
int solo_eeprom_write(struct solo_dev *solo_dev, int loc,
- unsigned short data);
+ __be16 data);
/* JPEG Qp functions */
void solo_s_jpeg_qp(struct solo_dev *solo_dev, unsigned int ch,
diff --git a/drivers/media/pci/sta2x11/Kconfig b/drivers/media/pci/sta2x11/Kconfig
index 03130157db83..f6f30abc088b 100644
--- a/drivers/media/pci/sta2x11/Kconfig
+++ b/drivers/media/pci/sta2x11/Kconfig
@@ -1,6 +1,7 @@
config STA2X11_VIP
tristate "STA2X11 VIP Video For Linux"
depends on STA2X11
+ depends on HAS_DMA
select VIDEO_ADV7180 if MEDIA_SUBDRV_AUTOSELECT
select VIDEOBUF2_DMA_CONTIG
depends on PCI && VIDEO_V4L2 && VIRT_TO_BUS
diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c
index 365bd21301ba..22450f583da1 100644
--- a/drivers/media/pci/sta2x11/sta2x11_vip.c
+++ b/drivers/media/pci/sta2x11/sta2x11_vip.c
@@ -152,7 +152,7 @@ struct sta2x11_vip {
int tcount, bcount;
int overflow;
- void *iomem; /* I/O Memory */
+ void __iomem *iomem; /* I/O Memory */
struct vip_config *config;
};
diff --git a/drivers/media/pci/ttpci/Kconfig b/drivers/media/pci/ttpci/Kconfig
index 0dcb8cd77676..7b83151ed6c4 100644
--- a/drivers/media/pci/ttpci/Kconfig
+++ b/drivers/media/pci/ttpci/Kconfig
@@ -1,8 +1,12 @@
+config DVB_AV7110_IR
+ bool
+
config DVB_AV7110
tristate "AV7110 cards"
depends on DVB_CORE && PCI && I2C
select TTPCI_EEPROM
select VIDEO_SAA7146_VV
+ select DVB_AV7110_IR if INPUT_EVDEV=y || INPUT_EVDEV=DVB_AV7110
depends on VIDEO_DEV # dependencies of VIDEO_SAA7146_VV
select DVB_VES1820 if MEDIA_SUBDRV_AUTOSELECT
select DVB_VES1X93 if MEDIA_SUBDRV_AUTOSELECT
diff --git a/drivers/media/pci/ttpci/Makefile b/drivers/media/pci/ttpci/Makefile
index 98905963ff08..49f71b1eaf14 100644
--- a/drivers/media/pci/ttpci/Makefile
+++ b/drivers/media/pci/ttpci/Makefile
@@ -5,7 +5,7 @@
dvb-ttpci-objs := av7110_hw.o av7110_v4l.o av7110_av.o av7110_ca.o av7110.o av7110_ipack.o
-ifdef CONFIG_INPUT_EVDEV
+ifdef CONFIG_DVB_AV7110_IR
dvb-ttpci-objs += av7110_ir.o
endif
diff --git a/drivers/media/pci/ttpci/av7110.c b/drivers/media/pci/ttpci/av7110.c
index f38329d29daa..c1f0617a6973 100644
--- a/drivers/media/pci/ttpci/av7110.c
+++ b/drivers/media/pci/ttpci/av7110.c
@@ -235,7 +235,7 @@ static void recover_arm(struct av7110 *av7110)
restart_feeds(av7110);
-#if IS_ENABLED(CONFIG_INPUT_EVDEV)
+#if IS_ENABLED(CONFIG_DVB_AV7110_IR)
av7110_check_ir_config(av7110, true);
#endif
}
@@ -268,7 +268,7 @@ static int arm_thread(void *data)
if (!av7110->arm_ready)
continue;
-#if IS_ENABLED(CONFIG_INPUT_EVDEV)
+#if IS_ENABLED(CONFIG_DVB_AV7110_IR)
av7110_check_ir_config(av7110, false);
#endif
@@ -2725,7 +2725,7 @@ static int av7110_attach(struct saa7146_dev* dev,
mutex_init(&av7110->ioctl_mutex);
-#if IS_ENABLED(CONFIG_INPUT_EVDEV)
+#if IS_ENABLED(CONFIG_DVB_AV7110_IR)
av7110_ir_init(av7110);
#endif
printk(KERN_INFO "dvb-ttpci: found av7110-%d.\n", av7110_num);
@@ -2768,7 +2768,7 @@ static int av7110_detach(struct saa7146_dev* saa)
struct av7110 *av7110 = saa->ext_priv;
dprintk(4, "%p\n", av7110);
-#if IS_ENABLED(CONFIG_INPUT_EVDEV)
+#if IS_ENABLED(CONFIG_DVB_AV7110_IR)
av7110_ir_exit(av7110);
#endif
if (budgetpatch || av7110->full_ts) {
diff --git a/drivers/media/pci/tw68/Kconfig b/drivers/media/pci/tw68/Kconfig
new file mode 100644
index 000000000000..5425ba1e320d
--- /dev/null
+++ b/drivers/media/pci/tw68/Kconfig
@@ -0,0 +1,10 @@
+config VIDEO_TW68
+ tristate "Techwell tw68x Video For Linux"
+ depends on VIDEO_DEV && PCI && VIDEO_V4L2
+ select I2C_ALGOBIT
+ select VIDEOBUF2_DMA_SG
+ ---help---
+ Support for Techwell tw68xx based frame grabber boards.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tw68.
diff --git a/drivers/media/pci/tw68/Makefile b/drivers/media/pci/tw68/Makefile
new file mode 100644
index 000000000000..3d02f28b14fb
--- /dev/null
+++ b/drivers/media/pci/tw68/Makefile
@@ -0,0 +1,3 @@
+tw68-objs := tw68-core.o tw68-video.o tw68-risc.o
+
+obj-$(CONFIG_VIDEO_TW68) += tw68.o
diff --git a/drivers/media/pci/tw68/tw68-core.c b/drivers/media/pci/tw68/tw68-core.c
new file mode 100644
index 000000000000..a6fb48cf7aae
--- /dev/null
+++ b/drivers/media/pci/tw68/tw68-core.c
@@ -0,0 +1,434 @@
+/*
+ * tw68-core.c
+ * Core functions for the Techwell 68xx driver
+ *
+ * Much of this code is derived from the cx88 and saa7134 drivers, which
+ * were in turn derived from the bt87x driver. The original work was by
+ * Gerd Knorr; more recently the code was enhanced by Mauro Carvalho Chehab,
+ * Hans Verkuil, Andy Walls and many others. Their work is gratefully
+ * acknowledged. Full credit goes to them - any problems within this code
+ * are mine.
+ *
+ * Copyright (C) 2009 William M. Brack
+ *
+ * Refactored and updated to the latest v4l core frameworks:
+ *
+ * Copyright (C) 2014 Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/kmod.h>
+#include <linux/sound.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/dma-mapping.h>
+#include <linux/pm.h>
+
+#include <media/v4l2-dev.h>
+#include "tw68.h"
+#include "tw68-reg.h"
+
+MODULE_DESCRIPTION("v4l2 driver module for tw6800 based video capture cards");
+MODULE_AUTHOR("William M. Brack");
+MODULE_AUTHOR("Hans Verkuil <hverkuil@xs4all.nl>");
+MODULE_LICENSE("GPL");
+
+static unsigned int latency = UNSET;
+module_param(latency, int, 0444);
+MODULE_PARM_DESC(latency, "pci latency timer");
+
+static unsigned int video_nr[] = {[0 ... (TW68_MAXBOARDS - 1)] = UNSET };
+module_param_array(video_nr, int, NULL, 0444);
+MODULE_PARM_DESC(video_nr, "video device number");
+
+static unsigned int card[] = {[0 ... (TW68_MAXBOARDS - 1)] = UNSET };
+module_param_array(card, int, NULL, 0444);
+MODULE_PARM_DESC(card, "card type");
+
+static atomic_t tw68_instance = ATOMIC_INIT(0);
+
+/* ------------------------------------------------------------------ */
+
+/*
+ * Please add any new PCI IDs to: http://pci-ids.ucw.cz. This keeps
+ * the PCI ID database up to date. Note that the entries must be
+ * added under vendor 0x1797 (Techwell Inc.) as device IDs.
+ */
+static const struct pci_device_id tw68_pci_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_6800)},
+ {PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_6801)},
+ {PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_6804)},
+ {PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_6816_1)},
+ {PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_6816_2)},
+ {PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_6816_3)},
+ {PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_6816_4)},
+ {0,}
+};
+
+/* ------------------------------------------------------------------ */
+
+
+/*
+ * The device is given a "soft reset". According to the specifications,
+ * after this "all register content remain unchanged", so we also write
+ * to all specified registers manually (mostly to the manufacturer's
+ * specified reset values).
+ */
+static int tw68_hw_init1(struct tw68_dev *dev)
+{
+ /* Assure all interrupts are disabled */
+ tw_writel(TW68_INTMASK, 0); /* 020 */
+ /* Clear any pending interrupts */
+ tw_writel(TW68_INTSTAT, 0xffffffff); /* 01C */
+ /* Stop risc processor, set default buffer level */
+ tw_writel(TW68_DMAC, 0x1600);
+
+ tw_writeb(TW68_ACNTL, 0x80); /* 218 soft reset */
+ msleep(100);
+
+ tw_writeb(TW68_INFORM, 0x40); /* 208 mux0, 27mhz xtal */
+ tw_writeb(TW68_OPFORM, 0x04); /* 20C analog line-lock */
+ tw_writeb(TW68_HSYNC, 0); /* 210 color-killer high sens */
+ tw_writeb(TW68_ACNTL, 0x42); /* 218 int vref #2, chroma adc off */
+
+ tw_writeb(TW68_CROP_HI, 0x02); /* 21C Hactive m.s. bits */
+ tw_writeb(TW68_VDELAY_LO, 0x12);/* 220 Mfg specified reset value */
+ tw_writeb(TW68_VACTIVE_LO, 0xf0);
+ tw_writeb(TW68_HDELAY_LO, 0x0f);
+ tw_writeb(TW68_HACTIVE_LO, 0xd0);
+
+ tw_writeb(TW68_CNTRL1, 0xcd); /* 230 Wide Chroma BPF B/W
+ * Secam reduction, Adap comb for
+ * NTSC, Op Mode 1 */
+
+ tw_writeb(TW68_VSCALE_LO, 0); /* 234 */
+ tw_writeb(TW68_SCALE_HI, 0x11); /* 238 */
+ tw_writeb(TW68_HSCALE_LO, 0); /* 23c */
+ tw_writeb(TW68_BRIGHT, 0); /* 240 */
+ tw_writeb(TW68_CONTRAST, 0x5c); /* 244 */
+ tw_writeb(TW68_SHARPNESS, 0x51);/* 248 */
+ tw_writeb(TW68_SAT_U, 0x80); /* 24C */
+ tw_writeb(TW68_SAT_V, 0x80); /* 250 */
+ tw_writeb(TW68_HUE, 0x00); /* 254 */
+
+ /* TODO - Check that none of these are set by control defaults */
+ tw_writeb(TW68_SHARP2, 0x53); /* 258 Mfg specified reset val */
+ tw_writeb(TW68_VSHARP, 0x80); /* 25C Sharpness Coring val 8 */
+ tw_writeb(TW68_CORING, 0x44); /* 260 CTI and Vert Peak coring */
+ tw_writeb(TW68_CNTRL2, 0x00); /* 268 No power saving enabled */
+ tw_writeb(TW68_SDT, 0x07); /* 270 Enable shadow reg, auto-det */
+ tw_writeb(TW68_SDTR, 0x7f); /* 274 All stds recog, don't start */
+ tw_writeb(TW68_CLMPG, 0x50); /* 280 Clamp end at 40 sys clocks */
+ tw_writeb(TW68_IAGC, 0x22); /* 284 Mfg specified reset val */
+ tw_writeb(TW68_AGCGAIN, 0xf0); /* 288 AGC gain when loop disabled */
+ tw_writeb(TW68_PEAKWT, 0xd8); /* 28C White peak threshold */
+ tw_writeb(TW68_CLMPL, 0x3c); /* 290 Y channel clamp level */
+/* tw_writeb(TW68_SYNCT, 0x38);*/ /* 294 Sync amplitude */
+ tw_writeb(TW68_SYNCT, 0x30); /* 294 Sync amplitude */
+ tw_writeb(TW68_MISSCNT, 0x44); /* 298 Horiz sync, VCR detect sens */
+ tw_writeb(TW68_PCLAMP, 0x28); /* 29C Clamp pos from PLL sync */
+ /* Bit DETV of VCNTL1 helps sync multi cams/chip board */
+ tw_writeb(TW68_VCNTL1, 0x04); /* 2A0 */
+ tw_writeb(TW68_VCNTL2, 0); /* 2A4 */
+ tw_writeb(TW68_CKILL, 0x68); /* 2A8 Mfg specified reset val */
+ tw_writeb(TW68_COMB, 0x44); /* 2AC Mfg specified reset val */
+ tw_writeb(TW68_LDLY, 0x30); /* 2B0 Max positive luma delay */
+ tw_writeb(TW68_MISC1, 0x14); /* 2B4 Mfg specified reset val */
+ tw_writeb(TW68_LOOP, 0xa5); /* 2B8 Mfg specified reset val */
+ tw_writeb(TW68_MISC2, 0xe0); /* 2BC Enable colour killer */
+ tw_writeb(TW68_MVSN, 0); /* 2C0 */
+ tw_writeb(TW68_CLMD, 0x05); /* 2CC slice level auto, clamp med. */
+ tw_writeb(TW68_IDCNTL, 0); /* 2D0 Writing zero to this register
+ * selects NTSC ID detection,
+ * but doesn't change the
+ * sensitivity (which has a reset
+ * value of 1E). Since we are
+ * not doing auto-detection, it
+ * has no real effect */
+ tw_writeb(TW68_CLCNTL1, 0); /* 2D4 */
+ tw_writel(TW68_VBIC, 0x03); /* 010 */
+ tw_writel(TW68_CAP_CTL, 0x03); /* 040 Enable both even & odd flds */
+ tw_writel(TW68_DMAC, 0x2000); /* patch set had 0x2080 */
+ tw_writel(TW68_TESTREG, 0); /* 02C */
+
+ /*
+ * Some common boards, especially inexpensive single-chip models,
+ * use the GPIO bits 0-3 to control an on-board video-output mux.
+	 * For these boards, we need to put the GPIO register into
+	 * "normal" mode, set bits 0-3 as output, and then set those bits
+	 * to zero.
+	 *
+	 * Eventually, it would be nice if we could identify these boards
+	 * uniquely, and only do this initialisation if the board has been
+	 * identified. For the moment, however, it shouldn't hurt anything
+ * to do these steps.
+ */
+ tw_writel(TW68_GPIOC, 0); /* Set the GPIO to "normal", no ints */
+ tw_writel(TW68_GPOE, 0x0f); /* Set bits 0-3 to "output" */
+ tw_writel(TW68_GPDATA, 0); /* Set all bits to low state */
+
+ /* Initialize the device control structures */
+ mutex_init(&dev->lock);
+ spin_lock_init(&dev->slock);
+
+ /* Initialize any subsystems */
+ tw68_video_init1(dev);
+ return 0;
+}
+
+static irqreturn_t tw68_irq(int irq, void *dev_id)
+{
+ struct tw68_dev *dev = dev_id;
+ u32 status, orig;
+ int loop;
+
+ status = orig = tw_readl(TW68_INTSTAT) & dev->pci_irqmask;
+ /* Check if anything to do */
+ if (0 == status)
+ return IRQ_NONE; /* Nope - return */
+ for (loop = 0; loop < 10; loop++) {
+ if (status & dev->board_virqmask) /* video interrupt */
+ tw68_irq_video_done(dev, status);
+ status = tw_readl(TW68_INTSTAT) & dev->pci_irqmask;
+ if (0 == status)
+ return IRQ_HANDLED;
+ }
+ dev_dbg(&dev->pci->dev, "%s: **** INTERRUPT NOT HANDLED - clearing mask (orig 0x%08x, cur 0x%08x)",
+ dev->name, orig, tw_readl(TW68_INTSTAT));
+ dev_dbg(&dev->pci->dev, "%s: pci_irqmask 0x%08x; board_virqmask 0x%08x ****\n",
+ dev->name, dev->pci_irqmask, dev->board_virqmask);
+ tw_clearl(TW68_INTMASK, dev->pci_irqmask);
+ return IRQ_HANDLED;
+}
+
+static int tw68_initdev(struct pci_dev *pci_dev,
+ const struct pci_device_id *pci_id)
+{
+ struct tw68_dev *dev;
+ int vidnr = -1;
+ int err;
+
+ dev = devm_kzalloc(&pci_dev->dev, sizeof(*dev), GFP_KERNEL);
+ if (NULL == dev)
+ return -ENOMEM;
+
+ dev->instance = v4l2_device_set_name(&dev->v4l2_dev, "tw68",
+ &tw68_instance);
+
+ err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
+ if (err)
+ return err;
+
+ /* pci init */
+ dev->pci = pci_dev;
+ if (pci_enable_device(pci_dev)) {
+ err = -EIO;
+ goto fail1;
+ }
+
+ dev->name = dev->v4l2_dev.name;
+
+ if (UNSET != latency) {
+ pr_info("%s: setting pci latency timer to %d\n",
+ dev->name, latency);
+ pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, latency);
+ }
+
+ /* print pci info */
+ pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &dev->pci_rev);
+ pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
+ pr_info("%s: found at %s, rev: %d, irq: %d, latency: %d, mmio: 0x%llx\n",
+ dev->name, pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
+ dev->pci_lat, (u64)pci_resource_start(pci_dev, 0));
+ pci_set_master(pci_dev);
+ if (!pci_dma_supported(pci_dev, DMA_BIT_MASK(32))) {
+ pr_info("%s: Oops: no 32bit PCI DMA ???\n", dev->name);
+ err = -EIO;
+ goto fail1;
+ }
+
+ switch (pci_id->device) {
+ case PCI_DEVICE_ID_6800: /* TW6800 */
+ dev->vdecoder = TW6800;
+ dev->board_virqmask = TW68_VID_INTS;
+ break;
+ case PCI_DEVICE_ID_6801: /* Video decoder for TW6802 */
+ dev->vdecoder = TW6801;
+ dev->board_virqmask = TW68_VID_INTS | TW68_VID_INTSX;
+ break;
+ case PCI_DEVICE_ID_6804: /* Video decoder for TW6804 */
+ dev->vdecoder = TW6804;
+ dev->board_virqmask = TW68_VID_INTS | TW68_VID_INTSX;
+ break;
+ default:
+ dev->vdecoder = TWXXXX; /* To be announced */
+ dev->board_virqmask = TW68_VID_INTS | TW68_VID_INTSX;
+ break;
+ }
+
+ /* get mmio */
+ if (!request_mem_region(pci_resource_start(pci_dev, 0),
+ pci_resource_len(pci_dev, 0),
+ dev->name)) {
+ err = -EBUSY;
+ pr_err("%s: can't get MMIO memory @ 0x%llx\n",
+ dev->name,
+ (unsigned long long)pci_resource_start(pci_dev, 0));
+ goto fail1;
+ }
+ dev->lmmio = ioremap(pci_resource_start(pci_dev, 0),
+ pci_resource_len(pci_dev, 0));
+ dev->bmmio = (__u8 __iomem *)dev->lmmio;
+ if (NULL == dev->lmmio) {
+ err = -EIO;
+ pr_err("%s: can't ioremap() MMIO memory\n",
+ dev->name);
+ goto fail2;
+ }
+ /* initialize hardware #1 */
+ /* Then do any initialisation wanted before interrupts are on */
+ tw68_hw_init1(dev);
+
+ /* get irq */
+ err = devm_request_irq(&pci_dev->dev, pci_dev->irq, tw68_irq,
+ IRQF_SHARED | IRQF_DISABLED, dev->name, dev);
+ if (err < 0) {
+ pr_err("%s: can't get IRQ %d\n",
+ dev->name, pci_dev->irq);
+ goto fail3;
+ }
+
+ /*
+	 * Now do the remainder of the initialisation, first for
+	 * things unique to this card, then for the board in general
+ */
+ if (dev->instance < TW68_MAXBOARDS)
+ vidnr = video_nr[dev->instance];
+ /* initialise video function first */
+ err = tw68_video_init2(dev, vidnr);
+ if (err < 0) {
+ pr_err("%s: can't register video device\n",
+ dev->name);
+ goto fail4;
+ }
+ tw_setl(TW68_INTMASK, dev->pci_irqmask);
+
+ pr_info("%s: registered device %s\n",
+ dev->name, video_device_node_name(&dev->vdev));
+
+ return 0;
+
+fail4:
+ video_unregister_device(&dev->vdev);
+fail3:
+ iounmap(dev->lmmio);
+fail2:
+ release_mem_region(pci_resource_start(pci_dev, 0),
+ pci_resource_len(pci_dev, 0));
+fail1:
+ v4l2_device_unregister(&dev->v4l2_dev);
+ return err;
+}
+
+static void tw68_finidev(struct pci_dev *pci_dev)
+{
+ struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
+ struct tw68_dev *dev =
+ container_of(v4l2_dev, struct tw68_dev, v4l2_dev);
+
+ /* shutdown subsystems */
+ tw_clearl(TW68_DMAC, TW68_DMAP_EN | TW68_FIFO_EN);
+ tw_writel(TW68_INTMASK, 0);
+
+ /* unregister */
+ video_unregister_device(&dev->vdev);
+ v4l2_ctrl_handler_free(&dev->hdl);
+
+ /* release resources */
+ iounmap(dev->lmmio);
+ release_mem_region(pci_resource_start(pci_dev, 0),
+ pci_resource_len(pci_dev, 0));
+
+ v4l2_device_unregister(&dev->v4l2_dev);
+}
+
+#ifdef CONFIG_PM
+
+static int tw68_suspend(struct pci_dev *pci_dev , pm_message_t state)
+{
+ struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
+ struct tw68_dev *dev = container_of(v4l2_dev,
+ struct tw68_dev, v4l2_dev);
+
+ tw_clearl(TW68_DMAC, TW68_DMAP_EN | TW68_FIFO_EN);
+ dev->pci_irqmask &= ~TW68_VID_INTS;
+ tw_writel(TW68_INTMASK, 0);
+
+ synchronize_irq(pci_dev->irq);
+
+ pci_save_state(pci_dev);
+ pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
+ vb2_discard_done(&dev->vidq);
+
+ return 0;
+}
+
+static int tw68_resume(struct pci_dev *pci_dev)
+{
+ struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
+ struct tw68_dev *dev = container_of(v4l2_dev,
+ struct tw68_dev, v4l2_dev);
+ struct tw68_buf *buf;
+ unsigned long flags;
+
+ pci_set_power_state(pci_dev, PCI_D0);
+ pci_restore_state(pci_dev);
+
+	/* Do the things that are done in tw68_initdev(),
+	   except for initializing the memory structures. */
+
+ msleep(100);
+
+ tw68_set_tvnorm_hw(dev);
+
+	/* Resume unfinished buffer(s) */
+ spin_lock_irqsave(&dev->slock, flags);
+ buf = container_of(dev->active.next, struct tw68_buf, list);
+
+ tw68_video_start_dma(dev, buf);
+
+ spin_unlock_irqrestore(&dev->slock, flags);
+
+ return 0;
+}
+#endif
+
+/* ----------------------------------------------------------- */
+
+static struct pci_driver tw68_pci_driver = {
+ .name = "tw68",
+ .id_table = tw68_pci_tbl,
+ .probe = tw68_initdev,
+ .remove = tw68_finidev,
+#ifdef CONFIG_PM
+ .suspend = tw68_suspend,
+ .resume = tw68_resume
+#endif
+};
+
+module_pci_driver(tw68_pci_driver);
diff --git a/drivers/media/pci/tw68/tw68-reg.h b/drivers/media/pci/tw68/tw68-reg.h
new file mode 100644
index 000000000000..f60b3a896fa7
--- /dev/null
+++ b/drivers/media/pci/tw68/tw68-reg.h
@@ -0,0 +1,195 @@
+/*
+ * tw68-reg.h - TW68xx register offsets
+ *
+ * Much of this code is derived from the cx88 and saa7134 drivers, which
+ * were in turn derived from the bt87x driver. The original work was by
+ * Gerd Knorr; more recently the code was enhanced by Mauro Carvalho Chehab,
+ * Hans Verkuil, Andy Walls and many others. Their work is gratefully
+ * acknowledged. Full credit goes to them - any problems within this code
+ * are mine.
+ *
+ * Copyright (C) William M. Brack
+ *
+ * Refactored and updated to the latest v4l core frameworks:
+ *
+ * Copyright (C) 2014 Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+*/
+
+#ifndef _TW68_REG_H_
+#define _TW68_REG_H_
+
+/* ---------------------------------------------------------------------- */
+#define TW68_DMAC 0x000
+#define TW68_DMAP_SA 0x004
+#define TW68_DMAP_EXE 0x008
+#define TW68_DMAP_PP 0x00c
+#define TW68_VBIC 0x010
+#define TW68_SBUSC 0x014
+#define TW68_SBUSSD 0x018
+#define TW68_INTSTAT 0x01C
+#define TW68_INTMASK 0x020
+#define TW68_GPIOC 0x024
+#define TW68_GPOE 0x028
+#define TW68_TESTREG 0x02C
+#define TW68_SBUSRD 0x030
+#define TW68_SBUS_TRIG 0x034
+#define TW68_CAP_CTL 0x040
+#define TW68_SUBSYS 0x054
+#define TW68_I2C_RST 0x064
+#define TW68_VBIINST 0x06C
+/* define bits in FIFO and DMAP Control reg */
+#define TW68_DMAP_EN (1 << 0)
+#define TW68_FIFO_EN (1 << 1)
+/* define the Interrupt Status Register bits */
+#define TW68_SBDONE (1 << 0)
+#define TW68_DMAPI (1 << 1)
+#define TW68_GPINT (1 << 2)
+#define TW68_FFOF (1 << 3)
+#define TW68_FDMIS (1 << 4)
+#define TW68_DMAPERR (1 << 5)
+#define TW68_PABORT (1 << 6)
+#define TW68_SBDONE2 (1 << 12)
+#define TW68_SBERR2 (1 << 13)
+#define TW68_PPERR (1 << 14)
+#define TW68_FFERR (1 << 15)
+#define TW68_DET50 (1 << 16)
+#define TW68_FLOCK (1 << 17)
+#define TW68_CCVALID (1 << 18)
+#define TW68_VLOCK (1 << 19)
+#define TW68_FIELD (1 << 20)
+#define TW68_SLOCK (1 << 21)
+#define TW68_HLOCK (1 << 22)
+#define TW68_VDLOSS (1 << 23)
+#define TW68_SBERR (1 << 24)
+/* define the i2c control register bits */
+#define TW68_SBMODE (0)
+#define TW68_WREN (1)
+#define TW68_SSCLK (6)
+#define TW68_SSDAT (7)
+#define TW68_SBCLK (8)
+#define TW68_WDLEN (16)
+#define TW68_RDLEN (20)
+#define TW68_SBRW (24)
+#define TW68_SBDEV (25)
+
+#define TW68_SBMODE_B (1 << TW68_SBMODE)
+#define TW68_WREN_B (1 << TW68_WREN)
+#define TW68_SSCLK_B (1 << TW68_SSCLK)
+#define TW68_SSDAT_B (1 << TW68_SSDAT)
+#define TW68_SBRW_B (1 << TW68_SBRW)
+
+#define TW68_GPDATA 0x100
+#define TW68_STATUS1 0x204
+#define TW68_INFORM 0x208
+#define TW68_OPFORM 0x20C
+#define TW68_HSYNC 0x210
+#define TW68_ACNTL 0x218
+#define TW68_CROP_HI 0x21C
+#define TW68_VDELAY_LO 0x220
+#define TW68_VACTIVE_LO 0x224
+#define TW68_HDELAY_LO 0x228
+#define TW68_HACTIVE_LO 0x22C
+#define TW68_CNTRL1 0x230
+#define TW68_VSCALE_LO 0x234
+#define TW68_SCALE_HI 0x238
+#define TW68_HSCALE_LO 0x23C
+#define TW68_BRIGHT 0x240
+#define TW68_CONTRAST 0x244
+#define TW68_SHARPNESS 0x248
+#define TW68_SAT_U 0x24C
+#define TW68_SAT_V 0x250
+#define TW68_HUE 0x254
+#define TW68_SHARP2 0x258
+#define TW68_VSHARP 0x25C
+#define TW68_CORING 0x260
+#define TW68_VBICNTL 0x264
+#define TW68_CNTRL2 0x268
+#define TW68_CC_DATA 0x26C
+#define TW68_SDT 0x270
+#define TW68_SDTR 0x274
+#define TW68_RESERV2 0x278
+#define TW68_RESERV3 0x27C
+#define TW68_CLMPG 0x280
+#define TW68_IAGC 0x284
+#define TW68_AGCGAIN 0x288
+#define TW68_PEAKWT 0x28C
+#define TW68_CLMPL 0x290
+#define TW68_SYNCT 0x294
+#define TW68_MISSCNT 0x298
+#define TW68_PCLAMP 0x29C
+#define TW68_VCNTL1 0x2A0
+#define TW68_VCNTL2 0x2A4
+#define TW68_CKILL 0x2A8
+#define TW68_COMB 0x2AC
+#define TW68_LDLY 0x2B0
+#define TW68_MISC1 0x2B4
+#define TW68_LOOP 0x2B8
+#define TW68_MISC2 0x2BC
+#define TW68_MVSN 0x2C0
+#define TW68_STATUS2 0x2C4
+#define TW68_HFREF 0x2C8
+#define TW68_CLMD 0x2CC
+#define TW68_IDCNTL 0x2D0
+#define TW68_CLCNTL1 0x2D4
+
+/* Audio */
+#define TW68_ACKI1 0x300
+#define TW68_ACKI2 0x304
+#define TW68_ACKI3 0x308
+#define TW68_ACKN1 0x30C
+#define TW68_ACKN2 0x310
+#define TW68_ACKN3 0x314
+#define TW68_SDIV 0x318
+#define TW68_LRDIV 0x31C
+#define TW68_ACCNTL 0x320
+
+#define TW68_VSCTL 0x3B8
+#define TW68_CHROMAGVAL 0x3BC
+
+#define TW68_F2CROP_HI 0x3DC
+#define TW68_F2VDELAY_LO 0x3E0
+#define TW68_F2VACTIVE_LO 0x3E4
+#define TW68_F2HDELAY_LO 0x3E8
+#define TW68_F2HACTIVE_LO 0x3EC
+#define TW68_F2CNT 0x3F0
+#define TW68_F2VSCALE_LO 0x3F4
+#define TW68_F2SCALE_HI 0x3F8
+#define TW68_F2HSCALE_LO 0x3FC
+
+#define RISC_INT_BIT 0x08000000
+#define RISC_SYNCO 0xC0000000
+#define RISC_SYNCE 0xD0000000
+#define RISC_JUMP 0xB0000000
+#define RISC_LINESTART 0x90000000
+#define RISC_INLINE 0xA0000000
+
+#define VideoFormatNTSC 0
+#define VideoFormatNTSCJapan 0
+#define VideoFormatPALBDGHI 1
+#define VideoFormatSECAM 2
+#define VideoFormatNTSC443 3
+#define VideoFormatPALM 4
+#define VideoFormatPALN 5
+#define VideoFormatPALNC 5
+#define VideoFormatPAL60 6
+#define VideoFormatAuto 7
+
+#define ColorFormatRGB32 0x00
+#define ColorFormatRGB24 0x10
+#define ColorFormatRGB16 0x20
+#define ColorFormatRGB15 0x30
+#define ColorFormatYUY2 0x40
+#define ColorFormatBSWAP 0x04
+#define ColorFormatWSWAP 0x08
+#define ColorFormatGamma 0x80
+#endif
diff --git a/drivers/media/pci/tw68/tw68-risc.c b/drivers/media/pci/tw68/tw68-risc.c
new file mode 100644
index 000000000000..7439db212a69
--- /dev/null
+++ b/drivers/media/pci/tw68/tw68-risc.c
@@ -0,0 +1,230 @@
+/*
+ * tw68_risc.c
+ * Part of the device driver for Techwell 68xx based cards
+ *
+ * Much of this code is derived from the cx88 and saa7134 drivers, which
+ * were in turn derived from the bt87x driver. The original work was by
+ * Gerd Knorr; more recently the code was enhanced by Mauro Carvalho Chehab,
+ * Hans Verkuil, Andy Walls and many others. Their work is gratefully
+ * acknowledged. Full credit goes to them - any problems within this code
+ * are mine.
+ *
+ * Copyright (C) 2009 William M. Brack
+ *
+ * Refactored and updated to the latest v4l core frameworks:
+ *
+ * Copyright (C) 2014 Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "tw68.h"
+
+/**
+ * @rp pointer to current risc program position
+ * @sglist pointer to "scatter-gather list" of buffer pointers
+ * @offset offset to target memory buffer
+ * @sync_line 0 -> no sync, 1 -> odd sync, 2 -> even sync
+ * @bpl number of bytes per scan line
+ * @padding number of bytes of padding to add
+ * @lines number of lines in field
+ * @jump insert a jump at the start
+ */
+static __le32 *tw68_risc_field(__le32 *rp, struct scatterlist *sglist,
+ unsigned int offset, u32 sync_line,
+ unsigned int bpl, unsigned int padding,
+ unsigned int lines, bool jump)
+{
+ struct scatterlist *sg;
+ unsigned int line, todo, done;
+
+ if (jump) {
+ *(rp++) = cpu_to_le32(RISC_JUMP);
+ *(rp++) = 0;
+ }
+
+ /* sync instruction */
+ if (sync_line == 1)
+ *(rp++) = cpu_to_le32(RISC_SYNCO);
+ else
+ *(rp++) = cpu_to_le32(RISC_SYNCE);
+ *(rp++) = 0;
+
+ /* scan lines */
+ sg = sglist;
+ for (line = 0; line < lines; line++) {
+ /* calculate next starting position */
+ while (offset && offset >= sg_dma_len(sg)) {
+ offset -= sg_dma_len(sg);
+ sg = sg_next(sg);
+ }
+ if (bpl <= sg_dma_len(sg) - offset) {
+ /* fits into current chunk */
+ *(rp++) = cpu_to_le32(RISC_LINESTART |
+ /* (offset<<12) |*/ bpl);
+ *(rp++) = cpu_to_le32(sg_dma_address(sg) + offset);
+ offset += bpl;
+ } else {
+ /*
+ * scanline needs to be split. Put the start in
+ * whatever memory remains using RISC_LINESTART,
+ * then the remainder into following addresses
+ * given by the scatter-gather list.
+ */
+ todo = bpl; /* one full line to be done */
+ /* first fragment */
+ done = (sg_dma_len(sg) - offset);
+ *(rp++) = cpu_to_le32(RISC_LINESTART |
+ (7 << 24) |
+ done);
+ *(rp++) = cpu_to_le32(sg_dma_address(sg) + offset);
+ todo -= done;
+ sg = sg_next(sg);
+ /* succeeding fragments have no offset */
+ while (todo > sg_dma_len(sg)) {
+ *(rp++) = cpu_to_le32(RISC_INLINE |
+ (done << 12) |
+ sg_dma_len(sg));
+ *(rp++) = cpu_to_le32(sg_dma_address(sg));
+ todo -= sg_dma_len(sg);
+ sg = sg_next(sg);
+ done += sg_dma_len(sg);
+ }
+ if (todo) {
+ /* final chunk - offset 0, count 'todo' */
+ *(rp++) = cpu_to_le32(RISC_INLINE |
+ (done << 12) |
+ todo);
+ *(rp++) = cpu_to_le32(sg_dma_address(sg));
+ }
+ offset = todo;
+ }
+ offset += padding;
+ }
+
+ return rp;
+}
+
+/**
+ * tw68_risc_buffer
+ *
+ * This routine is called by tw68-video. It allocates
+ * memory for the dma controller "program" and then fills in that
+ * memory with the appropriate "instructions".
+ *
+ * @pci structure with info about the pci
+ * slot which our device is in.
+ * @buf buffer structure which will hold the risc
+ * program and its dma handle.
+ * @sglist scatter-gather list for the video buffer
+ * @top_offset offset within the video buffer of the
+ * first line of the top (odd) field
+ * @bottom_offset offset within the video buffer of the
+ * first line of the bottom (even) field
+ * @bpl number of data bytes per scan line
+ * @padding number of extra bytes to add at end of line
+ * @lines number of scan lines
+ */
+int tw68_risc_buffer(struct pci_dev *pci,
+ struct tw68_buf *buf,
+ struct scatterlist *sglist,
+ unsigned int top_offset,
+ unsigned int bottom_offset,
+ unsigned int bpl,
+ unsigned int padding,
+ unsigned int lines)
+{
+ u32 instructions, fields;
+ __le32 *rp;
+
+ fields = 0;
+ if (UNSET != top_offset)
+ fields++;
+ if (UNSET != bottom_offset)
+ fields++;
+ /*
+ * estimate risc mem: worst case is one write per page border +
+ * one write per scan line + syncs + 2 jumps (all 2 dwords).
+ * Padding can cause next bpl to start close to a page border.
+ * First DMA region may be smaller than PAGE_SIZE
+ */
+ instructions = fields * (1 + (((bpl + padding) * lines) /
+ PAGE_SIZE) + lines) + 4;
+ buf->size = instructions * 8;
+ buf->cpu = pci_alloc_consistent(pci, buf->size, &buf->dma);
+ if (buf->cpu == NULL)
+ return -ENOMEM;
+
+ /* write risc instructions */
+ rp = buf->cpu;
+ if (UNSET != top_offset) /* generates SYNCO */
+ rp = tw68_risc_field(rp, sglist, top_offset, 1,
+ bpl, padding, lines, true);
+ if (UNSET != bottom_offset) /* generates SYNCE */
+ rp = tw68_risc_field(rp, sglist, bottom_offset, 2,
+ bpl, padding, lines, top_offset == UNSET);
+
+ /* save pointer to jmp instruction address */
+ buf->jmp = rp;
+ buf->cpu[1] = cpu_to_le32(buf->dma + 8);
+ /* assure risc buffer hasn't overflowed */
+ BUG_ON((buf->jmp - buf->cpu + 2) * sizeof(buf->cpu[0]) > buf->size);
+ return 0;
+}
+
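As a sanity check on the worst-case estimate above, here is a minimal stand-alone sketch (not taken from the patch) that reproduces the instruction-count arithmetic for an illustrative interlaced 720x576, 24 bpp capture; the 4 KiB page size and the frame geometry are assumptions.

	/* Stand-alone sketch of the RISC-program size estimate in
	 * tw68_risc_buffer(); geometry and page size are illustrative. */
	#include <stdio.h>

	#define PAGE_SZ 4096u

	static unsigned int risc_bytes(unsigned int fields, unsigned int bpl,
	                               unsigned int padding, unsigned int lines)
	{
	        /* one write per page border + one per scan line + syncs +
	         * 2 jumps, each instruction being two 32-bit words (8 bytes) */
	        unsigned int instructions = fields *
	                (1 + (((bpl + padding) * lines) / PAGE_SZ) + lines) + 4;
	        return instructions * 8;
	}

	int main(void)
	{
	        /* interlaced 720x576 at 24 bpp: bpl = 720 * 3, padding = bpl,
	         * 288 lines per field, both fields present */
	        printf("%u bytes\n", risc_bytes(2, 2160, 2160, 288)); /* 9504 */
	        return 0;
	}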
+#if 0
+/* ------------------------------------------------------------------ */
+/* debug helper code */
+
+static void tw68_risc_decode(u32 risc, u32 addr)
+{
+#define RISC_OP(reg) (((reg) >> 28) & 7)
+ static struct instr_details {
+ char *name;
+ u8 has_data_type;
+ u8 has_byte_info;
+ u8 has_addr;
+ } instr[8] = {
+ [RISC_OP(RISC_SYNCO)] = {"syncOdd", 0, 0, 0},
+ [RISC_OP(RISC_SYNCE)] = {"syncEven", 0, 0, 0},
+ [RISC_OP(RISC_JUMP)] = {"jump", 0, 0, 1},
+ [RISC_OP(RISC_LINESTART)] = {"lineStart", 1, 1, 1},
+ [RISC_OP(RISC_INLINE)] = {"inline", 1, 1, 1},
+ };
+ u32 p;
+
+ p = RISC_OP(risc);
+ if (!(risc & 0x80000000) || !instr[p].name) {
+ pr_debug("0x%08x [ INVALID ]\n", risc);
+ return;
+ }
+ pr_debug("0x%08x %-9s IRQ=%d",
+ risc, instr[p].name, (risc >> 27) & 1);
+ if (instr[p].has_data_type)
+ pr_debug(" Type=%d", (risc >> 24) & 7);
+ if (instr[p].has_byte_info)
+ pr_debug(" Start=0x%03x Count=%03u",
+ (risc >> 12) & 0xfff, risc & 0xfff);
+ if (instr[p].has_addr)
+ pr_debug(" StartAddr=0x%08x", addr);
+ pr_debug("\n");
+}
+
+void tw68_risc_program_dump(struct tw68_core *core, struct tw68_buf *buf)
+{
+ const __le32 *addr;
+
+ pr_debug("%s: risc_program_dump: risc=%p, buf->cpu=0x%p, buf->jmp=0x%p\n",
+ core->name, buf, buf->cpu, buf->jmp);
+ for (addr = buf->cpu; addr <= buf->jmp; addr += 2)
+ tw68_risc_decode(*addr, *(addr+1));
+}
+#endif
diff --git a/drivers/media/pci/tw68/tw68-video.c b/drivers/media/pci/tw68/tw68-video.c
new file mode 100644
index 000000000000..5c94ac7c88d9
--- /dev/null
+++ b/drivers/media/pci/tw68/tw68-video.c
@@ -0,0 +1,1051 @@
+/*
+ * tw68 functions to handle video data
+ *
+ * Much of this code is derived from the cx88 and saa7134 drivers, which
+ * were in turn derived from the bt87x driver. The original work was by
+ * Gerd Knorr; more recently the code was enhanced by Mauro Carvalho Chehab,
+ * Hans Verkuil, Andy Walls and many others. Their work is gratefully
+ * acknowledged. Full credit goes to them - any problems within this code
+ * are mine.
+ *
+ * Copyright (C) 2009 William M. Brack
+ *
+ * Refactored and updated to the latest v4l core frameworks:
+ *
+ * Copyright (C) 2014 Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-event.h>
+#include <media/videobuf2-dma-sg.h>
+
+#include "tw68.h"
+#include "tw68-reg.h"
+
+/* ------------------------------------------------------------------ */
+/* data structs for video */
+/*
+ * FIXME -
+ * Note that the saa7134 has formats, e.g. YUV420, which are classified
+ * as "planar". These affect overlay mode, and are flagged with a field
+ * ".planar" in the format. Do we need to implement this in this driver?
+ */
+static const struct tw68_format formats[] = {
+ {
+ .name = "15 bpp RGB, le",
+ .fourcc = V4L2_PIX_FMT_RGB555,
+ .depth = 16,
+ .twformat = ColorFormatRGB15,
+ }, {
+ .name = "15 bpp RGB, be",
+ .fourcc = V4L2_PIX_FMT_RGB555X,
+ .depth = 16,
+ .twformat = ColorFormatRGB15 | ColorFormatBSWAP,
+ }, {
+ .name = "16 bpp RGB, le",
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .depth = 16,
+ .twformat = ColorFormatRGB16,
+ }, {
+ .name = "16 bpp RGB, be",
+ .fourcc = V4L2_PIX_FMT_RGB565X,
+ .depth = 16,
+ .twformat = ColorFormatRGB16 | ColorFormatBSWAP,
+ }, {
+ .name = "24 bpp RGB, le",
+ .fourcc = V4L2_PIX_FMT_BGR24,
+ .depth = 24,
+ .twformat = ColorFormatRGB24,
+ }, {
+ .name = "24 bpp RGB, be",
+ .fourcc = V4L2_PIX_FMT_RGB24,
+ .depth = 24,
+ .twformat = ColorFormatRGB24 | ColorFormatBSWAP,
+ }, {
+ .name = "32 bpp RGB, le",
+ .fourcc = V4L2_PIX_FMT_BGR32,
+ .depth = 32,
+ .twformat = ColorFormatRGB32,
+ }, {
+ .name = "32 bpp RGB, be",
+ .fourcc = V4L2_PIX_FMT_RGB32,
+ .depth = 32,
+ .twformat = ColorFormatRGB32 | ColorFormatBSWAP |
+ ColorFormatWSWAP,
+ }, {
+ .name = "4:2:2 packed, YUYV",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .depth = 16,
+ .twformat = ColorFormatYUY2,
+ }, {
+ .name = "4:2:2 packed, UYVY",
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .depth = 16,
+ .twformat = ColorFormatYUY2 | ColorFormatBSWAP,
+ }
+};
+#define FORMATS ARRAY_SIZE(formats)
+
+#define NORM_625_50 \
+ .h_delay = 3, \
+ .h_delay0 = 133, \
+ .h_start = 0, \
+ .h_stop = 719, \
+ .v_delay = 24, \
+ .vbi_v_start_0 = 7, \
+ .vbi_v_stop_0 = 22, \
+ .video_v_start = 24, \
+ .video_v_stop = 311, \
+ .vbi_v_start_1 = 319
+
+#define NORM_525_60 \
+ .h_delay = 8, \
+ .h_delay0 = 138, \
+ .h_start = 0, \
+ .h_stop = 719, \
+ .v_delay = 22, \
+ .vbi_v_start_0 = 10, \
+ .vbi_v_stop_0 = 21, \
+ .video_v_start = 22, \
+ .video_v_stop = 262, \
+ .vbi_v_start_1 = 273
+
+/*
+ * The following table is searched by tw68_s_std, first for a specific
+ * match, then for an entry which contains the desired id. The table
+ * entries should therefore be ordered in ascending order of specificity.
+ */
+static const struct tw68_tvnorm tvnorms[] = {
+ {
+ .name = "PAL", /* autodetect */
+ .id = V4L2_STD_PAL,
+ NORM_625_50,
+
+ .sync_control = 0x18,
+ .luma_control = 0x40,
+ .chroma_ctrl1 = 0x81,
+ .chroma_gain = 0x2a,
+ .chroma_ctrl2 = 0x06,
+ .vgate_misc = 0x1c,
+ .format = VideoFormatPALBDGHI,
+ }, {
+ .name = "NTSC",
+ .id = V4L2_STD_NTSC,
+ NORM_525_60,
+
+ .sync_control = 0x59,
+ .luma_control = 0x40,
+ .chroma_ctrl1 = 0x89,
+ .chroma_gain = 0x2a,
+ .chroma_ctrl2 = 0x0e,
+ .vgate_misc = 0x18,
+ .format = VideoFormatNTSC,
+ }, {
+ .name = "SECAM",
+ .id = V4L2_STD_SECAM,
+ NORM_625_50,
+
+ .sync_control = 0x18,
+ .luma_control = 0x1b,
+ .chroma_ctrl1 = 0xd1,
+ .chroma_gain = 0x80,
+ .chroma_ctrl2 = 0x00,
+ .vgate_misc = 0x1c,
+ .format = VideoFormatSECAM,
+ }, {
+ .name = "PAL-M",
+ .id = V4L2_STD_PAL_M,
+ NORM_525_60,
+
+ .sync_control = 0x59,
+ .luma_control = 0x40,
+ .chroma_ctrl1 = 0xb9,
+ .chroma_gain = 0x2a,
+ .chroma_ctrl2 = 0x0e,
+ .vgate_misc = 0x18,
+ .format = VideoFormatPALM,
+ }, {
+ .name = "PAL-Nc",
+ .id = V4L2_STD_PAL_Nc,
+ NORM_625_50,
+
+ .sync_control = 0x18,
+ .luma_control = 0x40,
+ .chroma_ctrl1 = 0xa1,
+ .chroma_gain = 0x2a,
+ .chroma_ctrl2 = 0x06,
+ .vgate_misc = 0x1c,
+ .format = VideoFormatPALNC,
+ }, {
+ .name = "PAL-60",
+ .id = V4L2_STD_PAL_60,
+ .h_delay = 186,
+ .h_start = 0,
+ .h_stop = 719,
+ .v_delay = 26,
+ .video_v_start = 23,
+ .video_v_stop = 262,
+ .vbi_v_start_0 = 10,
+ .vbi_v_stop_0 = 21,
+ .vbi_v_start_1 = 273,
+
+ .sync_control = 0x18,
+ .luma_control = 0x40,
+ .chroma_ctrl1 = 0x81,
+ .chroma_gain = 0x2a,
+ .chroma_ctrl2 = 0x06,
+ .vgate_misc = 0x1c,
+ .format = VideoFormatPAL60,
+ }
+};
+#define TVNORMS ARRAY_SIZE(tvnorms)
+
+static const struct tw68_format *format_by_fourcc(unsigned int fourcc)
+{
+ unsigned int i;
+
+ for (i = 0; i < FORMATS; i++)
+ if (formats[i].fourcc == fourcc)
+ return formats+i;
+ return NULL;
+}
+
+
+/* ------------------------------------------------------------------ */
+/*
+ * Note that the cropping rectangles are described in terms of a single
+ * frame, i.e. line positions are only 1/2 the interlaced equivalent
+ */
+static void set_tvnorm(struct tw68_dev *dev, const struct tw68_tvnorm *norm)
+{
+ if (norm != dev->tvnorm) {
+ dev->width = 720;
+ dev->height = (norm->id & V4L2_STD_525_60) ? 480 : 576;
+ dev->tvnorm = norm;
+ tw68_set_tvnorm_hw(dev);
+ }
+}
+
+/*
+ * tw68_set_scale
+ *
+ * Scaling and Cropping for video decoding
+ *
+ * We are working with 3 values for horizontal and vertical - scale,
+ * delay and active.
+ *
+ * HACTIVE represents the actual number of pixels in the "usable" image,
+ * before scaling. HDELAY represents the number of pixels skipped
+ * between the start of the horizontal sync and the start of the image.
+ * HSCALE is calculated using the formula
+ * HSCALE = (HACTIVE / (#pixels desired)) * 256
+ *
+ * The vertical registers are similar, except based upon the total number
+ * of lines in the image, and the first line of the image (i.e. ignoring
+ * vertical sync and VBI).
+ *
+ * Note that the number of bytes reaching the FIFO (and hence needing
+ * to be processed by the DMAP program) is completely dependent upon
+ * these values, especially HSCALE.
+ *
+ * Parameters:
+ * @dev pointer to the device structure, needed for
+ * getting current norm (as well as debug print)
+ * @width actual image width (from user buffer)
+ * @height actual image height
+ * @field indicates Top, Bottom or Interlaced
+ */
+static int tw68_set_scale(struct tw68_dev *dev, unsigned int width,
+ unsigned int height, enum v4l2_field field)
+{
+ const struct tw68_tvnorm *norm = dev->tvnorm;
+ /* set individually for debugging clarity */
+ int hactive, hdelay, hscale;
+ int vactive, vdelay, vscale;
+ int comb;
+
+ if (V4L2_FIELD_HAS_BOTH(field)) /* if field is interlaced */
+ height /= 2; /* we must set for 1-frame */
+
+ pr_debug("%s: width=%d, height=%d, both=%d\n"
+ " tvnorm h_delay=%d, h_start=%d, h_stop=%d, "
+ "v_delay=%d, v_start=%d, v_stop=%d\n" , __func__,
+ width, height, V4L2_FIELD_HAS_BOTH(field),
+ norm->h_delay, norm->h_start, norm->h_stop,
+ norm->v_delay, norm->video_v_start,
+ norm->video_v_stop);
+
+ switch (dev->vdecoder) {
+ case TW6800:
+ hdelay = norm->h_delay0;
+ break;
+ default:
+ hdelay = norm->h_delay;
+ break;
+ }
+
+ hdelay += norm->h_start;
+ hactive = norm->h_stop - norm->h_start + 1;
+
+ hscale = (hactive * 256) / (width);
+
+ vdelay = norm->v_delay;
+ vactive = ((norm->id & V4L2_STD_525_60) ? 524 : 624) / 2 - norm->video_v_start;
+ vscale = (vactive * 256) / height;
+
+ pr_debug("%s: %dx%d [%s%s,%s]\n", __func__,
+ width, height,
+ V4L2_FIELD_HAS_TOP(field) ? "T" : "",
+ V4L2_FIELD_HAS_BOTTOM(field) ? "B" : "",
+ v4l2_norm_to_name(dev->tvnorm->id));
+ pr_debug("%s: hactive=%d, hdelay=%d, hscale=%d; "
+ "vactive=%d, vdelay=%d, vscale=%d\n", __func__,
+ hactive, hdelay, hscale, vactive, vdelay, vscale);
+
+ comb = ((vdelay & 0x300) >> 2) |
+ ((vactive & 0x300) >> 4) |
+ ((hdelay & 0x300) >> 6) |
+ ((hactive & 0x300) >> 8);
+ pr_debug("%s: setting CROP_HI=%02x, VDELAY_LO=%02x, "
+ "VACTIVE_LO=%02x, HDELAY_LO=%02x, HACTIVE_LO=%02x\n",
+ __func__, comb, vdelay, vactive, hdelay, hactive);
+ tw_writeb(TW68_CROP_HI, comb);
+ tw_writeb(TW68_VDELAY_LO, vdelay & 0xff);
+ tw_writeb(TW68_VACTIVE_LO, vactive & 0xff);
+ tw_writeb(TW68_HDELAY_LO, hdelay & 0xff);
+ tw_writeb(TW68_HACTIVE_LO, hactive & 0xff);
+
+ comb = ((vscale & 0xf00) >> 4) | ((hscale & 0xf00) >> 8);
+ pr_debug("%s: setting SCALE_HI=%02x, VSCALE_LO=%02x, "
+ "HSCALE_LO=%02x\n", __func__, comb, vscale, hscale);
+ tw_writeb(TW68_SCALE_HI, comb);
+ tw_writeb(TW68_VSCALE_LO, vscale);
+ tw_writeb(TW68_HSCALE_LO, hscale);
+
+ return 0;
+}
+
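For illustration, a minimal stand-alone sketch (not part of the patch) of the scale arithmetic described above, using the PAL geometry from the NORM_625_50 entry; the requested 360x288 interlaced capture size is an assumed example.

	/* Stand-alone sketch of the HSCALE/VSCALE arithmetic in
	 * tw68_set_scale(); PAL geometry, requested size is illustrative. */
	#include <stdio.h>

	int main(void)
	{
	        unsigned int hactive = 719 - 0 + 1;     /* h_stop - h_start + 1 */
	        unsigned int vactive = 624 / 2 - 24;    /* half frame - video_v_start */
	        unsigned int width = 360, height = 288; /* requested, interlaced */

	        /* interlaced heights are halved before scaling */
	        unsigned int hscale = (hactive * 256) / width;
	        unsigned int vscale = (vactive * 256) / (height / 2);

	        printf("hscale=%u vscale=%u\n", hscale, vscale); /* 512 512 */
	        return 0;
	}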
+/* ------------------------------------------------------------------ */
+
+int tw68_video_start_dma(struct tw68_dev *dev, struct tw68_buf *buf)
+{
+ /* Set cropping and scaling */
+ tw68_set_scale(dev, dev->width, dev->height, dev->field);
+ /*
+ * Set start address for RISC program. Note that if the DMAP
+ * processor is currently running, it must be stopped before
+ * a new address can be set.
+ */
+ tw_clearl(TW68_DMAC, TW68_DMAP_EN);
+ tw_writel(TW68_DMAP_SA, buf->dma);
+ /* Clear any pending interrupts */
+ tw_writel(TW68_INTSTAT, dev->board_virqmask);
+ /* Enable the risc engine and the fifo */
+ tw_andorl(TW68_DMAC, 0xff, dev->fmt->twformat |
+ ColorFormatGamma | TW68_DMAP_EN | TW68_FIFO_EN);
+ dev->pci_irqmask |= dev->board_virqmask;
+ tw_setl(TW68_INTMASK, dev->pci_irqmask);
+ return 0;
+}
+
+/* ------------------------------------------------------------------ */
+
+/* calc max # of buffers from size (must not exceed the 4MB virtual
+ * address space per DMA channel) */
+static int tw68_buffer_count(unsigned int size, unsigned int count)
+{
+ unsigned int maxcount;
+
+ maxcount = (4 * 1024 * 1024) / roundup(size, PAGE_SIZE);
+ if (count > maxcount)
+ count = maxcount;
+ return count;
+}
+
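A minimal stand-alone sketch (assuming 4 KiB pages and a 720x576, 16 bpp frame, both illustrative) of the 4 MiB-per-DMA-channel limit enforced above:

	/* Stand-alone sketch of the buffer-count limit in tw68_buffer_count(). */
	#include <stdio.h>

	int main(void)
	{
	        unsigned long page = 4096, limit = 4UL * 1024 * 1024;
	        unsigned long size = 720UL * 576 * 16 / 8;          /* sizeimage */
	        unsigned long rounded = (size + page - 1) / page * page;

	        printf("max buffers = %lu\n", limit / rounded);     /* prints 5 */
	        return 0;
	}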
+/* ------------------------------------------------------------- */
+/* vb2 queue operations */
+
+static int tw68_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct tw68_dev *dev = vb2_get_drv_priv(q);
+ unsigned tot_bufs = q->num_buffers + *num_buffers;
+
+ sizes[0] = (dev->fmt->depth * dev->width * dev->height) >> 3;
+ /*
+	 * We allow create_bufs, but only if the requested sizeimage is at
+	 * least the current sizeimage. The tw68_buffer_count calculation
+	 * becomes quite difficult otherwise.
+ */
+ if (fmt && fmt->fmt.pix.sizeimage < sizes[0])
+ return -EINVAL;
+ *num_planes = 1;
+ if (tot_bufs < 2)
+ tot_bufs = 2;
+ tot_bufs = tw68_buffer_count(sizes[0], tot_bufs);
+ *num_buffers = tot_bufs - q->num_buffers;
+
+ return 0;
+}
+
+/*
+ * The risc program for each buffer works as follows: it starts with a simple
+ * 'JUMP to addr + 8', which is effectively a NOP. Then the program to DMA the
+ * buffer follows and at the end we have a JUMP back to the start + 8 (skipping
+ * the initial JUMP).
+ *
+ * This is the program of the first buffer to be queued if the active list is
+ * empty and it just keeps DMAing this buffer without generating any interrupts.
+ *
+ * If a new buffer is added then the initial JUMP in the program generates an
+ * interrupt as well which signals that the previous buffer has been DMAed
+ * successfully and that it can be returned to userspace.
+ *
+ * It also sets the final jump of the previous buffer to the start of the new
+ * buffer, thus chaining the new buffer into the DMA chain. This is a single
+ * atomic u32 write, so there is no race condition.
+ *
+ * The end result of all this is that you only get an interrupt when a buffer
+ * is ready, so the control flow is very easy.
+ */
+static void tw68_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct tw68_dev *dev = vb2_get_drv_priv(vq);
+ struct tw68_buf *buf = container_of(vb, struct tw68_buf, vb);
+ struct tw68_buf *prev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->slock, flags);
+
+ /* append a 'JUMP to start of buffer' to the buffer risc program */
+ buf->jmp[0] = cpu_to_le32(RISC_JUMP);
+ buf->jmp[1] = cpu_to_le32(buf->dma + 8);
+
+ if (!list_empty(&dev->active)) {
+ prev = list_entry(dev->active.prev, struct tw68_buf, list);
+ buf->cpu[0] |= cpu_to_le32(RISC_INT_BIT);
+ prev->jmp[1] = cpu_to_le32(buf->dma);
+ }
+ list_add_tail(&buf->list, &dev->active);
+ spin_unlock_irqrestore(&dev->slock, flags);
+}
+
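The comment above describes the buffer chaining; the following stand-alone sketch (addresses and array layout are purely illustrative, not the driver's real data structures) mimics how queueing a second program sets its interrupt bit and re-targets the previous program's final jump with a single word store.

	/* Stand-alone sketch of the JUMP chaining used by tw68_buf_queue(). */
	#include <stdint.h>
	#include <stdio.h>

	#define RISC_JUMP    0xB0000000u
	#define RISC_INT_BIT 0x08000000u

	struct prog {
	        uint32_t head[2];  /* initial 'JUMP to dma + 8' (a NOP by default) */
	        uint32_t tail[2];  /* final jump, normally back to dma + 8 */
	        uint32_t dma;      /* pretend bus address of the program */
	};

	static void init_prog(struct prog *p, uint32_t dma)
	{
	        p->dma = dma;
	        p->head[0] = RISC_JUMP;  p->head[1] = dma + 8;
	        p->tail[0] = RISC_JUMP;  p->tail[1] = dma + 8;
	}

	int main(void)
	{
	        struct prog a, b;

	        init_prog(&a, 0x1000);
	        init_prog(&b, 0x2000);

	        /* queueing b behind a: b's entry jump now raises an interrupt,
	         * and a's final jump is spliced to b with one 32-bit write */
	        b.head[0] |= RISC_INT_BIT;
	        a.tail[1] = b.dma;

	        printf("a jumps to %#x, b signals irq: %d\n",
	               a.tail[1], !!(b.head[0] & RISC_INT_BIT));
	        return 0;
	}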
+/*
+ * buffer_prepare
+ *
+ * Set the ancillary information into the buffer structure. This
+ * includes generating the necessary risc program if it hasn't already
+ * been done for the current buffer format.
+ * The device structure contains the details of the format requested by the
+ * user - type, width, height and #fields. This is compared with the
+ * last format set for the current buffer. If they differ, the risc
+ * code (which controls the filling of the buffer) is (re-)generated.
+ */
+static int tw68_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct tw68_dev *dev = vb2_get_drv_priv(vq);
+ struct tw68_buf *buf = container_of(vb, struct tw68_buf, vb);
+ struct sg_table *dma = vb2_dma_sg_plane_desc(vb, 0);
+ unsigned size, bpl;
+ int rc;
+
+ size = (dev->width * dev->height * dev->fmt->depth) >> 3;
+ if (vb2_plane_size(vb, 0) < size)
+ return -EINVAL;
+ vb2_set_plane_payload(vb, 0, size);
+
+ rc = dma_map_sg(&dev->pci->dev, dma->sgl, dma->nents, DMA_FROM_DEVICE);
+ if (!rc)
+ return -EIO;
+
+ bpl = (dev->width * dev->fmt->depth) >> 3;
+ switch (dev->field) {
+ case V4L2_FIELD_TOP:
+ tw68_risc_buffer(dev->pci, buf, dma->sgl,
+ 0, UNSET, bpl, 0, dev->height);
+ break;
+ case V4L2_FIELD_BOTTOM:
+ tw68_risc_buffer(dev->pci, buf, dma->sgl,
+ UNSET, 0, bpl, 0, dev->height);
+ break;
+ case V4L2_FIELD_SEQ_TB:
+ tw68_risc_buffer(dev->pci, buf, dma->sgl,
+ 0, bpl * (dev->height >> 1),
+ bpl, 0, dev->height >> 1);
+ break;
+ case V4L2_FIELD_SEQ_BT:
+ tw68_risc_buffer(dev->pci, buf, dma->sgl,
+ bpl * (dev->height >> 1), 0,
+ bpl, 0, dev->height >> 1);
+ break;
+ case V4L2_FIELD_INTERLACED:
+ default:
+ tw68_risc_buffer(dev->pci, buf, dma->sgl,
+ 0, bpl, bpl, bpl, dev->height >> 1);
+ break;
+ }
+ return 0;
+}
+
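To make the offset/padding choices in the switch above concrete, here is a small stand-alone sketch (an assumed 8-line, 16-bytes-per-line frame, purely illustrative) showing where each field lands for SEQ_TB versus INTERLACED.

	/* Stand-alone sketch of the field placement chosen in tw68_buf_prepare(). */
	#include <stdio.h>

	int main(void)
	{
	        unsigned int bpl = 16, lines = 8;   /* toy frame geometry */

	        /* V4L2_FIELD_SEQ_TB: bottom field starts after the whole top field */
	        printf("SEQ_TB:     top at 0, bottom at %u\n", bpl * (lines / 2));

	        /* V4L2_FIELD_INTERLACED: bottom field starts one line in, and the
	         * per-line padding of bpl makes each field skip the other's lines */
	        printf("INTERLACED: top at 0, bottom at %u, field line stride %u\n",
	               bpl, 2 * bpl);
	        return 0;
	}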
+static void tw68_buf_finish(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct tw68_dev *dev = vb2_get_drv_priv(vq);
+ struct sg_table *dma = vb2_dma_sg_plane_desc(vb, 0);
+ struct tw68_buf *buf = container_of(vb, struct tw68_buf, vb);
+
+ dma_unmap_sg(&dev->pci->dev, dma->sgl, dma->nents, DMA_FROM_DEVICE);
+
+ pci_free_consistent(dev->pci, buf->size, buf->cpu, buf->dma);
+}
+
+static int tw68_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct tw68_dev *dev = vb2_get_drv_priv(q);
+ struct tw68_buf *buf =
+ container_of(dev->active.next, struct tw68_buf, list);
+
+ dev->seqnr = 0;
+ tw68_video_start_dma(dev, buf);
+ return 0;
+}
+
+static void tw68_stop_streaming(struct vb2_queue *q)
+{
+ struct tw68_dev *dev = vb2_get_drv_priv(q);
+
+ /* Stop risc & fifo */
+ tw_clearl(TW68_DMAC, TW68_DMAP_EN | TW68_FIFO_EN);
+ while (!list_empty(&dev->active)) {
+ struct tw68_buf *buf =
+ container_of(dev->active.next, struct tw68_buf, list);
+
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+}
+
+static struct vb2_ops tw68_video_qops = {
+ .queue_setup = tw68_queue_setup,
+ .buf_queue = tw68_buf_queue,
+ .buf_prepare = tw68_buf_prepare,
+ .buf_finish = tw68_buf_finish,
+ .start_streaming = tw68_start_streaming,
+ .stop_streaming = tw68_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+/* ------------------------------------------------------------------ */
+
+static int tw68_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct tw68_dev *dev =
+ container_of(ctrl->handler, struct tw68_dev, hdl);
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ tw_writeb(TW68_BRIGHT, ctrl->val);
+ break;
+ case V4L2_CID_HUE:
+ tw_writeb(TW68_HUE, ctrl->val);
+ break;
+ case V4L2_CID_CONTRAST:
+ tw_writeb(TW68_CONTRAST, ctrl->val);
+ break;
+ case V4L2_CID_SATURATION:
+ tw_writeb(TW68_SAT_U, ctrl->val);
+ tw_writeb(TW68_SAT_V, ctrl->val);
+ break;
+ case V4L2_CID_COLOR_KILLER:
+ if (ctrl->val)
+ tw_andorb(TW68_MISC2, 0xe0, 0xe0);
+ else
+ tw_andorb(TW68_MISC2, 0xe0, 0x00);
+ break;
+ case V4L2_CID_CHROMA_AGC:
+ if (ctrl->val)
+ tw_andorb(TW68_LOOP, 0x30, 0x20);
+ else
+ tw_andorb(TW68_LOOP, 0x30, 0x00);
+ break;
+ }
+ return 0;
+}
+
+/* ------------------------------------------------------------------ */
+
+/*
+ * Note that this routine returns what is stored in the device structure, and
+ * does not interrogate any of the device registers.
+ */
+static int tw68_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct tw68_dev *dev = video_drvdata(file);
+
+ f->fmt.pix.width = dev->width;
+ f->fmt.pix.height = dev->height;
+ f->fmt.pix.field = dev->field;
+ f->fmt.pix.pixelformat = dev->fmt->fourcc;
+ f->fmt.pix.bytesperline =
+ (f->fmt.pix.width * (dev->fmt->depth)) >> 3;
+ f->fmt.pix.sizeimage =
+ f->fmt.pix.height * f->fmt.pix.bytesperline;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+ f->fmt.pix.priv = 0;
+ return 0;
+}
+
+static int tw68_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct tw68_dev *dev = video_drvdata(file);
+ const struct tw68_format *fmt;
+ enum v4l2_field field;
+ unsigned int maxh;
+
+ fmt = format_by_fourcc(f->fmt.pix.pixelformat);
+ if (NULL == fmt)
+ return -EINVAL;
+
+ field = f->fmt.pix.field;
+ maxh = (dev->tvnorm->id & V4L2_STD_525_60) ? 480 : 576;
+
+ switch (field) {
+ case V4L2_FIELD_TOP:
+ case V4L2_FIELD_BOTTOM:
+ break;
+ case V4L2_FIELD_INTERLACED:
+ case V4L2_FIELD_SEQ_BT:
+ case V4L2_FIELD_SEQ_TB:
+ maxh = maxh * 2;
+ break;
+ default:
+ field = (f->fmt.pix.height > maxh / 2)
+ ? V4L2_FIELD_INTERLACED
+ : V4L2_FIELD_BOTTOM;
+ break;
+ }
+
+ f->fmt.pix.field = field;
+ if (f->fmt.pix.width < 48)
+ f->fmt.pix.width = 48;
+ if (f->fmt.pix.height < 32)
+ f->fmt.pix.height = 32;
+ if (f->fmt.pix.width > 720)
+ f->fmt.pix.width = 720;
+ if (f->fmt.pix.height > maxh)
+ f->fmt.pix.height = maxh;
+ f->fmt.pix.width &= ~0x03;
+ f->fmt.pix.bytesperline =
+ (f->fmt.pix.width * (fmt->depth)) >> 3;
+ f->fmt.pix.sizeimage =
+ f->fmt.pix.height * f->fmt.pix.bytesperline;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+ return 0;
+}
+
+/*
+ * Note that tw68_s_fmt_vid_cap sets the information into the device structure,
+ * and it will be used for all future new buffers. However, there could be
+ * some number of buffers on the "active" chain which will be filled before
+ * the change takes place.
+ */
+static int tw68_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct tw68_dev *dev = video_drvdata(file);
+ int err;
+
+ err = tw68_try_fmt_vid_cap(file, priv, f);
+ if (0 != err)
+ return err;
+
+ dev->fmt = format_by_fourcc(f->fmt.pix.pixelformat);
+ dev->width = f->fmt.pix.width;
+ dev->height = f->fmt.pix.height;
+ dev->field = f->fmt.pix.field;
+ return 0;
+}
+
+static int tw68_enum_input(struct file *file, void *priv,
+ struct v4l2_input *i)
+{
+ struct tw68_dev *dev = video_drvdata(file);
+ unsigned int n;
+
+ n = i->index;
+ if (n >= TW68_INPUT_MAX)
+ return -EINVAL;
+ i->index = n;
+ i->type = V4L2_INPUT_TYPE_CAMERA;
+ snprintf(i->name, sizeof(i->name), "Composite %d", n);
+
+ /* If the query is for the current input, get live data */
+ if (n == dev->input) {
+ int v1 = tw_readb(TW68_STATUS1);
+ int v2 = tw_readb(TW68_MVSN);
+
+ if (0 != (v1 & (1 << 7)))
+ i->status |= V4L2_IN_ST_NO_SYNC;
+ if (0 != (v1 & (1 << 6)))
+ i->status |= V4L2_IN_ST_NO_H_LOCK;
+ if (0 != (v1 & (1 << 2)))
+ i->status |= V4L2_IN_ST_NO_SIGNAL;
+ if (0 != (v1 & 1 << 1))
+ i->status |= V4L2_IN_ST_NO_COLOR;
+ if (0 != (v2 & (1 << 2)))
+ i->status |= V4L2_IN_ST_MACROVISION;
+ }
+ i->std = video_devdata(file)->tvnorms;
+ return 0;
+}
+
+static int tw68_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ struct tw68_dev *dev = video_drvdata(file);
+
+ *i = dev->input;
+ return 0;
+}
+
+static int tw68_s_input(struct file *file, void *priv, unsigned int i)
+{
+ struct tw68_dev *dev = video_drvdata(file);
+
+ if (i >= TW68_INPUT_MAX)
+ return -EINVAL;
+ dev->input = i;
+ tw_andorb(TW68_INFORM, 0x03 << 2, dev->input << 2);
+ return 0;
+}
+
+static int tw68_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct tw68_dev *dev = video_drvdata(file);
+
+ strcpy(cap->driver, "tw68");
+ strlcpy(cap->card, "Techwell Capture Card",
+ sizeof(cap->card));
+ sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
+ cap->device_caps =
+ V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_READWRITE |
+ V4L2_CAP_STREAMING;
+
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+static int tw68_s_std(struct file *file, void *priv, v4l2_std_id id)
+{
+ struct tw68_dev *dev = video_drvdata(file);
+ unsigned int i;
+
+ if (vb2_is_busy(&dev->vidq))
+ return -EBUSY;
+
+	/* Look for a match on the complete norm id (may have multiple bits) */
+ for (i = 0; i < TVNORMS; i++) {
+ if (id == tvnorms[i].id)
+ break;
+ }
+
+ /* If no exact match, look for norm which contains this one */
+ if (i == TVNORMS) {
+ for (i = 0; i < TVNORMS; i++)
+ if (id & tvnorms[i].id)
+ break;
+ }
+ /* If still not matched, give up */
+ if (i == TVNORMS)
+ return -EINVAL;
+
+ set_tvnorm(dev, &tvnorms[i]); /* do the actual setting */
+ return 0;
+}
+
+static int tw68_g_std(struct file *file, void *priv, v4l2_std_id *id)
+{
+ struct tw68_dev *dev = video_drvdata(file);
+
+ *id = dev->tvnorm->id;
+ return 0;
+}
+
+static int tw68_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->index >= FORMATS)
+ return -EINVAL;
+
+ strlcpy(f->description, formats[f->index].name,
+ sizeof(f->description));
+
+ f->pixelformat = formats[f->index].fourcc;
+
+ return 0;
+}
+
+/*
+ * Used strictly for internal development and debugging, this routine
+ * prints out the current register contents for the tw68xx device.
+ */
+static void tw68_dump_regs(struct tw68_dev *dev)
+{
+ unsigned char line[80];
+ int i, j, k;
+ unsigned char *cptr;
+
+ pr_info("Full dump of TW68 registers:\n");
+ /* First we do the PCI regs, 8 4-byte regs per line */
+ for (i = 0; i < 0x100; i += 32) {
+ cptr = line;
+ cptr += sprintf(cptr, "%03x ", i);
+ /* j steps through the next 4 words */
+ for (j = i; j < i + 16; j += 4)
+ cptr += sprintf(cptr, "%08x ", tw_readl(j));
+ *cptr++ = ' ';
+ for (; j < i + 32; j += 4)
+ cptr += sprintf(cptr, "%08x ", tw_readl(j));
+ *cptr++ = '\n';
+ *cptr = 0;
+ pr_info("%s", line);
+ }
+ /* Next the control regs, which are single-byte, address mod 4 */
+ while (i < 0x400) {
+ cptr = line;
+ cptr += sprintf(cptr, "%03x ", i);
+ /* Print out 4 groups of 4 bytes */
+ for (j = 0; j < 4; j++) {
+ for (k = 0; k < 4; k++) {
+ cptr += sprintf(cptr, "%02x ",
+ tw_readb(i));
+ i += 4;
+ }
+ *cptr++ = ' ';
+ }
+ *cptr++ = '\n';
+ *cptr = 0;
+ pr_info("%s", line);
+ }
+}
+
+static int vidioc_log_status(struct file *file, void *priv)
+{
+ struct tw68_dev *dev = video_drvdata(file);
+
+ tw68_dump_regs(dev);
+ return v4l2_ctrl_log_status(file, priv);
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int vidioc_g_register(struct file *file, void *priv,
+ struct v4l2_dbg_register *reg)
+{
+ struct tw68_dev *dev = video_drvdata(file);
+
+ if (reg->size == 1)
+ reg->val = tw_readb(reg->reg);
+ else
+ reg->val = tw_readl(reg->reg);
+ return 0;
+}
+
+static int vidioc_s_register(struct file *file, void *priv,
+ const struct v4l2_dbg_register *reg)
+{
+ struct tw68_dev *dev = video_drvdata(file);
+
+ if (reg->size == 1)
+ tw_writeb(reg->reg, reg->val);
+ else
+ tw_writel(reg->reg & 0xffff, reg->val);
+ return 0;
+}
+#endif
+
+static const struct v4l2_ctrl_ops tw68_ctrl_ops = {
+ .s_ctrl = tw68_s_ctrl,
+};
+
+static const struct v4l2_file_operations video_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+ .unlocked_ioctl = video_ioctl2,
+};
+
+static const struct v4l2_ioctl_ops video_ioctl_ops = {
+ .vidioc_querycap = tw68_querycap,
+ .vidioc_enum_fmt_vid_cap = tw68_enum_fmt_vid_cap,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_s_std = tw68_s_std,
+ .vidioc_g_std = tw68_g_std,
+ .vidioc_enum_input = tw68_enum_input,
+ .vidioc_g_input = tw68_g_input,
+ .vidioc_s_input = tw68_s_input,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_g_fmt_vid_cap = tw68_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = tw68_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = tw68_s_fmt_vid_cap,
+ .vidioc_log_status = vidioc_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .vidioc_g_register = vidioc_g_register,
+ .vidioc_s_register = vidioc_s_register,
+#endif
+};
+
+static struct video_device tw68_video_template = {
+ .name = "tw68_video",
+ .fops = &video_fops,
+ .ioctl_ops = &video_ioctl_ops,
+ .release = video_device_release_empty,
+ .tvnorms = TW68_NORMS,
+};
+
+/* ------------------------------------------------------------------ */
+/* exported stuff */
+void tw68_set_tvnorm_hw(struct tw68_dev *dev)
+{
+ tw_andorb(TW68_SDT, 0x07, dev->tvnorm->format);
+}
+
+int tw68_video_init1(struct tw68_dev *dev)
+{
+ struct v4l2_ctrl_handler *hdl = &dev->hdl;
+
+ v4l2_ctrl_handler_init(hdl, 6);
+ v4l2_ctrl_new_std(hdl, &tw68_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, -128, 127, 1, 20);
+ v4l2_ctrl_new_std(hdl, &tw68_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 255, 1, 100);
+ v4l2_ctrl_new_std(hdl, &tw68_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 255, 1, 128);
+ /* NTSC only */
+ v4l2_ctrl_new_std(hdl, &tw68_ctrl_ops,
+ V4L2_CID_HUE, -128, 127, 1, 0);
+ v4l2_ctrl_new_std(hdl, &tw68_ctrl_ops,
+ V4L2_CID_COLOR_KILLER, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(hdl, &tw68_ctrl_ops,
+ V4L2_CID_CHROMA_AGC, 0, 1, 1, 1);
+ if (hdl->error) {
+ v4l2_ctrl_handler_free(hdl);
+ return hdl->error;
+ }
+ dev->v4l2_dev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_setup(hdl);
+ return 0;
+}
+
+int tw68_video_init2(struct tw68_dev *dev, int video_nr)
+{
+ int ret;
+
+ set_tvnorm(dev, &tvnorms[0]);
+
+ dev->fmt = format_by_fourcc(V4L2_PIX_FMT_BGR24);
+ dev->width = 720;
+ dev->height = 576;
+ dev->field = V4L2_FIELD_INTERLACED;
+
+ INIT_LIST_HEAD(&dev->active);
+ dev->vidq.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dev->vidq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ dev->vidq.io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ | VB2_DMABUF;
+ dev->vidq.ops = &tw68_video_qops;
+ dev->vidq.mem_ops = &vb2_dma_sg_memops;
+ dev->vidq.drv_priv = dev;
+ dev->vidq.gfp_flags = __GFP_DMA32;
+ dev->vidq.buf_struct_size = sizeof(struct tw68_buf);
+ dev->vidq.lock = &dev->lock;
+ dev->vidq.min_buffers_needed = 2;
+ ret = vb2_queue_init(&dev->vidq);
+ if (ret)
+ return ret;
+ dev->vdev = tw68_video_template;
+ dev->vdev.v4l2_dev = &dev->v4l2_dev;
+ dev->vdev.lock = &dev->lock;
+ dev->vdev.queue = &dev->vidq;
+ video_set_drvdata(&dev->vdev, dev);
+ return video_register_device(&dev->vdev, VFL_TYPE_GRABBER, video_nr);
+}
+
+/*
+ * tw68_irq_video_done
+ */
+void tw68_irq_video_done(struct tw68_dev *dev, unsigned long status)
+{
+ __u32 reg;
+
+ /* reset interrupts handled by this routine */
+ tw_writel(TW68_INTSTAT, status);
+ /*
+ * Check most likely first
+ *
+ * DMAPI shows we have reached the end of the risc code
+ * for the current buffer.
+ */
+ if (status & TW68_DMAPI) {
+ struct tw68_buf *buf;
+
+ spin_lock(&dev->slock);
+ buf = list_entry(dev->active.next, struct tw68_buf, list);
+ list_del(&buf->list);
+ spin_unlock(&dev->slock);
+ v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
+ buf->vb.v4l2_buf.field = dev->field;
+ buf->vb.v4l2_buf.sequence = dev->seqnr++;
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
+ status &= ~(TW68_DMAPI);
+ if (0 == status)
+ return;
+ }
+ if (status & (TW68_VLOCK | TW68_HLOCK))
+ dev_dbg(&dev->pci->dev, "Lost sync\n");
+ if (status & TW68_PABORT)
+ dev_err(&dev->pci->dev, "PABORT interrupt\n");
+ if (status & TW68_DMAPERR)
+ dev_err(&dev->pci->dev, "DMAPERR interrupt\n");
+ /*
+ * On TW6800, FDMIS is apparently generated if video input is switched
+ * during operation. Therefore, it is not enabled for that chip.
+ */
+ if (status & TW68_FDMIS)
+ dev_dbg(&dev->pci->dev, "FDMIS interrupt\n");
+ if (status & TW68_FFOF) {
+ /* probably a logic error */
+ reg = tw_readl(TW68_DMAC) & TW68_FIFO_EN;
+ tw_clearl(TW68_DMAC, TW68_FIFO_EN);
+ dev_dbg(&dev->pci->dev, "FFOF interrupt\n");
+ tw_setl(TW68_DMAC, reg);
+ }
+ if (status & TW68_FFERR)
+ dev_dbg(&dev->pci->dev, "FFERR interrupt\n");
+}
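
tw68_irq_video_done() above follows a common interrupt-dispatch pattern: acknowledge the raw status, service the most likely source first (DMAPI, i.e. buffer completion), strip the bits that were handled, and return early once nothing is left. A minimal standalone sketch of that flow, with hypothetical flag values standing in for the TW68_* status bits:

#include <stdio.h>

/* Hypothetical stand-ins for the TW68_* interrupt status bits. */
#define IRQ_DONE 0x01 /* DMA program finished for the current buffer */
#define IRQ_SYNC 0x02 /* lost horizontal/vertical lock */
#define IRQ_FIFO 0x04 /* FIFO overflow */

static void handle_status(unsigned long status)
{
	/* Most frequent source first: complete the finished buffer. */
	if (status & IRQ_DONE) {
		printf("buffer done\n");
		status &= ~IRQ_DONE;
		if (status == 0)
			return; /* nothing else pending */
	}
	/* Rarer error conditions are only reported. */
	if (status & IRQ_SYNC)
		printf("lost sync\n");
	if (status & IRQ_FIFO)
		printf("FIFO overflow\n");
}

int main(void)
{
	handle_status(IRQ_DONE);
	handle_status(IRQ_DONE | IRQ_FIFO);
	return 0;
}
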
diff --git a/drivers/media/pci/tw68/tw68.h b/drivers/media/pci/tw68/tw68.h
new file mode 100644
index 000000000000..2c8abe26b13b
--- /dev/null
+++ b/drivers/media/pci/tw68/tw68.h
@@ -0,0 +1,231 @@
+/*
+ * tw68 driver common header file
+ *
+ * Much of this code is derived from the cx88 and saa7134 drivers, which
+ * were in turn derived from the bt87x driver. The original work was by
+ * Gerd Knorr; more recently the code was enhanced by Mauro Carvalho Chehab,
+ * Hans Verkuil, Andy Walls and many others. Their work is gratefully
+ * acknowledged. Full credit goes to them - any problems within this code
+ * are mine.
+ *
+ * Copyright (C) 2009 William M. Brack
+ *
+ * Refactored and updated to the latest v4l core frameworks:
+ *
+ * Copyright (C) 2014 Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/version.h>
+#include <linux/pci.h>
+#include <linux/videodev2.h>
+#include <linux/notifier.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/videobuf2-dma-sg.h>
+
+#include "tw68-reg.h"
+
+#define UNSET (-1U)
+
+/* system vendor and device IDs */
+#define PCI_VENDOR_ID_TECHWELL 0x1797
+#define PCI_DEVICE_ID_6800 0x6800
+#define PCI_DEVICE_ID_6801 0x6801
+#define PCI_DEVICE_ID_AUDIO2 0x6802
+#define PCI_DEVICE_ID_TS3 0x6803
+#define PCI_DEVICE_ID_6804 0x6804
+#define PCI_DEVICE_ID_AUDIO5 0x6805
+#define PCI_DEVICE_ID_TS6 0x6806
+
+/* tw6816 based cards */
+#define PCI_DEVICE_ID_6816_1 0x6810
+#define PCI_DEVICE_ID_6816_2 0x6811
+#define PCI_DEVICE_ID_6816_3 0x6812
+#define PCI_DEVICE_ID_6816_4 0x6813
+
+#define TW68_NORMS ( \
+ V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM | \
+ V4L2_STD_PAL_M | V4L2_STD_PAL_Nc | V4L2_STD_PAL_60)
+
+#define TW68_VID_INTS (TW68_FFERR | TW68_PABORT | TW68_DMAPERR | \
+ TW68_FFOF | TW68_DMAPI)
+/* TW6800 chips have trouble with these, so we don't set them on those chips */
+#define TW68_VID_INTSX (TW68_FDMIS | TW68_HLOCK | TW68_VLOCK)
+
+#define TW68_I2C_INTS (TW68_SBERR | TW68_SBDONE | TW68_SBERR2 | \
+ TW68_SBDONE2)
+
+enum tw68_decoder_type {
+ TW6800,
+ TW6801,
+ TW6804,
+ TWXXXX,
+};
+
+/* ----------------------------------------------------------- */
+/* static data */
+
+struct tw68_tvnorm {
+ char *name;
+ v4l2_std_id id;
+
+ /* video decoder */
+ u32 sync_control;
+ u32 luma_control;
+ u32 chroma_ctrl1;
+ u32 chroma_gain;
+ u32 chroma_ctrl2;
+ u32 vgate_misc;
+
+ /* video scaler */
+ u32 h_delay;
+ u32 h_delay0; /* for TW6800 */
+ u32 h_start;
+ u32 h_stop;
+ u32 v_delay;
+ u32 video_v_start;
+ u32 video_v_stop;
+ u32 vbi_v_start_0;
+ u32 vbi_v_stop_0;
+ u32 vbi_v_start_1;
+
+ /* Techwell specific */
+ u32 format;
+};
+
+struct tw68_format {
+ char *name;
+ u32 fourcc;
+ u32 depth;
+ u32 twformat;
+};
+
+/* ----------------------------------------------------------- */
+/* card configuration */
+
+#define TW68_BOARD_NOAUTO UNSET
+#define TW68_BOARD_UNKNOWN 0
+#define TW68_BOARD_GENERIC_6802 1
+
+#define TW68_MAXBOARDS 16
+#define TW68_INPUT_MAX 4
+
+/* ----------------------------------------------------------- */
+/* device / file handle status */
+
+#define BUFFER_TIMEOUT msecs_to_jiffies(500) /* 0.5 seconds */
+
+struct tw68_dev; /* forward declaration */
+
+/* buffer for one video/vbi/ts frame */
+struct tw68_buf {
+ struct vb2_buffer vb;
+ struct list_head list;
+
+ unsigned int size;
+ __le32 *cpu;
+ __le32 *jmp;
+ dma_addr_t dma;
+};
+
+struct tw68_fmt {
+ char *name;
+ u32 fourcc; /* v4l2 format id */
+ int depth;
+ int flags;
+ u32 twformat;
+};
+
+/* global device status */
+struct tw68_dev {
+ struct mutex lock;
+ spinlock_t slock;
+ u16 instance;
+ struct v4l2_device v4l2_dev;
+
+ /* various device info */
+ enum tw68_decoder_type vdecoder;
+ struct video_device vdev;
+ struct v4l2_ctrl_handler hdl;
+
+ /* pci i/o */
+ char *name;
+ struct pci_dev *pci;
+ unsigned char pci_rev, pci_lat;
+ u32 __iomem *lmmio;
+ u8 __iomem *bmmio;
+ u32 pci_irqmask;
+ /* The irq mask to be used will depend upon the chip type */
+ u32 board_virqmask;
+
+ /* video capture */
+ const struct tw68_format *fmt;
+ unsigned width, height;
+ unsigned seqnr;
+ unsigned field;
+ struct vb2_queue vidq;
+ struct list_head active;
+
+ /* various v4l controls */
+ const struct tw68_tvnorm *tvnorm; /* video */
+
+ int input;
+};
+
+/* ----------------------------------------------------------- */
+
+#define tw_readl(reg) readl(dev->lmmio + ((reg) >> 2))
+#define tw_readb(reg) readb(dev->bmmio + (reg))
+#define tw_writel(reg, value) writel((value), dev->lmmio + ((reg) >> 2))
+#define tw_writeb(reg, value) writeb((value), dev->bmmio + (reg))
+
+#define tw_andorl(reg, mask, value) \
+ writel((readl(dev->lmmio+((reg)>>2)) & ~(mask)) |\
+ ((value) & (mask)), dev->lmmio+((reg)>>2))
+#define tw_andorb(reg, mask, value) \
+ writeb((readb(dev->bmmio + (reg)) & ~(mask)) |\
+ ((value) & (mask)), dev->bmmio+(reg))
+#define tw_setl(reg, bit) tw_andorl((reg), (bit), (bit))
+#define tw_setb(reg, bit) tw_andorb((reg), (bit), (bit))
+#define tw_clearl(reg, bit) \
+ writel((readl(dev->lmmio + ((reg) >> 2)) & ~(bit)), \
+ dev->lmmio + ((reg) >> 2))
+#define tw_clearb(reg, bit) \
+ writeb((readb(dev->bmmio+(reg)) & ~(bit)), \
+ dev->bmmio + (reg))
+
+#define tw_wait(us) { udelay(us); }
+
+/* ----------------------------------------------------------- */
+/* tw68-video.c */
+
+void tw68_set_tvnorm_hw(struct tw68_dev *dev);
+
+int tw68_video_init1(struct tw68_dev *dev);
+int tw68_video_init2(struct tw68_dev *dev, int video_nr);
+void tw68_irq_video_done(struct tw68_dev *dev, unsigned long status);
+int tw68_video_start_dma(struct tw68_dev *dev, struct tw68_buf *buf);
+
+/* ----------------------------------------------------------- */
+/* tw68-risc.c */
+
+int tw68_risc_buffer(struct pci_dev *pci, struct tw68_buf *buf,
+ struct scatterlist *sglist, unsigned int top_offset,
+ unsigned int bottom_offset, unsigned int bpl,
+ unsigned int padding, unsigned int lines);
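
The tw_andorl()/tw_andorb() helpers defined near the end of tw68.h implement a read-modify-write: bits selected by the mask are replaced with the new value and everything else is preserved, which is what tw68_set_tvnorm_hw() relies on when it updates only the low three bits of TW68_SDT. A minimal standalone sketch of the masking arithmetic, applied to a plain integer rather than an MMIO register:

#include <assert.h>
#include <stdint.h>

/* Same arithmetic as tw_andorb(): keep bits outside 'mask', replace the rest. */
static uint8_t andor8(uint8_t old, uint8_t mask, uint8_t value)
{
	return (old & ~mask) | (value & mask);
}

int main(void)
{
	uint8_t reg = 0xd5; /* 1101 0101 */

	/* Update only the low three bits, as tw68_set_tvnorm_hw() does. */
	reg = andor8(reg, 0x07, 0x03); /* -> 1101 0011 */
	assert(reg == 0xd3);
	return 0;
}
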
diff --git a/drivers/media/pci/zoran/zoran_device.c b/drivers/media/pci/zoran/zoran_device.c
index bf34b93f23ee..b6801e035ea4 100644
--- a/drivers/media/pci/zoran/zoran_device.c
+++ b/drivers/media/pci/zoran/zoran_device.c
@@ -682,7 +682,7 @@ set_videobus_dir (struct zoran *zr,
switch (zr->card.type) {
case LML33:
case LML33R10:
- if (lml33dpath == 0)
+ if (!lml33dpath)
GPIO(zr, 5, val);
else
GPIO(zr, 5, 1);
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 6d86646d9743..bee9074ebc13 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -56,7 +56,8 @@ config VIDEO_VIU
config VIDEO_TIMBERDALE
tristate "Support for timberdale Video In/LogiWIN"
- depends on MFD_TIMBERDALE && VIDEO_V4L2 && I2C && DMADEVICES
+ depends on VIDEO_V4L2 && I2C && DMADEVICES
+ depends on MFD_TIMBERDALE || COMPILE_TEST
select DMA_ENGINE
select TIMB_DMA
select VIDEO_ADV7180
@@ -74,7 +75,8 @@ config VIDEO_VINO
config VIDEO_M32R_AR
tristate "AR devices"
- depends on M32R && VIDEO_V4L2
+ depends on VIDEO_V4L2
+ depends on M32R || COMPILE_TEST
---help---
This is a video4linux driver for the Renesas AR (Artificial Retina)
camera module.
@@ -94,6 +96,7 @@ config VIDEO_M32R_AR_M64278
config VIDEO_OMAP3
tristate "OMAP 3 Camera support"
depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && ARCH_OMAP3
+ depends on HAS_DMA
select ARM_DMA_USE_IOMMU
select OMAP_IOMMU
select VIDEOBUF2_DMA_CONTIG
@@ -109,7 +112,9 @@ config VIDEO_OMAP3_DEBUG
config VIDEO_S3C_CAMIF
tristate "Samsung S3C24XX/S3C64XX SoC Camera Interface driver"
depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
- depends on (ARCH_S3C64XX || PLAT_S3C24XX) && PM_RUNTIME
+ depends on PM_RUNTIME
+ depends on ARCH_S3C64XX || PLAT_S3C24XX || COMPILE_TEST
+ depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
---help---
This is a v4l2 driver for s3c24xx and s3c64xx SoC series camera
@@ -140,6 +145,7 @@ if V4L_MEM2MEM_DRIVERS
config VIDEO_CODA
tristate "Chips&Media Coda multi-standard codec IP"
depends on VIDEO_DEV && VIDEO_V4L2 && ARCH_MXC
+ depends on HAS_DMA
select SRAM
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
@@ -151,6 +157,7 @@ config VIDEO_CODA
config VIDEO_MEM2MEM_DEINTERLACE
tristate "Deinterlace support"
depends on VIDEO_DEV && VIDEO_V4L2 && DMA_ENGINE
+ depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
help
@@ -158,7 +165,9 @@ config VIDEO_MEM2MEM_DEINTERLACE
config VIDEO_SAMSUNG_S5P_G2D
tristate "Samsung S5P and EXYNOS4 G2D 2d graphics accelerator driver"
- depends on VIDEO_DEV && VIDEO_V4L2 && (PLAT_S5P || ARCH_EXYNOS)
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST
+ depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
default n
@@ -168,7 +177,9 @@ config VIDEO_SAMSUNG_S5P_G2D
config VIDEO_SAMSUNG_S5P_JPEG
tristate "Samsung S5P/Exynos3250/Exynos4 JPEG codec driver"
- depends on VIDEO_DEV && VIDEO_V4L2 && (PLAT_S5P || ARCH_EXYNOS)
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST
+ depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
---help---
@@ -177,7 +188,9 @@ config VIDEO_SAMSUNG_S5P_JPEG
config VIDEO_SAMSUNG_S5P_MFC
tristate "Samsung S5P MFC Video Codec"
- depends on VIDEO_DEV && VIDEO_V4L2 && (PLAT_S5P || ARCH_EXYNOS)
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST
+ depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
default n
help
@@ -185,7 +198,9 @@ config VIDEO_SAMSUNG_S5P_MFC
config VIDEO_MX2_EMMAPRP
tristate "MX2 eMMa-PrP support"
- depends on VIDEO_DEV && VIDEO_V4L2 && SOC_IMX27
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on SOC_IMX27 || COMPILE_TEST
+ depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
help
@@ -195,7 +210,9 @@ config VIDEO_MX2_EMMAPRP
config VIDEO_SAMSUNG_EXYNOS_GSC
tristate "Samsung Exynos G-Scaler driver"
- depends on VIDEO_DEV && VIDEO_V4L2 && ARCH_EXYNOS5
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on ARCH_EXYNOS5 || COMPILE_TEST
+ depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
help
@@ -204,6 +221,7 @@ config VIDEO_SAMSUNG_EXYNOS_GSC
config VIDEO_SH_VEU
tristate "SuperH VEU mem2mem video processing driver"
depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
+ depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
help
@@ -213,6 +231,7 @@ config VIDEO_SH_VEU
config VIDEO_RENESAS_VSP1
tristate "Renesas VSP1 Video Processing Engine"
depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && HAS_DMA
+ depends on ARCH_SHMOBILE || COMPILE_TEST
select VIDEOBUF2_DMA_CONTIG
---help---
This is a V4L2 driver for the Renesas VSP1 video processing engine.
@@ -222,7 +241,9 @@ config VIDEO_RENESAS_VSP1
config VIDEO_TI_VPE
tristate "TI VPE (Video Processing Engine) driver"
- depends on VIDEO_DEV && VIDEO_V4L2 && SOC_DRA7XX
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on SOC_DRA7XX || COMPILE_TEST
+ depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
default n
@@ -243,19 +264,8 @@ menuconfig V4L_TEST_DRIVERS
depends on MEDIA_CAMERA_SUPPORT
if V4L_TEST_DRIVERS
-config VIDEO_VIVI
- tristate "Virtual Video Driver"
- depends on VIDEO_DEV && VIDEO_V4L2 && !SPARC32 && !SPARC64
- select FONT_SUPPORT
- select FONT_8x16
- select VIDEOBUF2_VMALLOC
- default n
- ---help---
- Enables a virtual video driver. This device shows a color bar
- and a timestamp, as a real device would generate by using V4L2
- api.
- Say Y here if you want to test video apps or debug V4L devices.
- In doubt, say N.
+
+source "drivers/media/platform/vivid/Kconfig"
config VIDEO_MEM2MEM_TESTDEV
tristate "Virtual test device for mem2mem framework"
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index e5269da91906..579046bc276f 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -15,14 +15,14 @@ obj-$(CONFIG_VIDEO_MMP_CAMERA) += marvell-ccic/
obj-$(CONFIG_VIDEO_OMAP3) += omap3isp/
obj-$(CONFIG_VIDEO_VIU) += fsl-viu.o
-obj-$(CONFIG_VIDEO_VIVI) += vivi.o
+obj-$(CONFIG_VIDEO_VIVID) += vivid/
obj-$(CONFIG_VIDEO_MEM2MEM_TESTDEV) += mem2mem_testdev.o
obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe/
obj-$(CONFIG_VIDEO_MX2_EMMAPRP) += mx2_emmaprp.o
-obj-$(CONFIG_VIDEO_CODA) += coda.o
+obj-$(CONFIG_VIDEO_CODA) += coda/
obj-$(CONFIG_VIDEO_SH_VEU) += sh_veu.o
@@ -47,8 +47,6 @@ obj-$(CONFIG_SOC_CAMERA) += soc_camera/
obj-$(CONFIG_VIDEO_RENESAS_VSP1) += vsp1/
-obj-y += davinci/
-
-obj-$(CONFIG_ARCH_OMAP) += omap/
+obj-y += omap/
ccflags-y += -I$(srctree)/drivers/media/i2c
diff --git a/drivers/media/platform/blackfin/Kconfig b/drivers/media/platform/blackfin/Kconfig
index cc239972fa2c..68fa90151b8f 100644
--- a/drivers/media/platform/blackfin/Kconfig
+++ b/drivers/media/platform/blackfin/Kconfig
@@ -1,6 +1,7 @@
config VIDEO_BLACKFIN_CAPTURE
tristate "Blackfin Video Capture Driver"
depends on VIDEO_V4L2 && BLACKFIN && I2C
+ depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
help
V4L2 bridge driver for Blackfin video capture device.
diff --git a/drivers/media/platform/coda.c b/drivers/media/platform/coda.c
deleted file mode 100644
index 3a6d1d2b429e..000000000000
--- a/drivers/media/platform/coda.c
+++ /dev/null
@@ -1,3933 +0,0 @@
-/*
- * Coda multi-standard codec IP
- *
- * Copyright (C) 2012 Vista Silicon S.L.
- * Javier Martin, <javier.martin@vista-silicon.com>
- * Xavier Duret
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/clk.h>
-#include <linux/debugfs.h>
-#include <linux/delay.h>
-#include <linux/firmware.h>
-#include <linux/genalloc.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/kfifo.h>
-#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/slab.h>
-#include <linux/videodev2.h>
-#include <linux/of.h>
-#include <linux/platform_data/coda.h>
-#include <linux/reset.h>
-
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-event.h>
-#include <media/v4l2-ioctl.h>
-#include <media/v4l2-mem2mem.h>
-#include <media/videobuf2-core.h>
-#include <media/videobuf2-dma-contig.h>
-
-#include "coda.h"
-
-#define CODA_NAME "coda"
-
-#define CODADX6_MAX_INSTANCES 4
-
-#define CODA_PARA_BUF_SIZE (10 * 1024)
-#define CODA_ISRAM_SIZE (2048 * 2)
-
-#define CODA7_PS_BUF_SIZE 0x28000
-#define CODA9_PS_SAVE_SIZE (512 * 1024)
-
-#define CODA_MAX_FRAMEBUFFERS 8
-
-#define CODA_MAX_FRAME_SIZE 0x100000
-#define FMO_SLICE_SAVE_BUF_SIZE (32)
-#define CODA_DEFAULT_GAMMA 4096
-#define CODA9_DEFAULT_GAMMA 24576 /* 0.75 * 32768 */
-
-#define MIN_W 176
-#define MIN_H 144
-
-#define S_ALIGN 1 /* multiple of 2 */
-#define W_ALIGN 1 /* multiple of 2 */
-#define H_ALIGN 1 /* multiple of 2 */
-
-#define fh_to_ctx(__fh) container_of(__fh, struct coda_ctx, fh)
-
-static int coda_debug;
-module_param(coda_debug, int, 0644);
-MODULE_PARM_DESC(coda_debug, "Debug level (0-1)");
-
-enum {
- V4L2_M2M_SRC = 0,
- V4L2_M2M_DST = 1,
-};
-
-enum coda_inst_type {
- CODA_INST_ENCODER,
- CODA_INST_DECODER,
-};
-
-enum coda_product {
- CODA_DX6 = 0xf001,
- CODA_7541 = 0xf012,
- CODA_960 = 0xf020,
-};
-
-struct coda_fmt {
- char *name;
- u32 fourcc;
-};
-
-struct coda_codec {
- u32 mode;
- u32 src_fourcc;
- u32 dst_fourcc;
- u32 max_w;
- u32 max_h;
-};
-
-struct coda_devtype {
- char *firmware;
- enum coda_product product;
- struct coda_codec *codecs;
- unsigned int num_codecs;
- size_t workbuf_size;
- size_t tempbuf_size;
- size_t iram_size;
-};
-
-/* Per-queue, driver-specific private data */
-struct coda_q_data {
- unsigned int width;
- unsigned int height;
- unsigned int bytesperline;
- unsigned int sizeimage;
- unsigned int fourcc;
- struct v4l2_rect rect;
-};
-
-struct coda_aux_buf {
- void *vaddr;
- dma_addr_t paddr;
- u32 size;
- struct debugfs_blob_wrapper blob;
- struct dentry *dentry;
-};
-
-struct coda_dev {
- struct v4l2_device v4l2_dev;
- struct video_device vfd;
- struct platform_device *plat_dev;
- const struct coda_devtype *devtype;
-
- void __iomem *regs_base;
- struct clk *clk_per;
- struct clk *clk_ahb;
- struct reset_control *rstc;
-
- struct coda_aux_buf codebuf;
- struct coda_aux_buf tempbuf;
- struct coda_aux_buf workbuf;
- struct gen_pool *iram_pool;
- struct coda_aux_buf iram;
-
- spinlock_t irqlock;
- struct mutex dev_mutex;
- struct mutex coda_mutex;
- struct workqueue_struct *workqueue;
- struct v4l2_m2m_dev *m2m_dev;
- struct vb2_alloc_ctx *alloc_ctx;
- struct list_head instances;
- unsigned long instance_mask;
- struct dentry *debugfs_root;
-};
-
-struct coda_params {
- u8 rot_mode;
- u8 h264_intra_qp;
- u8 h264_inter_qp;
- u8 h264_min_qp;
- u8 h264_max_qp;
- u8 h264_deblk_enabled;
- u8 h264_deblk_alpha;
- u8 h264_deblk_beta;
- u8 mpeg4_intra_qp;
- u8 mpeg4_inter_qp;
- u8 gop_size;
- int intra_refresh;
- int codec_mode;
- int codec_mode_aux;
- enum v4l2_mpeg_video_multi_slice_mode slice_mode;
- u32 framerate;
- u16 bitrate;
- u32 slice_max_bits;
- u32 slice_max_mb;
-};
-
-struct coda_iram_info {
- u32 axi_sram_use;
- phys_addr_t buf_bit_use;
- phys_addr_t buf_ip_ac_dc_use;
- phys_addr_t buf_dbk_y_use;
- phys_addr_t buf_dbk_c_use;
- phys_addr_t buf_ovl_use;
- phys_addr_t buf_btp_use;
- phys_addr_t search_ram_paddr;
- int search_ram_size;
- int remaining;
- phys_addr_t next_paddr;
-};
-
-struct gdi_tiled_map {
- int xy2ca_map[16];
- int xy2ba_map[16];
- int xy2ra_map[16];
- int rbc2axi_map[32];
- int xy2rbc_config;
- int map_type;
-#define GDI_LINEAR_FRAME_MAP 0
-};
-
-struct coda_timestamp {
- struct list_head list;
- u32 sequence;
- struct v4l2_timecode timecode;
- struct timeval timestamp;
-};
-
-struct coda_ctx {
- struct coda_dev *dev;
- struct mutex buffer_mutex;
- struct list_head list;
- struct work_struct pic_run_work;
- struct work_struct seq_end_work;
- struct completion completion;
- int aborting;
- int initialized;
- int streamon_out;
- int streamon_cap;
- u32 isequence;
- u32 qsequence;
- u32 osequence;
- u32 sequence_offset;
- struct coda_q_data q_data[2];
- enum coda_inst_type inst_type;
- struct coda_codec *codec;
- enum v4l2_colorspace colorspace;
- struct coda_params params;
- struct v4l2_ctrl_handler ctrls;
- struct v4l2_fh fh;
- int gopcounter;
- int runcounter;
- char vpu_header[3][64];
- int vpu_header_size[3];
- struct kfifo bitstream_fifo;
- struct mutex bitstream_mutex;
- struct coda_aux_buf bitstream;
- bool hold;
- struct coda_aux_buf parabuf;
- struct coda_aux_buf psbuf;
- struct coda_aux_buf slicebuf;
- struct coda_aux_buf internal_frames[CODA_MAX_FRAMEBUFFERS];
- u32 frame_types[CODA_MAX_FRAMEBUFFERS];
- struct coda_timestamp frame_timestamps[CODA_MAX_FRAMEBUFFERS];
- u32 frame_errors[CODA_MAX_FRAMEBUFFERS];
- struct list_head timestamp_list;
- struct coda_aux_buf workbuf;
- int num_internal_frames;
- int idx;
- int reg_idx;
- struct coda_iram_info iram_info;
- struct gdi_tiled_map tiled_map;
- u32 bit_stream_param;
- u32 frm_dis_flg;
- u32 frame_mem_ctrl;
- int display_idx;
- struct dentry *debugfs_entry;
-};
-
-static const u8 coda_filler_nal[14] = { 0x00, 0x00, 0x00, 0x01, 0x0c, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x80 };
-static const u8 coda_filler_size[8] = { 0, 7, 14, 13, 12, 11, 10, 9 };
-
-static inline void coda_write(struct coda_dev *dev, u32 data, u32 reg)
-{
- v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
- "%s: data=0x%x, reg=0x%x\n", __func__, data, reg);
- writel(data, dev->regs_base + reg);
-}
-
-static inline unsigned int coda_read(struct coda_dev *dev, u32 reg)
-{
- u32 data;
- data = readl(dev->regs_base + reg);
- v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
- "%s: data=0x%x, reg=0x%x\n", __func__, data, reg);
- return data;
-}
-
-static inline unsigned long coda_isbusy(struct coda_dev *dev)
-{
- return coda_read(dev, CODA_REG_BIT_BUSY);
-}
-
-static inline int coda_is_initialized(struct coda_dev *dev)
-{
- return (coda_read(dev, CODA_REG_BIT_CUR_PC) != 0);
-}
-
-static int coda_wait_timeout(struct coda_dev *dev)
-{
- unsigned long timeout = jiffies + msecs_to_jiffies(1000);
-
- while (coda_isbusy(dev)) {
- if (time_after(jiffies, timeout))
- return -ETIMEDOUT;
- }
- return 0;
-}
-
-static void coda_command_async(struct coda_ctx *ctx, int cmd)
-{
- struct coda_dev *dev = ctx->dev;
-
- if (dev->devtype->product == CODA_960 ||
- dev->devtype->product == CODA_7541) {
- /* Restore context related registers to CODA */
- coda_write(dev, ctx->bit_stream_param,
- CODA_REG_BIT_BIT_STREAM_PARAM);
- coda_write(dev, ctx->frm_dis_flg,
- CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx));
- coda_write(dev, ctx->frame_mem_ctrl,
- CODA_REG_BIT_FRAME_MEM_CTRL);
- coda_write(dev, ctx->workbuf.paddr, CODA_REG_BIT_WORK_BUF_ADDR);
- }
-
- if (dev->devtype->product == CODA_960) {
- coda_write(dev, 1, CODA9_GDI_WPROT_ERR_CLR);
- coda_write(dev, 0, CODA9_GDI_WPROT_RGN_EN);
- }
-
- coda_write(dev, CODA_REG_BIT_BUSY_FLAG, CODA_REG_BIT_BUSY);
-
- coda_write(dev, ctx->idx, CODA_REG_BIT_RUN_INDEX);
- coda_write(dev, ctx->params.codec_mode, CODA_REG_BIT_RUN_COD_STD);
- coda_write(dev, ctx->params.codec_mode_aux, CODA7_REG_BIT_RUN_AUX_STD);
-
- coda_write(dev, cmd, CODA_REG_BIT_RUN_COMMAND);
-}
-
-static int coda_command_sync(struct coda_ctx *ctx, int cmd)
-{
- struct coda_dev *dev = ctx->dev;
-
- coda_command_async(ctx, cmd);
- return coda_wait_timeout(dev);
-}
-
-static int coda_hw_reset(struct coda_ctx *ctx)
-{
- struct coda_dev *dev = ctx->dev;
- unsigned long timeout;
- unsigned int idx;
- int ret;
-
- if (!dev->rstc)
- return -ENOENT;
-
- idx = coda_read(dev, CODA_REG_BIT_RUN_INDEX);
-
- timeout = jiffies + msecs_to_jiffies(100);
- coda_write(dev, 0x11, CODA9_GDI_BUS_CTRL);
- while (coda_read(dev, CODA9_GDI_BUS_STATUS) != 0x77) {
- if (time_after(jiffies, timeout))
- return -ETIME;
- cpu_relax();
- }
-
- ret = reset_control_reset(dev->rstc);
- if (ret < 0)
- return ret;
-
- coda_write(dev, 0x00, CODA9_GDI_BUS_CTRL);
- coda_write(dev, CODA_REG_BIT_BUSY_FLAG, CODA_REG_BIT_BUSY);
- coda_write(dev, CODA_REG_RUN_ENABLE, CODA_REG_BIT_CODE_RUN);
- ret = coda_wait_timeout(dev);
- coda_write(dev, idx, CODA_REG_BIT_RUN_INDEX);
-
- return ret;
-}
-
-static struct coda_q_data *get_q_data(struct coda_ctx *ctx,
- enum v4l2_buf_type type)
-{
- switch (type) {
- case V4L2_BUF_TYPE_VIDEO_OUTPUT:
- return &(ctx->q_data[V4L2_M2M_SRC]);
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- return &(ctx->q_data[V4L2_M2M_DST]);
- default:
- return NULL;
- }
-}
-
-/*
- * Array of all formats supported by any version of Coda:
- */
-static struct coda_fmt coda_formats[] = {
- {
- .name = "YUV 4:2:0 Planar, YCbCr",
- .fourcc = V4L2_PIX_FMT_YUV420,
- },
- {
- .name = "YUV 4:2:0 Planar, YCrCb",
- .fourcc = V4L2_PIX_FMT_YVU420,
- },
- {
- .name = "H264 Encoded Stream",
- .fourcc = V4L2_PIX_FMT_H264,
- },
- {
- .name = "MPEG4 Encoded Stream",
- .fourcc = V4L2_PIX_FMT_MPEG4,
- },
-};
-
-#define CODA_CODEC(mode, src_fourcc, dst_fourcc, max_w, max_h) \
- { mode, src_fourcc, dst_fourcc, max_w, max_h }
-
-/*
- * Arrays of codecs supported by each given version of Coda:
- * i.MX27 -> codadx6
- * i.MX5x -> coda7
- * i.MX6 -> coda960
- * Use V4L2_PIX_FMT_YUV420 as placeholder for all supported YUV 4:2:0 variants
- */
-static struct coda_codec codadx6_codecs[] = {
- CODA_CODEC(CODADX6_MODE_ENCODE_H264, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_H264, 720, 576),
- CODA_CODEC(CODADX6_MODE_ENCODE_MP4, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_MPEG4, 720, 576),
-};
-
-static struct coda_codec coda7_codecs[] = {
- CODA_CODEC(CODA7_MODE_ENCODE_H264, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_H264, 1280, 720),
- CODA_CODEC(CODA7_MODE_ENCODE_MP4, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_MPEG4, 1280, 720),
- CODA_CODEC(CODA7_MODE_DECODE_H264, V4L2_PIX_FMT_H264, V4L2_PIX_FMT_YUV420, 1920, 1080),
- CODA_CODEC(CODA7_MODE_DECODE_MP4, V4L2_PIX_FMT_MPEG4, V4L2_PIX_FMT_YUV420, 1920, 1080),
-};
-
-static struct coda_codec coda9_codecs[] = {
- CODA_CODEC(CODA9_MODE_ENCODE_H264, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_H264, 1920, 1080),
- CODA_CODEC(CODA9_MODE_ENCODE_MP4, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_MPEG4, 1920, 1080),
- CODA_CODEC(CODA9_MODE_DECODE_H264, V4L2_PIX_FMT_H264, V4L2_PIX_FMT_YUV420, 1920, 1080),
- CODA_CODEC(CODA9_MODE_DECODE_MP4, V4L2_PIX_FMT_MPEG4, V4L2_PIX_FMT_YUV420, 1920, 1080),
-};
-
-static bool coda_format_is_yuv(u32 fourcc)
-{
- switch (fourcc) {
- case V4L2_PIX_FMT_YUV420:
- case V4L2_PIX_FMT_YVU420:
- return true;
- default:
- return false;
- }
-}
-
-/*
- * Normalize all supported YUV 4:2:0 formats to the value used in the codec
- * tables.
- */
-static u32 coda_format_normalize_yuv(u32 fourcc)
-{
- return coda_format_is_yuv(fourcc) ? V4L2_PIX_FMT_YUV420 : fourcc;
-}
-
-static struct coda_codec *coda_find_codec(struct coda_dev *dev, int src_fourcc,
- int dst_fourcc)
-{
- struct coda_codec *codecs = dev->devtype->codecs;
- int num_codecs = dev->devtype->num_codecs;
- int k;
-
- src_fourcc = coda_format_normalize_yuv(src_fourcc);
- dst_fourcc = coda_format_normalize_yuv(dst_fourcc);
- if (src_fourcc == dst_fourcc)
- return NULL;
-
- for (k = 0; k < num_codecs; k++) {
- if (codecs[k].src_fourcc == src_fourcc &&
- codecs[k].dst_fourcc == dst_fourcc)
- break;
- }
-
- if (k == num_codecs)
- return NULL;
-
- return &codecs[k];
-}
-
-static void coda_get_max_dimensions(struct coda_dev *dev,
- struct coda_codec *codec,
- int *max_w, int *max_h)
-{
- struct coda_codec *codecs = dev->devtype->codecs;
- int num_codecs = dev->devtype->num_codecs;
- unsigned int w, h;
- int k;
-
- if (codec) {
- w = codec->max_w;
- h = codec->max_h;
- } else {
- for (k = 0, w = 0, h = 0; k < num_codecs; k++) {
- w = max(w, codecs[k].max_w);
- h = max(h, codecs[k].max_h);
- }
- }
-
- if (max_w)
- *max_w = w;
- if (max_h)
- *max_h = h;
-}
-
-static char *coda_product_name(int product)
-{
- static char buf[9];
-
- switch (product) {
- case CODA_DX6:
- return "CodaDx6";
- case CODA_7541:
- return "CODA7541";
- case CODA_960:
- return "CODA960";
- default:
- snprintf(buf, sizeof(buf), "(0x%04x)", product);
- return buf;
- }
-}
-
-/*
- * V4L2 ioctl() operations.
- */
-static int coda_querycap(struct file *file, void *priv,
- struct v4l2_capability *cap)
-{
- struct coda_ctx *ctx = fh_to_ctx(priv);
-
- strlcpy(cap->driver, CODA_NAME, sizeof(cap->driver));
- strlcpy(cap->card, coda_product_name(ctx->dev->devtype->product),
- sizeof(cap->card));
- strlcpy(cap->bus_info, "platform:" CODA_NAME, sizeof(cap->bus_info));
- /*
- * This is only a mem-to-mem video device. The capture and output
- * device capability flags are left only for backward compatibility
- * and are scheduled for removal.
- */
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT |
- V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
-
- return 0;
-}
-
-static int enum_fmt(void *priv, struct v4l2_fmtdesc *f,
- enum v4l2_buf_type type, int src_fourcc)
-{
- struct coda_ctx *ctx = fh_to_ctx(priv);
- struct coda_codec *codecs = ctx->dev->devtype->codecs;
- struct coda_fmt *formats = coda_formats;
- struct coda_fmt *fmt;
- int num_codecs = ctx->dev->devtype->num_codecs;
- int num_formats = ARRAY_SIZE(coda_formats);
- int i, k, num = 0;
-
- for (i = 0; i < num_formats; i++) {
- /* Both uncompressed formats are always supported */
- if (coda_format_is_yuv(formats[i].fourcc) &&
- !coda_format_is_yuv(src_fourcc)) {
- if (num == f->index)
- break;
- ++num;
- continue;
- }
- /* Compressed formats may be supported, check the codec list */
- for (k = 0; k < num_codecs; k++) {
- /* if src_fourcc is set, only consider matching codecs */
- if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
- formats[i].fourcc == codecs[k].dst_fourcc &&
- (!src_fourcc || src_fourcc == codecs[k].src_fourcc))
- break;
- if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
- formats[i].fourcc == codecs[k].src_fourcc)
- break;
- }
- if (k < num_codecs) {
- if (num == f->index)
- break;
- ++num;
- }
- }
-
- if (i < num_formats) {
- fmt = &formats[i];
- strlcpy(f->description, fmt->name, sizeof(f->description));
- f->pixelformat = fmt->fourcc;
- if (!coda_format_is_yuv(fmt->fourcc))
- f->flags |= V4L2_FMT_FLAG_COMPRESSED;
- return 0;
- }
-
- /* Format not found */
- return -EINVAL;
-}
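
enum_fmt() above follows the usual VIDIOC_ENUM_FMT convention: walk the whole format table, count only the entries that pass the filter, and report the entry whose running count equals f->index, or -EINVAL once the index is past the last match. A stripped-down standalone sketch of that counting loop (the filter here is a hypothetical even-number test, not the driver's codec matching):

#include <stdio.h>

/* Return the index-th element of 'tab' that passes the filter, or -1. */
static int enum_nth_match(const int *tab, int len, unsigned int index)
{
	unsigned int num = 0;
	int i;

	for (i = 0; i < len; i++) {
		if (tab[i] % 2 != 0) /* hypothetical filter: keep even values */
			continue;
		if (num == index)
			return tab[i];
		num++;
	}
	return -1; /* index is beyond the last match */
}

int main(void)
{
	const int tab[] = { 1, 4, 7, 8, 10 };

	printf("%d %d %d\n",
	       enum_nth_match(tab, 5, 0),  /* 4 */
	       enum_nth_match(tab, 5, 2),  /* 10 */
	       enum_nth_match(tab, 5, 3)); /* -1 */
	return 0;
}
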
-
-static int coda_enum_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
-{
- struct coda_ctx *ctx = fh_to_ctx(priv);
- struct vb2_queue *src_vq;
- struct coda_q_data *q_data_src;
-
- /* If the source format is already fixed, only list matching formats */
- src_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
- if (vb2_is_streaming(src_vq)) {
- q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
-
- return enum_fmt(priv, f, V4L2_BUF_TYPE_VIDEO_CAPTURE,
- q_data_src->fourcc);
- }
-
- return enum_fmt(priv, f, V4L2_BUF_TYPE_VIDEO_CAPTURE, 0);
-}
-
-static int coda_enum_fmt_vid_out(struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
-{
- return enum_fmt(priv, f, V4L2_BUF_TYPE_VIDEO_OUTPUT, 0);
-}
-
-static int coda_g_fmt(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct coda_q_data *q_data;
- struct coda_ctx *ctx = fh_to_ctx(priv);
-
- q_data = get_q_data(ctx, f->type);
- if (!q_data)
- return -EINVAL;
-
- f->fmt.pix.field = V4L2_FIELD_NONE;
- f->fmt.pix.pixelformat = q_data->fourcc;
- f->fmt.pix.width = q_data->width;
- f->fmt.pix.height = q_data->height;
- f->fmt.pix.bytesperline = q_data->bytesperline;
-
- f->fmt.pix.sizeimage = q_data->sizeimage;
- f->fmt.pix.colorspace = ctx->colorspace;
-
- return 0;
-}
-
-static int coda_try_fmt(struct coda_ctx *ctx, struct coda_codec *codec,
- struct v4l2_format *f)
-{
- struct coda_dev *dev = ctx->dev;
- struct coda_q_data *q_data;
- unsigned int max_w, max_h;
- enum v4l2_field field;
-
- field = f->fmt.pix.field;
- if (field == V4L2_FIELD_ANY)
- field = V4L2_FIELD_NONE;
- else if (V4L2_FIELD_NONE != field)
- return -EINVAL;
-
- /* V4L2 specification suggests the driver corrects the format struct
- * if any of the dimensions is unsupported */
- f->fmt.pix.field = field;
-
- coda_get_max_dimensions(dev, codec, &max_w, &max_h);
- v4l_bound_align_image(&f->fmt.pix.width, MIN_W, max_w, W_ALIGN,
- &f->fmt.pix.height, MIN_H, max_h, H_ALIGN,
- S_ALIGN);
-
- switch (f->fmt.pix.pixelformat) {
- case V4L2_PIX_FMT_YUV420:
- case V4L2_PIX_FMT_YVU420:
- case V4L2_PIX_FMT_H264:
- case V4L2_PIX_FMT_MPEG4:
- case V4L2_PIX_FMT_JPEG:
- break;
- default:
- q_data = get_q_data(ctx, f->type);
- if (!q_data)
- return -EINVAL;
- f->fmt.pix.pixelformat = q_data->fourcc;
- }
-
- switch (f->fmt.pix.pixelformat) {
- case V4L2_PIX_FMT_YUV420:
- case V4L2_PIX_FMT_YVU420:
- /* Frame stride must be multiple of 8, but 16 for h.264 */
- f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 16);
- f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
- f->fmt.pix.height * 3 / 2;
- break;
- case V4L2_PIX_FMT_H264:
- case V4L2_PIX_FMT_MPEG4:
- case V4L2_PIX_FMT_JPEG:
- f->fmt.pix.bytesperline = 0;
- f->fmt.pix.sizeimage = CODA_MAX_FRAME_SIZE;
- break;
- default:
- BUG();
- }
-
- return 0;
-}
-
-static int coda_try_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct coda_ctx *ctx = fh_to_ctx(priv);
- struct coda_codec *codec;
- struct vb2_queue *src_vq;
- int ret;
-
- /*
- * If the source format is already fixed, try to find a codec that
- * converts to the given destination format
- */
- src_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
- if (vb2_is_streaming(src_vq)) {
- struct coda_q_data *q_data_src;
-
- q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
- codec = coda_find_codec(ctx->dev, q_data_src->fourcc,
- f->fmt.pix.pixelformat);
- if (!codec)
- return -EINVAL;
- } else {
- /* Otherwise determine codec by encoded format, if possible */
- codec = coda_find_codec(ctx->dev, V4L2_PIX_FMT_YUV420,
- f->fmt.pix.pixelformat);
- }
-
- f->fmt.pix.colorspace = ctx->colorspace;
-
- ret = coda_try_fmt(ctx, codec, f);
- if (ret < 0)
- return ret;
-
- /* The h.264 decoder only returns complete 16x16 macroblocks */
- if (codec && codec->src_fourcc == V4L2_PIX_FMT_H264) {
- f->fmt.pix.width = f->fmt.pix.width;
- f->fmt.pix.height = round_up(f->fmt.pix.height, 16);
- f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 16);
- f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
- f->fmt.pix.height * 3 / 2;
- }
-
- return 0;
-}
-
-static int coda_try_fmt_vid_out(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct coda_ctx *ctx = fh_to_ctx(priv);
- struct coda_codec *codec;
-
- /* Determine codec by encoded format, returns NULL if raw or invalid */
- codec = coda_find_codec(ctx->dev, f->fmt.pix.pixelformat,
- V4L2_PIX_FMT_YUV420);
-
- if (!f->fmt.pix.colorspace)
- f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
-
- return coda_try_fmt(ctx, codec, f);
-}
-
-static int coda_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f)
-{
- struct coda_q_data *q_data;
- struct vb2_queue *vq;
-
- vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
- if (!vq)
- return -EINVAL;
-
- q_data = get_q_data(ctx, f->type);
- if (!q_data)
- return -EINVAL;
-
- if (vb2_is_busy(vq)) {
- v4l2_err(&ctx->dev->v4l2_dev, "%s queue busy\n", __func__);
- return -EBUSY;
- }
-
- q_data->fourcc = f->fmt.pix.pixelformat;
- q_data->width = f->fmt.pix.width;
- q_data->height = f->fmt.pix.height;
- q_data->bytesperline = f->fmt.pix.bytesperline;
- q_data->sizeimage = f->fmt.pix.sizeimage;
- q_data->rect.left = 0;
- q_data->rect.top = 0;
- q_data->rect.width = f->fmt.pix.width;
- q_data->rect.height = f->fmt.pix.height;
-
- v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
- "Setting format for type %d, wxh: %dx%d, fmt: %d\n",
- f->type, q_data->width, q_data->height, q_data->fourcc);
-
- return 0;
-}
-
-static int coda_s_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct coda_ctx *ctx = fh_to_ctx(priv);
- int ret;
-
- ret = coda_try_fmt_vid_cap(file, priv, f);
- if (ret)
- return ret;
-
- return coda_s_fmt(ctx, f);
-}
-
-static int coda_s_fmt_vid_out(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct coda_ctx *ctx = fh_to_ctx(priv);
- int ret;
-
- ret = coda_try_fmt_vid_out(file, priv, f);
- if (ret)
- return ret;
-
- ret = coda_s_fmt(ctx, f);
- if (ret)
- ctx->colorspace = f->fmt.pix.colorspace;
-
- return ret;
-}
-
-static int coda_qbuf(struct file *file, void *priv,
- struct v4l2_buffer *buf)
-{
- struct coda_ctx *ctx = fh_to_ctx(priv);
-
- return v4l2_m2m_qbuf(file, ctx->fh.m2m_ctx, buf);
-}
-
-static bool coda_buf_is_end_of_stream(struct coda_ctx *ctx,
- struct v4l2_buffer *buf)
-{
- struct vb2_queue *src_vq;
-
- src_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
-
- return ((ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG) &&
- (buf->sequence == (ctx->qsequence - 1)));
-}
-
-static int coda_dqbuf(struct file *file, void *priv,
- struct v4l2_buffer *buf)
-{
- struct coda_ctx *ctx = fh_to_ctx(priv);
- int ret;
-
- ret = v4l2_m2m_dqbuf(file, ctx->fh.m2m_ctx, buf);
-
- /* If this is the last capture buffer, emit an end-of-stream event */
- if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
- coda_buf_is_end_of_stream(ctx, buf)) {
- const struct v4l2_event eos_event = {
- .type = V4L2_EVENT_EOS
- };
-
- v4l2_event_queue_fh(&ctx->fh, &eos_event);
- }
-
- return ret;
-}
-
-static int coda_g_selection(struct file *file, void *fh,
- struct v4l2_selection *s)
-{
- struct coda_ctx *ctx = fh_to_ctx(fh);
- struct coda_q_data *q_data;
- struct v4l2_rect r, *rsel;
-
- q_data = get_q_data(ctx, s->type);
- if (!q_data)
- return -EINVAL;
-
- r.left = 0;
- r.top = 0;
- r.width = q_data->width;
- r.height = q_data->height;
- rsel = &q_data->rect;
-
- switch (s->target) {
- case V4L2_SEL_TGT_CROP_DEFAULT:
- case V4L2_SEL_TGT_CROP_BOUNDS:
- rsel = &r;
- /* fallthrough */
- case V4L2_SEL_TGT_CROP:
- if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
- return -EINVAL;
- break;
- case V4L2_SEL_TGT_COMPOSE_BOUNDS:
- case V4L2_SEL_TGT_COMPOSE_PADDED:
- rsel = &r;
- /* fallthrough */
- case V4L2_SEL_TGT_COMPOSE:
- case V4L2_SEL_TGT_COMPOSE_DEFAULT:
- if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
- break;
- default:
- return -EINVAL;
- }
-
- s->r = *rsel;
-
- return 0;
-}
-
-static int coda_try_decoder_cmd(struct file *file, void *fh,
- struct v4l2_decoder_cmd *dc)
-{
- if (dc->cmd != V4L2_DEC_CMD_STOP)
- return -EINVAL;
-
- if (dc->flags & V4L2_DEC_CMD_STOP_TO_BLACK)
- return -EINVAL;
-
- if (!(dc->flags & V4L2_DEC_CMD_STOP_IMMEDIATELY) && (dc->stop.pts != 0))
- return -EINVAL;
-
- return 0;
-}
-
-static int coda_decoder_cmd(struct file *file, void *fh,
- struct v4l2_decoder_cmd *dc)
-{
- struct coda_ctx *ctx = fh_to_ctx(fh);
- struct coda_dev *dev = ctx->dev;
- int ret;
-
- ret = coda_try_decoder_cmd(file, fh, dc);
- if (ret < 0)
- return ret;
-
- /* Ignore decoder stop command silently in encoder context */
- if (ctx->inst_type != CODA_INST_DECODER)
- return 0;
-
- /* Set the stream-end flag on this context */
- ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG;
-
- if ((dev->devtype->product == CODA_960) &&
- coda_isbusy(dev) &&
- (ctx->idx == coda_read(dev, CODA_REG_BIT_RUN_INDEX))) {
- /* If this context is currently running, update the hardware flag */
- coda_write(dev, ctx->bit_stream_param, CODA_REG_BIT_BIT_STREAM_PARAM);
- }
- ctx->hold = false;
- v4l2_m2m_try_schedule(ctx->fh.m2m_ctx);
-
- return 0;
-}
-
-static int coda_subscribe_event(struct v4l2_fh *fh,
- const struct v4l2_event_subscription *sub)
-{
- switch (sub->type) {
- case V4L2_EVENT_EOS:
- return v4l2_event_subscribe(fh, sub, 0, NULL);
- default:
- return v4l2_ctrl_subscribe_event(fh, sub);
- }
-}
-
-static const struct v4l2_ioctl_ops coda_ioctl_ops = {
- .vidioc_querycap = coda_querycap,
-
- .vidioc_enum_fmt_vid_cap = coda_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = coda_g_fmt,
- .vidioc_try_fmt_vid_cap = coda_try_fmt_vid_cap,
- .vidioc_s_fmt_vid_cap = coda_s_fmt_vid_cap,
-
- .vidioc_enum_fmt_vid_out = coda_enum_fmt_vid_out,
- .vidioc_g_fmt_vid_out = coda_g_fmt,
- .vidioc_try_fmt_vid_out = coda_try_fmt_vid_out,
- .vidioc_s_fmt_vid_out = coda_s_fmt_vid_out,
-
- .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
- .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
-
- .vidioc_qbuf = coda_qbuf,
- .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
- .vidioc_dqbuf = coda_dqbuf,
- .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
-
- .vidioc_streamon = v4l2_m2m_ioctl_streamon,
- .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
-
- .vidioc_g_selection = coda_g_selection,
-
- .vidioc_try_decoder_cmd = coda_try_decoder_cmd,
- .vidioc_decoder_cmd = coda_decoder_cmd,
-
- .vidioc_subscribe_event = coda_subscribe_event,
- .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
-};
-
-static int coda_start_decoding(struct coda_ctx *ctx);
-
-static inline int coda_get_bitstream_payload(struct coda_ctx *ctx)
-{
- return kfifo_len(&ctx->bitstream_fifo);
-}
-
-static void coda_kfifo_sync_from_device(struct coda_ctx *ctx)
-{
- struct __kfifo *kfifo = &ctx->bitstream_fifo.kfifo;
- struct coda_dev *dev = ctx->dev;
- u32 rd_ptr;
-
- rd_ptr = coda_read(dev, CODA_REG_BIT_RD_PTR(ctx->reg_idx));
- kfifo->out = (kfifo->in & ~kfifo->mask) |
- (rd_ptr - ctx->bitstream.paddr);
- if (kfifo->out > kfifo->in)
- kfifo->out -= kfifo->mask + 1;
-}
-
-static void coda_kfifo_sync_to_device_full(struct coda_ctx *ctx)
-{
- struct __kfifo *kfifo = &ctx->bitstream_fifo.kfifo;
- struct coda_dev *dev = ctx->dev;
- u32 rd_ptr, wr_ptr;
-
- rd_ptr = ctx->bitstream.paddr + (kfifo->out & kfifo->mask);
- coda_write(dev, rd_ptr, CODA_REG_BIT_RD_PTR(ctx->reg_idx));
- wr_ptr = ctx->bitstream.paddr + (kfifo->in & kfifo->mask);
- coda_write(dev, wr_ptr, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
-}
-
-static void coda_kfifo_sync_to_device_write(struct coda_ctx *ctx)
-{
- struct __kfifo *kfifo = &ctx->bitstream_fifo.kfifo;
- struct coda_dev *dev = ctx->dev;
- u32 wr_ptr;
-
- wr_ptr = ctx->bitstream.paddr + (kfifo->in & kfifo->mask);
- coda_write(dev, wr_ptr, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
-}
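
The three helpers above keep two views of one ring buffer consistent: the kfifo's free-running in/out counters on the CPU side, and the CODA's read/write pointers, which are bus addresses inside ctx->bitstream. Syncing from the device rebuilds the free-running 'out' counter by combining the high bits of 'in' with the device's byte offset, then subtracting one ring size if that would place 'out' ahead of 'in' (the device pointer is still in the previous wrap). A standalone sketch of that arithmetic with plain integers and an example 0x1000-byte ring:

#include <assert.h>
#include <stdint.h>

#define RING_SIZE 0x1000u /* example size; must be a power of two */
#define RING_MASK (RING_SIZE - 1)

/* Rebuild the free-running 'out' counter from the device's byte offset. */
static uint32_t sync_out_from_device(uint32_t in, uint32_t dev_offset)
{
	uint32_t out = (in & ~RING_MASK) | (dev_offset & RING_MASK);

	if (out > in) /* device is still in the previous wrap */
		out -= RING_SIZE;
	return out;
}

/* Map a free-running counter back to the device's byte offset. */
static uint32_t dev_offset_from_counter(uint32_t counter)
{
	return counter & RING_MASK;
}

int main(void)
{
	/* CPU wrote 0x2340 bytes in total; device has read up to offset 0xfa0. */
	uint32_t out = sync_out_from_device(0x2340, 0x0fa0);

	assert(out == 0x1fa0);                            /* one wrap behind 'in' */
	assert(dev_offset_from_counter(0x2340) == 0x340); /* write-pointer offset */
	return 0;
}
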
-
-static int coda_bitstream_queue(struct coda_ctx *ctx, struct vb2_buffer *src_buf)
-{
- u32 src_size = vb2_get_plane_payload(src_buf, 0);
- u32 n;
-
- n = kfifo_in(&ctx->bitstream_fifo, vb2_plane_vaddr(src_buf, 0), src_size);
- if (n < src_size)
- return -ENOSPC;
-
- dma_sync_single_for_device(&ctx->dev->plat_dev->dev, ctx->bitstream.paddr,
- ctx->bitstream.size, DMA_TO_DEVICE);
-
- src_buf->v4l2_buf.sequence = ctx->qsequence++;
-
- return 0;
-}
-
-static bool coda_bitstream_try_queue(struct coda_ctx *ctx,
- struct vb2_buffer *src_buf)
-{
- int ret;
-
- if (coda_get_bitstream_payload(ctx) +
- vb2_get_plane_payload(src_buf, 0) + 512 >= ctx->bitstream.size)
- return false;
-
- if (vb2_plane_vaddr(src_buf, 0) == NULL) {
- v4l2_err(&ctx->dev->v4l2_dev, "trying to queue empty buffer\n");
- return true;
- }
-
- ret = coda_bitstream_queue(ctx, src_buf);
- if (ret < 0) {
- v4l2_err(&ctx->dev->v4l2_dev, "bitstream buffer overflow\n");
- return false;
- }
- /* Sync write pointer to device */
- if (ctx == v4l2_m2m_get_curr_priv(ctx->dev->m2m_dev))
- coda_kfifo_sync_to_device_write(ctx);
-
- ctx->hold = false;
-
- return true;
-}
-
-static void coda_fill_bitstream(struct coda_ctx *ctx)
-{
- struct vb2_buffer *src_buf;
- struct coda_timestamp *ts;
-
- while (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) > 0) {
- src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
-
- if (coda_bitstream_try_queue(ctx, src_buf)) {
- /*
- * Source buffer is queued in the bitstream ringbuffer;
- * queue the timestamp and mark source buffer as done
- */
- src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
-
- ts = kmalloc(sizeof(*ts), GFP_KERNEL);
- if (ts) {
- ts->sequence = src_buf->v4l2_buf.sequence;
- ts->timecode = src_buf->v4l2_buf.timecode;
- ts->timestamp = src_buf->v4l2_buf.timestamp;
- list_add_tail(&ts->list, &ctx->timestamp_list);
- }
-
- v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
- } else {
- break;
- }
- }
-}
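
Because coda_fill_bitstream() returns each source buffer to userspace as soon as its payload has been copied into the bitstream ring, the buffer's timing metadata has to be parked somewhere until the corresponding decoded frame appears; that is the job of the coda_timestamp list. A minimal standalone sketch of such a metadata side-queue, using a fixed-size FIFO instead of the kernel's linked list:

#include <assert.h>
#include <stdint.h>

struct frame_meta {
	uint32_t sequence;
	int64_t timestamp_us;
};

/* Tiny FIFO of per-frame metadata: pushed when a compressed buffer is
 * consumed, popped when the matching decoded frame is produced. */
static struct frame_meta queue[16];
static unsigned int q_head, q_tail;

static void meta_push(uint32_t seq, int64_t ts)
{
	queue[q_tail++ % 16] = (struct frame_meta){ seq, ts };
}

static struct frame_meta meta_pop(void)
{
	return queue[q_head++ % 16];
}

int main(void)
{
	meta_push(0, 1000); /* first compressed buffer queued */
	meta_push(1, 1033); /* second one */

	assert(meta_pop().timestamp_us == 1000); /* first decoded frame */
	assert(meta_pop().sequence == 1);        /* second decoded frame */
	return 0;
}
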
-
-static void coda_set_gdi_regs(struct coda_ctx *ctx)
-{
- struct gdi_tiled_map *tiled_map = &ctx->tiled_map;
- struct coda_dev *dev = ctx->dev;
- int i;
-
- for (i = 0; i < 16; i++)
- coda_write(dev, tiled_map->xy2ca_map[i],
- CODA9_GDI_XY2_CAS_0 + 4 * i);
- for (i = 0; i < 4; i++)
- coda_write(dev, tiled_map->xy2ba_map[i],
- CODA9_GDI_XY2_BA_0 + 4 * i);
- for (i = 0; i < 16; i++)
- coda_write(dev, tiled_map->xy2ra_map[i],
- CODA9_GDI_XY2_RAS_0 + 4 * i);
- coda_write(dev, tiled_map->xy2rbc_config, CODA9_GDI_XY2_RBC_CONFIG);
- for (i = 0; i < 32; i++)
- coda_write(dev, tiled_map->rbc2axi_map[i],
- CODA9_GDI_RBC2_AXI_0 + 4 * i);
-}
-
-/*
- * Mem-to-mem operations.
- */
-static int coda_prepare_decode(struct coda_ctx *ctx)
-{
- struct vb2_buffer *dst_buf;
- struct coda_dev *dev = ctx->dev;
- struct coda_q_data *q_data_dst;
- u32 stridey, height;
- u32 picture_y, picture_cb, picture_cr;
-
- dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
- q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
-
- if (ctx->params.rot_mode & CODA_ROT_90) {
- stridey = q_data_dst->height;
- height = q_data_dst->width;
- } else {
- stridey = q_data_dst->width;
- height = q_data_dst->height;
- }
-
- /* Try to copy source buffer contents into the bitstream ringbuffer */
- mutex_lock(&ctx->bitstream_mutex);
- coda_fill_bitstream(ctx);
- mutex_unlock(&ctx->bitstream_mutex);
-
- if (coda_get_bitstream_payload(ctx) < 512 &&
- (!(ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG))) {
- v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
- "bitstream payload: %d, skipping\n",
- coda_get_bitstream_payload(ctx));
- v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx);
- return -EAGAIN;
- }
-
- /* Run coda_start_decoding (again) if not yet initialized */
- if (!ctx->initialized) {
- int ret = coda_start_decoding(ctx);
- if (ret < 0) {
- v4l2_err(&dev->v4l2_dev, "failed to start decoding\n");
- v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx);
- return -EAGAIN;
- } else {
- ctx->initialized = 1;
- }
- }
-
- if (dev->devtype->product == CODA_960)
- coda_set_gdi_regs(ctx);
-
- /* Set rotator output */
- picture_y = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
- if (q_data_dst->fourcc == V4L2_PIX_FMT_YVU420) {
- /* Switch Cr and Cb for YVU420 format */
- picture_cr = picture_y + stridey * height;
- picture_cb = picture_cr + stridey / 2 * height / 2;
- } else {
- picture_cb = picture_y + stridey * height;
- picture_cr = picture_cb + stridey / 2 * height / 2;
- }
-
- if (dev->devtype->product == CODA_960) {
- /*
- * The CODA960 seems to have an internal list of buffers with
- * 64 entries that includes the registered frame buffers as
- * well as the rotator buffer output.
- * ROT_INDEX needs to be < 0x40, but > ctx->num_internal_frames.
- */
- coda_write(dev, CODA_MAX_FRAMEBUFFERS + dst_buf->v4l2_buf.index,
- CODA9_CMD_DEC_PIC_ROT_INDEX);
- coda_write(dev, picture_y, CODA9_CMD_DEC_PIC_ROT_ADDR_Y);
- coda_write(dev, picture_cb, CODA9_CMD_DEC_PIC_ROT_ADDR_CB);
- coda_write(dev, picture_cr, CODA9_CMD_DEC_PIC_ROT_ADDR_CR);
- coda_write(dev, stridey, CODA9_CMD_DEC_PIC_ROT_STRIDE);
- } else {
- coda_write(dev, picture_y, CODA_CMD_DEC_PIC_ROT_ADDR_Y);
- coda_write(dev, picture_cb, CODA_CMD_DEC_PIC_ROT_ADDR_CB);
- coda_write(dev, picture_cr, CODA_CMD_DEC_PIC_ROT_ADDR_CR);
- coda_write(dev, stridey, CODA_CMD_DEC_PIC_ROT_STRIDE);
- }
- coda_write(dev, CODA_ROT_MIR_ENABLE | ctx->params.rot_mode,
- CODA_CMD_DEC_PIC_ROT_MODE);
-
- switch (dev->devtype->product) {
- case CODA_DX6:
- /* TBD */
- case CODA_7541:
- coda_write(dev, CODA_PRE_SCAN_EN, CODA_CMD_DEC_PIC_OPTION);
- break;
- case CODA_960:
- coda_write(dev, (1 << 10), CODA_CMD_DEC_PIC_OPTION); /* 'hardcode to use interrupt disable mode'? */
- break;
- }
-
- coda_write(dev, 0, CODA_CMD_DEC_PIC_SKIP_NUM);
-
- coda_write(dev, 0, CODA_CMD_DEC_PIC_BB_START);
- coda_write(dev, 0, CODA_CMD_DEC_PIC_START_BYTE);
-
- return 0;
-}
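
coda_prepare_decode() above points the rotator output at one contiguous YUV 4:2:0 buffer, so the chroma plane addresses are derived from the luma address: the first chroma plane starts stride*height bytes in, the second follows after another (stride/2)*(height/2) bytes, and the whole frame occupies stride*height*3/2 bytes (Cb and Cr simply swap places for YVU420). A small standalone check of that layout arithmetic for an example 720x576 frame:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t stride = 720, height = 576;
	uint32_t y = 0;                                 /* luma plane offset */
	uint32_t cb = y + stride * height;              /* first chroma plane */
	uint32_t cr = cb + (stride / 2) * (height / 2); /* second chroma plane */
	uint32_t total = cr + (stride / 2) * (height / 2);

	assert(cb == 414720);
	assert(cr == 414720 + 103680);            /* 518400 */
	assert(total == stride * height * 3 / 2); /* 622080 */
	return 0;
}
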
-
-static void coda_prepare_encode(struct coda_ctx *ctx)
-{
- struct coda_q_data *q_data_src, *q_data_dst;
- struct vb2_buffer *src_buf, *dst_buf;
- struct coda_dev *dev = ctx->dev;
- int force_ipicture;
- int quant_param = 0;
- u32 picture_y, picture_cb, picture_cr;
- u32 pic_stream_buffer_addr, pic_stream_buffer_size;
- u32 dst_fourcc;
-
- src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
- dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
- q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
- q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
- dst_fourcc = q_data_dst->fourcc;
-
- src_buf->v4l2_buf.sequence = ctx->osequence;
- dst_buf->v4l2_buf.sequence = ctx->osequence;
- ctx->osequence++;
-
- /*
- * Workaround coda firmware BUG that only marks the first
- * frame as IDR. This is a problem for some decoders that can't
- * recover when a frame is lost.
- */
- if (src_buf->v4l2_buf.sequence % ctx->params.gop_size) {
- src_buf->v4l2_buf.flags |= V4L2_BUF_FLAG_PFRAME;
- src_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_KEYFRAME;
- } else {
- src_buf->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME;
- src_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_PFRAME;
- }
-
- if (dev->devtype->product == CODA_960)
- coda_set_gdi_regs(ctx);
-
- /*
- * Copy headers at the beginning of the first frame for H.264 only.
- * In MPEG4 they are already copied by the coda.
- */
- if (src_buf->v4l2_buf.sequence == 0) {
- pic_stream_buffer_addr =
- vb2_dma_contig_plane_dma_addr(dst_buf, 0) +
- ctx->vpu_header_size[0] +
- ctx->vpu_header_size[1] +
- ctx->vpu_header_size[2];
- pic_stream_buffer_size = CODA_MAX_FRAME_SIZE -
- ctx->vpu_header_size[0] -
- ctx->vpu_header_size[1] -
- ctx->vpu_header_size[2];
- memcpy(vb2_plane_vaddr(dst_buf, 0),
- &ctx->vpu_header[0][0], ctx->vpu_header_size[0]);
- memcpy(vb2_plane_vaddr(dst_buf, 0) + ctx->vpu_header_size[0],
- &ctx->vpu_header[1][0], ctx->vpu_header_size[1]);
- memcpy(vb2_plane_vaddr(dst_buf, 0) + ctx->vpu_header_size[0] +
- ctx->vpu_header_size[1], &ctx->vpu_header[2][0],
- ctx->vpu_header_size[2]);
- } else {
- pic_stream_buffer_addr =
- vb2_dma_contig_plane_dma_addr(dst_buf, 0);
- pic_stream_buffer_size = CODA_MAX_FRAME_SIZE;
- }
-
- if (src_buf->v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME) {
- force_ipicture = 1;
- switch (dst_fourcc) {
- case V4L2_PIX_FMT_H264:
- quant_param = ctx->params.h264_intra_qp;
- break;
- case V4L2_PIX_FMT_MPEG4:
- quant_param = ctx->params.mpeg4_intra_qp;
- break;
- default:
- v4l2_warn(&ctx->dev->v4l2_dev,
- "cannot set intra qp, fmt not supported\n");
- break;
- }
- } else {
- force_ipicture = 0;
- switch (dst_fourcc) {
- case V4L2_PIX_FMT_H264:
- quant_param = ctx->params.h264_inter_qp;
- break;
- case V4L2_PIX_FMT_MPEG4:
- quant_param = ctx->params.mpeg4_inter_qp;
- break;
- default:
- v4l2_warn(&ctx->dev->v4l2_dev,
- "cannot set inter qp, fmt not supported\n");
- break;
- }
- }
-
- /* submit */
- coda_write(dev, CODA_ROT_MIR_ENABLE | ctx->params.rot_mode, CODA_CMD_ENC_PIC_ROT_MODE);
- coda_write(dev, quant_param, CODA_CMD_ENC_PIC_QS);
-
-
- picture_y = vb2_dma_contig_plane_dma_addr(src_buf, 0);
- switch (q_data_src->fourcc) {
- case V4L2_PIX_FMT_YVU420:
- /* Switch Cb and Cr for YVU420 format */
- picture_cr = picture_y + q_data_src->bytesperline *
- q_data_src->height;
- picture_cb = picture_cr + q_data_src->bytesperline / 2 *
- q_data_src->height / 2;
- break;
- case V4L2_PIX_FMT_YUV420:
- default:
- picture_cb = picture_y + q_data_src->bytesperline *
- q_data_src->height;
- picture_cr = picture_cb + q_data_src->bytesperline / 2 *
- q_data_src->height / 2;
- break;
- }
-
- if (dev->devtype->product == CODA_960) {
- coda_write(dev, 4/*FIXME: 0*/, CODA9_CMD_ENC_PIC_SRC_INDEX);
- coda_write(dev, q_data_src->width, CODA9_CMD_ENC_PIC_SRC_STRIDE);
- coda_write(dev, 0, CODA9_CMD_ENC_PIC_SUB_FRAME_SYNC);
-
- coda_write(dev, picture_y, CODA9_CMD_ENC_PIC_SRC_ADDR_Y);
- coda_write(dev, picture_cb, CODA9_CMD_ENC_PIC_SRC_ADDR_CB);
- coda_write(dev, picture_cr, CODA9_CMD_ENC_PIC_SRC_ADDR_CR);
- } else {
- coda_write(dev, picture_y, CODA_CMD_ENC_PIC_SRC_ADDR_Y);
- coda_write(dev, picture_cb, CODA_CMD_ENC_PIC_SRC_ADDR_CB);
- coda_write(dev, picture_cr, CODA_CMD_ENC_PIC_SRC_ADDR_CR);
- }
- coda_write(dev, force_ipicture << 1 & 0x2,
- CODA_CMD_ENC_PIC_OPTION);
-
- coda_write(dev, pic_stream_buffer_addr, CODA_CMD_ENC_PIC_BB_START);
- coda_write(dev, pic_stream_buffer_size / 1024,
- CODA_CMD_ENC_PIC_BB_SIZE);
-
- if (!ctx->streamon_out) {
- /* After streamoff on the output side, set the stream end flag */
- ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG;
- coda_write(dev, ctx->bit_stream_param, CODA_REG_BIT_BIT_STREAM_PARAM);
- }
-}
-
-static void coda_device_run(void *m2m_priv)
-{
- struct coda_ctx *ctx = m2m_priv;
- struct coda_dev *dev = ctx->dev;
-
- queue_work(dev->workqueue, &ctx->pic_run_work);
-}
-
-static void coda_free_framebuffers(struct coda_ctx *ctx);
-static void coda_free_context_buffers(struct coda_ctx *ctx);
-
-static void coda_seq_end_work(struct work_struct *work)
-{
- struct coda_ctx *ctx = container_of(work, struct coda_ctx, seq_end_work);
- struct coda_dev *dev = ctx->dev;
-
- mutex_lock(&ctx->buffer_mutex);
- mutex_lock(&dev->coda_mutex);
-
- v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
- "%d: %s: sent command 'SEQ_END' to coda\n", ctx->idx, __func__);
- if (coda_command_sync(ctx, CODA_COMMAND_SEQ_END)) {
- v4l2_err(&dev->v4l2_dev,
- "CODA_COMMAND_SEQ_END failed\n");
- }
-
- kfifo_init(&ctx->bitstream_fifo,
- ctx->bitstream.vaddr, ctx->bitstream.size);
-
- coda_free_framebuffers(ctx);
- coda_free_context_buffers(ctx);
-
- mutex_unlock(&dev->coda_mutex);
- mutex_unlock(&ctx->buffer_mutex);
-}
-
-static void coda_finish_decode(struct coda_ctx *ctx);
-static void coda_finish_encode(struct coda_ctx *ctx);
-
-static void coda_pic_run_work(struct work_struct *work)
-{
- struct coda_ctx *ctx = container_of(work, struct coda_ctx, pic_run_work);
- struct coda_dev *dev = ctx->dev;
- int ret;
-
- mutex_lock(&ctx->buffer_mutex);
- mutex_lock(&dev->coda_mutex);
-
- if (ctx->inst_type == CODA_INST_DECODER) {
- ret = coda_prepare_decode(ctx);
- if (ret < 0) {
- mutex_unlock(&dev->coda_mutex);
- mutex_unlock(&ctx->buffer_mutex);
- /* job_finish scheduled by prepare_decode */
- return;
- }
- } else {
- coda_prepare_encode(ctx);
- }
-
- if (dev->devtype->product != CODA_DX6)
- coda_write(dev, ctx->iram_info.axi_sram_use,
- CODA7_REG_BIT_AXI_SRAM_USE);
-
- if (ctx->inst_type == CODA_INST_DECODER)
- coda_kfifo_sync_to_device_full(ctx);
- coda_command_async(ctx, CODA_COMMAND_PIC_RUN);
-
- if (!wait_for_completion_timeout(&ctx->completion, msecs_to_jiffies(1000))) {
- dev_err(&dev->plat_dev->dev, "CODA PIC_RUN timeout\n");
-
- ctx->hold = true;
-
- coda_hw_reset(ctx);
- } else if (!ctx->aborting) {
- if (ctx->inst_type == CODA_INST_DECODER)
- coda_finish_decode(ctx);
- else
- coda_finish_encode(ctx);
- }
-
- if (ctx->aborting || (!ctx->streamon_cap && !ctx->streamon_out))
- queue_work(dev->workqueue, &ctx->seq_end_work);
-
- mutex_unlock(&dev->coda_mutex);
- mutex_unlock(&ctx->buffer_mutex);
-
- v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx);
-}
-
-static int coda_job_ready(void *m2m_priv)
-{
- struct coda_ctx *ctx = m2m_priv;
-
- /*
- * For both 'P' and 'key' frame cases 1 picture
- * and 1 frame are needed. In the decoder case,
- * the compressed frame can be in the bitstream.
- */
- if (!v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) &&
- ctx->inst_type != CODA_INST_DECODER) {
- v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
- "not ready: not enough video buffers.\n");
- return 0;
- }
-
- if (!v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx)) {
- v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
- "not ready: not enough video capture buffers.\n");
- return 0;
- }
-
- if (ctx->hold ||
- ((ctx->inst_type == CODA_INST_DECODER) &&
- (coda_get_bitstream_payload(ctx) < 512) &&
- !(ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG))) {
- v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
- "%d: not ready: not enough bitstream data.\n",
- ctx->idx);
- return 0;
- }
-
- if (ctx->aborting) {
- v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
- "not ready: aborting\n");
- return 0;
- }
-
- v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
- "job ready\n");
- return 1;
-}
-
-static void coda_job_abort(void *priv)
-{
- struct coda_ctx *ctx = priv;
-
- ctx->aborting = 1;
-
- v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
- "Aborting task\n");
-}
-
-static void coda_lock(void *m2m_priv)
-{
- struct coda_ctx *ctx = m2m_priv;
- struct coda_dev *pcdev = ctx->dev;
- mutex_lock(&pcdev->dev_mutex);
-}
-
-static void coda_unlock(void *m2m_priv)
-{
- struct coda_ctx *ctx = m2m_priv;
- struct coda_dev *pcdev = ctx->dev;
- mutex_unlock(&pcdev->dev_mutex);
-}
-
-static struct v4l2_m2m_ops coda_m2m_ops = {
- .device_run = coda_device_run,
- .job_ready = coda_job_ready,
- .job_abort = coda_job_abort,
- .lock = coda_lock,
- .unlock = coda_unlock,
-};
-
-static void coda_set_tiled_map_type(struct coda_ctx *ctx, int tiled_map_type)
-{
- struct gdi_tiled_map *tiled_map = &ctx->tiled_map;
- int luma_map, chro_map, i;
-
- memset(tiled_map, 0, sizeof(*tiled_map));
-
- luma_map = 64;
- chro_map = 64;
- tiled_map->map_type = tiled_map_type;
- for (i = 0; i < 16; i++)
- tiled_map->xy2ca_map[i] = luma_map << 8 | chro_map;
- for (i = 0; i < 4; i++)
- tiled_map->xy2ba_map[i] = luma_map << 8 | chro_map;
- for (i = 0; i < 16; i++)
- tiled_map->xy2ra_map[i] = luma_map << 8 | chro_map;
-
- if (tiled_map_type == GDI_LINEAR_FRAME_MAP) {
- tiled_map->xy2rbc_config = 0;
- } else {
- dev_err(&ctx->dev->plat_dev->dev, "invalid map type: %d\n",
- tiled_map_type);
- return;
- }
-}
-
-static void set_default_params(struct coda_ctx *ctx)
-{
- int max_w;
- int max_h;
-
- ctx->codec = &ctx->dev->devtype->codecs[0];
- max_w = ctx->codec->max_w;
- max_h = ctx->codec->max_h;
-
- ctx->params.codec_mode = CODA_MODE_INVALID;
- ctx->colorspace = V4L2_COLORSPACE_REC709;
- ctx->params.framerate = 30;
- ctx->aborting = 0;
-
- /* Default formats for output and input queues */
- ctx->q_data[V4L2_M2M_SRC].fourcc = ctx->codec->src_fourcc;
- ctx->q_data[V4L2_M2M_DST].fourcc = ctx->codec->dst_fourcc;
- ctx->q_data[V4L2_M2M_SRC].width = max_w;
- ctx->q_data[V4L2_M2M_SRC].height = max_h;
- ctx->q_data[V4L2_M2M_SRC].bytesperline = max_w;
- ctx->q_data[V4L2_M2M_SRC].sizeimage = (max_w * max_h * 3) / 2;
- ctx->q_data[V4L2_M2M_DST].width = max_w;
- ctx->q_data[V4L2_M2M_DST].height = max_h;
- ctx->q_data[V4L2_M2M_DST].bytesperline = 0;
- ctx->q_data[V4L2_M2M_DST].sizeimage = CODA_MAX_FRAME_SIZE;
- ctx->q_data[V4L2_M2M_SRC].rect.width = max_w;
- ctx->q_data[V4L2_M2M_SRC].rect.height = max_h;
- ctx->q_data[V4L2_M2M_DST].rect.width = max_w;
- ctx->q_data[V4L2_M2M_DST].rect.height = max_h;
-
- if (ctx->dev->devtype->product == CODA_960)
- coda_set_tiled_map_type(ctx, GDI_LINEAR_FRAME_MAP);
-}
-
-/*
- * Queue operations
- */
-static int coda_queue_setup(struct vb2_queue *vq,
- const struct v4l2_format *fmt,
- unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], void *alloc_ctxs[])
-{
- struct coda_ctx *ctx = vb2_get_drv_priv(vq);
- struct coda_q_data *q_data;
- unsigned int size;
-
- q_data = get_q_data(ctx, vq->type);
- size = q_data->sizeimage;
-
- *nplanes = 1;
- sizes[0] = size;
-
- alloc_ctxs[0] = ctx->dev->alloc_ctx;
-
- v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
- "get %d buffer(s) of size %d each.\n", *nbuffers, size);
-
- return 0;
-}
-
-static int coda_buf_prepare(struct vb2_buffer *vb)
-{
- struct coda_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
- struct coda_q_data *q_data;
-
- q_data = get_q_data(ctx, vb->vb2_queue->type);
-
- if (vb2_plane_size(vb, 0) < q_data->sizeimage) {
- v4l2_warn(&ctx->dev->v4l2_dev,
- "%s data will not fit into plane (%lu < %lu)\n",
- __func__, vb2_plane_size(vb, 0),
- (long)q_data->sizeimage);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void coda_buf_queue(struct vb2_buffer *vb)
-{
- struct coda_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
- struct coda_dev *dev = ctx->dev;
- struct coda_q_data *q_data;
-
- q_data = get_q_data(ctx, vb->vb2_queue->type);
-
- /*
- * In the decoder case, immediately try to copy the buffer into the
- * bitstream ringbuffer and mark it as ready to be dequeued.
- */
- if (q_data->fourcc == V4L2_PIX_FMT_H264 &&
- vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
- /*
- * For backwards compatibility, queuing an empty buffer marks
- * the stream end
- */
- if (vb2_get_plane_payload(vb, 0) == 0) {
- ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG;
- if ((dev->devtype->product == CODA_960) &&
- coda_isbusy(dev) &&
- (ctx->idx == coda_read(dev, CODA_REG_BIT_RUN_INDEX))) {
- /* if this decoder instance is running, set the stream end flag */
- coda_write(dev, ctx->bit_stream_param, CODA_REG_BIT_BIT_STREAM_PARAM);
- }
- }
- mutex_lock(&ctx->bitstream_mutex);
- v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
- coda_fill_bitstream(ctx);
- mutex_unlock(&ctx->bitstream_mutex);
- } else {
- v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
- }
-}
-
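-/*
- * Write one 32-bit word into the parameter buffer shared with the BIT
- * processor. On CODA7/CODA9 adjacent words are swapped (index ^ 1),
- * presumably because the firmware fetches the parameter buffer in
- * 64-bit units; CODA_DX6 uses plain 32-bit ordering.
- */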
-static void coda_parabuf_write(struct coda_ctx *ctx, int index, u32 value)
-{
- struct coda_dev *dev = ctx->dev;
- u32 *p = ctx->parabuf.vaddr;
-
- if (dev->devtype->product == CODA_DX6)
- p[index] = value;
- else
- p[index ^ 1] = value;
-}
-
-static int coda_alloc_aux_buf(struct coda_dev *dev,
- struct coda_aux_buf *buf, size_t size,
- const char *name, struct dentry *parent)
-{
- buf->vaddr = dma_alloc_coherent(&dev->plat_dev->dev, size, &buf->paddr,
- GFP_KERNEL);
- if (!buf->vaddr)
- return -ENOMEM;
-
- buf->size = size;
-
- if (name && parent) {
- buf->blob.data = buf->vaddr;
- buf->blob.size = size;
- buf->dentry = debugfs_create_blob(name, 0644, parent, &buf->blob);
- if (!buf->dentry)
- dev_warn(&dev->plat_dev->dev,
- "failed to create debugfs entry %s\n", name);
- }
-
- return 0;
-}
-
-static inline int coda_alloc_context_buf(struct coda_ctx *ctx,
- struct coda_aux_buf *buf, size_t size,
- const char *name)
-{
- return coda_alloc_aux_buf(ctx->dev, buf, size, name, ctx->debugfs_entry);
-}
-
-static void coda_free_aux_buf(struct coda_dev *dev,
- struct coda_aux_buf *buf)
-{
- if (buf->vaddr) {
- dma_free_coherent(&dev->plat_dev->dev, buf->size,
- buf->vaddr, buf->paddr);
- buf->vaddr = NULL;
- buf->size = 0;
- }
- debugfs_remove(buf->dentry);
-}
-
-static void coda_free_framebuffers(struct coda_ctx *ctx)
-{
- int i;
-
- for (i = 0; i < CODA_MAX_FRAMEBUFFERS; i++)
- coda_free_aux_buf(ctx->dev, &ctx->internal_frames[i]);
-}
-
-static int coda_alloc_framebuffers(struct coda_ctx *ctx, struct coda_q_data *q_data, u32 fourcc)
-{
- struct coda_dev *dev = ctx->dev;
- int width, height;
- dma_addr_t paddr;
- int ysize;
- int ret;
- int i;
-
- if (ctx->codec && (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 ||
- ctx->codec->dst_fourcc == V4L2_PIX_FMT_H264)) {
- width = round_up(q_data->width, 16);
- height = round_up(q_data->height, 16);
- } else {
- width = round_up(q_data->width, 8);
- height = q_data->height;
- }
- ysize = width * height;
-
- /* Allocate frame buffers */
- for (i = 0; i < ctx->num_internal_frames; i++) {
- size_t size;
- char *name;
-
- size = ysize + ysize / 2;
- if (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 &&
- dev->devtype->product != CODA_DX6)
- size += ysize / 4;
- name = kasprintf(GFP_KERNEL, "fb%d", i);
- ret = coda_alloc_context_buf(ctx, &ctx->internal_frames[i],
- size, name);
- kfree(name);
- if (ret < 0) {
- coda_free_framebuffers(ctx);
- return ret;
- }
- }
-
- /* Register frame buffers in the parameter buffer */
- for (i = 0; i < ctx->num_internal_frames; i++) {
- paddr = ctx->internal_frames[i].paddr;
- coda_parabuf_write(ctx, i * 3 + 0, paddr); /* Y */
- coda_parabuf_write(ctx, i * 3 + 1, paddr + ysize); /* Cb */
- coda_parabuf_write(ctx, i * 3 + 2, paddr + ysize + ysize/4); /* Cr */
-
- /* mvcol buffer for h.264 */
- if (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 &&
- dev->devtype->product != CODA_DX6)
- coda_parabuf_write(ctx, 96 + i,
- ctx->internal_frames[i].paddr +
- ysize + ysize/4 + ysize/4);
- }
-
- /* mvcol buffer for mpeg4 */
- if ((dev->devtype->product != CODA_DX6) &&
- (ctx->codec->src_fourcc == V4L2_PIX_FMT_MPEG4))
- coda_parabuf_write(ctx, 97, ctx->internal_frames[i].paddr +
- ysize + ysize/4 + ysize/4);
-
- return 0;
-}
-
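-/*
- * Pad the concatenated H.264 headers to a multiple of 8 bytes by
- * appending a pre-computed filler NAL (coda_filler_nal) whose length is
- * looked up in coda_filler_size[], terminated by the RBSP stop bit.
- */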
-static int coda_h264_padding(int size, char *p)
-{
- int nal_size;
- int diff;
-
- diff = size - (size & ~0x7);
- if (diff == 0)
- return 0;
-
- nal_size = coda_filler_size[diff];
- memcpy(p, coda_filler_nal, nal_size);
-
- /* Add the RBSP stop bit and trailing bits at the end */
- *(p + nal_size - 1) = 0x80;
-
- return nal_size;
-}
-
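-/*
- * Simple bump allocator for the on-chip IRAM: sizes are rounded up to
- * 1 KiB and carved out of the remaining space, returning 0 (an invalid
- * address) once the region is exhausted.
- */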
-static phys_addr_t coda_iram_alloc(struct coda_iram_info *iram, size_t size)
-{
- phys_addr_t ret;
-
- size = round_up(size, 1024);
- if (size > iram->remaining)
- return 0;
- iram->remaining -= size;
-
- ret = iram->next_paddr;
- iram->next_paddr += size;
-
- return ret;
-}
-
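-/*
- * Distribute the available IRAM between the motion estimation search RAM
- * (encoder on CODA7541 only), the deblocking, bitstream, and intra
- * prediction working buffers, in that priority order, and record the
- * matching AXI SRAM use bits. If the IRAM runs out, the corresponding
- * use bits stay cleared and only a debug message is printed.
- */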
-static void coda_setup_iram(struct coda_ctx *ctx)
-{
- struct coda_iram_info *iram_info = &ctx->iram_info;
- struct coda_dev *dev = ctx->dev;
- int mb_width;
- int dbk_bits;
- int bit_bits;
- int ip_bits;
-
- memset(iram_info, 0, sizeof(*iram_info));
- iram_info->next_paddr = dev->iram.paddr;
- iram_info->remaining = dev->iram.size;
-
- switch (dev->devtype->product) {
- case CODA_7541:
- dbk_bits = CODA7_USE_HOST_DBK_ENABLE | CODA7_USE_DBK_ENABLE;
- bit_bits = CODA7_USE_HOST_BIT_ENABLE | CODA7_USE_BIT_ENABLE;
- ip_bits = CODA7_USE_HOST_IP_ENABLE | CODA7_USE_IP_ENABLE;
- break;
- case CODA_960:
- dbk_bits = CODA9_USE_HOST_DBK_ENABLE | CODA9_USE_DBK_ENABLE;
- bit_bits = CODA9_USE_HOST_BIT_ENABLE | CODA7_USE_BIT_ENABLE;
- ip_bits = CODA9_USE_HOST_IP_ENABLE | CODA7_USE_IP_ENABLE;
- break;
- default: /* CODA_DX6 */
- return;
- }
-
- if (ctx->inst_type == CODA_INST_ENCODER) {
- struct coda_q_data *q_data_src;
-
- q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
- mb_width = DIV_ROUND_UP(q_data_src->width, 16);
-
- /* Prioritize in case IRAM is too small for everything */
- if (dev->devtype->product == CODA_7541) {
- iram_info->search_ram_size = round_up(mb_width * 16 *
- 36 + 2048, 1024);
- iram_info->search_ram_paddr = coda_iram_alloc(iram_info,
- iram_info->search_ram_size);
- if (!iram_info->search_ram_paddr) {
- pr_err("IRAM is smaller than the search ram size\n");
- goto out;
- }
- iram_info->axi_sram_use |= CODA7_USE_HOST_ME_ENABLE |
- CODA7_USE_ME_ENABLE;
- }
-
- /* Only H.264BP and H.263P3 are considered */
- iram_info->buf_dbk_y_use = coda_iram_alloc(iram_info, 64 * mb_width);
- iram_info->buf_dbk_c_use = coda_iram_alloc(iram_info, 64 * mb_width);
- if (!iram_info->buf_dbk_c_use)
- goto out;
- iram_info->axi_sram_use |= dbk_bits;
-
- iram_info->buf_bit_use = coda_iram_alloc(iram_info, 128 * mb_width);
- if (!iram_info->buf_bit_use)
- goto out;
- iram_info->axi_sram_use |= bit_bits;
-
- iram_info->buf_ip_ac_dc_use = coda_iram_alloc(iram_info, 128 * mb_width);
- if (!iram_info->buf_ip_ac_dc_use)
- goto out;
- iram_info->axi_sram_use |= ip_bits;
-
- /* OVL and BTP disabled for encoder */
- } else if (ctx->inst_type == CODA_INST_DECODER) {
- struct coda_q_data *q_data_dst;
-
- q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
- mb_width = DIV_ROUND_UP(q_data_dst->width, 16);
-
- iram_info->buf_dbk_y_use = coda_iram_alloc(iram_info, 128 * mb_width);
- iram_info->buf_dbk_c_use = coda_iram_alloc(iram_info, 128 * mb_width);
- if (!iram_info->buf_dbk_c_use)
- goto out;
- iram_info->axi_sram_use |= dbk_bits;
-
- iram_info->buf_bit_use = coda_iram_alloc(iram_info, 128 * mb_width);
- if (!iram_info->buf_bit_use)
- goto out;
- iram_info->axi_sram_use |= bit_bits;
-
- iram_info->buf_ip_ac_dc_use = coda_iram_alloc(iram_info, 128 * mb_width);
- if (!iram_info->buf_ip_ac_dc_use)
- goto out;
- iram_info->axi_sram_use |= ip_bits;
-
- /* OVL and BTP unused as there is no VC1 support yet */
- }
-
-out:
- if (!(iram_info->axi_sram_use & CODA7_USE_HOST_IP_ENABLE))
- v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
- "IRAM smaller than needed\n");
-
- if (dev->devtype->product == CODA_7541) {
- /* TODO - Enabling these causes picture errors on CODA7541 */
- if (ctx->inst_type == CODA_INST_DECODER) {
- /* fw 1.4.50 */
- iram_info->axi_sram_use &= ~(CODA7_USE_HOST_IP_ENABLE |
- CODA7_USE_IP_ENABLE);
- } else {
- /* fw 13.4.29 */
- iram_info->axi_sram_use &= ~(CODA7_USE_HOST_IP_ENABLE |
- CODA7_USE_HOST_DBK_ENABLE |
- CODA7_USE_IP_ENABLE |
- CODA7_USE_DBK_ENABLE);
- }
- }
-}
-
-static void coda_free_context_buffers(struct coda_ctx *ctx)
-{
- struct coda_dev *dev = ctx->dev;
-
- coda_free_aux_buf(dev, &ctx->slicebuf);
- coda_free_aux_buf(dev, &ctx->psbuf);
- if (dev->devtype->product != CODA_DX6)
- coda_free_aux_buf(dev, &ctx->workbuf);
-}
-
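-/*
- * Allocate the per-context auxiliary buffers needed before SEQ_INIT:
- * a worst-case slice buffer for H.264, the PS buffer on CODA7541 and
- * the codec work buffer. CODA_DX6 returns early and uses the
- * device-global work buffer instead.
- */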
-static int coda_alloc_context_buffers(struct coda_ctx *ctx,
- struct coda_q_data *q_data)
-{
- struct coda_dev *dev = ctx->dev;
- size_t size;
- int ret;
-
- if (dev->devtype->product == CODA_DX6)
- return 0;
-
- if (ctx->psbuf.vaddr) {
- v4l2_err(&dev->v4l2_dev, "psmembuf still allocated\n");
- return -EBUSY;
- }
- if (ctx->slicebuf.vaddr) {
- v4l2_err(&dev->v4l2_dev, "slicebuf still allocated\n");
- return -EBUSY;
- }
- if (ctx->workbuf.vaddr) {
- v4l2_err(&dev->v4l2_dev, "context buffer still allocated\n");
- return -EBUSY;
- }
-
- if (q_data->fourcc == V4L2_PIX_FMT_H264) {
- /* worst case slice size */
- size = (DIV_ROUND_UP(q_data->width, 16) *
- DIV_ROUND_UP(q_data->height, 16)) * 3200 / 8 + 512;
- ret = coda_alloc_context_buf(ctx, &ctx->slicebuf, size, "slicebuf");
- if (ret < 0) {
- v4l2_err(&dev->v4l2_dev, "failed to allocate %d byte slice buffer\n",
- ctx->slicebuf.size);
- return ret;
- }
- }
-
- if (dev->devtype->product == CODA_7541) {
- ret = coda_alloc_context_buf(ctx, &ctx->psbuf, CODA7_PS_BUF_SIZE, "psbuf");
- if (ret < 0) {
- v4l2_err(&dev->v4l2_dev, "failed to allocate psmem buffer\n");
- goto err;
- }
- }
-
- size = dev->devtype->workbuf_size;
- if (dev->devtype->product == CODA_960 &&
- q_data->fourcc == V4L2_PIX_FMT_H264)
- size += CODA9_PS_SAVE_SIZE;
- ret = coda_alloc_context_buf(ctx, &ctx->workbuf, size, "workbuf");
- if (ret < 0) {
- v4l2_err(&dev->v4l2_dev, "failed to allocate %d byte context buffer\n",
- ctx->workbuf.size);
- goto err;
- }
-
- return 0;
-
-err:
- coda_free_context_buffers(ctx);
- return ret;
-}
-
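-/*
- * Run DEC_SEQ_INIT on the bitstream ring buffer to parse the stream
- * headers, validate the reported picture size against the capture queue,
- * read the H.264 crop rectangle, allocate the internal frame buffers the
- * firmware asked for, and register them with SET_FRAME_BUF.
- */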
-static int coda_start_decoding(struct coda_ctx *ctx)
-{
- struct coda_q_data *q_data_src, *q_data_dst;
- u32 bitstream_buf, bitstream_size;
- struct coda_dev *dev = ctx->dev;
- int width, height;
- u32 src_fourcc;
- u32 val;
- int ret;
-
- /* Start decoding */
- q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
- q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
- bitstream_buf = ctx->bitstream.paddr;
- bitstream_size = ctx->bitstream.size;
- src_fourcc = q_data_src->fourcc;
-
- coda_write(dev, ctx->parabuf.paddr, CODA_REG_BIT_PARA_BUF_ADDR);
-
- /* Update coda bitstream read and write pointers from kfifo */
- coda_kfifo_sync_to_device_full(ctx);
-
- ctx->display_idx = -1;
- ctx->frm_dis_flg = 0;
- coda_write(dev, 0, CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx));
-
- coda_write(dev, CODA_BIT_DEC_SEQ_INIT_ESCAPE,
- CODA_REG_BIT_BIT_STREAM_PARAM);
-
- coda_write(dev, bitstream_buf, CODA_CMD_DEC_SEQ_BB_START);
- coda_write(dev, bitstream_size / 1024, CODA_CMD_DEC_SEQ_BB_SIZE);
- val = 0;
- if ((dev->devtype->product == CODA_7541) ||
- (dev->devtype->product == CODA_960))
- val |= CODA_REORDER_ENABLE;
- coda_write(dev, val, CODA_CMD_DEC_SEQ_OPTION);
-
- ctx->params.codec_mode = ctx->codec->mode;
- if (dev->devtype->product == CODA_960 &&
- src_fourcc == V4L2_PIX_FMT_MPEG4)
- ctx->params.codec_mode_aux = CODA_MP4_AUX_MPEG4;
- else
- ctx->params.codec_mode_aux = 0;
- if (src_fourcc == V4L2_PIX_FMT_H264) {
- if (dev->devtype->product == CODA_7541) {
- coda_write(dev, ctx->psbuf.paddr,
- CODA_CMD_DEC_SEQ_PS_BB_START);
- coda_write(dev, (CODA7_PS_BUF_SIZE / 1024),
- CODA_CMD_DEC_SEQ_PS_BB_SIZE);
- }
- if (dev->devtype->product == CODA_960) {
- coda_write(dev, 0, CODA_CMD_DEC_SEQ_X264_MV_EN);
- coda_write(dev, 512, CODA_CMD_DEC_SEQ_SPP_CHUNK_SIZE);
- }
- }
- if (dev->devtype->product != CODA_960) {
- coda_write(dev, 0, CODA_CMD_DEC_SEQ_SRC_SIZE);
- }
-
- if (coda_command_sync(ctx, CODA_COMMAND_SEQ_INIT)) {
- v4l2_err(&dev->v4l2_dev, "CODA_COMMAND_SEQ_INIT timeout\n");
- coda_write(dev, 0, CODA_REG_BIT_BIT_STREAM_PARAM);
- return -ETIMEDOUT;
- }
-
- /* Update kfifo out pointer from coda bitstream read pointer */
- coda_kfifo_sync_from_device(ctx);
-
- coda_write(dev, 0, CODA_REG_BIT_BIT_STREAM_PARAM);
-
- if (coda_read(dev, CODA_RET_DEC_SEQ_SUCCESS) == 0) {
- v4l2_err(&dev->v4l2_dev,
- "CODA_COMMAND_SEQ_INIT failed, error code = %d\n",
- coda_read(dev, CODA_RET_DEC_SEQ_ERR_REASON));
- return -EAGAIN;
- }
-
- val = coda_read(dev, CODA_RET_DEC_SEQ_SRC_SIZE);
- if (dev->devtype->product == CODA_DX6) {
- width = (val >> CODADX6_PICWIDTH_OFFSET) & CODADX6_PICWIDTH_MASK;
- height = val & CODADX6_PICHEIGHT_MASK;
- } else {
- width = (val >> CODA7_PICWIDTH_OFFSET) & CODA7_PICWIDTH_MASK;
- height = val & CODA7_PICHEIGHT_MASK;
- }
-
- if (width > q_data_dst->width || height > q_data_dst->height) {
- v4l2_err(&dev->v4l2_dev, "stream is %dx%d, not %dx%d\n",
- width, height, q_data_dst->width, q_data_dst->height);
- return -EINVAL;
- }
-
- width = round_up(width, 16);
- height = round_up(height, 16);
-
- v4l2_dbg(1, coda_debug, &dev->v4l2_dev, "%s instance %d now: %dx%d\n",
- __func__, ctx->idx, width, height);
-
- ctx->num_internal_frames = coda_read(dev, CODA_RET_DEC_SEQ_FRAME_NEED);
- if (ctx->num_internal_frames > CODA_MAX_FRAMEBUFFERS) {
- v4l2_err(&dev->v4l2_dev,
- "not enough framebuffers to decode (%d < %d)\n",
- CODA_MAX_FRAMEBUFFERS, ctx->num_internal_frames);
- return -EINVAL;
- }
-
- if (src_fourcc == V4L2_PIX_FMT_H264) {
- u32 left_right;
- u32 top_bottom;
-
- left_right = coda_read(dev, CODA_RET_DEC_SEQ_CROP_LEFT_RIGHT);
- top_bottom = coda_read(dev, CODA_RET_DEC_SEQ_CROP_TOP_BOTTOM);
-
- q_data_dst->rect.left = (left_right >> 10) & 0x3ff;
- q_data_dst->rect.top = (top_bottom >> 10) & 0x3ff;
- q_data_dst->rect.width = width - q_data_dst->rect.left -
- (left_right & 0x3ff);
- q_data_dst->rect.height = height - q_data_dst->rect.top -
- (top_bottom & 0x3ff);
- }
-
- ret = coda_alloc_framebuffers(ctx, q_data_dst, src_fourcc);
- if (ret < 0)
- return ret;
-
- /* Tell the decoder how many frame buffers we allocated. */
- coda_write(dev, ctx->num_internal_frames, CODA_CMD_SET_FRAME_BUF_NUM);
- coda_write(dev, width, CODA_CMD_SET_FRAME_BUF_STRIDE);
-
- if (dev->devtype->product != CODA_DX6) {
- /* Set secondary AXI IRAM */
- coda_setup_iram(ctx);
-
- coda_write(dev, ctx->iram_info.buf_bit_use,
- CODA7_CMD_SET_FRAME_AXI_BIT_ADDR);
- coda_write(dev, ctx->iram_info.buf_ip_ac_dc_use,
- CODA7_CMD_SET_FRAME_AXI_IPACDC_ADDR);
- coda_write(dev, ctx->iram_info.buf_dbk_y_use,
- CODA7_CMD_SET_FRAME_AXI_DBKY_ADDR);
- coda_write(dev, ctx->iram_info.buf_dbk_c_use,
- CODA7_CMD_SET_FRAME_AXI_DBKC_ADDR);
- coda_write(dev, ctx->iram_info.buf_ovl_use,
- CODA7_CMD_SET_FRAME_AXI_OVL_ADDR);
- if (dev->devtype->product == CODA_960)
- coda_write(dev, ctx->iram_info.buf_btp_use,
- CODA9_CMD_SET_FRAME_AXI_BTP_ADDR);
- }
-
- if (dev->devtype->product == CODA_960) {
- coda_write(dev, -1, CODA9_CMD_SET_FRAME_DELAY);
-
- coda_write(dev, 0x20262024, CODA9_CMD_SET_FRAME_CACHE_SIZE);
- coda_write(dev, 2 << CODA9_CACHE_PAGEMERGE_OFFSET |
- 32 << CODA9_CACHE_LUMA_BUFFER_SIZE_OFFSET |
- 8 << CODA9_CACHE_CB_BUFFER_SIZE_OFFSET |
- 8 << CODA9_CACHE_CR_BUFFER_SIZE_OFFSET,
- CODA9_CMD_SET_FRAME_CACHE_CONFIG);
- }
-
- if (src_fourcc == V4L2_PIX_FMT_H264) {
- coda_write(dev, ctx->slicebuf.paddr,
- CODA_CMD_SET_FRAME_SLICE_BB_START);
- coda_write(dev, ctx->slicebuf.size / 1024,
- CODA_CMD_SET_FRAME_SLICE_BB_SIZE);
- }
-
- if (dev->devtype->product == CODA_7541) {
- int max_mb_x = 1920 / 16;
- int max_mb_y = 1088 / 16;
- int max_mb_num = max_mb_x * max_mb_y;
-
- coda_write(dev, max_mb_num << 16 | max_mb_x << 8 | max_mb_y,
- CODA7_CMD_SET_FRAME_MAX_DEC_SIZE);
- } else if (dev->devtype->product == CODA_960) {
- int max_mb_x = 1920 / 16;
- int max_mb_y = 1088 / 16;
- int max_mb_num = max_mb_x * max_mb_y;
-
- coda_write(dev, max_mb_num << 16 | max_mb_x << 8 | max_mb_y,
- CODA9_CMD_SET_FRAME_MAX_DEC_SIZE);
- }
-
- if (coda_command_sync(ctx, CODA_COMMAND_SET_FRAME_BUF)) {
- v4l2_err(&ctx->dev->v4l2_dev,
- "CODA_COMMAND_SET_FRAME_BUF timeout\n");
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
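-/*
- * Run ENCODE_HEADER to generate one header (SPS/PPS for H.264, VOS/VIS/
- * VOL for MPEG-4) into the destination buffer and copy it into the
- * intermediate vpu_header storage. On CODA960 the header size is found
- * by scanning backwards for the last non-zero byte in the first 64
- * bytes; on older cores it is taken from the write pointer delta.
- */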
-static int coda_encode_header(struct coda_ctx *ctx, struct vb2_buffer *buf,
- int header_code, u8 *header, int *size)
-{
- struct coda_dev *dev = ctx->dev;
- size_t bufsize;
- int ret;
- int i;
-
- if (dev->devtype->product == CODA_960)
- memset(vb2_plane_vaddr(buf, 0), 0, 64);
-
- coda_write(dev, vb2_dma_contig_plane_dma_addr(buf, 0),
- CODA_CMD_ENC_HEADER_BB_START);
- bufsize = vb2_plane_size(buf, 0);
- if (dev->devtype->product == CODA_960)
- bufsize /= 1024;
- coda_write(dev, bufsize, CODA_CMD_ENC_HEADER_BB_SIZE);
- coda_write(dev, header_code, CODA_CMD_ENC_HEADER_CODE);
- ret = coda_command_sync(ctx, CODA_COMMAND_ENCODE_HEADER);
- if (ret < 0) {
- v4l2_err(&dev->v4l2_dev, "CODA_COMMAND_ENCODE_HEADER timeout\n");
- return ret;
- }
-
- if (dev->devtype->product == CODA_960) {
- for (i = 63; i > 0; i--)
- if (((char *)vb2_plane_vaddr(buf, 0))[i] != 0)
- break;
- *size = i + 1;
- } else {
- *size = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->reg_idx)) -
- coda_read(dev, CODA_CMD_ENC_HEADER_BB_START);
- }
- memcpy(header, vb2_plane_vaddr(buf, 0), *size);
-
- return 0;
-}
-
-static int coda_start_encoding(struct coda_ctx *ctx);
-
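-/*
- * Streaming on the output queue determines the instance type (YUV input
- * means encoder, compressed input means decoder); the hardware sequence
- * is only started once both the output and capture queues are streaming.
- */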
-static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
-{
- struct coda_ctx *ctx = vb2_get_drv_priv(q);
- struct v4l2_device *v4l2_dev = &ctx->dev->v4l2_dev;
- struct coda_dev *dev = ctx->dev;
- struct coda_q_data *q_data_src, *q_data_dst;
- u32 dst_fourcc;
- int ret = 0;
-
- q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
- if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
- if (q_data_src->fourcc == V4L2_PIX_FMT_H264) {
- if (coda_get_bitstream_payload(ctx) < 512)
- return -EINVAL;
- } else {
- if (count < 1)
- return -EINVAL;
- }
-
- ctx->streamon_out = 1;
-
- if (coda_format_is_yuv(q_data_src->fourcc))
- ctx->inst_type = CODA_INST_ENCODER;
- else
- ctx->inst_type = CODA_INST_DECODER;
- } else {
- if (count < 1)
- return -EINVAL;
-
- ctx->streamon_cap = 1;
- }
-
- /* Don't start the coda unless both queues are on */
- if (!(ctx->streamon_out && ctx->streamon_cap))
- return 0;
-
- /* Allow decoder device_run with no new buffers queued */
- if (ctx->inst_type == CODA_INST_DECODER)
- v4l2_m2m_set_src_buffered(ctx->fh.m2m_ctx, true);
-
- ctx->gopcounter = ctx->params.gop_size - 1;
- q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
- dst_fourcc = q_data_dst->fourcc;
-
- ctx->codec = coda_find_codec(ctx->dev, q_data_src->fourcc,
- q_data_dst->fourcc);
- if (!ctx->codec) {
- v4l2_err(v4l2_dev, "couldn't tell instance type.\n");
- return -EINVAL;
- }
-
- /* Allocate per-instance buffers */
- ret = coda_alloc_context_buffers(ctx, q_data_src);
- if (ret < 0)
- return ret;
-
- if (ctx->inst_type == CODA_INST_DECODER) {
- mutex_lock(&dev->coda_mutex);
- ret = coda_start_decoding(ctx);
- mutex_unlock(&dev->coda_mutex);
- if (ret == -EAGAIN)
- return 0;
- else if (ret < 0)
- return ret;
- } else {
- ret = coda_start_encoding(ctx);
- }
-
- ctx->initialized = 1;
- return ret;
-}
-
-static int coda_start_encoding(struct coda_ctx *ctx)
-{
- struct coda_dev *dev = ctx->dev;
- struct v4l2_device *v4l2_dev = &dev->v4l2_dev;
- struct coda_q_data *q_data_src, *q_data_dst;
- u32 bitstream_buf, bitstream_size;
- struct vb2_buffer *buf;
- int gamma, ret, value;
- u32 dst_fourcc;
-
- q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
- q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
- dst_fourcc = q_data_dst->fourcc;
-
- buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
- bitstream_buf = vb2_dma_contig_plane_dma_addr(buf, 0);
- bitstream_size = q_data_dst->sizeimage;
-
- if (!coda_is_initialized(dev)) {
- v4l2_err(v4l2_dev, "coda is not initialized.\n");
- return -EFAULT;
- }
-
- mutex_lock(&dev->coda_mutex);
-
- coda_write(dev, ctx->parabuf.paddr, CODA_REG_BIT_PARA_BUF_ADDR);
- coda_write(dev, bitstream_buf, CODA_REG_BIT_RD_PTR(ctx->reg_idx));
- coda_write(dev, bitstream_buf, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
- switch (dev->devtype->product) {
- case CODA_DX6:
- coda_write(dev, CODADX6_STREAM_BUF_DYNALLOC_EN |
- CODADX6_STREAM_BUF_PIC_RESET, CODA_REG_BIT_STREAM_CTRL);
- break;
- case CODA_960:
- coda_write(dev, 0, CODA9_GDI_WPROT_RGN_EN);
- /* fallthrough */
- case CODA_7541:
- coda_write(dev, CODA7_STREAM_BUF_DYNALLOC_EN |
- CODA7_STREAM_BUF_PIC_RESET, CODA_REG_BIT_STREAM_CTRL);
- break;
- }
-
- value = coda_read(dev, CODA_REG_BIT_FRAME_MEM_CTRL);
- value &= ~(1 << 2 | 0x7 << 9);
- ctx->frame_mem_ctrl = value;
- coda_write(dev, value, CODA_REG_BIT_FRAME_MEM_CTRL);
-
- if (dev->devtype->product == CODA_DX6) {
- /* Configure the coda */
- coda_write(dev, dev->iram.paddr, CODADX6_REG_BIT_SEARCH_RAM_BASE_ADDR);
- }
-
- /* Could set rotation here if needed */
- switch (dev->devtype->product) {
- case CODA_DX6:
- value = (q_data_src->width & CODADX6_PICWIDTH_MASK) << CODADX6_PICWIDTH_OFFSET;
- value |= (q_data_src->height & CODADX6_PICHEIGHT_MASK) << CODA_PICHEIGHT_OFFSET;
- break;
- case CODA_7541:
- if (dst_fourcc == V4L2_PIX_FMT_H264) {
- value = (round_up(q_data_src->width, 16) &
- CODA7_PICWIDTH_MASK) << CODA7_PICWIDTH_OFFSET;
- value |= (round_up(q_data_src->height, 16) &
- CODA7_PICHEIGHT_MASK) << CODA_PICHEIGHT_OFFSET;
- break;
- }
- /* fallthrough */
- case CODA_960:
- value = (q_data_src->width & CODA7_PICWIDTH_MASK) << CODA7_PICWIDTH_OFFSET;
- value |= (q_data_src->height & CODA7_PICHEIGHT_MASK) << CODA_PICHEIGHT_OFFSET;
- }
- coda_write(dev, value, CODA_CMD_ENC_SEQ_SRC_SIZE);
- coda_write(dev, ctx->params.framerate,
- CODA_CMD_ENC_SEQ_SRC_F_RATE);
-
- ctx->params.codec_mode = ctx->codec->mode;
- switch (dst_fourcc) {
- case V4L2_PIX_FMT_MPEG4:
- if (dev->devtype->product == CODA_960)
- coda_write(dev, CODA9_STD_MPEG4, CODA_CMD_ENC_SEQ_COD_STD);
- else
- coda_write(dev, CODA_STD_MPEG4, CODA_CMD_ENC_SEQ_COD_STD);
- coda_write(dev, 0, CODA_CMD_ENC_SEQ_MP4_PARA);
- break;
- case V4L2_PIX_FMT_H264:
- if (dev->devtype->product == CODA_960)
- coda_write(dev, CODA9_STD_H264, CODA_CMD_ENC_SEQ_COD_STD);
- else
- coda_write(dev, CODA_STD_H264, CODA_CMD_ENC_SEQ_COD_STD);
- if (ctx->params.h264_deblk_enabled) {
- value = ((ctx->params.h264_deblk_alpha &
- CODA_264PARAM_DEBLKFILTEROFFSETALPHA_MASK) <<
- CODA_264PARAM_DEBLKFILTEROFFSETALPHA_OFFSET) |
- ((ctx->params.h264_deblk_beta &
- CODA_264PARAM_DEBLKFILTEROFFSETBETA_MASK) <<
- CODA_264PARAM_DEBLKFILTEROFFSETBETA_OFFSET);
- } else {
- value = 1 << CODA_264PARAM_DISABLEDEBLK_OFFSET;
- }
- coda_write(dev, value, CODA_CMD_ENC_SEQ_264_PARA);
- break;
- default:
- v4l2_err(v4l2_dev,
- "dst format (0x%08x) invalid.\n", dst_fourcc);
- ret = -EINVAL;
- goto out;
- }
-
- switch (ctx->params.slice_mode) {
- case V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE:
- value = 0;
- break;
- case V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB:
- value = (ctx->params.slice_max_mb & CODA_SLICING_SIZE_MASK) << CODA_SLICING_SIZE_OFFSET;
- value |= (1 & CODA_SLICING_UNIT_MASK) << CODA_SLICING_UNIT_OFFSET;
- value |= 1 & CODA_SLICING_MODE_MASK;
- break;
- case V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES:
- value = (ctx->params.slice_max_bits & CODA_SLICING_SIZE_MASK) << CODA_SLICING_SIZE_OFFSET;
- value |= (0 & CODA_SLICING_UNIT_MASK) << CODA_SLICING_UNIT_OFFSET;
- value |= 1 & CODA_SLICING_MODE_MASK;
- break;
- }
- coda_write(dev, value, CODA_CMD_ENC_SEQ_SLICE_MODE);
- value = ctx->params.gop_size & CODA_GOP_SIZE_MASK;
- coda_write(dev, value, CODA_CMD_ENC_SEQ_GOP_SIZE);
-
- if (ctx->params.bitrate) {
- /* Rate control enabled */
- value = (ctx->params.bitrate & CODA_RATECONTROL_BITRATE_MASK) << CODA_RATECONTROL_BITRATE_OFFSET;
- value |= 1 & CODA_RATECONTROL_ENABLE_MASK;
- if (dev->devtype->product == CODA_960)
- value |= BIT(31); /* disable autoskip */
- } else {
- value = 0;
- }
- coda_write(dev, value, CODA_CMD_ENC_SEQ_RC_PARA);
-
- coda_write(dev, 0, CODA_CMD_ENC_SEQ_RC_BUF_SIZE);
- coda_write(dev, ctx->params.intra_refresh,
- CODA_CMD_ENC_SEQ_INTRA_REFRESH);
-
- coda_write(dev, bitstream_buf, CODA_CMD_ENC_SEQ_BB_START);
- coda_write(dev, bitstream_size / 1024, CODA_CMD_ENC_SEQ_BB_SIZE);
-
-
- value = 0;
- if (dev->devtype->product == CODA_960)
- gamma = CODA9_DEFAULT_GAMMA;
- else
- gamma = CODA_DEFAULT_GAMMA;
- if (gamma > 0) {
- coda_write(dev, (gamma & CODA_GAMMA_MASK) << CODA_GAMMA_OFFSET,
- CODA_CMD_ENC_SEQ_RC_GAMMA);
- }
-
- if (ctx->params.h264_min_qp || ctx->params.h264_max_qp) {
- coda_write(dev,
- ctx->params.h264_min_qp << CODA_QPMIN_OFFSET |
- ctx->params.h264_max_qp << CODA_QPMAX_OFFSET,
- CODA_CMD_ENC_SEQ_RC_QP_MIN_MAX);
- }
- if (dev->devtype->product == CODA_960) {
- if (ctx->params.h264_max_qp)
- value |= 1 << CODA9_OPTION_RCQPMAX_OFFSET;
- if (CODA_DEFAULT_GAMMA > 0)
- value |= 1 << CODA9_OPTION_GAMMA_OFFSET;
- } else {
- if (CODA_DEFAULT_GAMMA > 0) {
- if (dev->devtype->product == CODA_DX6)
- value |= 1 << CODADX6_OPTION_GAMMA_OFFSET;
- else
- value |= 1 << CODA7_OPTION_GAMMA_OFFSET;
- }
- if (ctx->params.h264_min_qp)
- value |= 1 << CODA7_OPTION_RCQPMIN_OFFSET;
- if (ctx->params.h264_max_qp)
- value |= 1 << CODA7_OPTION_RCQPMAX_OFFSET;
- }
- coda_write(dev, value, CODA_CMD_ENC_SEQ_OPTION);
-
- coda_write(dev, 0, CODA_CMD_ENC_SEQ_RC_INTERVAL_MODE);
-
- coda_setup_iram(ctx);
-
- if (dst_fourcc == V4L2_PIX_FMT_H264) {
- switch (dev->devtype->product) {
- case CODA_DX6:
- value = FMO_SLICE_SAVE_BUF_SIZE << 7;
- coda_write(dev, value, CODADX6_CMD_ENC_SEQ_FMO);
- break;
- case CODA_7541:
- coda_write(dev, ctx->iram_info.search_ram_paddr,
- CODA7_CMD_ENC_SEQ_SEARCH_BASE);
- coda_write(dev, ctx->iram_info.search_ram_size,
- CODA7_CMD_ENC_SEQ_SEARCH_SIZE);
- break;
- case CODA_960:
- coda_write(dev, 0, CODA9_CMD_ENC_SEQ_ME_OPTION);
- coda_write(dev, 0, CODA9_CMD_ENC_SEQ_INTRA_WEIGHT);
- }
- }
-
- ret = coda_command_sync(ctx, CODA_COMMAND_SEQ_INIT);
- if (ret < 0) {
- v4l2_err(v4l2_dev, "CODA_COMMAND_SEQ_INIT timeout\n");
- goto out;
- }
-
- if (coda_read(dev, CODA_RET_ENC_SEQ_SUCCESS) == 0) {
- v4l2_err(v4l2_dev, "CODA_COMMAND_SEQ_INIT failed\n");
- ret = -EFAULT;
- goto out;
- }
-
- if (dev->devtype->product == CODA_960)
- ctx->num_internal_frames = 4;
- else
- ctx->num_internal_frames = 2;
- ret = coda_alloc_framebuffers(ctx, q_data_src, dst_fourcc);
- if (ret < 0) {
- v4l2_err(v4l2_dev, "failed to allocate framebuffers\n");
- goto out;
- }
-
- coda_write(dev, ctx->num_internal_frames, CODA_CMD_SET_FRAME_BUF_NUM);
- coda_write(dev, q_data_src->bytesperline,
- CODA_CMD_SET_FRAME_BUF_STRIDE);
- if (dev->devtype->product == CODA_7541) {
- coda_write(dev, q_data_src->bytesperline,
- CODA7_CMD_SET_FRAME_SOURCE_BUF_STRIDE);
- }
- if (dev->devtype->product != CODA_DX6) {
- coda_write(dev, ctx->iram_info.buf_bit_use,
- CODA7_CMD_SET_FRAME_AXI_BIT_ADDR);
- coda_write(dev, ctx->iram_info.buf_ip_ac_dc_use,
- CODA7_CMD_SET_FRAME_AXI_IPACDC_ADDR);
- coda_write(dev, ctx->iram_info.buf_dbk_y_use,
- CODA7_CMD_SET_FRAME_AXI_DBKY_ADDR);
- coda_write(dev, ctx->iram_info.buf_dbk_c_use,
- CODA7_CMD_SET_FRAME_AXI_DBKC_ADDR);
- coda_write(dev, ctx->iram_info.buf_ovl_use,
- CODA7_CMD_SET_FRAME_AXI_OVL_ADDR);
- if (dev->devtype->product == CODA_960) {
- coda_write(dev, ctx->iram_info.buf_btp_use,
- CODA9_CMD_SET_FRAME_AXI_BTP_ADDR);
-
- /* FIXME */
- coda_write(dev, ctx->internal_frames[2].paddr, CODA9_CMD_SET_FRAME_SUBSAMP_A);
- coda_write(dev, ctx->internal_frames[3].paddr, CODA9_CMD_SET_FRAME_SUBSAMP_B);
- }
- }
-
- ret = coda_command_sync(ctx, CODA_COMMAND_SET_FRAME_BUF);
- if (ret < 0) {
- v4l2_err(v4l2_dev, "CODA_COMMAND_SET_FRAME_BUF timeout\n");
- goto out;
- }
-
- /* Save stream headers */
- buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
- switch (dst_fourcc) {
- case V4L2_PIX_FMT_H264:
- /*
- * Get SPS in the first frame and copy it to an
- * intermediate buffer.
- */
- ret = coda_encode_header(ctx, buf, CODA_HEADER_H264_SPS,
- &ctx->vpu_header[0][0],
- &ctx->vpu_header_size[0]);
- if (ret < 0)
- goto out;
-
- /*
- * Get PPS in the first frame and copy it to an
- * intermediate buffer.
- */
- ret = coda_encode_header(ctx, buf, CODA_HEADER_H264_PPS,
- &ctx->vpu_header[1][0],
- &ctx->vpu_header_size[1]);
- if (ret < 0)
- goto out;
-
- /*
- * The length of the H.264 headers is variable, so they might not be
- * aligned for the coda to append the encoded frame. If that is the
- * case, a filler NAL must be added to header 2.
- */
- ctx->vpu_header_size[2] = coda_h264_padding(
- (ctx->vpu_header_size[0] +
- ctx->vpu_header_size[1]),
- ctx->vpu_header[2]);
- break;
- case V4L2_PIX_FMT_MPEG4:
- /*
- * Get VOS in the first frame and copy it to an
- * intermediate buffer
- */
- ret = coda_encode_header(ctx, buf, CODA_HEADER_MP4V_VOS,
- &ctx->vpu_header[0][0],
- &ctx->vpu_header_size[0]);
- if (ret < 0)
- goto out;
-
- ret = coda_encode_header(ctx, buf, CODA_HEADER_MP4V_VIS,
- &ctx->vpu_header[1][0],
- &ctx->vpu_header_size[1]);
- if (ret < 0)
- goto out;
-
- ret = coda_encode_header(ctx, buf, CODA_HEADER_MP4V_VOL,
- &ctx->vpu_header[2][0],
- &ctx->vpu_header_size[2]);
- if (ret < 0)
- goto out;
- break;
- default:
- /* No other formats need to save headers at the moment */
- break;
- }
-
-out:
- mutex_unlock(&dev->coda_mutex);
- return ret;
-}
-
-static void coda_stop_streaming(struct vb2_queue *q)
-{
- struct coda_ctx *ctx = vb2_get_drv_priv(q);
- struct coda_dev *dev = ctx->dev;
-
- if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
- v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
- "%s: output\n", __func__);
- ctx->streamon_out = 0;
-
- if (ctx->inst_type == CODA_INST_DECODER &&
- coda_isbusy(dev) && ctx->idx == coda_read(dev, CODA_REG_BIT_RUN_INDEX)) {
- /* if this decoder instance is running, set the stream end flag */
- if (dev->devtype->product == CODA_960) {
- u32 val = coda_read(dev, CODA_REG_BIT_BIT_STREAM_PARAM);
-
- val |= CODA_BIT_STREAM_END_FLAG;
- coda_write(dev, val, CODA_REG_BIT_BIT_STREAM_PARAM);
- ctx->bit_stream_param = val;
- }
- }
- ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG;
-
- ctx->isequence = 0;
- } else {
- v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
- "%s: capture\n", __func__);
- ctx->streamon_cap = 0;
-
- ctx->osequence = 0;
- ctx->sequence_offset = 0;
- }
-
- if (!ctx->streamon_out && !ctx->streamon_cap) {
- struct coda_timestamp *ts;
-
- while (!list_empty(&ctx->timestamp_list)) {
- ts = list_first_entry(&ctx->timestamp_list,
- struct coda_timestamp, list);
- list_del(&ts->list);
- kfree(ts);
- }
- kfifo_init(&ctx->bitstream_fifo,
- ctx->bitstream.vaddr, ctx->bitstream.size);
- ctx->runcounter = 0;
- }
-}
-
-static struct vb2_ops coda_qops = {
- .queue_setup = coda_queue_setup,
- .buf_prepare = coda_buf_prepare,
- .buf_queue = coda_buf_queue,
- .start_streaming = coda_start_streaming,
- .stop_streaming = coda_stop_streaming,
- .wait_prepare = vb2_ops_wait_prepare,
- .wait_finish = vb2_ops_wait_finish,
-};
-
-static int coda_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct coda_ctx *ctx =
- container_of(ctrl->handler, struct coda_ctx, ctrls);
-
- v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
- "s_ctrl: id = %d, val = %d\n", ctrl->id, ctrl->val);
-
- switch (ctrl->id) {
- case V4L2_CID_HFLIP:
- if (ctrl->val)
- ctx->params.rot_mode |= CODA_MIR_HOR;
- else
- ctx->params.rot_mode &= ~CODA_MIR_HOR;
- break;
- case V4L2_CID_VFLIP:
- if (ctrl->val)
- ctx->params.rot_mode |= CODA_MIR_VER;
- else
- ctx->params.rot_mode &= ~CODA_MIR_VER;
- break;
- case V4L2_CID_MPEG_VIDEO_BITRATE:
- ctx->params.bitrate = ctrl->val / 1000;
- break;
- case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
- ctx->params.gop_size = ctrl->val;
- break;
- case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:
- ctx->params.h264_intra_qp = ctrl->val;
- break;
- case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP:
- ctx->params.h264_inter_qp = ctrl->val;
- break;
- case V4L2_CID_MPEG_VIDEO_H264_MIN_QP:
- ctx->params.h264_min_qp = ctrl->val;
- break;
- case V4L2_CID_MPEG_VIDEO_H264_MAX_QP:
- ctx->params.h264_max_qp = ctrl->val;
- break;
- case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA:
- ctx->params.h264_deblk_alpha = ctrl->val;
- break;
- case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA:
- ctx->params.h264_deblk_beta = ctrl->val;
- break;
- case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
- ctx->params.h264_deblk_enabled = (ctrl->val ==
- V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED);
- break;
- case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP:
- ctx->params.mpeg4_intra_qp = ctrl->val;
- break;
- case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP:
- ctx->params.mpeg4_inter_qp = ctrl->val;
- break;
- case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
- ctx->params.slice_mode = ctrl->val;
- break;
- case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB:
- ctx->params.slice_max_mb = ctrl->val;
- break;
- case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES:
- ctx->params.slice_max_bits = ctrl->val * 8;
- break;
- case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
- break;
- case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB:
- ctx->params.intra_refresh = ctrl->val;
- break;
- default:
- v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
- "Invalid control, id=%d, val=%d\n",
- ctrl->id, ctrl->val);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static struct v4l2_ctrl_ops coda_ctrl_ops = {
- .s_ctrl = coda_s_ctrl,
-};
-
-static int coda_ctrls_setup(struct coda_ctx *ctx)
-{
- v4l2_ctrl_handler_init(&ctx->ctrls, 9);
-
- v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
- V4L2_CID_HFLIP, 0, 1, 1, 0);
- v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
- V4L2_CID_VFLIP, 0, 1, 1, 0);
- v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
- V4L2_CID_MPEG_VIDEO_BITRATE, 0, 32767000, 1, 0);
- v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
- V4L2_CID_MPEG_VIDEO_GOP_SIZE, 1, 60, 1, 16);
- v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
- V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP, 0, 51, 1, 25);
- v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
- V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP, 0, 51, 1, 25);
- if (ctx->dev->devtype->product != CODA_960) {
- v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
- V4L2_CID_MPEG_VIDEO_H264_MIN_QP, 0, 51, 1, 12);
- }
- v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
- V4L2_CID_MPEG_VIDEO_H264_MAX_QP, 0, 51, 1, 51);
- v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
- V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA, 0, 15, 1, 0);
- v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
- V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA, 0, 15, 1, 0);
- v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
- V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE,
- V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED, 0x0,
- V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED);
- v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
- V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP, 1, 31, 1, 2);
- v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
- V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP, 1, 31, 1, 2);
- v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
- V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE,
- V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES, 0x0,
- V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE);
- v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
- V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB, 1, 0x3fffffff, 1, 1);
- v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
- V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES, 1, 0x3fffffff, 1, 500);
- v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
- V4L2_CID_MPEG_VIDEO_HEADER_MODE,
- V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME,
- (1 << V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE),
- V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME);
- v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
- V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB, 0, 1920 * 1088 / 256, 1, 0);
-
- if (ctx->ctrls.error) {
- v4l2_err(&ctx->dev->v4l2_dev, "control initialization error (%d)",
- ctx->ctrls.error);
- return -EINVAL;
- }
-
- return v4l2_ctrl_handler_setup(&ctx->ctrls);
-}
-
-static int coda_queue_init(void *priv, struct vb2_queue *src_vq,
- struct vb2_queue *dst_vq)
-{
- struct coda_ctx *ctx = priv;
- int ret;
-
- src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
- src_vq->io_modes = VB2_DMABUF | VB2_MMAP | VB2_USERPTR;
- src_vq->drv_priv = ctx;
- src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
- src_vq->ops = &coda_qops;
- src_vq->mem_ops = &vb2_dma_contig_memops;
- src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
- src_vq->lock = &ctx->dev->dev_mutex;
-
- ret = vb2_queue_init(src_vq);
- if (ret)
- return ret;
-
- dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- dst_vq->io_modes = VB2_DMABUF | VB2_MMAP | VB2_USERPTR;
- dst_vq->drv_priv = ctx;
- dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
- dst_vq->ops = &coda_qops;
- dst_vq->mem_ops = &vb2_dma_contig_memops;
- dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
- dst_vq->lock = &ctx->dev->dev_mutex;
-
- return vb2_queue_init(dst_vq);
-}
-
-static int coda_next_free_instance(struct coda_dev *dev)
-{
- int idx = ffz(dev->instance_mask);
-
- if ((idx < 0) ||
- (dev->devtype->product == CODA_DX6 && idx > CODADX6_MAX_INSTANCES))
- return -EBUSY;
-
- return idx;
-}
-
-static int coda_open(struct file *file)
-{
- struct coda_dev *dev = video_drvdata(file);
- struct coda_ctx *ctx = NULL;
- char *name;
- int ret;
- int idx;
-
- ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
-
- idx = coda_next_free_instance(dev);
- if (idx < 0) {
- ret = idx;
- goto err_coda_max;
- }
- set_bit(idx, &dev->instance_mask);
-
- name = kasprintf(GFP_KERNEL, "context%d", idx);
- ctx->debugfs_entry = debugfs_create_dir(name, dev->debugfs_root);
- kfree(name);
-
- init_completion(&ctx->completion);
- INIT_WORK(&ctx->pic_run_work, coda_pic_run_work);
- INIT_WORK(&ctx->seq_end_work, coda_seq_end_work);
- v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
- ctx->dev = dev;
- ctx->idx = idx;
- switch (dev->devtype->product) {
- case CODA_7541:
- case CODA_960:
- ctx->reg_idx = 0;
- break;
- default:
- ctx->reg_idx = idx;
- }
-
- /* Power up and upload firmware if necessary */
- ret = pm_runtime_get_sync(&dev->plat_dev->dev);
- if (ret < 0) {
- v4l2_err(&dev->v4l2_dev, "failed to power up: %d\n", ret);
- goto err_pm_get;
- }
-
- ret = clk_prepare_enable(dev->clk_per);
- if (ret)
- goto err_clk_per;
-
- ret = clk_prepare_enable(dev->clk_ahb);
- if (ret)
- goto err_clk_ahb;
-
- set_default_params(ctx);
- ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx,
- &coda_queue_init);
- if (IS_ERR(ctx->fh.m2m_ctx)) {
- ret = PTR_ERR(ctx->fh.m2m_ctx);
-
- v4l2_err(&dev->v4l2_dev, "%s returned error (%d)\n",
- __func__, ret);
- goto err_ctx_init;
- }
-
- ret = coda_ctrls_setup(ctx);
- if (ret) {
- v4l2_err(&dev->v4l2_dev, "failed to setup coda controls\n");
- goto err_ctrls_setup;
- }
-
- ctx->fh.ctrl_handler = &ctx->ctrls;
-
- ret = coda_alloc_context_buf(ctx, &ctx->parabuf, CODA_PARA_BUF_SIZE,
- "parabuf");
- if (ret < 0) {
- v4l2_err(&dev->v4l2_dev, "failed to allocate parabuf");
- goto err_dma_alloc;
- }
-
- ctx->bitstream.size = CODA_MAX_FRAME_SIZE;
- ctx->bitstream.vaddr = dma_alloc_writecombine(&dev->plat_dev->dev,
- ctx->bitstream.size, &ctx->bitstream.paddr, GFP_KERNEL);
- if (!ctx->bitstream.vaddr) {
- v4l2_err(&dev->v4l2_dev, "failed to allocate bitstream ringbuffer");
- ret = -ENOMEM;
- goto err_dma_writecombine;
- }
- kfifo_init(&ctx->bitstream_fifo,
- ctx->bitstream.vaddr, ctx->bitstream.size);
- mutex_init(&ctx->bitstream_mutex);
- mutex_init(&ctx->buffer_mutex);
- INIT_LIST_HEAD(&ctx->timestamp_list);
-
- coda_lock(ctx);
- list_add(&ctx->list, &dev->instances);
- coda_unlock(ctx);
-
- v4l2_dbg(1, coda_debug, &dev->v4l2_dev, "Created instance %d (%p)\n",
- ctx->idx, ctx);
-
- return 0;
-
-err_dma_writecombine:
- coda_free_context_buffers(ctx);
- if (ctx->dev->devtype->product == CODA_DX6)
- coda_free_aux_buf(dev, &ctx->workbuf);
- coda_free_aux_buf(dev, &ctx->parabuf);
-err_dma_alloc:
- v4l2_ctrl_handler_free(&ctx->ctrls);
-err_ctrls_setup:
- v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
-err_ctx_init:
- clk_disable_unprepare(dev->clk_ahb);
-err_clk_ahb:
- clk_disable_unprepare(dev->clk_per);
-err_clk_per:
- pm_runtime_put_sync(&dev->plat_dev->dev);
-err_pm_get:
- v4l2_fh_del(&ctx->fh);
- v4l2_fh_exit(&ctx->fh);
- clear_bit(ctx->idx, &dev->instance_mask);
-err_coda_max:
- kfree(ctx);
- return ret;
-}
-
-static int coda_release(struct file *file)
-{
- struct coda_dev *dev = video_drvdata(file);
- struct coda_ctx *ctx = fh_to_ctx(file->private_data);
-
- v4l2_dbg(1, coda_debug, &dev->v4l2_dev, "Releasing instance %p\n",
- ctx);
-
- debugfs_remove_recursive(ctx->debugfs_entry);
-
- /* If this instance is running, call .job_abort and wait for it to end */
- v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
-
- /* In case the instance was not running, we still need to call SEQ_END */
- if (ctx->initialized) {
- queue_work(dev->workqueue, &ctx->seq_end_work);
- flush_work(&ctx->seq_end_work);
- }
-
- coda_free_framebuffers(ctx);
-
- coda_lock(ctx);
- list_del(&ctx->list);
- coda_unlock(ctx);
-
- dma_free_writecombine(&dev->plat_dev->dev, ctx->bitstream.size,
- ctx->bitstream.vaddr, ctx->bitstream.paddr);
- coda_free_context_buffers(ctx);
- if (ctx->dev->devtype->product == CODA_DX6)
- coda_free_aux_buf(dev, &ctx->workbuf);
-
- coda_free_aux_buf(dev, &ctx->parabuf);
- v4l2_ctrl_handler_free(&ctx->ctrls);
- clk_disable_unprepare(dev->clk_ahb);
- clk_disable_unprepare(dev->clk_per);
- pm_runtime_put_sync(&dev->plat_dev->dev);
- v4l2_fh_del(&ctx->fh);
- v4l2_fh_exit(&ctx->fh);
- clear_bit(ctx->idx, &dev->instance_mask);
- kfree(ctx);
-
- return 0;
-}
-
-static const struct v4l2_file_operations coda_fops = {
- .owner = THIS_MODULE,
- .open = coda_open,
- .release = coda_release,
- .poll = v4l2_m2m_fop_poll,
- .unlocked_ioctl = video_ioctl2,
- .mmap = v4l2_m2m_fop_mmap,
-};
-
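-/*
- * Called from the PIC_RUN worker after a decode run: sync the bitstream
- * kfifo back from the device read pointer, read the decode status, crop
- * and error macroblock information, match the decoded frame index
- * against the queued timestamps, and return the previously displayed
- * frame (copied out by the rotator) to userspace.
- */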
-static void coda_finish_decode(struct coda_ctx *ctx)
-{
- struct coda_dev *dev = ctx->dev;
- struct coda_q_data *q_data_src;
- struct coda_q_data *q_data_dst;
- struct vb2_buffer *dst_buf;
- struct coda_timestamp *ts;
- int width, height;
- int decoded_idx;
- int display_idx;
- u32 src_fourcc;
- int success;
- u32 err_mb;
- u32 val;
-
- dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
-
- /* Update kfifo out pointer from coda bitstream read pointer */
- coda_kfifo_sync_from_device(ctx);
-
- /*
- * in stream-end mode, the read pointer can overshoot the write pointer
- * by up to 512 bytes
- */
- if (ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG) {
- if (coda_get_bitstream_payload(ctx) >= 0x100000 - 512)
- kfifo_init(&ctx->bitstream_fifo,
- ctx->bitstream.vaddr, ctx->bitstream.size);
- }
-
- q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
- src_fourcc = q_data_src->fourcc;
-
- val = coda_read(dev, CODA_RET_DEC_PIC_SUCCESS);
- if (val != 1)
- pr_err("DEC_PIC_SUCCESS = %d\n", val);
-
- success = val & 0x1;
- if (!success)
- v4l2_err(&dev->v4l2_dev, "decode failed\n");
-
- if (src_fourcc == V4L2_PIX_FMT_H264) {
- if (val & (1 << 3))
- v4l2_err(&dev->v4l2_dev,
- "insufficient PS buffer space (%d bytes)\n",
- ctx->psbuf.size);
- if (val & (1 << 2))
- v4l2_err(&dev->v4l2_dev,
- "insufficient slice buffer space (%d bytes)\n",
- ctx->slicebuf.size);
- }
-
- val = coda_read(dev, CODA_RET_DEC_PIC_SIZE);
- width = (val >> 16) & 0xffff;
- height = val & 0xffff;
-
- q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
-
- /* frame crop information */
- if (src_fourcc == V4L2_PIX_FMT_H264) {
- u32 left_right;
- u32 top_bottom;
-
- left_right = coda_read(dev, CODA_RET_DEC_PIC_CROP_LEFT_RIGHT);
- top_bottom = coda_read(dev, CODA_RET_DEC_PIC_CROP_TOP_BOTTOM);
-
- if (left_right == 0xffffffff && top_bottom == 0xffffffff) {
- /* Keep current crop information */
- } else {
- struct v4l2_rect *rect = &q_data_dst->rect;
-
- rect->left = left_right >> 16 & 0xffff;
- rect->top = top_bottom >> 16 & 0xffff;
- rect->width = width - rect->left -
- (left_right & 0xffff);
- rect->height = height - rect->top -
- (top_bottom & 0xffff);
- }
- } else {
- /* no cropping */
- }
-
- err_mb = coda_read(dev, CODA_RET_DEC_PIC_ERR_MB);
- if (err_mb > 0)
- v4l2_err(&dev->v4l2_dev,
- "errors in %d macroblocks\n", err_mb);
-
- if (dev->devtype->product == CODA_7541) {
- val = coda_read(dev, CODA_RET_DEC_PIC_OPTION);
- if (val == 0) {
- /* not enough bitstream data */
- v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
- "prescan failed: %d\n", val);
- ctx->hold = true;
- return;
- }
- }
-
- ctx->frm_dis_flg = coda_read(dev, CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx));
-
- /*
- * The previous display frame was copied out by the rotator,
- * now it can be overwritten again
- */
- if (ctx->display_idx >= 0 &&
- ctx->display_idx < ctx->num_internal_frames) {
- ctx->frm_dis_flg &= ~(1 << ctx->display_idx);
- coda_write(dev, ctx->frm_dis_flg,
- CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx));
- }
-
- /*
- * The index of the last decoded frame, not necessarily in
- * display order, and the index of the next display frame.
- * The latter could have been decoded in a previous run.
- */
- decoded_idx = coda_read(dev, CODA_RET_DEC_PIC_CUR_IDX);
- display_idx = coda_read(dev, CODA_RET_DEC_PIC_FRAME_IDX);
-
- if (decoded_idx == -1) {
- /* no frame was decoded, but we might have a display frame */
- if (display_idx >= 0 && display_idx < ctx->num_internal_frames)
- ctx->sequence_offset++;
- else if (ctx->display_idx < 0)
- ctx->hold = true;
- } else if (decoded_idx == -2) {
- /* no frame was decoded, we still return the remaining buffers */
- } else if (decoded_idx < 0 || decoded_idx >= ctx->num_internal_frames) {
- v4l2_err(&dev->v4l2_dev,
- "decoded frame index out of range: %d\n", decoded_idx);
- } else {
- ts = list_first_entry(&ctx->timestamp_list,
- struct coda_timestamp, list);
- list_del(&ts->list);
- val = coda_read(dev, CODA_RET_DEC_PIC_FRAME_NUM) - 1;
- val -= ctx->sequence_offset;
- if (val != (ts->sequence & 0xffff)) {
- v4l2_err(&dev->v4l2_dev,
- "sequence number mismatch (%d(%d) != %d)\n",
- val, ctx->sequence_offset, ts->sequence);
- }
- ctx->frame_timestamps[decoded_idx] = *ts;
- kfree(ts);
-
- val = coda_read(dev, CODA_RET_DEC_PIC_TYPE) & 0x7;
- if (val == 0)
- ctx->frame_types[decoded_idx] = V4L2_BUF_FLAG_KEYFRAME;
- else if (val == 1)
- ctx->frame_types[decoded_idx] = V4L2_BUF_FLAG_PFRAME;
- else
- ctx->frame_types[decoded_idx] = V4L2_BUF_FLAG_BFRAME;
-
- ctx->frame_errors[decoded_idx] = err_mb;
- }
-
- if (display_idx == -1) {
- /*
- * no more frames to be decoded, but there could still
- * be rotator output to dequeue
- */
- ctx->hold = true;
- } else if (display_idx == -3) {
- /* possibly prescan failure */
- } else if (display_idx < 0 || display_idx >= ctx->num_internal_frames) {
- v4l2_err(&dev->v4l2_dev,
- "presentation frame index out of range: %d\n",
- display_idx);
- }
-
- /* If a frame was copied out, return it */
- if (ctx->display_idx >= 0 &&
- ctx->display_idx < ctx->num_internal_frames) {
- dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
- dst_buf->v4l2_buf.sequence = ctx->osequence++;
-
- dst_buf->v4l2_buf.flags &= ~(V4L2_BUF_FLAG_KEYFRAME |
- V4L2_BUF_FLAG_PFRAME |
- V4L2_BUF_FLAG_BFRAME);
- dst_buf->v4l2_buf.flags |= ctx->frame_types[ctx->display_idx];
- ts = &ctx->frame_timestamps[ctx->display_idx];
- dst_buf->v4l2_buf.timecode = ts->timecode;
- dst_buf->v4l2_buf.timestamp = ts->timestamp;
-
- vb2_set_plane_payload(dst_buf, 0, width * height * 3 / 2);
-
- v4l2_m2m_buf_done(dst_buf, ctx->frame_errors[display_idx] ?
- VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
-
- v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
- "job finished: decoding frame (%d) (%s)\n",
- dst_buf->v4l2_buf.sequence,
- (dst_buf->v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME) ?
- "KEYFRAME" : "PFRAME");
- } else {
- v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
- "job finished: no frame decoded\n");
- }
-
- /* The rotator will copy the current display frame next time */
- ctx->display_idx = display_idx;
-}
-
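-/*
- * Called from the PIC_RUN worker after an encode run: the payload is the
- * write pointer delta, with the previously saved header sizes added for
- * the very first frame, and the timestamp, timecode and key/P-frame
- * flags are propagated from the source buffer before both buffers are
- * marked done.
- */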
-static void coda_finish_encode(struct coda_ctx *ctx)
-{
- struct vb2_buffer *src_buf, *dst_buf;
- struct coda_dev *dev = ctx->dev;
- u32 wr_ptr, start_ptr;
-
- src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
- dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
-
- /* Get results from the coda */
- start_ptr = coda_read(dev, CODA_CMD_ENC_PIC_BB_START);
- wr_ptr = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
-
- /* Calculate bytesused field */
- if (dst_buf->v4l2_buf.sequence == 0) {
- vb2_set_plane_payload(dst_buf, 0, wr_ptr - start_ptr +
- ctx->vpu_header_size[0] +
- ctx->vpu_header_size[1] +
- ctx->vpu_header_size[2]);
- } else {
- vb2_set_plane_payload(dst_buf, 0, wr_ptr - start_ptr);
- }
-
- v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, "frame size = %u\n",
- wr_ptr - start_ptr);
-
- coda_read(dev, CODA_RET_ENC_PIC_SLICE_NUM);
- coda_read(dev, CODA_RET_ENC_PIC_FLAG);
-
- if (coda_read(dev, CODA_RET_ENC_PIC_TYPE) == 0) {
- dst_buf->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME;
- dst_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_PFRAME;
- } else {
- dst_buf->v4l2_buf.flags |= V4L2_BUF_FLAG_PFRAME;
- dst_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_KEYFRAME;
- }
-
- dst_buf->v4l2_buf.timestamp = src_buf->v4l2_buf.timestamp;
- dst_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
- dst_buf->v4l2_buf.flags |=
- src_buf->v4l2_buf.flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
- dst_buf->v4l2_buf.timecode = src_buf->v4l2_buf.timecode;
-
- v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
-
- dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
- v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
-
- ctx->gopcounter--;
- if (ctx->gopcounter < 0)
- ctx->gopcounter = ctx->params.gop_size - 1;
-
- v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
- "job finished: encoding frame (%d) (%s)\n",
- dst_buf->v4l2_buf.sequence,
- (dst_buf->v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME) ?
- "KEYFRAME" : "PFRAME");
-}
-
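-/*
- * The interrupt handler only acknowledges the interrupt and completes
- * ctx->completion; the actual result handling happens in the PIC_RUN
- * worker that is waiting on it.
- */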
-static irqreturn_t coda_irq_handler(int irq, void *data)
-{
- struct coda_dev *dev = data;
- struct coda_ctx *ctx;
-
- /* read the status register to acknowledge the IRQ */
- coda_read(dev, CODA_REG_BIT_INT_STATUS);
- coda_write(dev, CODA_REG_BIT_INT_CLEAR_SET,
- CODA_REG_BIT_INT_CLEAR);
-
- ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
- if (ctx == NULL) {
- v4l2_err(&dev->v4l2_dev, "Instance released before the end of transaction\n");
- mutex_unlock(&dev->coda_mutex);
- return IRQ_HANDLED;
- }
-
- if (ctx->aborting) {
- v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
- "task has been aborted\n");
- }
-
- if (coda_isbusy(ctx->dev)) {
- v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
- "coda is still busy!!!!\n");
- return IRQ_NONE;
- }
-
- complete(&ctx->completion);
-
- return IRQ_HANDLED;
-}
-
-static u32 coda_supported_firmwares[] = {
- CODA_FIRMWARE_VERNUM(CODA_DX6, 2, 2, 5),
- CODA_FIRMWARE_VERNUM(CODA_7541, 1, 4, 50),
- CODA_FIRMWARE_VERNUM(CODA_960, 2, 1, 5),
-};
-
-static bool coda_firmware_supported(u32 vernum)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(coda_supported_firmwares); i++)
- if (vernum == coda_supported_firmwares[i])
- return true;
- return false;
-}
-
-static int coda_hw_init(struct coda_dev *dev)
-{
- u32 data;
- u16 *p;
- int i, ret;
-
- ret = clk_prepare_enable(dev->clk_per);
- if (ret)
- goto err_clk_per;
-
- ret = clk_prepare_enable(dev->clk_ahb);
- if (ret)
- goto err_clk_ahb;
-
- if (dev->rstc)
- reset_control_reset(dev->rstc);
-
- /*
- * Copy the first CODA_ISRAM_SIZE bytes of the firmware into the
- * internal SRAM. The 16-bit words in the code buffer are in memory
- * access order; re-sort them into CODA order for the register
- * download. Data in this SRAM survives a reboot.
- */
- p = (u16 *)dev->codebuf.vaddr;
- if (dev->devtype->product == CODA_DX6) {
- for (i = 0; i < (CODA_ISRAM_SIZE / 2); i++) {
- data = CODA_DOWN_ADDRESS_SET(i) |
- CODA_DOWN_DATA_SET(p[i ^ 1]);
- coda_write(dev, data, CODA_REG_BIT_CODE_DOWN);
- }
- } else {
- for (i = 0; i < (CODA_ISRAM_SIZE / 2); i++) {
- data = CODA_DOWN_ADDRESS_SET(i) |
- CODA_DOWN_DATA_SET(p[round_down(i, 4) +
- 3 - (i % 4)]);
- coda_write(dev, data, CODA_REG_BIT_CODE_DOWN);
- }
- }
-
- /* Clear registers */
- for (i = 0; i < 64; i++)
- coda_write(dev, 0, CODA_REG_BIT_CODE_BUF_ADDR + i * 4);
-
- /* Tell the BIT where to find everything it needs */
- if (dev->devtype->product == CODA_960 ||
- dev->devtype->product == CODA_7541) {
- coda_write(dev, dev->tempbuf.paddr,
- CODA_REG_BIT_TEMP_BUF_ADDR);
- coda_write(dev, 0, CODA_REG_BIT_BIT_STREAM_PARAM);
- } else {
- coda_write(dev, dev->workbuf.paddr,
- CODA_REG_BIT_WORK_BUF_ADDR);
- }
- coda_write(dev, dev->codebuf.paddr,
- CODA_REG_BIT_CODE_BUF_ADDR);
- coda_write(dev, 0, CODA_REG_BIT_CODE_RUN);
-
- /* Set default values */
- switch (dev->devtype->product) {
- case CODA_DX6:
- coda_write(dev, CODADX6_STREAM_BUF_PIC_FLUSH, CODA_REG_BIT_STREAM_CTRL);
- break;
- default:
- coda_write(dev, CODA7_STREAM_BUF_PIC_FLUSH, CODA_REG_BIT_STREAM_CTRL);
- }
- if (dev->devtype->product == CODA_960)
- coda_write(dev, 1 << 12, CODA_REG_BIT_FRAME_MEM_CTRL);
- else
- coda_write(dev, 0, CODA_REG_BIT_FRAME_MEM_CTRL);
-
- if (dev->devtype->product != CODA_DX6)
- coda_write(dev, 0, CODA7_REG_BIT_AXI_SRAM_USE);
-
- coda_write(dev, CODA_INT_INTERRUPT_ENABLE,
- CODA_REG_BIT_INT_ENABLE);
-
- /* Reset VPU and start processor */
- data = coda_read(dev, CODA_REG_BIT_CODE_RESET);
- data |= CODA_REG_RESET_ENABLE;
- coda_write(dev, data, CODA_REG_BIT_CODE_RESET);
- udelay(10);
- data &= ~CODA_REG_RESET_ENABLE;
- coda_write(dev, data, CODA_REG_BIT_CODE_RESET);
- coda_write(dev, CODA_REG_RUN_ENABLE, CODA_REG_BIT_CODE_RUN);
-
- clk_disable_unprepare(dev->clk_ahb);
- clk_disable_unprepare(dev->clk_per);
-
- return 0;
-
-err_clk_ahb:
- clk_disable_unprepare(dev->clk_per);
-err_clk_per:
- return ret;
-}
-
-static int coda_check_firmware(struct coda_dev *dev)
-{
- u16 product, major, minor, release;
- u32 data;
- int ret;
-
- ret = clk_prepare_enable(dev->clk_per);
- if (ret)
- goto err_clk_per;
-
- ret = clk_prepare_enable(dev->clk_ahb);
- if (ret)
- goto err_clk_ahb;
-
- coda_write(dev, 0, CODA_CMD_FIRMWARE_VERNUM);
- coda_write(dev, CODA_REG_BIT_BUSY_FLAG, CODA_REG_BIT_BUSY);
- coda_write(dev, 0, CODA_REG_BIT_RUN_INDEX);
- coda_write(dev, 0, CODA_REG_BIT_RUN_COD_STD);
- coda_write(dev, CODA_COMMAND_FIRMWARE_GET, CODA_REG_BIT_RUN_COMMAND);
- if (coda_wait_timeout(dev)) {
- v4l2_err(&dev->v4l2_dev, "firmware get command error\n");
- ret = -EIO;
- goto err_run_cmd;
- }
-
- if (dev->devtype->product == CODA_960) {
- data = coda_read(dev, CODA9_CMD_FIRMWARE_CODE_REV);
- v4l2_info(&dev->v4l2_dev, "Firmware code revision: %d\n",
- data);
- }
-
- /* Check we are compatible with the loaded firmware */
- data = coda_read(dev, CODA_CMD_FIRMWARE_VERNUM);
- product = CODA_FIRMWARE_PRODUCT(data);
- major = CODA_FIRMWARE_MAJOR(data);
- minor = CODA_FIRMWARE_MINOR(data);
- release = CODA_FIRMWARE_RELEASE(data);
-
- clk_disable_unprepare(dev->clk_per);
- clk_disable_unprepare(dev->clk_ahb);
-
- if (product != dev->devtype->product) {
- v4l2_err(&dev->v4l2_dev, "Wrong firmware. Hw: %s, Fw: %s,"
- " Version: %u.%u.%u\n",
- coda_product_name(dev->devtype->product),
- coda_product_name(product), major, minor, release);
- return -EINVAL;
- }
-
- v4l2_info(&dev->v4l2_dev, "Initialized %s.\n",
- coda_product_name(product));
-
- if (coda_firmware_supported(data)) {
- v4l2_info(&dev->v4l2_dev, "Firmware version: %u.%u.%u\n",
- major, minor, release);
- } else {
- v4l2_warn(&dev->v4l2_dev, "Unsupported firmware version: "
- "%u.%u.%u\n", major, minor, release);
- }
-
- return 0;
-
-err_run_cmd:
- clk_disable_unprepare(dev->clk_ahb);
-err_clk_ahb:
- clk_disable_unprepare(dev->clk_per);
-err_clk_per:
- return ret;
-}
-
-static void coda_fw_callback(const struct firmware *fw, void *context)
-{
- struct coda_dev *dev = context;
- struct platform_device *pdev = dev->plat_dev;
- int ret;
-
- if (!fw) {
- v4l2_err(&dev->v4l2_dev, "firmware request failed\n");
- return;
- }
-
- /* allocate auxiliary per-device code buffer for the BIT processor */
- ret = coda_alloc_aux_buf(dev, &dev->codebuf, fw->size, "codebuf",
- dev->debugfs_root);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to allocate code buffer\n");
- return;
- }
-
- /* Copy the whole firmware image to the code buffer */
- memcpy(dev->codebuf.vaddr, fw->data, fw->size);
- release_firmware(fw);
-
- if (pm_runtime_enabled(&pdev->dev) && pdev->dev.pm_domain) {
- /*
- * Enabling power temporarily will cause coda_hw_init to be
- * called via coda_runtime_resume by the pm domain.
- */
- ret = pm_runtime_get_sync(&dev->plat_dev->dev);
- if (ret < 0) {
- v4l2_err(&dev->v4l2_dev, "failed to power on: %d\n",
- ret);
- return;
- }
-
- ret = coda_check_firmware(dev);
- if (ret < 0)
- return;
-
- pm_runtime_put_sync(&dev->plat_dev->dev);
- } else {
- /*
- * If runtime pm is disabled or pm_domain is not set,
- * initialize once manually.
- */
- ret = coda_hw_init(dev);
- if (ret < 0) {
- v4l2_err(&dev->v4l2_dev, "HW initialization failed\n");
- return;
- }
-
- ret = coda_check_firmware(dev);
- if (ret < 0)
- return;
- }
-
- dev->vfd.fops = &coda_fops,
- dev->vfd.ioctl_ops = &coda_ioctl_ops;
- dev->vfd.release = video_device_release_empty,
- dev->vfd.lock = &dev->dev_mutex;
- dev->vfd.v4l2_dev = &dev->v4l2_dev;
- dev->vfd.vfl_dir = VFL_DIR_M2M;
- snprintf(dev->vfd.name, sizeof(dev->vfd.name), "%s", CODA_NAME);
- video_set_drvdata(&dev->vfd, dev);
-
- dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
- if (IS_ERR(dev->alloc_ctx)) {
- v4l2_err(&dev->v4l2_dev, "Failed to alloc vb2 context\n");
- return;
- }
-
- dev->m2m_dev = v4l2_m2m_init(&coda_m2m_ops);
- if (IS_ERR(dev->m2m_dev)) {
- v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n");
- goto rel_ctx;
- }
-
- ret = video_register_device(&dev->vfd, VFL_TYPE_GRABBER, 0);
- if (ret) {
- v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
- goto rel_m2m;
- }
- v4l2_info(&dev->v4l2_dev, "codec registered as /dev/video%d\n",
- dev->vfd.num);
-
- return;
-
-rel_m2m:
- v4l2_m2m_release(dev->m2m_dev);
-rel_ctx:
- vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
-}
-
-static int coda_firmware_request(struct coda_dev *dev)
-{
- char *fw = dev->devtype->firmware;
-
- dev_dbg(&dev->plat_dev->dev, "requesting firmware '%s' for %s\n", fw,
- coda_product_name(dev->devtype->product));
-
- return request_firmware_nowait(THIS_MODULE, true,
- fw, &dev->plat_dev->dev, GFP_KERNEL, dev, coda_fw_callback);
-}
-
-enum coda_platform {
- CODA_IMX27,
- CODA_IMX53,
- CODA_IMX6Q,
- CODA_IMX6DL,
-};
-
-static const struct coda_devtype coda_devdata[] = {
- [CODA_IMX27] = {
- .firmware = "v4l-codadx6-imx27.bin",
- .product = CODA_DX6,
- .codecs = codadx6_codecs,
- .num_codecs = ARRAY_SIZE(codadx6_codecs),
- .workbuf_size = 288 * 1024 + FMO_SLICE_SAVE_BUF_SIZE * 8 * 1024,
- .iram_size = 0xb000,
- },
- [CODA_IMX53] = {
- .firmware = "v4l-coda7541-imx53.bin",
- .product = CODA_7541,
- .codecs = coda7_codecs,
- .num_codecs = ARRAY_SIZE(coda7_codecs),
- .workbuf_size = 128 * 1024,
- .tempbuf_size = 304 * 1024,
- .iram_size = 0x14000,
- },
- [CODA_IMX6Q] = {
- .firmware = "v4l-coda960-imx6q.bin",
- .product = CODA_960,
- .codecs = coda9_codecs,
- .num_codecs = ARRAY_SIZE(coda9_codecs),
- .workbuf_size = 80 * 1024,
- .tempbuf_size = 204 * 1024,
- .iram_size = 0x21000,
- },
- [CODA_IMX6DL] = {
- .firmware = "v4l-coda960-imx6dl.bin",
- .product = CODA_960,
- .codecs = coda9_codecs,
- .num_codecs = ARRAY_SIZE(coda9_codecs),
- .workbuf_size = 80 * 1024,
- .tempbuf_size = 204 * 1024,
- .iram_size = 0x20000,
- },
-};
-
-static struct platform_device_id coda_platform_ids[] = {
- { .name = "coda-imx27", .driver_data = CODA_IMX27 },
- { .name = "coda-imx53", .driver_data = CODA_IMX53 },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(platform, coda_platform_ids);
-
-#ifdef CONFIG_OF
-static const struct of_device_id coda_dt_ids[] = {
- { .compatible = "fsl,imx27-vpu", .data = &coda_devdata[CODA_IMX27] },
- { .compatible = "fsl,imx53-vpu", .data = &coda_devdata[CODA_IMX53] },
- { .compatible = "fsl,imx6q-vpu", .data = &coda_devdata[CODA_IMX6Q] },
- { .compatible = "fsl,imx6dl-vpu", .data = &coda_devdata[CODA_IMX6DL] },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, coda_dt_ids);
-#endif
-
-static int coda_probe(struct platform_device *pdev)
-{
- const struct of_device_id *of_id =
- of_match_device(of_match_ptr(coda_dt_ids), &pdev->dev);
- const struct platform_device_id *pdev_id;
- struct coda_platform_data *pdata = pdev->dev.platform_data;
- struct device_node *np = pdev->dev.of_node;
- struct gen_pool *pool;
- struct coda_dev *dev;
- struct resource *res;
- int ret, irq;
-
- dev = devm_kzalloc(&pdev->dev, sizeof *dev, GFP_KERNEL);
- if (!dev) {
- dev_err(&pdev->dev, "Not enough memory for %s\n",
- CODA_NAME);
- return -ENOMEM;
- }
-
- spin_lock_init(&dev->irqlock);
- INIT_LIST_HEAD(&dev->instances);
-
- dev->plat_dev = pdev;
- dev->clk_per = devm_clk_get(&pdev->dev, "per");
- if (IS_ERR(dev->clk_per)) {
- dev_err(&pdev->dev, "Could not get per clock\n");
- return PTR_ERR(dev->clk_per);
- }
-
- dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
- if (IS_ERR(dev->clk_ahb)) {
- dev_err(&pdev->dev, "Could not get ahb clock\n");
- return PTR_ERR(dev->clk_ahb);
- }
-
- /* Get memory for physical registers */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(dev->regs_base))
- return PTR_ERR(dev->regs_base);
-
- /* IRQ */
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "failed to get irq resource\n");
- return irq;
- }
-
- ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, coda_irq_handler,
- IRQF_ONESHOT, dev_name(&pdev->dev), dev);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to request irq: %d\n", ret);
- return ret;
- }
-
- dev->rstc = devm_reset_control_get_optional(&pdev->dev, NULL);
- if (IS_ERR(dev->rstc)) {
- ret = PTR_ERR(dev->rstc);
- if (ret == -ENOENT || ret == -ENOSYS) {
- dev->rstc = NULL;
- } else {
- dev_err(&pdev->dev, "failed get reset control: %d\n", ret);
- return ret;
- }
- }
-
- /* Get IRAM pool from device tree or platform data */
- pool = of_get_named_gen_pool(np, "iram", 0);
- if (!pool && pdata)
- pool = dev_get_gen_pool(pdata->iram_dev);
- if (!pool) {
- dev_err(&pdev->dev, "iram pool not available\n");
- return -ENOMEM;
- }
- dev->iram_pool = pool;
-
- ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
- if (ret)
- return ret;
-
- mutex_init(&dev->dev_mutex);
- mutex_init(&dev->coda_mutex);
-
- pdev_id = of_id ? of_id->data : platform_get_device_id(pdev);
-
- if (of_id) {
- dev->devtype = of_id->data;
- } else if (pdev_id) {
- dev->devtype = &coda_devdata[pdev_id->driver_data];
- } else {
- v4l2_device_unregister(&dev->v4l2_dev);
- return -EINVAL;
- }
-
- dev->debugfs_root = debugfs_create_dir("coda", NULL);
- if (!dev->debugfs_root)
- dev_warn(&pdev->dev, "failed to create debugfs root\n");
-
- /* allocate auxiliary per-device buffers for the BIT processor */
- if (dev->devtype->product == CODA_DX6) {
- ret = coda_alloc_aux_buf(dev, &dev->workbuf,
- dev->devtype->workbuf_size, "workbuf",
- dev->debugfs_root);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to allocate work buffer\n");
- v4l2_device_unregister(&dev->v4l2_dev);
- return ret;
- }
- }
-
- if (dev->devtype->tempbuf_size) {
- ret = coda_alloc_aux_buf(dev, &dev->tempbuf,
- dev->devtype->tempbuf_size, "tempbuf",
- dev->debugfs_root);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to allocate temp buffer\n");
- v4l2_device_unregister(&dev->v4l2_dev);
- return ret;
- }
- }
-
- dev->iram.size = dev->devtype->iram_size;
- dev->iram.vaddr = gen_pool_dma_alloc(dev->iram_pool, dev->iram.size,
- &dev->iram.paddr);
- if (!dev->iram.vaddr) {
- dev_err(&pdev->dev, "unable to alloc iram\n");
- return -ENOMEM;
- }
-
- dev->iram.blob.data = dev->iram.vaddr;
- dev->iram.blob.size = dev->iram.size;
- dev->iram.dentry = debugfs_create_blob("iram", 0644, dev->debugfs_root,
- &dev->iram.blob);
-
- dev->workqueue = alloc_workqueue("coda", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
- if (!dev->workqueue) {
- dev_err(&pdev->dev, "unable to alloc workqueue\n");
- return -ENOMEM;
- }
-
- platform_set_drvdata(pdev, dev);
-
- pm_runtime_enable(&pdev->dev);
-
- return coda_firmware_request(dev);
-}
-
-static int coda_remove(struct platform_device *pdev)
-{
- struct coda_dev *dev = platform_get_drvdata(pdev);
-
- video_unregister_device(&dev->vfd);
- if (dev->m2m_dev)
- v4l2_m2m_release(dev->m2m_dev);
- pm_runtime_disable(&pdev->dev);
- if (dev->alloc_ctx)
- vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
- v4l2_device_unregister(&dev->v4l2_dev);
- destroy_workqueue(dev->workqueue);
- if (dev->iram.vaddr)
- gen_pool_free(dev->iram_pool, (unsigned long)dev->iram.vaddr,
- dev->iram.size);
- coda_free_aux_buf(dev, &dev->codebuf);
- coda_free_aux_buf(dev, &dev->tempbuf);
- coda_free_aux_buf(dev, &dev->workbuf);
- debugfs_remove_recursive(dev->debugfs_root);
- return 0;
-}
-
-#ifdef CONFIG_PM_RUNTIME
-static int coda_runtime_resume(struct device *dev)
-{
- struct coda_dev *cdev = dev_get_drvdata(dev);
- int ret = 0;
-
- if (dev->pm_domain) {
- ret = coda_hw_init(cdev);
- if (ret)
- v4l2_err(&cdev->v4l2_dev, "HW initialization failed\n");
- }
-
- return ret;
-}
-#endif
-
-static const struct dev_pm_ops coda_pm_ops = {
- SET_RUNTIME_PM_OPS(NULL, coda_runtime_resume, NULL)
-};
-
-static struct platform_driver coda_driver = {
- .probe = coda_probe,
- .remove = coda_remove,
- .driver = {
- .name = CODA_NAME,
- .owner = THIS_MODULE,
- .of_match_table = of_match_ptr(coda_dt_ids),
- .pm = &coda_pm_ops,
- },
- .id_table = coda_platform_ids,
-};
-
-module_platform_driver(coda_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
-MODULE_DESCRIPTION("Coda multi-standard codec V4L2 driver");
diff --git a/drivers/media/platform/coda/Makefile b/drivers/media/platform/coda/Makefile
new file mode 100644
index 000000000000..3543291e6273
--- /dev/null
+++ b/drivers/media/platform/coda/Makefile
@@ -0,0 +1,3 @@
+coda-objs := coda-common.o coda-bit.o coda-h264.o
+
+obj-$(CONFIG_VIDEO_CODA) += coda.o
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
new file mode 100644
index 000000000000..9b8ea8bbeb4e
--- /dev/null
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -0,0 +1,1861 @@
+/*
+ * Coda multi-standard codec IP - BIT processor functions
+ *
+ * Copyright (C) 2012 Vista Silicon S.L.
+ * Javier Martin, <javier.martin@vista-silicon.com>
+ * Xavier Duret
+ * Copyright (C) 2012-2014 Philipp Zabel, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/irqreturn.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/videobuf2-vmalloc.h>
+
+#include "coda.h"
+
+#define CODA7_PS_BUF_SIZE 0x28000
+#define CODA9_PS_SAVE_SIZE (512 * 1024)
+
+#define CODA_DEFAULT_GAMMA 4096
+#define CODA9_DEFAULT_GAMMA 24576 /* 0.75 * 32768 */
+
+static inline int coda_is_initialized(struct coda_dev *dev)
+{
+ return coda_read(dev, CODA_REG_BIT_CUR_PC) != 0;
+}
+
+static inline unsigned long coda_isbusy(struct coda_dev *dev)
+{
+ return coda_read(dev, CODA_REG_BIT_BUSY);
+}
+
+static int coda_wait_timeout(struct coda_dev *dev)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+ while (coda_isbusy(dev)) {
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+static void coda_command_async(struct coda_ctx *ctx, int cmd)
+{
+ struct coda_dev *dev = ctx->dev;
+
+ if (dev->devtype->product == CODA_960 ||
+ dev->devtype->product == CODA_7541) {
+ /* Restore context related registers to CODA */
+ coda_write(dev, ctx->bit_stream_param,
+ CODA_REG_BIT_BIT_STREAM_PARAM);
+ coda_write(dev, ctx->frm_dis_flg,
+ CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx));
+ coda_write(dev, ctx->frame_mem_ctrl,
+ CODA_REG_BIT_FRAME_MEM_CTRL);
+ coda_write(dev, ctx->workbuf.paddr, CODA_REG_BIT_WORK_BUF_ADDR);
+ }
+
+ if (dev->devtype->product == CODA_960) {
+ coda_write(dev, 1, CODA9_GDI_WPROT_ERR_CLR);
+ coda_write(dev, 0, CODA9_GDI_WPROT_RGN_EN);
+ }
+
+ coda_write(dev, CODA_REG_BIT_BUSY_FLAG, CODA_REG_BIT_BUSY);
+
+ coda_write(dev, ctx->idx, CODA_REG_BIT_RUN_INDEX);
+ coda_write(dev, ctx->params.codec_mode, CODA_REG_BIT_RUN_COD_STD);
+ coda_write(dev, ctx->params.codec_mode_aux, CODA7_REG_BIT_RUN_AUX_STD);
+
+ coda_write(dev, cmd, CODA_REG_BIT_RUN_COMMAND);
+}
+
+static int coda_command_sync(struct coda_ctx *ctx, int cmd)
+{
+ struct coda_dev *dev = ctx->dev;
+
+ coda_command_async(ctx, cmd);
+ return coda_wait_timeout(dev);
+}
+
+int coda_hw_reset(struct coda_ctx *ctx)
+{
+ struct coda_dev *dev = ctx->dev;
+ unsigned long timeout;
+ unsigned int idx;
+ int ret;
+
+ if (!dev->rstc)
+ return -ENOENT;
+
+ idx = coda_read(dev, CODA_REG_BIT_RUN_INDEX);
+
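+	/*
+	 * On CODA960, write 0x11 to the GDI bus control register and wait
+	 * until the bus status reads back 0x77 before triggering the reset
+	 * (presumably to quiesce outstanding bus transactions); normal
+	 * operation (0x00) is restored after the reset below.
+	 */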
+ if (dev->devtype->product == CODA_960) {
+ timeout = jiffies + msecs_to_jiffies(100);
+ coda_write(dev, 0x11, CODA9_GDI_BUS_CTRL);
+ while (coda_read(dev, CODA9_GDI_BUS_STATUS) != 0x77) {
+ if (time_after(jiffies, timeout))
+ return -ETIME;
+ cpu_relax();
+ }
+ }
+
+ ret = reset_control_reset(dev->rstc);
+ if (ret < 0)
+ return ret;
+
+ if (dev->devtype->product == CODA_960)
+ coda_write(dev, 0x00, CODA9_GDI_BUS_CTRL);
+ coda_write(dev, CODA_REG_BIT_BUSY_FLAG, CODA_REG_BIT_BUSY);
+ coda_write(dev, CODA_REG_RUN_ENABLE, CODA_REG_BIT_CODE_RUN);
+ ret = coda_wait_timeout(dev);
+ coda_write(dev, idx, CODA_REG_BIT_RUN_INDEX);
+
+ return ret;
+}
+
+static void coda_kfifo_sync_from_device(struct coda_ctx *ctx)
+{
+ struct __kfifo *kfifo = &ctx->bitstream_fifo.kfifo;
+ struct coda_dev *dev = ctx->dev;
+ u32 rd_ptr;
+
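+	/*
+	 * Reconstruct the kfifo 'out' index from the hardware read pointer:
+	 * take the byte offset into the bitstream buffer and place it in the
+	 * same wrap-around period as 'in'; if that overshoots 'in', the read
+	 * pointer belongs to the previous period, so subtract one buffer size.
+	 */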
+ rd_ptr = coda_read(dev, CODA_REG_BIT_RD_PTR(ctx->reg_idx));
+ kfifo->out = (kfifo->in & ~kfifo->mask) |
+ (rd_ptr - ctx->bitstream.paddr);
+ if (kfifo->out > kfifo->in)
+ kfifo->out -= kfifo->mask + 1;
+}
+
+static void coda_kfifo_sync_to_device_full(struct coda_ctx *ctx)
+{
+ struct __kfifo *kfifo = &ctx->bitstream_fifo.kfifo;
+ struct coda_dev *dev = ctx->dev;
+ u32 rd_ptr, wr_ptr;
+
+ rd_ptr = ctx->bitstream.paddr + (kfifo->out & kfifo->mask);
+ coda_write(dev, rd_ptr, CODA_REG_BIT_RD_PTR(ctx->reg_idx));
+ wr_ptr = ctx->bitstream.paddr + (kfifo->in & kfifo->mask);
+ coda_write(dev, wr_ptr, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
+}
+
+static void coda_kfifo_sync_to_device_write(struct coda_ctx *ctx)
+{
+ struct __kfifo *kfifo = &ctx->bitstream_fifo.kfifo;
+ struct coda_dev *dev = ctx->dev;
+ u32 wr_ptr;
+
+ wr_ptr = ctx->bitstream.paddr + (kfifo->in & kfifo->mask);
+ coda_write(dev, wr_ptr, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
+}
+
+static int coda_bitstream_queue(struct coda_ctx *ctx,
+ struct vb2_buffer *src_buf)
+{
+ u32 src_size = vb2_get_plane_payload(src_buf, 0);
+ u32 n;
+
+ n = kfifo_in(&ctx->bitstream_fifo, vb2_plane_vaddr(src_buf, 0),
+ src_size);
+ if (n < src_size)
+ return -ENOSPC;
+
+ dma_sync_single_for_device(&ctx->dev->plat_dev->dev,
+ ctx->bitstream.paddr, ctx->bitstream.size,
+ DMA_TO_DEVICE);
+
+ src_buf->v4l2_buf.sequence = ctx->qsequence++;
+
+ return 0;
+}
+
+static bool coda_bitstream_try_queue(struct coda_ctx *ctx,
+ struct vb2_buffer *src_buf)
+{
+ int ret;
+
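+	/* Only queue the buffer if it fits with at least 512 bytes to spare */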
+ if (coda_get_bitstream_payload(ctx) +
+ vb2_get_plane_payload(src_buf, 0) + 512 >= ctx->bitstream.size)
+ return false;
+
+ if (vb2_plane_vaddr(src_buf, 0) == NULL) {
+ v4l2_err(&ctx->dev->v4l2_dev, "trying to queue empty buffer\n");
+ return true;
+ }
+
+ ret = coda_bitstream_queue(ctx, src_buf);
+ if (ret < 0) {
+ v4l2_err(&ctx->dev->v4l2_dev, "bitstream buffer overflow\n");
+ return false;
+ }
+ /* Sync read pointer to device */
+ if (ctx == v4l2_m2m_get_curr_priv(ctx->dev->m2m_dev))
+ coda_kfifo_sync_to_device_write(ctx);
+
+ ctx->hold = false;
+
+ return true;
+}
+
+void coda_fill_bitstream(struct coda_ctx *ctx)
+{
+ struct vb2_buffer *src_buf;
+ struct coda_timestamp *ts;
+
+ while (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) > 0) {
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+
+ if (coda_bitstream_try_queue(ctx, src_buf)) {
+ /*
+ * Source buffer is queued in the bitstream ringbuffer;
+ * queue the timestamp and mark source buffer as done
+ */
+ src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+
+ ts = kmalloc(sizeof(*ts), GFP_KERNEL);
+ if (ts) {
+ ts->sequence = src_buf->v4l2_buf.sequence;
+ ts->timecode = src_buf->v4l2_buf.timecode;
+ ts->timestamp = src_buf->v4l2_buf.timestamp;
+ list_add_tail(&ts->list, &ctx->timestamp_list);
+ }
+
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+ } else {
+ break;
+ }
+ }
+}
+
+void coda_bit_stream_end_flag(struct coda_ctx *ctx)
+{
+ struct coda_dev *dev = ctx->dev;
+
+ ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG;
+
+ /* If this context is currently running, update the hardware flag */
+ if ((dev->devtype->product == CODA_960) &&
+ coda_isbusy(dev) &&
+ (ctx->idx == coda_read(dev, CODA_REG_BIT_RUN_INDEX))) {
+ coda_write(dev, ctx->bit_stream_param,
+ CODA_REG_BIT_BIT_STREAM_PARAM);
+ }
+}
+
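+/*
+ * Write a 32-bit value into the shared parameter buffer. On everything
+ * newer than CodaDx6, the two 32-bit halves of each 64-bit word are
+ * stored in swapped order (index ^ 1).
+ */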
+static void coda_parabuf_write(struct coda_ctx *ctx, int index, u32 value)
+{
+ struct coda_dev *dev = ctx->dev;
+ u32 *p = ctx->parabuf.vaddr;
+
+ if (dev->devtype->product == CODA_DX6)
+ p[index] = value;
+ else
+ p[index ^ 1] = value;
+}
+
+static void coda_free_framebuffers(struct coda_ctx *ctx)
+{
+ int i;
+
+ for (i = 0; i < CODA_MAX_FRAMEBUFFERS; i++)
+ coda_free_aux_buf(ctx->dev, &ctx->internal_frames[i]);
+}
+
+static int coda_alloc_framebuffers(struct coda_ctx *ctx,
+ struct coda_q_data *q_data, u32 fourcc)
+{
+ struct coda_dev *dev = ctx->dev;
+ int width, height;
+ dma_addr_t paddr;
+ int ysize;
+ int ret;
+ int i;
+
+ if (ctx->codec && (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 ||
+ ctx->codec->dst_fourcc == V4L2_PIX_FMT_H264)) {
+ width = round_up(q_data->width, 16);
+ height = round_up(q_data->height, 16);
+ } else {
+ width = round_up(q_data->width, 8);
+ height = q_data->height;
+ }
+ ysize = width * height;
+
+ /* Allocate frame buffers */
+ for (i = 0; i < ctx->num_internal_frames; i++) {
+ size_t size;
+ char *name;
+
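+		/*
+		 * One luma plane plus two quarter-size chroma planes
+		 * (YUV 4:2:0), plus an additional quarter-size mvcol
+		 * (co-located motion vector) buffer for H.264 on
+		 * CODA7541/CODA960.
+		 */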
+ size = ysize + ysize / 2;
+ if (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 &&
+ dev->devtype->product != CODA_DX6)
+ size += ysize / 4;
+ name = kasprintf(GFP_KERNEL, "fb%d", i);
+ ret = coda_alloc_context_buf(ctx, &ctx->internal_frames[i],
+ size, name);
+ kfree(name);
+ if (ret < 0) {
+ coda_free_framebuffers(ctx);
+ return ret;
+ }
+ }
+
+ /* Register frame buffers in the parameter buffer */
+ for (i = 0; i < ctx->num_internal_frames; i++) {
+ paddr = ctx->internal_frames[i].paddr;
+ /* Start addresses of Y, Cb, Cr planes */
+ coda_parabuf_write(ctx, i * 3 + 0, paddr);
+ coda_parabuf_write(ctx, i * 3 + 1, paddr + ysize);
+ coda_parabuf_write(ctx, i * 3 + 2, paddr + ysize + ysize / 4);
+
+ /* mvcol buffer for h.264 */
+ if (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 &&
+ dev->devtype->product != CODA_DX6)
+ coda_parabuf_write(ctx, 96 + i,
+ ctx->internal_frames[i].paddr +
+ ysize + ysize/4 + ysize/4);
+ }
+
+ /* mvcol buffer for mpeg4 */
+ if ((dev->devtype->product != CODA_DX6) &&
+ (ctx->codec->src_fourcc == V4L2_PIX_FMT_MPEG4))
+ coda_parabuf_write(ctx, 97, ctx->internal_frames[i].paddr +
+ ysize + ysize/4 + ysize/4);
+
+ return 0;
+}
+
+static void coda_free_context_buffers(struct coda_ctx *ctx)
+{
+ struct coda_dev *dev = ctx->dev;
+
+ coda_free_aux_buf(dev, &ctx->slicebuf);
+ coda_free_aux_buf(dev, &ctx->psbuf);
+ if (dev->devtype->product != CODA_DX6)
+ coda_free_aux_buf(dev, &ctx->workbuf);
+}
+
+static int coda_alloc_context_buffers(struct coda_ctx *ctx,
+ struct coda_q_data *q_data)
+{
+ struct coda_dev *dev = ctx->dev;
+ size_t size;
+ int ret;
+
+ if (dev->devtype->product == CODA_DX6)
+ return 0;
+
+ if (ctx->psbuf.vaddr) {
+ v4l2_err(&dev->v4l2_dev, "psmembuf still allocated\n");
+ return -EBUSY;
+ }
+ if (ctx->slicebuf.vaddr) {
+ v4l2_err(&dev->v4l2_dev, "slicebuf still allocated\n");
+ return -EBUSY;
+ }
+ if (ctx->workbuf.vaddr) {
+ v4l2_err(&dev->v4l2_dev, "context buffer still allocated\n");
+		return -EBUSY;
+ }
+
+ if (q_data->fourcc == V4L2_PIX_FMT_H264) {
+ /* worst case slice size */
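+		/* ~3200 bits (400 bytes) per 16x16 macroblock plus 512 bytes of slack */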
+ size = (DIV_ROUND_UP(q_data->width, 16) *
+ DIV_ROUND_UP(q_data->height, 16)) * 3200 / 8 + 512;
+ ret = coda_alloc_context_buf(ctx, &ctx->slicebuf, size,
+ "slicebuf");
+ if (ret < 0) {
+ v4l2_err(&dev->v4l2_dev,
+ "failed to allocate %d byte slice buffer",
+ ctx->slicebuf.size);
+ return ret;
+ }
+ }
+
+ if (dev->devtype->product == CODA_7541) {
+ ret = coda_alloc_context_buf(ctx, &ctx->psbuf,
+ CODA7_PS_BUF_SIZE, "psbuf");
+ if (ret < 0) {
+ v4l2_err(&dev->v4l2_dev,
+ "failed to allocate psmem buffer");
+ goto err;
+ }
+ }
+
+ size = dev->devtype->workbuf_size;
+ if (dev->devtype->product == CODA_960 &&
+ q_data->fourcc == V4L2_PIX_FMT_H264)
+ size += CODA9_PS_SAVE_SIZE;
+ ret = coda_alloc_context_buf(ctx, &ctx->workbuf, size, "workbuf");
+ if (ret < 0) {
+ v4l2_err(&dev->v4l2_dev,
+ "failed to allocate %d byte context buffer",
+ ctx->workbuf.size);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ coda_free_context_buffers(ctx);
+ return ret;
+}
+
+static int coda_encode_header(struct coda_ctx *ctx, struct vb2_buffer *buf,
+ int header_code, u8 *header, int *size)
+{
+ struct coda_dev *dev = ctx->dev;
+ size_t bufsize;
+ int ret;
+ int i;
+
+ if (dev->devtype->product == CODA_960)
+ memset(vb2_plane_vaddr(buf, 0), 0, 64);
+
+ coda_write(dev, vb2_dma_contig_plane_dma_addr(buf, 0),
+ CODA_CMD_ENC_HEADER_BB_START);
+ bufsize = vb2_plane_size(buf, 0);
+ if (dev->devtype->product == CODA_960)
+ bufsize /= 1024;
+ coda_write(dev, bufsize, CODA_CMD_ENC_HEADER_BB_SIZE);
+ coda_write(dev, header_code, CODA_CMD_ENC_HEADER_CODE);
+ ret = coda_command_sync(ctx, CODA_COMMAND_ENCODE_HEADER);
+ if (ret < 0) {
+ v4l2_err(&dev->v4l2_dev, "CODA_COMMAND_ENCODE_HEADER timeout\n");
+ return ret;
+ }
+
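+	/*
+	 * CODA960 does not report the header length back directly; instead,
+	 * the 64-byte area cleared above is scanned for the last non-zero
+	 * byte. On the other variants the length is derived from the
+	 * bitstream write pointer.
+	 */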
+ if (dev->devtype->product == CODA_960) {
+ for (i = 63; i > 0; i--)
+ if (((char *)vb2_plane_vaddr(buf, 0))[i] != 0)
+ break;
+ *size = i + 1;
+ } else {
+ *size = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->reg_idx)) -
+ coda_read(dev, CODA_CMD_ENC_HEADER_BB_START);
+ }
+ memcpy(header, vb2_plane_vaddr(buf, 0), *size);
+
+ return 0;
+}
+
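+/*
+ * Carve a chunk out of the on-chip IRAM region: a simple linear allocator
+ * that rounds sizes up to 1 KiB, never frees individual allocations, and
+ * returns 0 once the region is exhausted.
+ */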
+static phys_addr_t coda_iram_alloc(struct coda_iram_info *iram, size_t size)
+{
+ phys_addr_t ret;
+
+ size = round_up(size, 1024);
+ if (size > iram->remaining)
+ return 0;
+ iram->remaining -= size;
+
+ ret = iram->next_paddr;
+ iram->next_paddr += size;
+
+ return ret;
+}
+
+static void coda_setup_iram(struct coda_ctx *ctx)
+{
+ struct coda_iram_info *iram_info = &ctx->iram_info;
+ struct coda_dev *dev = ctx->dev;
+ int w64, w128;
+ int mb_width;
+ int dbk_bits;
+ int bit_bits;
+ int ip_bits;
+
+ memset(iram_info, 0, sizeof(*iram_info));
+ iram_info->next_paddr = dev->iram.paddr;
+ iram_info->remaining = dev->iram.size;
+
+ if (!dev->iram.vaddr)
+ return;
+
+ switch (dev->devtype->product) {
+ case CODA_7541:
+ dbk_bits = CODA7_USE_HOST_DBK_ENABLE | CODA7_USE_DBK_ENABLE;
+ bit_bits = CODA7_USE_HOST_BIT_ENABLE | CODA7_USE_BIT_ENABLE;
+ ip_bits = CODA7_USE_HOST_IP_ENABLE | CODA7_USE_IP_ENABLE;
+ break;
+ case CODA_960:
+ dbk_bits = CODA9_USE_HOST_DBK_ENABLE | CODA9_USE_DBK_ENABLE;
+ bit_bits = CODA9_USE_HOST_BIT_ENABLE | CODA7_USE_BIT_ENABLE;
+ ip_bits = CODA9_USE_HOST_IP_ENABLE | CODA7_USE_IP_ENABLE;
+ break;
+ default: /* CODA_DX6 */
+ return;
+ }
+
+ if (ctx->inst_type == CODA_INST_ENCODER) {
+ struct coda_q_data *q_data_src;
+
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ mb_width = DIV_ROUND_UP(q_data_src->width, 16);
+ w128 = mb_width * 128;
+ w64 = mb_width * 64;
+
+ /* Prioritize in case IRAM is too small for everything */
+ if (dev->devtype->product == CODA_7541) {
+ iram_info->search_ram_size = round_up(mb_width * 16 *
+ 36 + 2048, 1024);
+ iram_info->search_ram_paddr = coda_iram_alloc(iram_info,
+ iram_info->search_ram_size);
+ if (!iram_info->search_ram_paddr) {
+ pr_err("IRAM is smaller than the search ram size\n");
+ goto out;
+ }
+ iram_info->axi_sram_use |= CODA7_USE_HOST_ME_ENABLE |
+ CODA7_USE_ME_ENABLE;
+ }
+
+ /* Only H.264BP and H.263P3 are considered */
+ iram_info->buf_dbk_y_use = coda_iram_alloc(iram_info, w64);
+ iram_info->buf_dbk_c_use = coda_iram_alloc(iram_info, w64);
+ if (!iram_info->buf_dbk_c_use)
+ goto out;
+ iram_info->axi_sram_use |= dbk_bits;
+
+ iram_info->buf_bit_use = coda_iram_alloc(iram_info, w128);
+ if (!iram_info->buf_bit_use)
+ goto out;
+ iram_info->axi_sram_use |= bit_bits;
+
+ iram_info->buf_ip_ac_dc_use = coda_iram_alloc(iram_info, w128);
+ if (!iram_info->buf_ip_ac_dc_use)
+ goto out;
+ iram_info->axi_sram_use |= ip_bits;
+
+ /* OVL and BTP disabled for encoder */
+ } else if (ctx->inst_type == CODA_INST_DECODER) {
+ struct coda_q_data *q_data_dst;
+
+ q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ mb_width = DIV_ROUND_UP(q_data_dst->width, 16);
+ w128 = mb_width * 128;
+
+ iram_info->buf_dbk_y_use = coda_iram_alloc(iram_info, w128);
+ iram_info->buf_dbk_c_use = coda_iram_alloc(iram_info, w128);
+ if (!iram_info->buf_dbk_c_use)
+ goto out;
+ iram_info->axi_sram_use |= dbk_bits;
+
+ iram_info->buf_bit_use = coda_iram_alloc(iram_info, w128);
+ if (!iram_info->buf_bit_use)
+ goto out;
+ iram_info->axi_sram_use |= bit_bits;
+
+ iram_info->buf_ip_ac_dc_use = coda_iram_alloc(iram_info, w128);
+ if (!iram_info->buf_ip_ac_dc_use)
+ goto out;
+ iram_info->axi_sram_use |= ip_bits;
+
+ /* OVL and BTP unused as there is no VC1 support yet */
+ }
+
+out:
+ if (!(iram_info->axi_sram_use & CODA7_USE_HOST_IP_ENABLE))
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "IRAM smaller than needed\n");
+
+ if (dev->devtype->product == CODA_7541) {
+ /* TODO - Enabling these causes picture errors on CODA7541 */
+ if (ctx->inst_type == CODA_INST_DECODER) {
+ /* fw 1.4.50 */
+ iram_info->axi_sram_use &= ~(CODA7_USE_HOST_IP_ENABLE |
+ CODA7_USE_IP_ENABLE);
+ } else {
+ /* fw 13.4.29 */
+ iram_info->axi_sram_use &= ~(CODA7_USE_HOST_IP_ENABLE |
+ CODA7_USE_HOST_DBK_ENABLE |
+ CODA7_USE_IP_ENABLE |
+ CODA7_USE_DBK_ENABLE);
+ }
+ }
+}
+
+static u32 coda_supported_firmwares[] = {
+ CODA_FIRMWARE_VERNUM(CODA_DX6, 2, 2, 5),
+ CODA_FIRMWARE_VERNUM(CODA_7541, 1, 4, 50),
+ CODA_FIRMWARE_VERNUM(CODA_960, 2, 1, 5),
+};
+
+static bool coda_firmware_supported(u32 vernum)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(coda_supported_firmwares); i++)
+ if (vernum == coda_supported_firmwares[i])
+ return true;
+ return false;
+}
+
+int coda_check_firmware(struct coda_dev *dev)
+{
+ u16 product, major, minor, release;
+ u32 data;
+ int ret;
+
+ ret = clk_prepare_enable(dev->clk_per);
+ if (ret)
+ goto err_clk_per;
+
+ ret = clk_prepare_enable(dev->clk_ahb);
+ if (ret)
+ goto err_clk_ahb;
+
+ coda_write(dev, 0, CODA_CMD_FIRMWARE_VERNUM);
+ coda_write(dev, CODA_REG_BIT_BUSY_FLAG, CODA_REG_BIT_BUSY);
+ coda_write(dev, 0, CODA_REG_BIT_RUN_INDEX);
+ coda_write(dev, 0, CODA_REG_BIT_RUN_COD_STD);
+ coda_write(dev, CODA_COMMAND_FIRMWARE_GET, CODA_REG_BIT_RUN_COMMAND);
+ if (coda_wait_timeout(dev)) {
+ v4l2_err(&dev->v4l2_dev, "firmware get command error\n");
+ ret = -EIO;
+ goto err_run_cmd;
+ }
+
+ if (dev->devtype->product == CODA_960) {
+ data = coda_read(dev, CODA9_CMD_FIRMWARE_CODE_REV);
+ v4l2_info(&dev->v4l2_dev, "Firmware code revision: %d\n",
+ data);
+ }
+
+ /* Check we are compatible with the loaded firmware */
+ data = coda_read(dev, CODA_CMD_FIRMWARE_VERNUM);
+ product = CODA_FIRMWARE_PRODUCT(data);
+ major = CODA_FIRMWARE_MAJOR(data);
+ minor = CODA_FIRMWARE_MINOR(data);
+ release = CODA_FIRMWARE_RELEASE(data);
+
+ clk_disable_unprepare(dev->clk_per);
+ clk_disable_unprepare(dev->clk_ahb);
+
+ if (product != dev->devtype->product) {
+ v4l2_err(&dev->v4l2_dev,
+ "Wrong firmware. Hw: %s, Fw: %s, Version: %u.%u.%u\n",
+ coda_product_name(dev->devtype->product),
+ coda_product_name(product), major, minor, release);
+ return -EINVAL;
+ }
+
+ v4l2_info(&dev->v4l2_dev, "Initialized %s.\n",
+ coda_product_name(product));
+
+ if (coda_firmware_supported(data)) {
+ v4l2_info(&dev->v4l2_dev, "Firmware version: %u.%u.%u\n",
+ major, minor, release);
+ } else {
+ v4l2_warn(&dev->v4l2_dev,
+ "Unsupported firmware version: %u.%u.%u\n",
+ major, minor, release);
+ }
+
+ return 0;
+
+err_run_cmd:
+ clk_disable_unprepare(dev->clk_ahb);
+err_clk_ahb:
+ clk_disable_unprepare(dev->clk_per);
+err_clk_per:
+ return ret;
+}
+
+/*
+ * Encoder context operations
+ */
+
+static int coda_start_encoding(struct coda_ctx *ctx)
+{
+ struct coda_dev *dev = ctx->dev;
+ struct v4l2_device *v4l2_dev = &dev->v4l2_dev;
+ struct coda_q_data *q_data_src, *q_data_dst;
+ u32 bitstream_buf, bitstream_size;
+ struct vb2_buffer *buf;
+ int gamma, ret, value;
+ u32 dst_fourcc;
+
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ dst_fourcc = q_data_dst->fourcc;
+
+ /* Allocate per-instance buffers */
+ ret = coda_alloc_context_buffers(ctx, q_data_src);
+ if (ret < 0)
+ return ret;
+
+ buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ bitstream_buf = vb2_dma_contig_plane_dma_addr(buf, 0);
+ bitstream_size = q_data_dst->sizeimage;
+
+ if (!coda_is_initialized(dev)) {
+ v4l2_err(v4l2_dev, "coda is not initialized.\n");
+ return -EFAULT;
+ }
+
+ mutex_lock(&dev->coda_mutex);
+
+ coda_write(dev, ctx->parabuf.paddr, CODA_REG_BIT_PARA_BUF_ADDR);
+ coda_write(dev, bitstream_buf, CODA_REG_BIT_RD_PTR(ctx->reg_idx));
+ coda_write(dev, bitstream_buf, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
+ switch (dev->devtype->product) {
+ case CODA_DX6:
+ coda_write(dev, CODADX6_STREAM_BUF_DYNALLOC_EN |
+ CODADX6_STREAM_BUF_PIC_RESET, CODA_REG_BIT_STREAM_CTRL);
+ break;
+ case CODA_960:
+ coda_write(dev, 0, CODA9_GDI_WPROT_RGN_EN);
+ /* fallthrough */
+ case CODA_7541:
+ coda_write(dev, CODA7_STREAM_BUF_DYNALLOC_EN |
+ CODA7_STREAM_BUF_PIC_RESET, CODA_REG_BIT_STREAM_CTRL);
+ break;
+ }
+
+ value = coda_read(dev, CODA_REG_BIT_FRAME_MEM_CTRL);
+ value &= ~(1 << 2 | 0x7 << 9);
+ ctx->frame_mem_ctrl = value;
+ coda_write(dev, value, CODA_REG_BIT_FRAME_MEM_CTRL);
+
+ if (dev->devtype->product == CODA_DX6) {
+ /* Configure the coda */
+ coda_write(dev, dev->iram.paddr,
+ CODADX6_REG_BIT_SEARCH_RAM_BASE_ADDR);
+ }
+
+ /* Could set rotation here if needed */
+ switch (dev->devtype->product) {
+ case CODA_DX6:
+ value = (q_data_src->width & CODADX6_PICWIDTH_MASK)
+ << CODADX6_PICWIDTH_OFFSET;
+ value |= (q_data_src->height & CODADX6_PICHEIGHT_MASK)
+ << CODA_PICHEIGHT_OFFSET;
+ break;
+ case CODA_7541:
+ if (dst_fourcc == V4L2_PIX_FMT_H264) {
+ value = (round_up(q_data_src->width, 16) &
+ CODA7_PICWIDTH_MASK) << CODA7_PICWIDTH_OFFSET;
+ value |= (round_up(q_data_src->height, 16) &
+ CODA7_PICHEIGHT_MASK) << CODA_PICHEIGHT_OFFSET;
+ break;
+ }
+ /* fallthrough */
+ case CODA_960:
+ value = (q_data_src->width & CODA7_PICWIDTH_MASK)
+ << CODA7_PICWIDTH_OFFSET;
+ value |= (q_data_src->height & CODA7_PICHEIGHT_MASK)
+ << CODA_PICHEIGHT_OFFSET;
+ }
+ coda_write(dev, value, CODA_CMD_ENC_SEQ_SRC_SIZE);
+ coda_write(dev, ctx->params.framerate,
+ CODA_CMD_ENC_SEQ_SRC_F_RATE);
+
+ ctx->params.codec_mode = ctx->codec->mode;
+ switch (dst_fourcc) {
+ case V4L2_PIX_FMT_MPEG4:
+ if (dev->devtype->product == CODA_960)
+ coda_write(dev, CODA9_STD_MPEG4,
+ CODA_CMD_ENC_SEQ_COD_STD);
+ else
+ coda_write(dev, CODA_STD_MPEG4,
+ CODA_CMD_ENC_SEQ_COD_STD);
+ coda_write(dev, 0, CODA_CMD_ENC_SEQ_MP4_PARA);
+ break;
+ case V4L2_PIX_FMT_H264:
+ if (dev->devtype->product == CODA_960)
+ coda_write(dev, CODA9_STD_H264,
+ CODA_CMD_ENC_SEQ_COD_STD);
+ else
+ coda_write(dev, CODA_STD_H264,
+ CODA_CMD_ENC_SEQ_COD_STD);
+ if (ctx->params.h264_deblk_enabled) {
+ value = ((ctx->params.h264_deblk_alpha &
+ CODA_264PARAM_DEBLKFILTEROFFSETALPHA_MASK) <<
+ CODA_264PARAM_DEBLKFILTEROFFSETALPHA_OFFSET) |
+ ((ctx->params.h264_deblk_beta &
+ CODA_264PARAM_DEBLKFILTEROFFSETBETA_MASK) <<
+ CODA_264PARAM_DEBLKFILTEROFFSETBETA_OFFSET);
+ } else {
+ value = 1 << CODA_264PARAM_DISABLEDEBLK_OFFSET;
+ }
+ coda_write(dev, value, CODA_CMD_ENC_SEQ_264_PARA);
+ break;
+ default:
+ v4l2_err(v4l2_dev,
+ "dst format (0x%08x) invalid.\n", dst_fourcc);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ switch (ctx->params.slice_mode) {
+ case V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE:
+ value = 0;
+ break;
+ case V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB:
+ value = (ctx->params.slice_max_mb & CODA_SLICING_SIZE_MASK)
+ << CODA_SLICING_SIZE_OFFSET;
+ value |= (1 & CODA_SLICING_UNIT_MASK)
+ << CODA_SLICING_UNIT_OFFSET;
+ value |= 1 & CODA_SLICING_MODE_MASK;
+ break;
+ case V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES:
+ value = (ctx->params.slice_max_bits & CODA_SLICING_SIZE_MASK)
+ << CODA_SLICING_SIZE_OFFSET;
+ value |= (0 & CODA_SLICING_UNIT_MASK)
+ << CODA_SLICING_UNIT_OFFSET;
+ value |= 1 & CODA_SLICING_MODE_MASK;
+ break;
+ }
+ coda_write(dev, value, CODA_CMD_ENC_SEQ_SLICE_MODE);
+ value = ctx->params.gop_size & CODA_GOP_SIZE_MASK;
+ coda_write(dev, value, CODA_CMD_ENC_SEQ_GOP_SIZE);
+
+ if (ctx->params.bitrate) {
+ /* Rate control enabled */
+ value = (ctx->params.bitrate & CODA_RATECONTROL_BITRATE_MASK)
+ << CODA_RATECONTROL_BITRATE_OFFSET;
+ value |= 1 & CODA_RATECONTROL_ENABLE_MASK;
+ if (dev->devtype->product == CODA_960)
+ value |= BIT(31); /* disable autoskip */
+ } else {
+ value = 0;
+ }
+ coda_write(dev, value, CODA_CMD_ENC_SEQ_RC_PARA);
+
+ coda_write(dev, 0, CODA_CMD_ENC_SEQ_RC_BUF_SIZE);
+ coda_write(dev, ctx->params.intra_refresh,
+ CODA_CMD_ENC_SEQ_INTRA_REFRESH);
+
+ coda_write(dev, bitstream_buf, CODA_CMD_ENC_SEQ_BB_START);
+ coda_write(dev, bitstream_size / 1024, CODA_CMD_ENC_SEQ_BB_SIZE);
+
+ value = 0;
+ if (dev->devtype->product == CODA_960)
+ gamma = CODA9_DEFAULT_GAMMA;
+ else
+ gamma = CODA_DEFAULT_GAMMA;
+ if (gamma > 0) {
+ coda_write(dev, (gamma & CODA_GAMMA_MASK) << CODA_GAMMA_OFFSET,
+ CODA_CMD_ENC_SEQ_RC_GAMMA);
+ }
+
+ if (ctx->params.h264_min_qp || ctx->params.h264_max_qp) {
+ coda_write(dev,
+ ctx->params.h264_min_qp << CODA_QPMIN_OFFSET |
+ ctx->params.h264_max_qp << CODA_QPMAX_OFFSET,
+ CODA_CMD_ENC_SEQ_RC_QP_MIN_MAX);
+ }
+ if (dev->devtype->product == CODA_960) {
+ if (ctx->params.h264_max_qp)
+ value |= 1 << CODA9_OPTION_RCQPMAX_OFFSET;
+ if (CODA_DEFAULT_GAMMA > 0)
+ value |= 1 << CODA9_OPTION_GAMMA_OFFSET;
+ } else {
+ if (CODA_DEFAULT_GAMMA > 0) {
+ if (dev->devtype->product == CODA_DX6)
+ value |= 1 << CODADX6_OPTION_GAMMA_OFFSET;
+ else
+ value |= 1 << CODA7_OPTION_GAMMA_OFFSET;
+ }
+ if (ctx->params.h264_min_qp)
+ value |= 1 << CODA7_OPTION_RCQPMIN_OFFSET;
+ if (ctx->params.h264_max_qp)
+ value |= 1 << CODA7_OPTION_RCQPMAX_OFFSET;
+ }
+ coda_write(dev, value, CODA_CMD_ENC_SEQ_OPTION);
+
+ coda_write(dev, 0, CODA_CMD_ENC_SEQ_RC_INTERVAL_MODE);
+
+ coda_setup_iram(ctx);
+
+ if (dst_fourcc == V4L2_PIX_FMT_H264) {
+ switch (dev->devtype->product) {
+ case CODA_DX6:
+ value = FMO_SLICE_SAVE_BUF_SIZE << 7;
+ coda_write(dev, value, CODADX6_CMD_ENC_SEQ_FMO);
+ break;
+ case CODA_7541:
+ coda_write(dev, ctx->iram_info.search_ram_paddr,
+ CODA7_CMD_ENC_SEQ_SEARCH_BASE);
+ coda_write(dev, ctx->iram_info.search_ram_size,
+ CODA7_CMD_ENC_SEQ_SEARCH_SIZE);
+ break;
+ case CODA_960:
+ coda_write(dev, 0, CODA9_CMD_ENC_SEQ_ME_OPTION);
+ coda_write(dev, 0, CODA9_CMD_ENC_SEQ_INTRA_WEIGHT);
+ }
+ }
+
+ ret = coda_command_sync(ctx, CODA_COMMAND_SEQ_INIT);
+ if (ret < 0) {
+ v4l2_err(v4l2_dev, "CODA_COMMAND_SEQ_INIT timeout\n");
+ goto out;
+ }
+
+ if (coda_read(dev, CODA_RET_ENC_SEQ_SUCCESS) == 0) {
+ v4l2_err(v4l2_dev, "CODA_COMMAND_SEQ_INIT failed\n");
+ ret = -EFAULT;
+ goto out;
+ }
+
+ if (dev->devtype->product == CODA_960)
+ ctx->num_internal_frames = 4;
+ else
+ ctx->num_internal_frames = 2;
+ ret = coda_alloc_framebuffers(ctx, q_data_src, dst_fourcc);
+ if (ret < 0) {
+ v4l2_err(v4l2_dev, "failed to allocate framebuffers\n");
+ goto out;
+ }
+
+ coda_write(dev, ctx->num_internal_frames, CODA_CMD_SET_FRAME_BUF_NUM);
+ coda_write(dev, q_data_src->bytesperline,
+ CODA_CMD_SET_FRAME_BUF_STRIDE);
+ if (dev->devtype->product == CODA_7541) {
+ coda_write(dev, q_data_src->bytesperline,
+ CODA7_CMD_SET_FRAME_SOURCE_BUF_STRIDE);
+ }
+ if (dev->devtype->product != CODA_DX6) {
+ coda_write(dev, ctx->iram_info.buf_bit_use,
+ CODA7_CMD_SET_FRAME_AXI_BIT_ADDR);
+ coda_write(dev, ctx->iram_info.buf_ip_ac_dc_use,
+ CODA7_CMD_SET_FRAME_AXI_IPACDC_ADDR);
+ coda_write(dev, ctx->iram_info.buf_dbk_y_use,
+ CODA7_CMD_SET_FRAME_AXI_DBKY_ADDR);
+ coda_write(dev, ctx->iram_info.buf_dbk_c_use,
+ CODA7_CMD_SET_FRAME_AXI_DBKC_ADDR);
+ coda_write(dev, ctx->iram_info.buf_ovl_use,
+ CODA7_CMD_SET_FRAME_AXI_OVL_ADDR);
+ if (dev->devtype->product == CODA_960) {
+ coda_write(dev, ctx->iram_info.buf_btp_use,
+ CODA9_CMD_SET_FRAME_AXI_BTP_ADDR);
+
+ /* FIXME */
+ coda_write(dev, ctx->internal_frames[2].paddr,
+ CODA9_CMD_SET_FRAME_SUBSAMP_A);
+ coda_write(dev, ctx->internal_frames[3].paddr,
+ CODA9_CMD_SET_FRAME_SUBSAMP_B);
+ }
+ }
+
+ ret = coda_command_sync(ctx, CODA_COMMAND_SET_FRAME_BUF);
+ if (ret < 0) {
+ v4l2_err(v4l2_dev, "CODA_COMMAND_SET_FRAME_BUF timeout\n");
+ goto out;
+ }
+
+ /* Save stream headers */
+ buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ switch (dst_fourcc) {
+ case V4L2_PIX_FMT_H264:
+ /*
+ * Get SPS in the first frame and copy it to an
+ * intermediate buffer.
+ */
+ ret = coda_encode_header(ctx, buf, CODA_HEADER_H264_SPS,
+ &ctx->vpu_header[0][0],
+ &ctx->vpu_header_size[0]);
+ if (ret < 0)
+ goto out;
+
+ /*
+ * Get PPS in the first frame and copy it to an
+ * intermediate buffer.
+ */
+ ret = coda_encode_header(ctx, buf, CODA_HEADER_H264_PPS,
+ &ctx->vpu_header[1][0],
+ &ctx->vpu_header_size[1]);
+ if (ret < 0)
+ goto out;
+
+ /*
+		 * The length of the H.264 headers is variable, so the end of
+		 * the headers might not be aligned for the coda to append the
+		 * encoded frame. In that case a filler NAL must be added to
+		 * header 2.
+ */
+ ctx->vpu_header_size[2] = coda_h264_padding(
+ (ctx->vpu_header_size[0] +
+ ctx->vpu_header_size[1]),
+ ctx->vpu_header[2]);
+ break;
+ case V4L2_PIX_FMT_MPEG4:
+ /*
+ * Get VOS in the first frame and copy it to an
+ * intermediate buffer
+ */
+ ret = coda_encode_header(ctx, buf, CODA_HEADER_MP4V_VOS,
+ &ctx->vpu_header[0][0],
+ &ctx->vpu_header_size[0]);
+ if (ret < 0)
+ goto out;
+
+ ret = coda_encode_header(ctx, buf, CODA_HEADER_MP4V_VIS,
+ &ctx->vpu_header[1][0],
+ &ctx->vpu_header_size[1]);
+ if (ret < 0)
+ goto out;
+
+ ret = coda_encode_header(ctx, buf, CODA_HEADER_MP4V_VOL,
+ &ctx->vpu_header[2][0],
+ &ctx->vpu_header_size[2]);
+ if (ret < 0)
+ goto out;
+ break;
+ default:
+		/* No other formats need headers to be saved at the moment */
+ break;
+ }
+
+out:
+ mutex_unlock(&dev->coda_mutex);
+ return ret;
+}
+
+static int coda_prepare_encode(struct coda_ctx *ctx)
+{
+ struct coda_q_data *q_data_src, *q_data_dst;
+ struct vb2_buffer *src_buf, *dst_buf;
+ struct coda_dev *dev = ctx->dev;
+ int force_ipicture;
+ int quant_param = 0;
+ u32 picture_y, picture_cb, picture_cr;
+ u32 pic_stream_buffer_addr, pic_stream_buffer_size;
+ u32 dst_fourcc;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ dst_fourcc = q_data_dst->fourcc;
+
+ src_buf->v4l2_buf.sequence = ctx->osequence;
+ dst_buf->v4l2_buf.sequence = ctx->osequence;
+ ctx->osequence++;
+
+ /*
+	 * Work around a coda firmware bug that marks only the first frame
+	 * as an IDR frame. This is a problem for some decoders that cannot
+	 * recover when a frame is lost.
+ */
+ if (src_buf->v4l2_buf.sequence % ctx->params.gop_size) {
+ src_buf->v4l2_buf.flags |= V4L2_BUF_FLAG_PFRAME;
+ src_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_KEYFRAME;
+ } else {
+ src_buf->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME;
+ src_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_PFRAME;
+ }
+
+ if (dev->devtype->product == CODA_960)
+ coda_set_gdi_regs(ctx);
+
+ /*
+ * Copy headers at the beginning of the first frame for H.264 only.
+ * In MPEG4 they are already copied by the coda.
+ */
+ if (src_buf->v4l2_buf.sequence == 0) {
+ pic_stream_buffer_addr =
+ vb2_dma_contig_plane_dma_addr(dst_buf, 0) +
+ ctx->vpu_header_size[0] +
+ ctx->vpu_header_size[1] +
+ ctx->vpu_header_size[2];
+ pic_stream_buffer_size = CODA_MAX_FRAME_SIZE -
+ ctx->vpu_header_size[0] -
+ ctx->vpu_header_size[1] -
+ ctx->vpu_header_size[2];
+ memcpy(vb2_plane_vaddr(dst_buf, 0),
+ &ctx->vpu_header[0][0], ctx->vpu_header_size[0]);
+ memcpy(vb2_plane_vaddr(dst_buf, 0) + ctx->vpu_header_size[0],
+ &ctx->vpu_header[1][0], ctx->vpu_header_size[1]);
+ memcpy(vb2_plane_vaddr(dst_buf, 0) + ctx->vpu_header_size[0] +
+ ctx->vpu_header_size[1], &ctx->vpu_header[2][0],
+ ctx->vpu_header_size[2]);
+ } else {
+ pic_stream_buffer_addr =
+ vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+ pic_stream_buffer_size = CODA_MAX_FRAME_SIZE;
+ }
+
+ if (src_buf->v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME) {
+ force_ipicture = 1;
+ switch (dst_fourcc) {
+ case V4L2_PIX_FMT_H264:
+ quant_param = ctx->params.h264_intra_qp;
+ break;
+ case V4L2_PIX_FMT_MPEG4:
+ quant_param = ctx->params.mpeg4_intra_qp;
+ break;
+ default:
+ v4l2_warn(&ctx->dev->v4l2_dev,
+ "cannot set intra qp, fmt not supported\n");
+ break;
+ }
+ } else {
+ force_ipicture = 0;
+ switch (dst_fourcc) {
+ case V4L2_PIX_FMT_H264:
+ quant_param = ctx->params.h264_inter_qp;
+ break;
+ case V4L2_PIX_FMT_MPEG4:
+ quant_param = ctx->params.mpeg4_inter_qp;
+ break;
+ default:
+ v4l2_warn(&ctx->dev->v4l2_dev,
+ "cannot set inter qp, fmt not supported\n");
+ break;
+ }
+ }
+
+ /* submit */
+ coda_write(dev, CODA_ROT_MIR_ENABLE | ctx->params.rot_mode,
+ CODA_CMD_ENC_PIC_ROT_MODE);
+ coda_write(dev, quant_param, CODA_CMD_ENC_PIC_QS);
+
+ picture_y = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ switch (q_data_src->fourcc) {
+ case V4L2_PIX_FMT_YVU420:
+ /* Switch Cb and Cr for YVU420 format */
+ picture_cr = picture_y + q_data_src->bytesperline *
+ q_data_src->height;
+ picture_cb = picture_cr + q_data_src->bytesperline / 2 *
+ q_data_src->height / 2;
+ break;
+ case V4L2_PIX_FMT_YUV420:
+ default:
+ picture_cb = picture_y + q_data_src->bytesperline *
+ q_data_src->height;
+ picture_cr = picture_cb + q_data_src->bytesperline / 2 *
+ q_data_src->height / 2;
+ break;
+ }
+
+ if (dev->devtype->product == CODA_960) {
+ coda_write(dev, 4/*FIXME: 0*/, CODA9_CMD_ENC_PIC_SRC_INDEX);
+ coda_write(dev, q_data_src->width, CODA9_CMD_ENC_PIC_SRC_STRIDE);
+ coda_write(dev, 0, CODA9_CMD_ENC_PIC_SUB_FRAME_SYNC);
+
+ coda_write(dev, picture_y, CODA9_CMD_ENC_PIC_SRC_ADDR_Y);
+ coda_write(dev, picture_cb, CODA9_CMD_ENC_PIC_SRC_ADDR_CB);
+ coda_write(dev, picture_cr, CODA9_CMD_ENC_PIC_SRC_ADDR_CR);
+ } else {
+ coda_write(dev, picture_y, CODA_CMD_ENC_PIC_SRC_ADDR_Y);
+ coda_write(dev, picture_cb, CODA_CMD_ENC_PIC_SRC_ADDR_CB);
+ coda_write(dev, picture_cr, CODA_CMD_ENC_PIC_SRC_ADDR_CR);
+ }
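+	/* force_ipicture is passed in bit 1 of the picture run options */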
+ coda_write(dev, force_ipicture << 1 & 0x2,
+ CODA_CMD_ENC_PIC_OPTION);
+
+ coda_write(dev, pic_stream_buffer_addr, CODA_CMD_ENC_PIC_BB_START);
+ coda_write(dev, pic_stream_buffer_size / 1024,
+ CODA_CMD_ENC_PIC_BB_SIZE);
+
+ if (!ctx->streamon_out) {
+ /* After streamoff on the output side, set stream end flag */
+ ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG;
+ coda_write(dev, ctx->bit_stream_param,
+ CODA_REG_BIT_BIT_STREAM_PARAM);
+ }
+
+ if (dev->devtype->product != CODA_DX6)
+ coda_write(dev, ctx->iram_info.axi_sram_use,
+ CODA7_REG_BIT_AXI_SRAM_USE);
+
+ coda_command_async(ctx, CODA_COMMAND_PIC_RUN);
+
+ return 0;
+}
+
+static void coda_finish_encode(struct coda_ctx *ctx)
+{
+ struct vb2_buffer *src_buf, *dst_buf;
+ struct coda_dev *dev = ctx->dev;
+ u32 wr_ptr, start_ptr;
+
+ src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+ /* Get results from the coda */
+ start_ptr = coda_read(dev, CODA_CMD_ENC_PIC_BB_START);
+ wr_ptr = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
+
+ /* Calculate bytesused field */
+ if (dst_buf->v4l2_buf.sequence == 0) {
+ vb2_set_plane_payload(dst_buf, 0, wr_ptr - start_ptr +
+ ctx->vpu_header_size[0] +
+ ctx->vpu_header_size[1] +
+ ctx->vpu_header_size[2]);
+ } else {
+ vb2_set_plane_payload(dst_buf, 0, wr_ptr - start_ptr);
+ }
+
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, "frame size = %u\n",
+ wr_ptr - start_ptr);
+
+ coda_read(dev, CODA_RET_ENC_PIC_SLICE_NUM);
+ coda_read(dev, CODA_RET_ENC_PIC_FLAG);
+
+ if (coda_read(dev, CODA_RET_ENC_PIC_TYPE) == 0) {
+ dst_buf->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME;
+ dst_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_PFRAME;
+ } else {
+ dst_buf->v4l2_buf.flags |= V4L2_BUF_FLAG_PFRAME;
+ dst_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_KEYFRAME;
+ }
+
+ dst_buf->v4l2_buf.timestamp = src_buf->v4l2_buf.timestamp;
+ dst_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_buf->v4l2_buf.flags |=
+ src_buf->v4l2_buf.flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_buf->v4l2_buf.timecode = src_buf->v4l2_buf.timecode;
+
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
+
+ ctx->gopcounter--;
+ if (ctx->gopcounter < 0)
+ ctx->gopcounter = ctx->params.gop_size - 1;
+
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+ "job finished: encoding frame (%d) (%s)\n",
+ dst_buf->v4l2_buf.sequence,
+ (dst_buf->v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME) ?
+ "KEYFRAME" : "PFRAME");
+}
+
+static void coda_seq_end_work(struct work_struct *work)
+{
+ struct coda_ctx *ctx = container_of(work, struct coda_ctx, seq_end_work);
+ struct coda_dev *dev = ctx->dev;
+
+ mutex_lock(&ctx->buffer_mutex);
+ mutex_lock(&dev->coda_mutex);
+
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+ "%d: %s: sent command 'SEQ_END' to coda\n", ctx->idx,
+ __func__);
+ if (coda_command_sync(ctx, CODA_COMMAND_SEQ_END)) {
+ v4l2_err(&dev->v4l2_dev,
+ "CODA_COMMAND_SEQ_END failed\n");
+ }
+
+ kfifo_init(&ctx->bitstream_fifo,
+ ctx->bitstream.vaddr, ctx->bitstream.size);
+
+ coda_free_framebuffers(ctx);
+ coda_free_context_buffers(ctx);
+
+ mutex_unlock(&dev->coda_mutex);
+ mutex_unlock(&ctx->buffer_mutex);
+}
+
+static void coda_bit_release(struct coda_ctx *ctx)
+{
+ coda_free_framebuffers(ctx);
+ coda_free_context_buffers(ctx);
+}
+
+const struct coda_context_ops coda_bit_encode_ops = {
+ .queue_init = coda_encoder_queue_init,
+ .start_streaming = coda_start_encoding,
+ .prepare_run = coda_prepare_encode,
+ .finish_run = coda_finish_encode,
+ .seq_end_work = coda_seq_end_work,
+ .release = coda_bit_release,
+};
+
+/*
+ * Decoder context operations
+ */
+
+static int __coda_start_decoding(struct coda_ctx *ctx)
+{
+ struct coda_q_data *q_data_src, *q_data_dst;
+ u32 bitstream_buf, bitstream_size;
+ struct coda_dev *dev = ctx->dev;
+ int width, height;
+ u32 src_fourcc;
+ u32 val;
+ int ret;
+
+ /* Start decoding */
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ bitstream_buf = ctx->bitstream.paddr;
+ bitstream_size = ctx->bitstream.size;
+ src_fourcc = q_data_src->fourcc;
+
+ /* Allocate per-instance buffers */
+ ret = coda_alloc_context_buffers(ctx, q_data_src);
+ if (ret < 0)
+ return ret;
+
+ coda_write(dev, ctx->parabuf.paddr, CODA_REG_BIT_PARA_BUF_ADDR);
+
+ /* Update coda bitstream read and write pointers from kfifo */
+ coda_kfifo_sync_to_device_full(ctx);
+
+ ctx->display_idx = -1;
+ ctx->frm_dis_flg = 0;
+ coda_write(dev, 0, CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx));
+
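+	/*
+	 * Set the SEQ_INIT escape flag while the command runs; it is cleared
+	 * again below once SEQ_INIT has finished or timed out.
+	 */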
+ coda_write(dev, CODA_BIT_DEC_SEQ_INIT_ESCAPE,
+ CODA_REG_BIT_BIT_STREAM_PARAM);
+
+ coda_write(dev, bitstream_buf, CODA_CMD_DEC_SEQ_BB_START);
+ coda_write(dev, bitstream_size / 1024, CODA_CMD_DEC_SEQ_BB_SIZE);
+ val = 0;
+ if ((dev->devtype->product == CODA_7541) ||
+ (dev->devtype->product == CODA_960))
+ val |= CODA_REORDER_ENABLE;
+ coda_write(dev, val, CODA_CMD_DEC_SEQ_OPTION);
+
+ ctx->params.codec_mode = ctx->codec->mode;
+ if (dev->devtype->product == CODA_960 &&
+ src_fourcc == V4L2_PIX_FMT_MPEG4)
+ ctx->params.codec_mode_aux = CODA_MP4_AUX_MPEG4;
+ else
+ ctx->params.codec_mode_aux = 0;
+ if (src_fourcc == V4L2_PIX_FMT_H264) {
+ if (dev->devtype->product == CODA_7541) {
+ coda_write(dev, ctx->psbuf.paddr,
+ CODA_CMD_DEC_SEQ_PS_BB_START);
+ coda_write(dev, (CODA7_PS_BUF_SIZE / 1024),
+ CODA_CMD_DEC_SEQ_PS_BB_SIZE);
+ }
+ if (dev->devtype->product == CODA_960) {
+ coda_write(dev, 0, CODA_CMD_DEC_SEQ_X264_MV_EN);
+ coda_write(dev, 512, CODA_CMD_DEC_SEQ_SPP_CHUNK_SIZE);
+ }
+ }
+ if (dev->devtype->product != CODA_960)
+ coda_write(dev, 0, CODA_CMD_DEC_SEQ_SRC_SIZE);
+
+ if (coda_command_sync(ctx, CODA_COMMAND_SEQ_INIT)) {
+ v4l2_err(&dev->v4l2_dev, "CODA_COMMAND_SEQ_INIT timeout\n");
+ coda_write(dev, 0, CODA_REG_BIT_BIT_STREAM_PARAM);
+ return -ETIMEDOUT;
+ }
+
+ /* Update kfifo out pointer from coda bitstream read pointer */
+ coda_kfifo_sync_from_device(ctx);
+
+ coda_write(dev, 0, CODA_REG_BIT_BIT_STREAM_PARAM);
+
+ if (coda_read(dev, CODA_RET_DEC_SEQ_SUCCESS) == 0) {
+ v4l2_err(&dev->v4l2_dev,
+ "CODA_COMMAND_SEQ_INIT failed, error code = %d\n",
+ coda_read(dev, CODA_RET_DEC_SEQ_ERR_REASON));
+ return -EAGAIN;
+ }
+
+ val = coda_read(dev, CODA_RET_DEC_SEQ_SRC_SIZE);
+ if (dev->devtype->product == CODA_DX6) {
+ width = (val >> CODADX6_PICWIDTH_OFFSET) & CODADX6_PICWIDTH_MASK;
+ height = val & CODADX6_PICHEIGHT_MASK;
+ } else {
+ width = (val >> CODA7_PICWIDTH_OFFSET) & CODA7_PICWIDTH_MASK;
+ height = val & CODA7_PICHEIGHT_MASK;
+ }
+
+ if (width > q_data_dst->width || height > q_data_dst->height) {
+ v4l2_err(&dev->v4l2_dev, "stream is %dx%d, not %dx%d\n",
+ width, height, q_data_dst->width, q_data_dst->height);
+ return -EINVAL;
+ }
+
+ width = round_up(width, 16);
+ height = round_up(height, 16);
+
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev, "%s instance %d now: %dx%d\n",
+ __func__, ctx->idx, width, height);
+
+ ctx->num_internal_frames = coda_read(dev, CODA_RET_DEC_SEQ_FRAME_NEED);
+ if (ctx->num_internal_frames > CODA_MAX_FRAMEBUFFERS) {
+ v4l2_err(&dev->v4l2_dev,
+ "not enough framebuffers to decode (%d < %d)\n",
+ CODA_MAX_FRAMEBUFFERS, ctx->num_internal_frames);
+ return -EINVAL;
+ }
+
+ if (src_fourcc == V4L2_PIX_FMT_H264) {
+ u32 left_right;
+ u32 top_bottom;
+
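+		/*
+		 * The crop registers pack the left/top offset into bits
+		 * [19:10] and the right/bottom offset into bits [9:0].
+		 */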
+ left_right = coda_read(dev, CODA_RET_DEC_SEQ_CROP_LEFT_RIGHT);
+ top_bottom = coda_read(dev, CODA_RET_DEC_SEQ_CROP_TOP_BOTTOM);
+
+ q_data_dst->rect.left = (left_right >> 10) & 0x3ff;
+ q_data_dst->rect.top = (top_bottom >> 10) & 0x3ff;
+ q_data_dst->rect.width = width - q_data_dst->rect.left -
+ (left_right & 0x3ff);
+ q_data_dst->rect.height = height - q_data_dst->rect.top -
+ (top_bottom & 0x3ff);
+ }
+
+ ret = coda_alloc_framebuffers(ctx, q_data_dst, src_fourcc);
+ if (ret < 0) {
+ v4l2_err(&dev->v4l2_dev, "failed to allocate framebuffers\n");
+ return ret;
+ }
+
+ /* Tell the decoder how many frame buffers we allocated. */
+ coda_write(dev, ctx->num_internal_frames, CODA_CMD_SET_FRAME_BUF_NUM);
+ coda_write(dev, width, CODA_CMD_SET_FRAME_BUF_STRIDE);
+
+ if (dev->devtype->product != CODA_DX6) {
+ /* Set secondary AXI IRAM */
+ coda_setup_iram(ctx);
+
+ coda_write(dev, ctx->iram_info.buf_bit_use,
+ CODA7_CMD_SET_FRAME_AXI_BIT_ADDR);
+ coda_write(dev, ctx->iram_info.buf_ip_ac_dc_use,
+ CODA7_CMD_SET_FRAME_AXI_IPACDC_ADDR);
+ coda_write(dev, ctx->iram_info.buf_dbk_y_use,
+ CODA7_CMD_SET_FRAME_AXI_DBKY_ADDR);
+ coda_write(dev, ctx->iram_info.buf_dbk_c_use,
+ CODA7_CMD_SET_FRAME_AXI_DBKC_ADDR);
+ coda_write(dev, ctx->iram_info.buf_ovl_use,
+ CODA7_CMD_SET_FRAME_AXI_OVL_ADDR);
+ if (dev->devtype->product == CODA_960)
+ coda_write(dev, ctx->iram_info.buf_btp_use,
+ CODA9_CMD_SET_FRAME_AXI_BTP_ADDR);
+ }
+
+ if (dev->devtype->product == CODA_960) {
+ coda_write(dev, -1, CODA9_CMD_SET_FRAME_DELAY);
+
+ coda_write(dev, 0x20262024, CODA9_CMD_SET_FRAME_CACHE_SIZE);
+ coda_write(dev, 2 << CODA9_CACHE_PAGEMERGE_OFFSET |
+ 32 << CODA9_CACHE_LUMA_BUFFER_SIZE_OFFSET |
+ 8 << CODA9_CACHE_CB_BUFFER_SIZE_OFFSET |
+ 8 << CODA9_CACHE_CR_BUFFER_SIZE_OFFSET,
+ CODA9_CMD_SET_FRAME_CACHE_CONFIG);
+ }
+
+ if (src_fourcc == V4L2_PIX_FMT_H264) {
+ coda_write(dev, ctx->slicebuf.paddr,
+ CODA_CMD_SET_FRAME_SLICE_BB_START);
+ coda_write(dev, ctx->slicebuf.size / 1024,
+ CODA_CMD_SET_FRAME_SLICE_BB_SIZE);
+ }
+
+ if (dev->devtype->product == CODA_7541) {
+ int max_mb_x = 1920 / 16;
+ int max_mb_y = 1088 / 16;
+ int max_mb_num = max_mb_x * max_mb_y;
+
+ coda_write(dev, max_mb_num << 16 | max_mb_x << 8 | max_mb_y,
+ CODA7_CMD_SET_FRAME_MAX_DEC_SIZE);
+ } else if (dev->devtype->product == CODA_960) {
+ int max_mb_x = 1920 / 16;
+ int max_mb_y = 1088 / 16;
+ int max_mb_num = max_mb_x * max_mb_y;
+
+ coda_write(dev, max_mb_num << 16 | max_mb_x << 8 | max_mb_y,
+ CODA9_CMD_SET_FRAME_MAX_DEC_SIZE);
+ }
+
+ if (coda_command_sync(ctx, CODA_COMMAND_SET_FRAME_BUF)) {
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "CODA_COMMAND_SET_FRAME_BUF timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int coda_start_decoding(struct coda_ctx *ctx)
+{
+ struct coda_dev *dev = ctx->dev;
+ int ret;
+
+ mutex_lock(&dev->coda_mutex);
+ ret = __coda_start_decoding(ctx);
+ mutex_unlock(&dev->coda_mutex);
+
+ return ret;
+}
+
+static int coda_prepare_decode(struct coda_ctx *ctx)
+{
+ struct vb2_buffer *dst_buf;
+ struct coda_dev *dev = ctx->dev;
+ struct coda_q_data *q_data_dst;
+ u32 stridey, height;
+ u32 picture_y, picture_cb, picture_cr;
+
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ if (ctx->params.rot_mode & CODA_ROT_90) {
+ stridey = q_data_dst->height;
+ height = q_data_dst->width;
+ } else {
+ stridey = q_data_dst->width;
+ height = q_data_dst->height;
+ }
+
+ /* Try to copy source buffer contents into the bitstream ringbuffer */
+ mutex_lock(&ctx->bitstream_mutex);
+ coda_fill_bitstream(ctx);
+ mutex_unlock(&ctx->bitstream_mutex);
+
+ if (coda_get_bitstream_payload(ctx) < 512 &&
+ (!(ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG))) {
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+ "bitstream payload: %d, skipping\n",
+ coda_get_bitstream_payload(ctx));
+ v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx);
+ return -EAGAIN;
+ }
+
+ /* Run coda_start_decoding (again) if not yet initialized */
+ if (!ctx->initialized) {
+ int ret = __coda_start_decoding(ctx);
+
+ if (ret < 0) {
+ v4l2_err(&dev->v4l2_dev, "failed to start decoding\n");
+ v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx);
+ return -EAGAIN;
+ } else {
+ ctx->initialized = 1;
+ }
+ }
+
+ if (dev->devtype->product == CODA_960)
+ coda_set_gdi_regs(ctx);
+
+ /* Set rotator output */
+ picture_y = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+ if (q_data_dst->fourcc == V4L2_PIX_FMT_YVU420) {
+ /* Switch Cr and Cb for YVU420 format */
+ picture_cr = picture_y + stridey * height;
+ picture_cb = picture_cr + stridey / 2 * height / 2;
+ } else {
+ picture_cb = picture_y + stridey * height;
+ picture_cr = picture_cb + stridey / 2 * height / 2;
+ }
+
+ if (dev->devtype->product == CODA_960) {
+ /*
+ * The CODA960 seems to have an internal list of buffers with
+ * 64 entries that includes the registered frame buffers as
+ * well as the rotator buffer output.
+ * ROT_INDEX needs to be < 0x40, but > ctx->num_internal_frames.
+ */
+ coda_write(dev, CODA_MAX_FRAMEBUFFERS + dst_buf->v4l2_buf.index,
+ CODA9_CMD_DEC_PIC_ROT_INDEX);
+ coda_write(dev, picture_y, CODA9_CMD_DEC_PIC_ROT_ADDR_Y);
+ coda_write(dev, picture_cb, CODA9_CMD_DEC_PIC_ROT_ADDR_CB);
+ coda_write(dev, picture_cr, CODA9_CMD_DEC_PIC_ROT_ADDR_CR);
+ coda_write(dev, stridey, CODA9_CMD_DEC_PIC_ROT_STRIDE);
+ } else {
+ coda_write(dev, picture_y, CODA_CMD_DEC_PIC_ROT_ADDR_Y);
+ coda_write(dev, picture_cb, CODA_CMD_DEC_PIC_ROT_ADDR_CB);
+ coda_write(dev, picture_cr, CODA_CMD_DEC_PIC_ROT_ADDR_CR);
+ coda_write(dev, stridey, CODA_CMD_DEC_PIC_ROT_STRIDE);
+ }
+ coda_write(dev, CODA_ROT_MIR_ENABLE | ctx->params.rot_mode,
+ CODA_CMD_DEC_PIC_ROT_MODE);
+
+ switch (dev->devtype->product) {
+ case CODA_DX6:
+ /* TBD */
+ case CODA_7541:
+ coda_write(dev, CODA_PRE_SCAN_EN, CODA_CMD_DEC_PIC_OPTION);
+ break;
+ case CODA_960:
+ /* 'hardcode to use interrupt disable mode'? */
+ coda_write(dev, (1 << 10), CODA_CMD_DEC_PIC_OPTION);
+ break;
+ }
+
+ coda_write(dev, 0, CODA_CMD_DEC_PIC_SKIP_NUM);
+
+ coda_write(dev, 0, CODA_CMD_DEC_PIC_BB_START);
+ coda_write(dev, 0, CODA_CMD_DEC_PIC_START_BYTE);
+
+ if (dev->devtype->product != CODA_DX6)
+ coda_write(dev, ctx->iram_info.axi_sram_use,
+ CODA7_REG_BIT_AXI_SRAM_USE);
+
+ coda_kfifo_sync_to_device_full(ctx);
+
+ coda_command_async(ctx, CODA_COMMAND_PIC_RUN);
+
+ return 0;
+}
+
+static void coda_finish_decode(struct coda_ctx *ctx)
+{
+ struct coda_dev *dev = ctx->dev;
+ struct coda_q_data *q_data_src;
+ struct coda_q_data *q_data_dst;
+ struct vb2_buffer *dst_buf;
+ struct coda_timestamp *ts;
+ int width, height;
+ int decoded_idx;
+ int display_idx;
+ u32 src_fourcc;
+ int success;
+ u32 err_mb;
+ u32 val;
+
+ /* Update kfifo out pointer from coda bitstream read pointer */
+ coda_kfifo_sync_from_device(ctx);
+
+ /*
+ * in stream-end mode, the read pointer can overshoot the write pointer
+ * by up to 512 bytes
+ */
+ if (ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG) {
+ if (coda_get_bitstream_payload(ctx) >= CODA_MAX_FRAME_SIZE - 512)
+ kfifo_init(&ctx->bitstream_fifo,
+ ctx->bitstream.vaddr, ctx->bitstream.size);
+ }
+
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ src_fourcc = q_data_src->fourcc;
+
+ val = coda_read(dev, CODA_RET_DEC_PIC_SUCCESS);
+ if (val != 1)
+ pr_err("DEC_PIC_SUCCESS = %d\n", val);
+
+ success = val & 0x1;
+ if (!success)
+ v4l2_err(&dev->v4l2_dev, "decode failed\n");
+
+ if (src_fourcc == V4L2_PIX_FMT_H264) {
+ if (val & (1 << 3))
+ v4l2_err(&dev->v4l2_dev,
+ "insufficient PS buffer space (%d bytes)\n",
+ ctx->psbuf.size);
+ if (val & (1 << 2))
+ v4l2_err(&dev->v4l2_dev,
+ "insufficient slice buffer space (%d bytes)\n",
+ ctx->slicebuf.size);
+ }
+
+ val = coda_read(dev, CODA_RET_DEC_PIC_SIZE);
+ width = (val >> 16) & 0xffff;
+ height = val & 0xffff;
+
+ q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ /* frame crop information */
+ if (src_fourcc == V4L2_PIX_FMT_H264) {
+ u32 left_right;
+ u32 top_bottom;
+
+ left_right = coda_read(dev, CODA_RET_DEC_PIC_CROP_LEFT_RIGHT);
+ top_bottom = coda_read(dev, CODA_RET_DEC_PIC_CROP_TOP_BOTTOM);
+
+ if (left_right == 0xffffffff && top_bottom == 0xffffffff) {
+ /* Keep current crop information */
+ } else {
+ struct v4l2_rect *rect = &q_data_dst->rect;
+
+ rect->left = left_right >> 16 & 0xffff;
+ rect->top = top_bottom >> 16 & 0xffff;
+ rect->width = width - rect->left -
+ (left_right & 0xffff);
+ rect->height = height - rect->top -
+ (top_bottom & 0xffff);
+ }
+ } else {
+ /* no cropping */
+ }
+
+ err_mb = coda_read(dev, CODA_RET_DEC_PIC_ERR_MB);
+ if (err_mb > 0)
+ v4l2_err(&dev->v4l2_dev,
+ "errors in %d macroblocks\n", err_mb);
+
+ if (dev->devtype->product == CODA_7541) {
+ val = coda_read(dev, CODA_RET_DEC_PIC_OPTION);
+ if (val == 0) {
+ /* not enough bitstream data */
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+ "prescan failed: %d\n", val);
+ ctx->hold = true;
+ return;
+ }
+ }
+
+ ctx->frm_dis_flg = coda_read(dev,
+ CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx));
+
+ /*
+ * The previous display frame was copied out by the rotator,
+ * now it can be overwritten again
+ */
+ if (ctx->display_idx >= 0 &&
+ ctx->display_idx < ctx->num_internal_frames) {
+ ctx->frm_dis_flg &= ~(1 << ctx->display_idx);
+ coda_write(dev, ctx->frm_dis_flg,
+ CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx));
+ }
+
+ /*
+ * The index of the last decoded frame, not necessarily in
+ * display order, and the index of the next display frame.
+ * The latter could have been decoded in a previous run.
+ */
+ decoded_idx = coda_read(dev, CODA_RET_DEC_PIC_CUR_IDX);
+ display_idx = coda_read(dev, CODA_RET_DEC_PIC_FRAME_IDX);
+
+ if (decoded_idx == -1) {
+ /* no frame was decoded, but we might have a display frame */
+ if (display_idx >= 0 && display_idx < ctx->num_internal_frames)
+ ctx->sequence_offset++;
+ else if (ctx->display_idx < 0)
+ ctx->hold = true;
+ } else if (decoded_idx == -2) {
+ /* no frame was decoded, we still return remaining buffers */
+ } else if (decoded_idx < 0 || decoded_idx >= ctx->num_internal_frames) {
+ v4l2_err(&dev->v4l2_dev,
+ "decoded frame index out of range: %d\n", decoded_idx);
+ } else {
+ val = coda_read(dev, CODA_RET_DEC_PIC_FRAME_NUM) - 1;
+ val -= ctx->sequence_offset;
+ mutex_lock(&ctx->bitstream_mutex);
+ if (!list_empty(&ctx->timestamp_list)) {
+ ts = list_first_entry(&ctx->timestamp_list,
+ struct coda_timestamp, list);
+ list_del(&ts->list);
+ if (val != (ts->sequence & 0xffff)) {
+ v4l2_err(&dev->v4l2_dev,
+ "sequence number mismatch (%d(%d) != %d)\n",
+ val, ctx->sequence_offset,
+ ts->sequence);
+ }
+ ctx->frame_timestamps[decoded_idx] = *ts;
+ kfree(ts);
+ } else {
+ v4l2_err(&dev->v4l2_dev, "empty timestamp list!\n");
+ memset(&ctx->frame_timestamps[decoded_idx], 0,
+ sizeof(struct coda_timestamp));
+ ctx->frame_timestamps[decoded_idx].sequence = val;
+ }
+ mutex_unlock(&ctx->bitstream_mutex);
+
+ val = coda_read(dev, CODA_RET_DEC_PIC_TYPE) & 0x7;
+ if (val == 0)
+ ctx->frame_types[decoded_idx] = V4L2_BUF_FLAG_KEYFRAME;
+ else if (val == 1)
+ ctx->frame_types[decoded_idx] = V4L2_BUF_FLAG_PFRAME;
+ else
+ ctx->frame_types[decoded_idx] = V4L2_BUF_FLAG_BFRAME;
+
+ ctx->frame_errors[decoded_idx] = err_mb;
+ }
+
+ if (display_idx == -1) {
+ /*
+ * no more frames to be decoded, but there could still
+ * be rotator output to dequeue
+ */
+ ctx->hold = true;
+ } else if (display_idx == -3) {
+ /* possibly prescan failure */
+ } else if (display_idx < 0 || display_idx >= ctx->num_internal_frames) {
+ v4l2_err(&dev->v4l2_dev,
+ "presentation frame index out of range: %d\n",
+ display_idx);
+ }
+
+ /* If a frame was copied out, return it */
+ if (ctx->display_idx >= 0 &&
+ ctx->display_idx < ctx->num_internal_frames) {
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ dst_buf->v4l2_buf.sequence = ctx->osequence++;
+
+ dst_buf->v4l2_buf.flags &= ~(V4L2_BUF_FLAG_KEYFRAME |
+ V4L2_BUF_FLAG_PFRAME |
+ V4L2_BUF_FLAG_BFRAME);
+ dst_buf->v4l2_buf.flags |= ctx->frame_types[ctx->display_idx];
+ ts = &ctx->frame_timestamps[ctx->display_idx];
+ dst_buf->v4l2_buf.timecode = ts->timecode;
+ dst_buf->v4l2_buf.timestamp = ts->timestamp;
+
+ vb2_set_plane_payload(dst_buf, 0, width * height * 3 / 2);
+
+ v4l2_m2m_buf_done(dst_buf, ctx->frame_errors[ctx->display_idx] ?
+ VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+ "job finished: decoding frame (%d) (%s)\n",
+ dst_buf->v4l2_buf.sequence,
+ (dst_buf->v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME) ?
+ "KEYFRAME" : "PFRAME");
+ } else {
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+ "job finished: no frame decoded\n");
+ }
+
+ /* The rotator will copy the current display frame next time */
+ ctx->display_idx = display_idx;
+}
+
+const struct coda_context_ops coda_bit_decode_ops = {
+ .queue_init = coda_decoder_queue_init,
+ .start_streaming = coda_start_decoding,
+ .prepare_run = coda_prepare_decode,
+ .finish_run = coda_finish_decode,
+ .seq_end_work = coda_seq_end_work,
+ .release = coda_bit_release,
+};
+
+irqreturn_t coda_irq_handler(int irq, void *data)
+{
+ struct coda_dev *dev = data;
+ struct coda_ctx *ctx;
+
+ /* read status register to attend the IRQ */
+ coda_read(dev, CODA_REG_BIT_INT_STATUS);
+ coda_write(dev, CODA_REG_BIT_INT_CLEAR_SET,
+ CODA_REG_BIT_INT_CLEAR);
+
+ ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
+ if (ctx == NULL) {
+ v4l2_err(&dev->v4l2_dev,
+ "Instance released before the end of transaction\n");
+ mutex_unlock(&dev->coda_mutex);
+ return IRQ_HANDLED;
+ }
+
+ if (ctx->aborting) {
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "task has been aborted\n");
+ }
+
+ if (coda_isbusy(ctx->dev)) {
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "coda is still busy\n");
+ return IRQ_NONE;
+ }
+
+ complete(&ctx->completion);
+
+ return IRQ_HANDLED;
+}
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
new file mode 100644
index 000000000000..ced47609f5ef
--- /dev/null
+++ b/drivers/media/platform/coda/coda-common.c
@@ -0,0 +1,2052 @@
+/*
+ * Coda multi-standard codec IP
+ *
+ * Copyright (C) 2012 Vista Silicon S.L.
+ * Javier Martin, <javier.martin@vista-silicon.com>
+ * Xavier Duret
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/genalloc.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kfifo.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <linux/of.h>
+#include <linux/platform_data/coda.h>
+#include <linux/reset.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "coda.h"
+
+#define CODA_NAME "coda"
+
+#define CODADX6_MAX_INSTANCES 4
+
+#define CODA_PARA_BUF_SIZE (10 * 1024)
+#define CODA_ISRAM_SIZE (2048 * 2)
+
+#define MIN_W 176
+#define MIN_H 144
+
+#define S_ALIGN 1 /* multiple of 2 */
+#define W_ALIGN 1 /* multiple of 2 */
+#define H_ALIGN 1 /* multiple of 2 */
+
+#define fh_to_ctx(__fh) container_of(__fh, struct coda_ctx, fh)
+
+int coda_debug;
+module_param(coda_debug, int, 0644);
+MODULE_PARM_DESC(coda_debug, "Debug level (0-2)");
+
+struct coda_fmt {
+ char *name;
+ u32 fourcc;
+};
+
+void coda_write(struct coda_dev *dev, u32 data, u32 reg)
+{
+ v4l2_dbg(2, coda_debug, &dev->v4l2_dev,
+ "%s: data=0x%x, reg=0x%x\n", __func__, data, reg);
+ writel(data, dev->regs_base + reg);
+}
+
+unsigned int coda_read(struct coda_dev *dev, u32 reg)
+{
+ u32 data;
+
+ data = readl(dev->regs_base + reg);
+ v4l2_dbg(2, coda_debug, &dev->v4l2_dev,
+ "%s: data=0x%x, reg=0x%x\n", __func__, data, reg);
+ return data;
+}
+
+/*
+ * Array of all formats supported by any version of Coda:
+ */
+static const struct coda_fmt coda_formats[] = {
+ {
+ .name = "YUV 4:2:0 Planar, YCbCr",
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ },
+ {
+ .name = "YUV 4:2:0 Planar, YCrCb",
+ .fourcc = V4L2_PIX_FMT_YVU420,
+ },
+ {
+ .name = "H264 Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_H264,
+ },
+ {
+ .name = "MPEG4 Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_MPEG4,
+ },
+};
+
+#define CODA_CODEC(mode, src_fourcc, dst_fourcc, max_w, max_h) \
+ { mode, src_fourcc, dst_fourcc, max_w, max_h }
+
+/*
+ * Arrays of codecs supported by each given version of Coda:
+ * i.MX27 -> codadx6
+ * i.MX5x -> coda7
+ * i.MX6 -> coda960
+ * Use V4L2_PIX_FMT_YUV420 as placeholder for all supported YUV 4:2:0 variants
+ */
+static const struct coda_codec codadx6_codecs[] = {
+ CODA_CODEC(CODADX6_MODE_ENCODE_H264, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_H264, 720, 576),
+ CODA_CODEC(CODADX6_MODE_ENCODE_MP4, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_MPEG4, 720, 576),
+};
+
+static const struct coda_codec coda7_codecs[] = {
+ CODA_CODEC(CODA7_MODE_ENCODE_H264, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_H264, 1280, 720),
+ CODA_CODEC(CODA7_MODE_ENCODE_MP4, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_MPEG4, 1280, 720),
+ CODA_CODEC(CODA7_MODE_DECODE_H264, V4L2_PIX_FMT_H264, V4L2_PIX_FMT_YUV420, 1920, 1088),
+ CODA_CODEC(CODA7_MODE_DECODE_MP4, V4L2_PIX_FMT_MPEG4, V4L2_PIX_FMT_YUV420, 1920, 1088),
+};
+
+static const struct coda_codec coda9_codecs[] = {
+ CODA_CODEC(CODA9_MODE_ENCODE_H264, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_H264, 1920, 1088),
+ CODA_CODEC(CODA9_MODE_ENCODE_MP4, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_MPEG4, 1920, 1088),
+ CODA_CODEC(CODA9_MODE_DECODE_H264, V4L2_PIX_FMT_H264, V4L2_PIX_FMT_YUV420, 1920, 1088),
+ CODA_CODEC(CODA9_MODE_DECODE_MP4, V4L2_PIX_FMT_MPEG4, V4L2_PIX_FMT_YUV420, 1920, 1088),
+};
+
+static bool coda_format_is_yuv(u32 fourcc)
+{
+ switch (fourcc) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/*
+ * Normalize all supported YUV 4:2:0 formats to the value used in the codec
+ * tables.
+ */
+static u32 coda_format_normalize_yuv(u32 fourcc)
+{
+ return coda_format_is_yuv(fourcc) ? V4L2_PIX_FMT_YUV420 : fourcc;
+}
+
+static const struct coda_codec *coda_find_codec(struct coda_dev *dev,
+ int src_fourcc, int dst_fourcc)
+{
+ const struct coda_codec *codecs = dev->devtype->codecs;
+ int num_codecs = dev->devtype->num_codecs;
+ int k;
+
+ src_fourcc = coda_format_normalize_yuv(src_fourcc);
+ dst_fourcc = coda_format_normalize_yuv(dst_fourcc);
+ if (src_fourcc == dst_fourcc)
+ return NULL;
+
+ for (k = 0; k < num_codecs; k++) {
+ if (codecs[k].src_fourcc == src_fourcc &&
+ codecs[k].dst_fourcc == dst_fourcc)
+ break;
+ }
+
+ if (k == num_codecs)
+ return NULL;
+
+ return &codecs[k];
+}
+
+static void coda_get_max_dimensions(struct coda_dev *dev,
+ const struct coda_codec *codec,
+ unsigned int *max_w, unsigned int *max_h)
+{
+ const struct coda_codec *codecs = dev->devtype->codecs;
+ int num_codecs = dev->devtype->num_codecs;
+ unsigned int w, h;
+ int k;
+
+ if (codec) {
+ w = codec->max_w;
+ h = codec->max_h;
+ } else {
+ for (k = 0, w = 0, h = 0; k < num_codecs; k++) {
+ w = max(w, codecs[k].max_w);
+ h = max(h, codecs[k].max_h);
+ }
+ }
+
+ if (max_w)
+ *max_w = w;
+ if (max_h)
+ *max_h = h;
+}
+
+const char *coda_product_name(int product)
+{
+ static char buf[9];
+
+ switch (product) {
+ case CODA_DX6:
+ return "CodaDx6";
+ case CODA_7541:
+ return "CODA7541";
+ case CODA_960:
+ return "CODA960";
+ default:
+ snprintf(buf, sizeof(buf), "(0x%04x)", product);
+ return buf;
+ }
+}
+
+/*
+ * V4L2 ioctl() operations.
+ */
+static int coda_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct coda_ctx *ctx = fh_to_ctx(priv);
+
+ strlcpy(cap->driver, CODA_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, coda_product_name(ctx->dev->devtype->product),
+ sizeof(cap->card));
+ strlcpy(cap->bus_info, "platform:" CODA_NAME, sizeof(cap->bus_info));
+ cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+ return 0;
+}
+
+static int coda_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct coda_ctx *ctx = fh_to_ctx(priv);
+ const struct coda_codec *codecs = ctx->dev->devtype->codecs;
+ const struct coda_fmt *formats = coda_formats;
+ const struct coda_fmt *fmt;
+ int num_codecs = ctx->dev->devtype->num_codecs;
+ int num_formats = ARRAY_SIZE(coda_formats);
+ int i, k, num = 0;
+ bool yuv;
+
+ if (ctx->inst_type == CODA_INST_ENCODER)
+ yuv = (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ else
+ yuv = (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ for (i = 0; i < num_formats; i++) {
+ /* Skip either raw or compressed formats */
+ if (yuv != coda_format_is_yuv(formats[i].fourcc))
+ continue;
+ /* All uncompressed formats are always supported */
+ if (yuv) {
+ if (num == f->index)
+ break;
+ ++num;
+ continue;
+ }
+ /* Compressed formats may be supported, check the codec list */
+ for (k = 0; k < num_codecs; k++) {
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ formats[i].fourcc == codecs[k].dst_fourcc)
+ break;
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
+ formats[i].fourcc == codecs[k].src_fourcc)
+ break;
+ }
+ if (k < num_codecs) {
+ if (num == f->index)
+ break;
+ ++num;
+ }
+ }
+
+ if (i < num_formats) {
+ fmt = &formats[i];
+ strlcpy(f->description, fmt->name, sizeof(f->description));
+ f->pixelformat = fmt->fourcc;
+ if (!yuv)
+ f->flags |= V4L2_FMT_FLAG_COMPRESSED;
+ return 0;
+ }
+
+ /* Format not found */
+ return -EINVAL;
+}
+
+static int coda_g_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct coda_q_data *q_data;
+ struct coda_ctx *ctx = fh_to_ctx(priv);
+
+ q_data = get_q_data(ctx, f->type);
+ if (!q_data)
+ return -EINVAL;
+
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ f->fmt.pix.pixelformat = q_data->fourcc;
+ f->fmt.pix.width = q_data->width;
+ f->fmt.pix.height = q_data->height;
+ f->fmt.pix.bytesperline = q_data->bytesperline;
+
+ f->fmt.pix.sizeimage = q_data->sizeimage;
+ f->fmt.pix.colorspace = ctx->colorspace;
+
+ return 0;
+}
+
+static int coda_try_fmt(struct coda_ctx *ctx, const struct coda_codec *codec,
+ struct v4l2_format *f)
+{
+ struct coda_dev *dev = ctx->dev;
+ struct coda_q_data *q_data;
+ unsigned int max_w, max_h;
+ enum v4l2_field field;
+
+ field = f->fmt.pix.field;
+ if (field == V4L2_FIELD_ANY)
+ field = V4L2_FIELD_NONE;
+ else if (V4L2_FIELD_NONE != field)
+ return -EINVAL;
+
+ /* V4L2 specification suggests the driver corrects the format struct
+ * if any of the dimensions is unsupported */
+ f->fmt.pix.field = field;
+
+ coda_get_max_dimensions(dev, codec, &max_w, &max_h);
+ v4l_bound_align_image(&f->fmt.pix.width, MIN_W, max_w, W_ALIGN,
+ &f->fmt.pix.height, MIN_H, max_h, H_ALIGN,
+ S_ALIGN);
+
+ switch (f->fmt.pix.pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ case V4L2_PIX_FMT_H264:
+ case V4L2_PIX_FMT_MPEG4:
+ case V4L2_PIX_FMT_JPEG:
+ break;
+ default:
+ q_data = get_q_data(ctx, f->type);
+ if (!q_data)
+ return -EINVAL;
+ f->fmt.pix.pixelformat = q_data->fourcc;
+ }
+
+ switch (f->fmt.pix.pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ /* Frame stride must be multiple of 8, but 16 for h.264 */
+ f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 16);
+ f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
+ f->fmt.pix.height * 3 / 2;
+ break;
+ case V4L2_PIX_FMT_H264:
+ case V4L2_PIX_FMT_MPEG4:
+ case V4L2_PIX_FMT_JPEG:
+ f->fmt.pix.bytesperline = 0;
+ f->fmt.pix.sizeimage = CODA_MAX_FRAME_SIZE;
+ break;
+ default:
+ BUG();
+ }
+
+ return 0;
+}
+
+static int coda_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct coda_ctx *ctx = fh_to_ctx(priv);
+ const struct coda_codec *codec = NULL;
+ struct vb2_queue *src_vq;
+ int ret;
+
+ /*
+ * If the source format is already fixed, try to find a codec that
+ * converts to the given destination format
+ */
+ src_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ if (vb2_is_streaming(src_vq)) {
+ struct coda_q_data *q_data_src;
+
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ codec = coda_find_codec(ctx->dev, q_data_src->fourcc,
+ f->fmt.pix.pixelformat);
+ if (!codec)
+ return -EINVAL;
+
+ f->fmt.pix.width = q_data_src->width;
+ f->fmt.pix.height = q_data_src->height;
+ } else {
+ /* Otherwise determine codec by encoded format, if possible */
+ codec = coda_find_codec(ctx->dev, V4L2_PIX_FMT_YUV420,
+ f->fmt.pix.pixelformat);
+ }
+
+ f->fmt.pix.colorspace = ctx->colorspace;
+
+ ret = coda_try_fmt(ctx, codec, f);
+ if (ret < 0)
+ return ret;
+
+ /* The h.264 decoder only returns complete 16x16 macroblocks */
+ if (codec && codec->src_fourcc == V4L2_PIX_FMT_H264) {
+ f->fmt.pix.height = round_up(f->fmt.pix.height, 16);
+ f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 16);
+ f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
+ f->fmt.pix.height * 3 / 2;
+ }
+
+ return 0;
+}
+
+static int coda_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct coda_ctx *ctx = fh_to_ctx(priv);
+ const struct coda_codec *codec = NULL;
+
+ /* Determine codec by encoded format, returns NULL if raw or invalid */
+ if (ctx->inst_type == CODA_INST_DECODER) {
+ codec = coda_find_codec(ctx->dev, f->fmt.pix.pixelformat,
+ V4L2_PIX_FMT_YUV420);
+ if (!codec)
+ codec = coda_find_codec(ctx->dev, V4L2_PIX_FMT_H264,
+ V4L2_PIX_FMT_YUV420);
+ if (!codec)
+ return -EINVAL;
+ }
+
+ if (!f->fmt.pix.colorspace)
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
+
+ return coda_try_fmt(ctx, codec, f);
+}
+
+static int coda_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f)
+{
+ struct coda_q_data *q_data;
+ struct vb2_queue *vq;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = get_q_data(ctx, f->type);
+ if (!q_data)
+ return -EINVAL;
+
+ if (vb2_is_busy(vq)) {
+ v4l2_err(&ctx->dev->v4l2_dev, "%s queue busy\n", __func__);
+ return -EBUSY;
+ }
+
+ q_data->fourcc = f->fmt.pix.pixelformat;
+ q_data->width = f->fmt.pix.width;
+ q_data->height = f->fmt.pix.height;
+ q_data->bytesperline = f->fmt.pix.bytesperline;
+ q_data->sizeimage = f->fmt.pix.sizeimage;
+ q_data->rect.left = 0;
+ q_data->rect.top = 0;
+ q_data->rect.width = f->fmt.pix.width;
+ q_data->rect.height = f->fmt.pix.height;
+
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "Setting format for type %d, wxh: %dx%d, fmt: %d\n",
+ f->type, q_data->width, q_data->height, q_data->fourcc);
+
+ return 0;
+}
+
+static int coda_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct coda_ctx *ctx = fh_to_ctx(priv);
+ int ret;
+
+ ret = coda_try_fmt_vid_cap(file, priv, f);
+ if (ret)
+ return ret;
+
+ return coda_s_fmt(ctx, f);
+}
+
+static int coda_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct coda_ctx *ctx = fh_to_ctx(priv);
+ struct v4l2_format f_cap;
+ int ret;
+
+ ret = coda_try_fmt_vid_out(file, priv, f);
+ if (ret)
+ return ret;
+
+ ret = coda_s_fmt(ctx, f);
+ if (ret)
+ return ret;
+
+ ctx->colorspace = f->fmt.pix.colorspace;
+
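+ /* Propagate the new output resolution to the capture queue format */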
+ f_cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ coda_g_fmt(file, priv, &f_cap);
+ f_cap.fmt.pix.width = f->fmt.pix.width;
+ f_cap.fmt.pix.height = f->fmt.pix.height;
+
+ ret = coda_try_fmt_vid_cap(file, priv, &f_cap);
+ if (ret)
+ return ret;
+
+ return coda_s_fmt(ctx, &f_cap);
+}
+
+static int coda_qbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct coda_ctx *ctx = fh_to_ctx(priv);
+
+ return v4l2_m2m_qbuf(file, ctx->fh.m2m_ctx, buf);
+}
+
+static bool coda_buf_is_end_of_stream(struct coda_ctx *ctx,
+ struct v4l2_buffer *buf)
+{
+ return ((ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG) &&
+ (buf->sequence == (ctx->qsequence - 1)));
+}
+
+static int coda_dqbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct coda_ctx *ctx = fh_to_ctx(priv);
+ int ret;
+
+ ret = v4l2_m2m_dqbuf(file, ctx->fh.m2m_ctx, buf);
+
+ /* If this is the last capture buffer, emit an end-of-stream event */
+ if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ coda_buf_is_end_of_stream(ctx, buf)) {
+ const struct v4l2_event eos_event = {
+ .type = V4L2_EVENT_EOS
+ };
+
+ v4l2_event_queue_fh(&ctx->fh, &eos_event);
+ }
+
+ return ret;
+}
+
+static int coda_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct coda_ctx *ctx = fh_to_ctx(fh);
+ struct coda_q_data *q_data;
+ struct v4l2_rect r, *rsel;
+
+ q_data = get_q_data(ctx, s->type);
+ if (!q_data)
+ return -EINVAL;
+
+ r.left = 0;
+ r.top = 0;
+ r.width = q_data->width;
+ r.height = q_data->height;
+ rsel = &q_data->rect;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ rsel = &r;
+ /* fallthrough */
+ case V4L2_SEL_TGT_CROP:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE_PADDED:
+ rsel = &r;
+ /* fallthrough */
+ case V4L2_SEL_TGT_COMPOSE:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ s->r = *rsel;
+
+ return 0;
+}
+
+static int coda_try_decoder_cmd(struct file *file, void *fh,
+ struct v4l2_decoder_cmd *dc)
+{
+ if (dc->cmd != V4L2_DEC_CMD_STOP)
+ return -EINVAL;
+
+ if (dc->flags & V4L2_DEC_CMD_STOP_TO_BLACK)
+ return -EINVAL;
+
+ if (!(dc->flags & V4L2_DEC_CMD_STOP_IMMEDIATELY) && (dc->stop.pts != 0))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int coda_decoder_cmd(struct file *file, void *fh,
+ struct v4l2_decoder_cmd *dc)
+{
+ struct coda_ctx *ctx = fh_to_ctx(fh);
+ int ret;
+
+ ret = coda_try_decoder_cmd(file, fh, dc);
+ if (ret < 0)
+ return ret;
+
+ /* Ignore decoder stop command silently in encoder context */
+ if (ctx->inst_type != CODA_INST_DECODER)
+ return 0;
+
+ /* Set the stream-end flag on this context */
+ coda_bit_stream_end_flag(ctx);
+ ctx->hold = false;
+ v4l2_m2m_try_schedule(ctx->fh.m2m_ctx);
+
+ return 0;
+}
+
+static int coda_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_EOS:
+ return v4l2_event_subscribe(fh, sub, 0, NULL);
+ default:
+ return v4l2_ctrl_subscribe_event(fh, sub);
+ }
+}
+
+static const struct v4l2_ioctl_ops coda_ioctl_ops = {
+ .vidioc_querycap = coda_querycap,
+
+ .vidioc_enum_fmt_vid_cap = coda_enum_fmt,
+ .vidioc_g_fmt_vid_cap = coda_g_fmt,
+ .vidioc_try_fmt_vid_cap = coda_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = coda_s_fmt_vid_cap,
+
+ .vidioc_enum_fmt_vid_out = coda_enum_fmt,
+ .vidioc_g_fmt_vid_out = coda_g_fmt,
+ .vidioc_try_fmt_vid_out = coda_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = coda_s_fmt_vid_out,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+
+ .vidioc_qbuf = coda_qbuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_dqbuf = coda_dqbuf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_g_selection = coda_g_selection,
+
+ .vidioc_try_decoder_cmd = coda_try_decoder_cmd,
+ .vidioc_decoder_cmd = coda_decoder_cmd,
+
+ .vidioc_subscribe_event = coda_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+void coda_set_gdi_regs(struct coda_ctx *ctx)
+{
+ struct gdi_tiled_map *tiled_map = &ctx->tiled_map;
+ struct coda_dev *dev = ctx->dev;
+ int i;
+
+ for (i = 0; i < 16; i++)
+ coda_write(dev, tiled_map->xy2ca_map[i],
+ CODA9_GDI_XY2_CAS_0 + 4 * i);
+ for (i = 0; i < 4; i++)
+ coda_write(dev, tiled_map->xy2ba_map[i],
+ CODA9_GDI_XY2_BA_0 + 4 * i);
+ for (i = 0; i < 16; i++)
+ coda_write(dev, tiled_map->xy2ra_map[i],
+ CODA9_GDI_XY2_RAS_0 + 4 * i);
+ coda_write(dev, tiled_map->xy2rbc_config, CODA9_GDI_XY2_RBC_CONFIG);
+ for (i = 0; i < 32; i++)
+ coda_write(dev, tiled_map->rbc2axi_map[i],
+ CODA9_GDI_RBC2_AXI_0 + 4 * i);
+}
+
+/*
+ * Mem-to-mem operations.
+ */
+
+static void coda_device_run(void *m2m_priv)
+{
+ struct coda_ctx *ctx = m2m_priv;
+ struct coda_dev *dev = ctx->dev;
+
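+ /*
+ * Defer the actual picture run to coda_pic_run_work(), which takes
+ * the buffer and device mutexes and waits for the PIC_RUN completion.
+ */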
+ queue_work(dev->workqueue, &ctx->pic_run_work);
+}
+
+static void coda_pic_run_work(struct work_struct *work)
+{
+ struct coda_ctx *ctx = container_of(work, struct coda_ctx, pic_run_work);
+ struct coda_dev *dev = ctx->dev;
+ int ret;
+
+ mutex_lock(&ctx->buffer_mutex);
+ mutex_lock(&dev->coda_mutex);
+
+ ret = ctx->ops->prepare_run(ctx);
+ if (ret < 0 && ctx->inst_type == CODA_INST_DECODER) {
+ mutex_unlock(&dev->coda_mutex);
+ mutex_unlock(&ctx->buffer_mutex);
+ /* job_finish scheduled by prepare_decode */
+ return;
+ }
+
+ if (!wait_for_completion_timeout(&ctx->completion,
+ msecs_to_jiffies(1000))) {
+ dev_err(&dev->plat_dev->dev, "CODA PIC_RUN timeout\n");
+
+ ctx->hold = true;
+
+ coda_hw_reset(ctx);
+ } else if (!ctx->aborting) {
+ ctx->ops->finish_run(ctx);
+ }
+
+ if (ctx->aborting || (!ctx->streamon_cap && !ctx->streamon_out))
+ queue_work(dev->workqueue, &ctx->seq_end_work);
+
+ mutex_unlock(&dev->coda_mutex);
+ mutex_unlock(&ctx->buffer_mutex);
+
+ v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx);
+}
+
+static int coda_job_ready(void *m2m_priv)
+{
+ struct coda_ctx *ctx = m2m_priv;
+
+ /*
+ * For both 'P' and 'key' frames, one source picture and one
+ * destination frame buffer are needed. In the decoder case, the
+ * compressed picture may already be in the bitstream ringbuffer
+ * instead of a queued source buffer.
+ */
+ if (!v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) &&
+ ctx->inst_type != CODA_INST_DECODER) {
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "not ready: not enough video buffers.\n");
+ return 0;
+ }
+
+ if (!v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx)) {
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "not ready: not enough video capture buffers.\n");
+ return 0;
+ }
+
+ if (ctx->hold ||
+ ((ctx->inst_type == CODA_INST_DECODER) &&
+ (coda_get_bitstream_payload(ctx) < 512) &&
+ !(ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG))) {
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "%d: not ready: not enough bitstream data.\n",
+ ctx->idx);
+ return 0;
+ }
+
+ if (ctx->aborting) {
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "not ready: aborting\n");
+ return 0;
+ }
+
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "job ready\n");
+ return 1;
+}
+
+static void coda_job_abort(void *priv)
+{
+ struct coda_ctx *ctx = priv;
+
+ ctx->aborting = 1;
+
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "Aborting task\n");
+}
+
+static void coda_lock(void *m2m_priv)
+{
+ struct coda_ctx *ctx = m2m_priv;
+ struct coda_dev *pcdev = ctx->dev;
+
+ mutex_lock(&pcdev->dev_mutex);
+}
+
+static void coda_unlock(void *m2m_priv)
+{
+ struct coda_ctx *ctx = m2m_priv;
+ struct coda_dev *pcdev = ctx->dev;
+
+ mutex_unlock(&pcdev->dev_mutex);
+}
+
+static const struct v4l2_m2m_ops coda_m2m_ops = {
+ .device_run = coda_device_run,
+ .job_ready = coda_job_ready,
+ .job_abort = coda_job_abort,
+ .lock = coda_lock,
+ .unlock = coda_unlock,
+};
+
+static void coda_set_tiled_map_type(struct coda_ctx *ctx, int tiled_map_type)
+{
+ struct gdi_tiled_map *tiled_map = &ctx->tiled_map;
+ int luma_map, chro_map, i;
+
+ memset(tiled_map, 0, sizeof(*tiled_map));
+
+ luma_map = 64;
+ chro_map = 64;
+ tiled_map->map_type = tiled_map_type;
+ for (i = 0; i < 16; i++)
+ tiled_map->xy2ca_map[i] = luma_map << 8 | chro_map;
+ for (i = 0; i < 4; i++)
+ tiled_map->xy2ba_map[i] = luma_map << 8 | chro_map;
+ for (i = 0; i < 16; i++)
+ tiled_map->xy2ra_map[i] = luma_map << 8 | chro_map;
+
+ if (tiled_map_type == GDI_LINEAR_FRAME_MAP) {
+ tiled_map->xy2rbc_config = 0;
+ } else {
+ dev_err(&ctx->dev->plat_dev->dev, "invalid map type: %d\n",
+ tiled_map_type);
+ return;
+ }
+}
+
+static void set_default_params(struct coda_ctx *ctx)
+{
+ u32 src_fourcc, dst_fourcc;
+ int max_w;
+ int max_h;
+
+ if (ctx->inst_type == CODA_INST_ENCODER) {
+ src_fourcc = V4L2_PIX_FMT_YUV420;
+ dst_fourcc = V4L2_PIX_FMT_H264;
+ } else {
+ src_fourcc = V4L2_PIX_FMT_H264;
+ dst_fourcc = V4L2_PIX_FMT_YUV420;
+ }
+ ctx->codec = coda_find_codec(ctx->dev, src_fourcc, dst_fourcc);
+ max_w = ctx->codec->max_w;
+ max_h = ctx->codec->max_h;
+
+ ctx->params.codec_mode = ctx->codec->mode;
+ ctx->colorspace = V4L2_COLORSPACE_REC709;
+ ctx->params.framerate = 30;
+ ctx->aborting = 0;
+
+ /* Default formats for output and input queues */
+ ctx->q_data[V4L2_M2M_SRC].fourcc = ctx->codec->src_fourcc;
+ ctx->q_data[V4L2_M2M_DST].fourcc = ctx->codec->dst_fourcc;
+ ctx->q_data[V4L2_M2M_SRC].width = max_w;
+ ctx->q_data[V4L2_M2M_SRC].height = max_h;
+ ctx->q_data[V4L2_M2M_DST].width = max_w;
+ ctx->q_data[V4L2_M2M_DST].height = max_h;
+ if (ctx->codec->src_fourcc == V4L2_PIX_FMT_YUV420) {
+ ctx->q_data[V4L2_M2M_SRC].bytesperline = max_w;
+ ctx->q_data[V4L2_M2M_SRC].sizeimage = (max_w * max_h * 3) / 2;
+ ctx->q_data[V4L2_M2M_DST].bytesperline = 0;
+ ctx->q_data[V4L2_M2M_DST].sizeimage = CODA_MAX_FRAME_SIZE;
+ } else {
+ ctx->q_data[V4L2_M2M_SRC].bytesperline = 0;
+ ctx->q_data[V4L2_M2M_SRC].sizeimage = CODA_MAX_FRAME_SIZE;
+ ctx->q_data[V4L2_M2M_DST].bytesperline = max_w;
+ ctx->q_data[V4L2_M2M_DST].sizeimage = (max_w * max_h * 3) / 2;
+ }
+ ctx->q_data[V4L2_M2M_SRC].rect.width = max_w;
+ ctx->q_data[V4L2_M2M_SRC].rect.height = max_h;
+ ctx->q_data[V4L2_M2M_DST].rect.width = max_w;
+ ctx->q_data[V4L2_M2M_DST].rect.height = max_h;
+
+ if (ctx->dev->devtype->product == CODA_960)
+ coda_set_tiled_map_type(ctx, GDI_LINEAR_FRAME_MAP);
+}
+
+/*
+ * Queue operations
+ */
+static int coda_queue_setup(struct vb2_queue *vq,
+ const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct coda_ctx *ctx = vb2_get_drv_priv(vq);
+ struct coda_q_data *q_data;
+ unsigned int size;
+
+ q_data = get_q_data(ctx, vq->type);
+ size = q_data->sizeimage;
+
+ *nplanes = 1;
+ sizes[0] = size;
+
+ alloc_ctxs[0] = ctx->dev->alloc_ctx;
+
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "get %d buffer(s) of size %d each.\n", *nbuffers, size);
+
+ return 0;
+}
+
+static int coda_buf_prepare(struct vb2_buffer *vb)
+{
+ struct coda_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct coda_q_data *q_data;
+
+ q_data = get_q_data(ctx, vb->vb2_queue->type);
+
+ if (vb2_plane_size(vb, 0) < q_data->sizeimage) {
+ v4l2_warn(&ctx->dev->v4l2_dev,
+ "%s data will not fit into plane (%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, 0),
+ (long)q_data->sizeimage);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void coda_buf_queue(struct vb2_buffer *vb)
+{
+ struct coda_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct coda_q_data *q_data;
+
+ q_data = get_q_data(ctx, vb->vb2_queue->type);
+
+ /*
+ * In the decoder case, immediately try to copy the buffer into the
+ * bitstream ringbuffer and mark it as ready to be dequeued.
+ */
+ if (q_data->fourcc == V4L2_PIX_FMT_H264 &&
+ vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ /*
+ * For backwards compatibility, queuing an empty buffer marks
+ * the stream end
+ */
+ if (vb2_get_plane_payload(vb, 0) == 0)
+ coda_bit_stream_end_flag(ctx);
+ mutex_lock(&ctx->bitstream_mutex);
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
+ if (vb2_is_streaming(vb->vb2_queue))
+ coda_fill_bitstream(ctx);
+ mutex_unlock(&ctx->bitstream_mutex);
+ } else {
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
+ }
+}
+
+int coda_alloc_aux_buf(struct coda_dev *dev, struct coda_aux_buf *buf,
+ size_t size, const char *name, struct dentry *parent)
+{
+ buf->vaddr = dma_alloc_coherent(&dev->plat_dev->dev, size, &buf->paddr,
+ GFP_KERNEL);
+ if (!buf->vaddr) {
+ v4l2_err(&dev->v4l2_dev,
+ "Failed to allocate %s buffer of size %zu\n",
+ name, size);
+ return -ENOMEM;
+ }
+
+ buf->size = size;
+
+ if (name && parent) {
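+ /* Expose the buffer contents as a debugfs blob for inspection */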
+ buf->blob.data = buf->vaddr;
+ buf->blob.size = size;
+ buf->dentry = debugfs_create_blob(name, 0644, parent,
+ &buf->blob);
+ if (!buf->dentry)
+ dev_warn(&dev->plat_dev->dev,
+ "failed to create debugfs entry %s\n", name);
+ }
+
+ return 0;
+}
+
+void coda_free_aux_buf(struct coda_dev *dev,
+ struct coda_aux_buf *buf)
+{
+ if (buf->vaddr) {
+ dma_free_coherent(&dev->plat_dev->dev, buf->size,
+ buf->vaddr, buf->paddr);
+ buf->vaddr = NULL;
+ buf->size = 0;
+ }
+ debugfs_remove(buf->dentry);
+}
+
+static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct coda_ctx *ctx = vb2_get_drv_priv(q);
+ struct v4l2_device *v4l2_dev = &ctx->dev->v4l2_dev;
+ struct coda_q_data *q_data_src, *q_data_dst;
+ struct vb2_buffer *buf;
+ u32 dst_fourcc;
+ int ret = 0;
+
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ if (q_data_src->fourcc == V4L2_PIX_FMT_H264) {
+ /* copy the buffers that were queued before streamon */
+ mutex_lock(&ctx->bitstream_mutex);
+ coda_fill_bitstream(ctx);
+ mutex_unlock(&ctx->bitstream_mutex);
+
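+ /*
+ * As in coda_prepare_decode(), at least 512 bytes of bitstream
+ * data are needed before the first picture run can be started.
+ */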
+ if (coda_get_bitstream_payload(ctx) < 512) {
+ ret = -EINVAL;
+ goto err;
+ }
+ } else {
+ if (count < 1) {
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
+ ctx->streamon_out = 1;
+ } else {
+ if (count < 1) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ctx->streamon_cap = 1;
+ }
+
+ /* Don't start the coda unless both queues are on */
+ if (!(ctx->streamon_out & ctx->streamon_cap))
+ return 0;
+
+ /* Allow decoder device_run with no new buffers queued */
+ if (ctx->inst_type == CODA_INST_DECODER)
+ v4l2_m2m_set_src_buffered(ctx->fh.m2m_ctx, true);
+
+ ctx->gopcounter = ctx->params.gop_size - 1;
+ q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ dst_fourcc = q_data_dst->fourcc;
+
+ ctx->codec = coda_find_codec(ctx->dev, q_data_src->fourcc,
+ q_data_dst->fourcc);
+ if (!ctx->codec) {
+ v4l2_err(v4l2_dev, "couldn't tell instance type.\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = ctx->ops->start_streaming(ctx);
+ if (ctx->inst_type == CODA_INST_DECODER) {
+ if (ret == -EAGAIN)
+ return 0;
+ else if (ret < 0)
+ goto err;
+ }
+
+ ctx->initialized = 1;
+ return ret;
+
+err:
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ while ((buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_DEQUEUED);
+ } else {
+ while ((buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_DEQUEUED);
+ }
+ return ret;
+}
+
+static void coda_stop_streaming(struct vb2_queue *q)
+{
+ struct coda_ctx *ctx = vb2_get_drv_priv(q);
+ struct coda_dev *dev = ctx->dev;
+ struct vb2_buffer *buf;
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+ "%s: output\n", __func__);
+ ctx->streamon_out = 0;
+
+ coda_bit_stream_end_flag(ctx);
+
+ ctx->isequence = 0;
+
+ while ((buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
+ } else {
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+ "%s: capture\n", __func__);
+ ctx->streamon_cap = 0;
+
+ ctx->osequence = 0;
+ ctx->sequence_offset = 0;
+
+ while ((buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
+ }
+
+ if (!ctx->streamon_out && !ctx->streamon_cap) {
+ struct coda_timestamp *ts;
+
+ mutex_lock(&ctx->bitstream_mutex);
+ while (!list_empty(&ctx->timestamp_list)) {
+ ts = list_first_entry(&ctx->timestamp_list,
+ struct coda_timestamp, list);
+ list_del(&ts->list);
+ kfree(ts);
+ }
+ mutex_unlock(&ctx->bitstream_mutex);
+ kfifo_init(&ctx->bitstream_fifo,
+ ctx->bitstream.vaddr, ctx->bitstream.size);
+ ctx->runcounter = 0;
+ }
+}
+
+static const struct vb2_ops coda_qops = {
+ .queue_setup = coda_queue_setup,
+ .buf_prepare = coda_buf_prepare,
+ .buf_queue = coda_buf_queue,
+ .start_streaming = coda_start_streaming,
+ .stop_streaming = coda_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int coda_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct coda_ctx *ctx =
+ container_of(ctrl->handler, struct coda_ctx, ctrls);
+
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "s_ctrl: id = %d, val = %d\n", ctrl->id, ctrl->val);
+
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ if (ctrl->val)
+ ctx->params.rot_mode |= CODA_MIR_HOR;
+ else
+ ctx->params.rot_mode &= ~CODA_MIR_HOR;
+ break;
+ case V4L2_CID_VFLIP:
+ if (ctrl->val)
+ ctx->params.rot_mode |= CODA_MIR_VER;
+ else
+ ctx->params.rot_mode &= ~CODA_MIR_VER;
+ break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE:
+ ctx->params.bitrate = ctrl->val / 1000;
+ break;
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ ctx->params.gop_size = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:
+ ctx->params.h264_intra_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP:
+ ctx->params.h264_inter_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_MIN_QP:
+ ctx->params.h264_min_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_MAX_QP:
+ ctx->params.h264_max_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA:
+ ctx->params.h264_deblk_alpha = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA:
+ ctx->params.h264_deblk_beta = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
+ ctx->params.h264_deblk_enabled = (ctrl->val ==
+ V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED);
+ break;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP:
+ ctx->params.mpeg4_intra_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP:
+ ctx->params.mpeg4_inter_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
+ ctx->params.slice_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB:
+ ctx->params.slice_max_mb = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES:
+ ctx->params.slice_max_bits = ctrl->val * 8;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
+ break;
+ case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB:
+ ctx->params.intra_refresh = ctrl->val;
+ break;
+ default:
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "Invalid control, id=%d, val=%d\n",
+ ctrl->id, ctrl->val);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops coda_ctrl_ops = {
+ .s_ctrl = coda_s_ctrl,
+};
+
+static int coda_ctrls_setup(struct coda_ctx *ctx)
+{
+ v4l2_ctrl_handler_init(&ctx->ctrls, 9);
+
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_BITRATE, 0, 32767000, 1, 0);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_GOP_SIZE, 1, 60, 1, 16);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP, 0, 51, 1, 25);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP, 0, 51, 1, 25);
+ if (ctx->dev->devtype->product != CODA_960) {
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_MIN_QP, 0, 51, 1, 12);
+ }
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_MAX_QP, 0, 51, 1, 51);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA, 0, 15, 1, 0);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA, 0, 15, 1, 0);
+ v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE,
+ V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED, 0x0,
+ V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP, 1, 31, 1, 2);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP, 1, 31, 1, 2);
+ v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE,
+ V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES, 0x0,
+ V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB, 1, 0x3fffffff, 1, 1);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES, 1, 0x3fffffff, 1,
+ 500);
+ v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEADER_MODE,
+ V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME,
+ (1 << V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE),
+ V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB, 0,
+ 1920 * 1088 / 256, 1, 0);
+
+ if (ctx->ctrls.error) {
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "control initialization error (%d)",
+ ctx->ctrls.error);
+ return -EINVAL;
+ }
+
+ return v4l2_ctrl_handler_setup(&ctx->ctrls);
+}
+
+static int coda_queue_init(struct coda_ctx *ctx, struct vb2_queue *vq)
+{
+ vq->drv_priv = ctx;
+ vq->ops = &coda_qops;
+ vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ vq->lock = &ctx->dev->dev_mutex;
+
+ return vb2_queue_init(vq);
+}
+
+int coda_encoder_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_DMABUF | VB2_MMAP;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+
+ ret = coda_queue_init(priv, src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_DMABUF | VB2_MMAP;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+
+ return coda_queue_init(priv, dst_vq);
+}
+
+int coda_decoder_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_DMABUF | VB2_MMAP;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+
+ ret = coda_queue_init(priv, src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_DMABUF | VB2_MMAP;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+
+ return coda_queue_init(priv, dst_vq);
+}
+
+static int coda_next_free_instance(struct coda_dev *dev)
+{
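+ /* ffz() returns the lowest clear bit, i.e. the lowest free instance */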
+ int idx = ffz(dev->instance_mask);
+
+ if ((idx < 0) ||
+ (dev->devtype->product == CODA_DX6 && idx >= CODADX6_MAX_INSTANCES))
+ return -EBUSY;
+
+ return idx;
+}
+
+static int coda_open(struct file *file, enum coda_inst_type inst_type,
+ const struct coda_context_ops *ctx_ops)
+{
+ struct coda_dev *dev = video_drvdata(file);
+ struct coda_ctx *ctx = NULL;
+ char *name;
+ int ret;
+ int idx;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ idx = coda_next_free_instance(dev);
+ if (idx < 0) {
+ ret = idx;
+ goto err_coda_max;
+ }
+ set_bit(idx, &dev->instance_mask);
+
+ name = kasprintf(GFP_KERNEL, "context%d", idx);
+ ctx->debugfs_entry = debugfs_create_dir(name, dev->debugfs_root);
+ kfree(name);
+
+ ctx->inst_type = inst_type;
+ ctx->ops = ctx_ops;
+ init_completion(&ctx->completion);
+ INIT_WORK(&ctx->pic_run_work, coda_pic_run_work);
+ INIT_WORK(&ctx->seq_end_work, ctx->ops->seq_end_work);
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+ ctx->dev = dev;
+ ctx->idx = idx;
+ switch (dev->devtype->product) {
+ case CODA_7541:
+ case CODA_960:
+ ctx->reg_idx = 0;
+ break;
+ default:
+ ctx->reg_idx = idx;
+ }
+
+ /* Power up and upload firmware if necessary */
+ ret = pm_runtime_get_sync(&dev->plat_dev->dev);
+ if (ret < 0) {
+ v4l2_err(&dev->v4l2_dev, "failed to power up: %d\n", ret);
+ goto err_pm_get;
+ }
+
+ ret = clk_prepare_enable(dev->clk_per);
+ if (ret)
+ goto err_clk_per;
+
+ ret = clk_prepare_enable(dev->clk_ahb);
+ if (ret)
+ goto err_clk_ahb;
+
+ set_default_params(ctx);
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx,
+ ctx->ops->queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+
+ v4l2_err(&dev->v4l2_dev, "%s return error (%d)\n",
+ __func__, ret);
+ goto err_ctx_init;
+ }
+
+ ret = coda_ctrls_setup(ctx);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "failed to setup coda controls\n");
+ goto err_ctrls_setup;
+ }
+
+ ctx->fh.ctrl_handler = &ctx->ctrls;
+
+ ret = coda_alloc_context_buf(ctx, &ctx->parabuf, CODA_PARA_BUF_SIZE,
+ "parabuf");
+ if (ret < 0) {
+ v4l2_err(&dev->v4l2_dev, "failed to allocate parabuf");
+ goto err_dma_alloc;
+ }
+
+ ctx->bitstream.size = CODA_MAX_FRAME_SIZE;
+ ctx->bitstream.vaddr = dma_alloc_writecombine(&dev->plat_dev->dev,
+ ctx->bitstream.size, &ctx->bitstream.paddr, GFP_KERNEL);
+ if (!ctx->bitstream.vaddr) {
+ v4l2_err(&dev->v4l2_dev,
+ "failed to allocate bitstream ringbuffer");
+ ret = -ENOMEM;
+ goto err_dma_writecombine;
+ }
+ kfifo_init(&ctx->bitstream_fifo,
+ ctx->bitstream.vaddr, ctx->bitstream.size);
+ mutex_init(&ctx->bitstream_mutex);
+ mutex_init(&ctx->buffer_mutex);
+ INIT_LIST_HEAD(&ctx->timestamp_list);
+
+ coda_lock(ctx);
+ list_add(&ctx->list, &dev->instances);
+ coda_unlock(ctx);
+
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev, "Created instance %d (%p)\n",
+ ctx->idx, ctx);
+
+ return 0;
+
+err_dma_writecombine:
+ if (ctx->dev->devtype->product == CODA_DX6)
+ coda_free_aux_buf(dev, &ctx->workbuf);
+ coda_free_aux_buf(dev, &ctx->parabuf);
+err_dma_alloc:
+ v4l2_ctrl_handler_free(&ctx->ctrls);
+err_ctrls_setup:
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+err_ctx_init:
+ clk_disable_unprepare(dev->clk_ahb);
+err_clk_ahb:
+ clk_disable_unprepare(dev->clk_per);
+err_clk_per:
+ pm_runtime_put_sync(&dev->plat_dev->dev);
+err_pm_get:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ clear_bit(ctx->idx, &dev->instance_mask);
+err_coda_max:
+ kfree(ctx);
+ return ret;
+}
+
+static int coda_encoder_open(struct file *file)
+{
+ return coda_open(file, CODA_INST_ENCODER, &coda_bit_encode_ops);
+}
+
+static int coda_decoder_open(struct file *file)
+{
+ return coda_open(file, CODA_INST_DECODER, &coda_bit_decode_ops);
+}
+
+static int coda_release(struct file *file)
+{
+ struct coda_dev *dev = video_drvdata(file);
+ struct coda_ctx *ctx = fh_to_ctx(file->private_data);
+
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev, "Releasing instance %p\n",
+ ctx);
+
+ debugfs_remove_recursive(ctx->debugfs_entry);
+
+ /* If this instance is running, call .job_abort and wait for it to end */
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+
+ /* In case the instance was not running, we still need to call SEQ_END */
+ if (ctx->initialized) {
+ queue_work(dev->workqueue, &ctx->seq_end_work);
+ flush_work(&ctx->seq_end_work);
+ }
+
+ coda_lock(ctx);
+ list_del(&ctx->list);
+ coda_unlock(ctx);
+
+ dma_free_writecombine(&dev->plat_dev->dev, ctx->bitstream.size,
+ ctx->bitstream.vaddr, ctx->bitstream.paddr);
+ if (ctx->dev->devtype->product == CODA_DX6)
+ coda_free_aux_buf(dev, &ctx->workbuf);
+
+ coda_free_aux_buf(dev, &ctx->parabuf);
+ v4l2_ctrl_handler_free(&ctx->ctrls);
+ clk_disable_unprepare(dev->clk_ahb);
+ clk_disable_unprepare(dev->clk_per);
+ pm_runtime_put_sync(&dev->plat_dev->dev);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ clear_bit(ctx->idx, &dev->instance_mask);
+ if (ctx->ops->release)
+ ctx->ops->release(ctx);
+ kfree(ctx);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations coda_encoder_fops = {
+ .owner = THIS_MODULE,
+ .open = coda_encoder_open,
+ .release = coda_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static const struct v4l2_file_operations coda_decoder_fops = {
+ .owner = THIS_MODULE,
+ .open = coda_decoder_open,
+ .release = coda_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static int coda_hw_init(struct coda_dev *dev)
+{
+ u32 data;
+ u16 *p;
+ int i, ret;
+
+ ret = clk_prepare_enable(dev->clk_per);
+ if (ret)
+ goto err_clk_per;
+
+ ret = clk_prepare_enable(dev->clk_ahb);
+ if (ret)
+ goto err_clk_ahb;
+
+ if (dev->rstc)
+ reset_control_reset(dev->rstc);
+
+ /*
+ * Copy the first CODA_ISRAM_SIZE in the internal SRAM.
+ * The 16-bit chars in the code buffer are in memory access
+ * order, re-sort them to CODA order for register download.
+ * Data in this SRAM survives a reboot.
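+ * For example, on CodaDx6 adjacent 16-bit halfwords are swapped
+ * (index i ^ 1), while the later cores reverse each group of four
+ * halfwords (round_down(i, 4) + 3 - (i % 4)).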
+ */
+ p = (u16 *)dev->codebuf.vaddr;
+ if (dev->devtype->product == CODA_DX6) {
+ for (i = 0; i < (CODA_ISRAM_SIZE / 2); i++) {
+ data = CODA_DOWN_ADDRESS_SET(i) |
+ CODA_DOWN_DATA_SET(p[i ^ 1]);
+ coda_write(dev, data, CODA_REG_BIT_CODE_DOWN);
+ }
+ } else {
+ for (i = 0; i < (CODA_ISRAM_SIZE / 2); i++) {
+ data = CODA_DOWN_ADDRESS_SET(i) |
+ CODA_DOWN_DATA_SET(p[round_down(i, 4) +
+ 3 - (i % 4)]);
+ coda_write(dev, data, CODA_REG_BIT_CODE_DOWN);
+ }
+ }
+
+ /* Clear registers */
+ for (i = 0; i < 64; i++)
+ coda_write(dev, 0, CODA_REG_BIT_CODE_BUF_ADDR + i * 4);
+
+ /* Tell the BIT where to find everything it needs */
+ if (dev->devtype->product == CODA_960 ||
+ dev->devtype->product == CODA_7541) {
+ coda_write(dev, dev->tempbuf.paddr,
+ CODA_REG_BIT_TEMP_BUF_ADDR);
+ coda_write(dev, 0, CODA_REG_BIT_BIT_STREAM_PARAM);
+ } else {
+ coda_write(dev, dev->workbuf.paddr,
+ CODA_REG_BIT_WORK_BUF_ADDR);
+ }
+ coda_write(dev, dev->codebuf.paddr,
+ CODA_REG_BIT_CODE_BUF_ADDR);
+ coda_write(dev, 0, CODA_REG_BIT_CODE_RUN);
+
+ /* Set default values */
+ switch (dev->devtype->product) {
+ case CODA_DX6:
+ coda_write(dev, CODADX6_STREAM_BUF_PIC_FLUSH,
+ CODA_REG_BIT_STREAM_CTRL);
+ break;
+ default:
+ coda_write(dev, CODA7_STREAM_BUF_PIC_FLUSH,
+ CODA_REG_BIT_STREAM_CTRL);
+ }
+ if (dev->devtype->product == CODA_960)
+ coda_write(dev, 1 << 12, CODA_REG_BIT_FRAME_MEM_CTRL);
+ else
+ coda_write(dev, 0, CODA_REG_BIT_FRAME_MEM_CTRL);
+
+ if (dev->devtype->product != CODA_DX6)
+ coda_write(dev, 0, CODA7_REG_BIT_AXI_SRAM_USE);
+
+ coda_write(dev, CODA_INT_INTERRUPT_ENABLE,
+ CODA_REG_BIT_INT_ENABLE);
+
+ /* Reset VPU and start processor */
+ data = coda_read(dev, CODA_REG_BIT_CODE_RESET);
+ data |= CODA_REG_RESET_ENABLE;
+ coda_write(dev, data, CODA_REG_BIT_CODE_RESET);
+ udelay(10);
+ data &= ~CODA_REG_RESET_ENABLE;
+ coda_write(dev, data, CODA_REG_BIT_CODE_RESET);
+ coda_write(dev, CODA_REG_RUN_ENABLE, CODA_REG_BIT_CODE_RUN);
+
+ clk_disable_unprepare(dev->clk_ahb);
+ clk_disable_unprepare(dev->clk_per);
+
+ return 0;
+
+err_clk_ahb:
+ clk_disable_unprepare(dev->clk_per);
+err_clk_per:
+ return ret;
+}
+
+static int coda_register_device(struct coda_dev *dev, struct video_device *vfd)
+{
+ vfd->release = video_device_release_empty;
+ vfd->lock = &dev->dev_mutex;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ vfd->vfl_dir = VFL_DIR_M2M;
+ video_set_drvdata(vfd, dev);
+
+ /* Not applicable, use the selection API instead */
+ v4l2_disable_ioctl(vfd, VIDIOC_CROPCAP);
+ v4l2_disable_ioctl(vfd, VIDIOC_G_CROP);
+ v4l2_disable_ioctl(vfd, VIDIOC_S_CROP);
+
+ return video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+}
+
+static void coda_fw_callback(const struct firmware *fw, void *context)
+{
+ struct coda_dev *dev = context;
+ struct platform_device *pdev = dev->plat_dev;
+ int ret;
+
+ if (!fw) {
+ v4l2_err(&dev->v4l2_dev, "firmware request failed\n");
+ goto put_pm;
+ }
+
+ /* allocate auxiliary per-device code buffer for the BIT processor */
+ ret = coda_alloc_aux_buf(dev, &dev->codebuf, fw->size, "codebuf",
+ dev->debugfs_root);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to allocate code buffer\n");
+ goto put_pm;
+ }
+
+ /* Copy the whole firmware image to the code buffer */
+ memcpy(dev->codebuf.vaddr, fw->data, fw->size);
+ release_firmware(fw);
+
+ ret = coda_hw_init(dev);
+ if (ret < 0) {
+ v4l2_err(&dev->v4l2_dev, "HW initialization failed\n");
+ goto put_pm;
+ }
+
+ ret = coda_check_firmware(dev);
+ if (ret < 0)
+ goto put_pm;
+
+ dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
+ if (IS_ERR(dev->alloc_ctx)) {
+ v4l2_err(&dev->v4l2_dev, "Failed to alloc vb2 context\n");
+ goto put_pm;
+ }
+
+ dev->m2m_dev = v4l2_m2m_init(&coda_m2m_ops);
+ if (IS_ERR(dev->m2m_dev)) {
+ v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n");
+ goto rel_ctx;
+ }
+
+ dev->vfd[0].fops = &coda_encoder_fops;
+ dev->vfd[0].ioctl_ops = &coda_ioctl_ops;
+ snprintf(dev->vfd[0].name, sizeof(dev->vfd[0].name), "coda-encoder");
+ ret = coda_register_device(dev, &dev->vfd[0]);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev,
+ "Failed to register encoder video device\n");
+ goto rel_m2m;
+ }
+
+ dev->vfd[1].fops = &coda_decoder_fops;
+ dev->vfd[1].ioctl_ops = &coda_ioctl_ops;
+ snprintf(dev->vfd[1].name, sizeof(dev->vfd[1].name), "coda-decoder");
+ ret = coda_register_device(dev, &dev->vfd[1]);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev,
+ "Failed to register decoder video device\n");
+ goto rel_m2m;
+ }
+
+ v4l2_info(&dev->v4l2_dev, "codec registered as /dev/video[%d-%d]\n",
+ dev->vfd[0].num, dev->vfd[1].num);
+
+ pm_runtime_put_sync(&pdev->dev);
+ return;
+
+rel_m2m:
+ v4l2_m2m_release(dev->m2m_dev);
+rel_ctx:
+ vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
+put_pm:
+ pm_runtime_put_sync(&pdev->dev);
+}
+
+static int coda_firmware_request(struct coda_dev *dev)
+{
+ char *fw = dev->devtype->firmware;
+
+ dev_dbg(&dev->plat_dev->dev, "requesting firmware '%s' for %s\n", fw,
+ coda_product_name(dev->devtype->product));
+
+ return request_firmware_nowait(THIS_MODULE, true,
+ fw, &dev->plat_dev->dev, GFP_KERNEL, dev, coda_fw_callback);
+}
+
+enum coda_platform {
+ CODA_IMX27,
+ CODA_IMX53,
+ CODA_IMX6Q,
+ CODA_IMX6DL,
+};
+
+static const struct coda_devtype coda_devdata[] = {
+ [CODA_IMX27] = {
+ .firmware = "v4l-codadx6-imx27.bin",
+ .product = CODA_DX6,
+ .codecs = codadx6_codecs,
+ .num_codecs = ARRAY_SIZE(codadx6_codecs),
+ .workbuf_size = 288 * 1024 + FMO_SLICE_SAVE_BUF_SIZE * 8 * 1024,
+ .iram_size = 0xb000,
+ },
+ [CODA_IMX53] = {
+ .firmware = "v4l-coda7541-imx53.bin",
+ .product = CODA_7541,
+ .codecs = coda7_codecs,
+ .num_codecs = ARRAY_SIZE(coda7_codecs),
+ .workbuf_size = 128 * 1024,
+ .tempbuf_size = 304 * 1024,
+ .iram_size = 0x14000,
+ },
+ [CODA_IMX6Q] = {
+ .firmware = "v4l-coda960-imx6q.bin",
+ .product = CODA_960,
+ .codecs = coda9_codecs,
+ .num_codecs = ARRAY_SIZE(coda9_codecs),
+ .workbuf_size = 80 * 1024,
+ .tempbuf_size = 204 * 1024,
+ .iram_size = 0x21000,
+ },
+ [CODA_IMX6DL] = {
+ .firmware = "v4l-coda960-imx6dl.bin",
+ .product = CODA_960,
+ .codecs = coda9_codecs,
+ .num_codecs = ARRAY_SIZE(coda9_codecs),
+ .workbuf_size = 80 * 1024,
+ .tempbuf_size = 204 * 1024,
+ .iram_size = 0x20000,
+ },
+};
+
+static struct platform_device_id coda_platform_ids[] = {
+ { .name = "coda-imx27", .driver_data = CODA_IMX27 },
+ { .name = "coda-imx53", .driver_data = CODA_IMX53 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, coda_platform_ids);
+
+#ifdef CONFIG_OF
+static const struct of_device_id coda_dt_ids[] = {
+ { .compatible = "fsl,imx27-vpu", .data = &coda_devdata[CODA_IMX27] },
+ { .compatible = "fsl,imx53-vpu", .data = &coda_devdata[CODA_IMX53] },
+ { .compatible = "fsl,imx6q-vpu", .data = &coda_devdata[CODA_IMX6Q] },
+ { .compatible = "fsl,imx6dl-vpu", .data = &coda_devdata[CODA_IMX6DL] },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, coda_dt_ids);
+#endif
+
+static int coda_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id =
+ of_match_device(of_match_ptr(coda_dt_ids), &pdev->dev);
+ const struct platform_device_id *pdev_id;
+ struct coda_platform_data *pdata = pdev->dev.platform_data;
+ struct device_node *np = pdev->dev.of_node;
+ struct gen_pool *pool;
+ struct coda_dev *dev;
+ struct resource *res;
+ int ret, irq;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ dev_err(&pdev->dev, "Not enough memory for %s\n",
+ CODA_NAME);
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&dev->irqlock);
+ INIT_LIST_HEAD(&dev->instances);
+
+ dev->plat_dev = pdev;
+ dev->clk_per = devm_clk_get(&pdev->dev, "per");
+ if (IS_ERR(dev->clk_per)) {
+ dev_err(&pdev->dev, "Could not get per clock\n");
+ return PTR_ERR(dev->clk_per);
+ }
+
+ dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(dev->clk_ahb)) {
+ dev_err(&pdev->dev, "Could not get ahb clock\n");
+ return PTR_ERR(dev->clk_ahb);
+ }
+
+ /* Get memory for physical registers */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dev->regs_base))
+ return PTR_ERR(dev->regs_base);
+
+ /* IRQ */
+ irq = platform_get_irq_byname(pdev, "bit");
+ if (irq < 0)
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to get irq resource\n");
+ return irq;
+ }
+
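+ /* With no hard IRQ handler only the threaded handler runs; in that
+ * case the core requires IRQF_ONESHOT, which keeps the interrupt line
+ * masked until the threaded handler has completed.
+ */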
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, coda_irq_handler,
+ IRQF_ONESHOT, dev_name(&pdev->dev), dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to request irq: %d\n", ret);
+ return ret;
+ }
+
+ dev->rstc = devm_reset_control_get_optional(&pdev->dev, NULL);
+ if (IS_ERR(dev->rstc)) {
+ ret = PTR_ERR(dev->rstc);
+ if (ret == -ENOENT || ret == -ENOSYS) {
+ dev->rstc = NULL;
+ } else {
+ dev_err(&pdev->dev, "failed get reset control: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ /* Get IRAM pool from device tree or platform data */
+ pool = of_get_named_gen_pool(np, "iram", 0);
+ if (!pool && pdata)
+ pool = dev_get_gen_pool(pdata->iram_dev);
+ if (!pool) {
+ dev_err(&pdev->dev, "iram pool not available\n");
+ return -ENOMEM;
+ }
+ dev->iram_pool = pool;
+
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret)
+ return ret;
+
+ mutex_init(&dev->dev_mutex);
+ mutex_init(&dev->coda_mutex);
+
+ pdev_id = of_id ? of_id->data : platform_get_device_id(pdev);
+
+ if (of_id) {
+ dev->devtype = of_id->data;
+ } else if (pdev_id) {
+ dev->devtype = &coda_devdata[pdev_id->driver_data];
+ } else {
+ v4l2_device_unregister(&dev->v4l2_dev);
+ return -EINVAL;
+ }
+
+ dev->debugfs_root = debugfs_create_dir("coda", NULL);
+ if (!dev->debugfs_root)
+ dev_warn(&pdev->dev, "failed to create debugfs root\n");
+
+ /* allocate auxiliary per-device buffers for the BIT processor */
+ if (dev->devtype->product == CODA_DX6) {
+ ret = coda_alloc_aux_buf(dev, &dev->workbuf,
+ dev->devtype->workbuf_size, "workbuf",
+ dev->debugfs_root);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to allocate work buffer\n");
+ v4l2_device_unregister(&dev->v4l2_dev);
+ return ret;
+ }
+ }
+
+ if (dev->devtype->tempbuf_size) {
+ ret = coda_alloc_aux_buf(dev, &dev->tempbuf,
+ dev->devtype->tempbuf_size, "tempbuf",
+ dev->debugfs_root);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to allocate temp buffer\n");
+ v4l2_device_unregister(&dev->v4l2_dev);
+ return ret;
+ }
+ }
+
+ dev->iram.size = dev->devtype->iram_size;
+ dev->iram.vaddr = gen_pool_dma_alloc(dev->iram_pool, dev->iram.size,
+ &dev->iram.paddr);
+ if (!dev->iram.vaddr) {
+ dev_warn(&pdev->dev, "unable to alloc iram\n");
+ } else {
+ dev->iram.blob.data = dev->iram.vaddr;
+ dev->iram.blob.size = dev->iram.size;
+ dev->iram.dentry = debugfs_create_blob("iram", 0644,
+ dev->debugfs_root,
+ &dev->iram.blob);
+ }
+
+ dev->workqueue = alloc_workqueue("coda", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+ if (!dev->workqueue) {
+ dev_err(&pdev->dev, "unable to alloc workqueue\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, dev);
+
+ /*
+ * Start activated so we can directly call coda_hw_init in
+ * coda_fw_callback regardless of whether CONFIG_PM_RUNTIME is
+ * enabled or whether the device is associated with a PM domain.
+ */
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ return coda_firmware_request(dev);
+}
+
+static int coda_remove(struct platform_device *pdev)
+{
+ struct coda_dev *dev = platform_get_drvdata(pdev);
+
+ video_unregister_device(&dev->vfd[0]);
+ video_unregister_device(&dev->vfd[1]);
+ if (dev->m2m_dev)
+ v4l2_m2m_release(dev->m2m_dev);
+ pm_runtime_disable(&pdev->dev);
+ if (dev->alloc_ctx)
+ vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
+ v4l2_device_unregister(&dev->v4l2_dev);
+ destroy_workqueue(dev->workqueue);
+ if (dev->iram.vaddr)
+ gen_pool_free(dev->iram_pool, (unsigned long)dev->iram.vaddr,
+ dev->iram.size);
+ coda_free_aux_buf(dev, &dev->codebuf);
+ coda_free_aux_buf(dev, &dev->tempbuf);
+ coda_free_aux_buf(dev, &dev->workbuf);
+ debugfs_remove_recursive(dev->debugfs_root);
+ return 0;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int coda_runtime_resume(struct device *dev)
+{
+ struct coda_dev *cdev = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (dev->pm_domain && cdev->codebuf.vaddr) {
+ ret = coda_hw_init(cdev);
+ if (ret)
+ v4l2_err(&cdev->v4l2_dev, "HW initialization failed\n");
+ }
+
+ return ret;
+}
+#endif
+
+static const struct dev_pm_ops coda_pm_ops = {
+ SET_RUNTIME_PM_OPS(NULL, coda_runtime_resume, NULL)
+};
+
+static struct platform_driver coda_driver = {
+ .probe = coda_probe,
+ .remove = coda_remove,
+ .driver = {
+ .name = CODA_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(coda_dt_ids),
+ .pm = &coda_pm_ops,
+ },
+ .id_table = coda_platform_ids,
+};
+
+module_platform_driver(coda_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
+MODULE_DESCRIPTION("Coda multi-standard codec V4L2 driver");
diff --git a/drivers/media/platform/coda/coda-h264.c b/drivers/media/platform/coda/coda-h264.c
new file mode 100644
index 000000000000..456773af1f1d
--- /dev/null
+++ b/drivers/media/platform/coda/coda-h264.c
@@ -0,0 +1,37 @@
+/*
+ * Coda multi-standard codec IP - H.264 helper functions
+ *
+ * Copyright (C) 2012 Vista Silicon S.L.
+ * Javier Martin, <javier.martin@vista-silicon.com>
+ * Xavier Duret
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+static const u8 coda_filler_nal[14] = { 0x00, 0x00, 0x00, 0x01, 0x0c, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x80 };
+static const u8 coda_filler_size[8] = { 0, 7, 14, 13, 12, 11, 10, 9 };
+
+int coda_h264_padding(int size, char *p)
+{
+ int nal_size;
+ int diff;
+
+ diff = size - (size & ~0x7);
+ if (diff == 0)
+ return 0;
+
+ nal_size = coda_filler_size[diff];
+ memcpy(p, coda_filler_nal, nal_size);
+
+ /* Add the RBSP stop bit and trailing zero bits at the end */
+ *(p + nal_size - 1) = 0x80;
+
+ return nal_size;
+}
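
For reference, a minimal standalone sketch (not taken from the patch) of the
alignment math in coda_h264_padding() above, assuming the same filler-size
table: a filler NAL of at least 7 bytes is appended so that the total payload
lands on an 8-byte boundary.

	#include <stdio.h>

	static const unsigned char filler_size[8] = { 0, 7, 14, 13, 12, 11, 10, 9 };

	int main(void)
	{
		int size;

		for (size = 16; size < 24; size++) {
			int diff = size & 0x7;       /* bytes past the last 8-byte boundary */
			int pad = filler_size[diff]; /* filler NAL bytes to append */

			printf("size=%d pad=%d total=%d\n", size, pad, size + pad);
		}
		return 0;
	}

Every resulting total is a multiple of 8 bytes, matching the alignment that
coda_h264_padding() enforces.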
diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h
new file mode 100644
index 000000000000..bbc18c0dacd9
--- /dev/null
+++ b/drivers/media/platform/coda/coda.h
@@ -0,0 +1,287 @@
+/*
+ * Coda multi-standard codec IP
+ *
+ * Copyright (C) 2012 Vista Silicon S.L.
+ * Javier Martin, <javier.martin@vista-silicon.com>
+ * Xavier Duret
+ * Copyright (C) 2012-2014 Philipp Zabel, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/irqreturn.h>
+#include <linux/mutex.h>
+#include <linux/kfifo.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fh.h>
+#include <media/videobuf2-core.h>
+
+#include "coda_regs.h"
+
+#define CODA_MAX_FRAMEBUFFERS 8
+#define CODA_MAX_FRAME_SIZE 0x100000
+#define FMO_SLICE_SAVE_BUF_SIZE (32)
+
+enum {
+ V4L2_M2M_SRC = 0,
+ V4L2_M2M_DST = 1,
+};
+
+enum coda_inst_type {
+ CODA_INST_ENCODER,
+ CODA_INST_DECODER,
+};
+
+enum coda_product {
+ CODA_DX6 = 0xf001,
+ CODA_7541 = 0xf012,
+ CODA_960 = 0xf020,
+};
+
+struct coda_devtype {
+ char *firmware;
+ enum coda_product product;
+ const struct coda_codec *codecs;
+ unsigned int num_codecs;
+ size_t workbuf_size;
+ size_t tempbuf_size;
+ size_t iram_size;
+};
+
+struct coda_aux_buf {
+ void *vaddr;
+ dma_addr_t paddr;
+ u32 size;
+ struct debugfs_blob_wrapper blob;
+ struct dentry *dentry;
+};
+
+struct coda_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device vfd[2];
+ struct platform_device *plat_dev;
+ const struct coda_devtype *devtype;
+
+ void __iomem *regs_base;
+ struct clk *clk_per;
+ struct clk *clk_ahb;
+ struct reset_control *rstc;
+
+ struct coda_aux_buf codebuf;
+ struct coda_aux_buf tempbuf;
+ struct coda_aux_buf workbuf;
+ struct gen_pool *iram_pool;
+ struct coda_aux_buf iram;
+
+ spinlock_t irqlock;
+ struct mutex dev_mutex;
+ struct mutex coda_mutex;
+ struct workqueue_struct *workqueue;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct vb2_alloc_ctx *alloc_ctx;
+ struct list_head instances;
+ unsigned long instance_mask;
+ struct dentry *debugfs_root;
+};
+
+struct coda_codec {
+ u32 mode;
+ u32 src_fourcc;
+ u32 dst_fourcc;
+ u32 max_w;
+ u32 max_h;
+};
+
+struct coda_huff_tab;
+
+struct coda_params {
+ u8 rot_mode;
+ u8 h264_intra_qp;
+ u8 h264_inter_qp;
+ u8 h264_min_qp;
+ u8 h264_max_qp;
+ u8 h264_deblk_enabled;
+ u8 h264_deblk_alpha;
+ u8 h264_deblk_beta;
+ u8 mpeg4_intra_qp;
+ u8 mpeg4_inter_qp;
+ u8 gop_size;
+ int intra_refresh;
+ int codec_mode;
+ int codec_mode_aux;
+ enum v4l2_mpeg_video_multi_slice_mode slice_mode;
+ u32 framerate;
+ u16 bitrate;
+ u32 slice_max_bits;
+ u32 slice_max_mb;
+};
+
+struct coda_timestamp {
+ struct list_head list;
+ u32 sequence;
+ struct v4l2_timecode timecode;
+ struct timeval timestamp;
+};
+
+/* Per-queue, driver-specific private data */
+struct coda_q_data {
+ unsigned int width;
+ unsigned int height;
+ unsigned int bytesperline;
+ unsigned int sizeimage;
+ unsigned int fourcc;
+ struct v4l2_rect rect;
+};
+
+struct coda_iram_info {
+ u32 axi_sram_use;
+ phys_addr_t buf_bit_use;
+ phys_addr_t buf_ip_ac_dc_use;
+ phys_addr_t buf_dbk_y_use;
+ phys_addr_t buf_dbk_c_use;
+ phys_addr_t buf_ovl_use;
+ phys_addr_t buf_btp_use;
+ phys_addr_t search_ram_paddr;
+ int search_ram_size;
+ int remaining;
+ phys_addr_t next_paddr;
+};
+
+struct gdi_tiled_map {
+ int xy2ca_map[16];
+ int xy2ba_map[16];
+ int xy2ra_map[16];
+ int rbc2axi_map[32];
+ int xy2rbc_config;
+ int map_type;
+#define GDI_LINEAR_FRAME_MAP 0
+};
+
+struct coda_ctx;
+
+struct coda_context_ops {
+ int (*queue_init)(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq);
+ int (*start_streaming)(struct coda_ctx *ctx);
+ int (*prepare_run)(struct coda_ctx *ctx);
+ void (*finish_run)(struct coda_ctx *ctx);
+ void (*seq_end_work)(struct work_struct *work);
+ void (*release)(struct coda_ctx *ctx);
+};
+
+struct coda_ctx {
+ struct coda_dev *dev;
+ struct mutex buffer_mutex;
+ struct list_head list;
+ struct work_struct pic_run_work;
+ struct work_struct seq_end_work;
+ struct completion completion;
+ const struct coda_context_ops *ops;
+ int aborting;
+ int initialized;
+ int streamon_out;
+ int streamon_cap;
+ u32 isequence;
+ u32 qsequence;
+ u32 osequence;
+ u32 sequence_offset;
+ struct coda_q_data q_data[2];
+ enum coda_inst_type inst_type;
+ const struct coda_codec *codec;
+ enum v4l2_colorspace colorspace;
+ struct coda_params params;
+ struct v4l2_ctrl_handler ctrls;
+ struct v4l2_fh fh;
+ int gopcounter;
+ int runcounter;
+ char vpu_header[3][64];
+ int vpu_header_size[3];
+ struct kfifo bitstream_fifo;
+ struct mutex bitstream_mutex;
+ struct coda_aux_buf bitstream;
+ bool hold;
+ struct coda_aux_buf parabuf;
+ struct coda_aux_buf psbuf;
+ struct coda_aux_buf slicebuf;
+ struct coda_aux_buf internal_frames[CODA_MAX_FRAMEBUFFERS];
+ u32 frame_types[CODA_MAX_FRAMEBUFFERS];
+ struct coda_timestamp frame_timestamps[CODA_MAX_FRAMEBUFFERS];
+ u32 frame_errors[CODA_MAX_FRAMEBUFFERS];
+ struct list_head timestamp_list;
+ struct coda_aux_buf workbuf;
+ int num_internal_frames;
+ int idx;
+ int reg_idx;
+ struct coda_iram_info iram_info;
+ struct gdi_tiled_map tiled_map;
+ u32 bit_stream_param;
+ u32 frm_dis_flg;
+ u32 frame_mem_ctrl;
+ int display_idx;
+ struct dentry *debugfs_entry;
+};
+
+extern int coda_debug;
+
+void coda_write(struct coda_dev *dev, u32 data, u32 reg);
+unsigned int coda_read(struct coda_dev *dev, u32 reg);
+
+int coda_alloc_aux_buf(struct coda_dev *dev, struct coda_aux_buf *buf,
+ size_t size, const char *name, struct dentry *parent);
+void coda_free_aux_buf(struct coda_dev *dev, struct coda_aux_buf *buf);
+
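+/*
+ * Context buffers are allocated like the device-wide auxiliary buffers
+ * above, but with the per-context debugfs entry as parent instead of
+ * the device's debugfs root.
+ */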
+static inline int coda_alloc_context_buf(struct coda_ctx *ctx,
+ struct coda_aux_buf *buf, size_t size,
+ const char *name)
+{
+ return coda_alloc_aux_buf(ctx->dev, buf, size, name, ctx->debugfs_entry);
+}
+
+int coda_encoder_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq);
+int coda_decoder_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq);
+
+int coda_hw_reset(struct coda_ctx *ctx);
+
+void coda_fill_bitstream(struct coda_ctx *ctx);
+
+void coda_set_gdi_regs(struct coda_ctx *ctx);
+
+static inline struct coda_q_data *get_q_data(struct coda_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return &(ctx->q_data[V4L2_M2M_SRC]);
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ return &(ctx->q_data[V4L2_M2M_DST]);
+ default:
+ return NULL;
+ }
+}
+
+const char *coda_product_name(int product);
+
+int coda_check_firmware(struct coda_dev *dev);
+
+static inline int coda_get_bitstream_payload(struct coda_ctx *ctx)
+{
+ return kfifo_len(&ctx->bitstream_fifo);
+}
+
+void coda_bit_stream_end_flag(struct coda_ctx *ctx);
+
+int coda_h264_padding(int size, char *p);
+
+extern const struct coda_context_ops coda_bit_encode_ops;
+extern const struct coda_context_ops coda_bit_decode_ops;
+
+irqreturn_t coda_irq_handler(int irq, void *data);
diff --git a/drivers/media/platform/coda.h b/drivers/media/platform/coda/coda_regs.h
index c791275e307b..c791275e307b 100644
--- a/drivers/media/platform/coda.h
+++ b/drivers/media/platform/coda/coda_regs.h
diff --git a/drivers/media/platform/davinci/Kconfig b/drivers/media/platform/davinci/Kconfig
index afb3aec1320e..d9e1ddb586b1 100644
--- a/drivers/media/platform/davinci/Kconfig
+++ b/drivers/media/platform/davinci/Kconfig
@@ -1,6 +1,8 @@
config VIDEO_DAVINCI_VPIF_DISPLAY
tristate "TI DaVinci VPIF V4L2-Display driver"
- depends on VIDEO_DEV && ARCH_DAVINCI
+ depends on VIDEO_DEV
+ depends on ARCH_DAVINCI || COMPILE_TEST
+ depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
select VIDEO_ADV7343 if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_THS7303 if MEDIA_SUBDRV_AUTOSELECT
@@ -14,7 +16,9 @@ config VIDEO_DAVINCI_VPIF_DISPLAY
config VIDEO_DAVINCI_VPIF_CAPTURE
tristate "TI DaVinci VPIF video capture driver"
- depends on VIDEO_DEV && ARCH_DAVINCI
+ depends on VIDEO_DEV
+ depends on ARCH_DAVINCI || COMPILE_TEST
+ depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
help
Enables Davinci VPIF module used for capture devices.
@@ -26,7 +30,9 @@ config VIDEO_DAVINCI_VPIF_CAPTURE
config VIDEO_DM6446_CCDC
tristate "TI DM6446 CCDC video capture driver"
- depends on VIDEO_V4L2 && (ARCH_DAVINCI || ARCH_OMAP3)
+ depends on VIDEO_V4L2
+ depends on ARCH_DAVINCI || COMPILE_TEST
+ depends on HAS_DMA
select VIDEOBUF_DMA_CONTIG
help
Enables DaVinci CCD hw module. DaVinci CCDC hw interfaces
@@ -40,7 +46,9 @@ config VIDEO_DM6446_CCDC
config VIDEO_DM355_CCDC
tristate "TI DM355 CCDC video capture driver"
- depends on VIDEO_V4L2 && ARCH_DAVINCI
+ depends on VIDEO_V4L2
+ depends on ARCH_DAVINCI || COMPILE_TEST
+ depends on HAS_DMA
select VIDEOBUF_DMA_CONTIG
help
Enables DM355 CCD hw module. DM355 CCDC hw interfaces
@@ -55,6 +63,7 @@ config VIDEO_DM355_CCDC
config VIDEO_DM365_ISIF
tristate "TI DM365 ISIF video capture driver"
depends on VIDEO_V4L2 && ARCH_DAVINCI
+ depends on HAS_DMA
select VIDEOBUF_DMA_CONTIG
help
Enables ISIF hw module. This is the hardware module for
@@ -67,6 +76,7 @@ config VIDEO_DM365_ISIF
config VIDEO_DAVINCI_VPBE_DISPLAY
tristate "TI DaVinci VPBE V4L2-Display driver"
depends on ARCH_DAVINCI
+ depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
help
Enables Davinci VPBE module used for display devices.
diff --git a/drivers/media/platform/davinci/dm355_ccdc.c b/drivers/media/platform/davinci/dm355_ccdc.c
index 05f8fb7f7b70..3f44deb5b7a7 100644
--- a/drivers/media/platform/davinci/dm355_ccdc.c
+++ b/drivers/media/platform/davinci/dm355_ccdc.c
@@ -460,7 +460,7 @@ static void ccdc_config_black_compense(struct ccdc_black_compensation *bcomp)
* ccdc_write_dfc_entry()
* write an entry in the dfc table.
*/
-int ccdc_write_dfc_entry(int index, struct ccdc_vertical_dft *dfc)
+static int ccdc_write_dfc_entry(int index, struct ccdc_vertical_dft *dfc)
{
/* TODO This is to be re-visited and adjusted */
#define DFC_WRITE_WAIT_COUNT 1000
diff --git a/drivers/media/platform/davinci/dm644x_ccdc.c b/drivers/media/platform/davinci/dm644x_ccdc.c
index 07e98df3d867..62a0ebb01056 100644
--- a/drivers/media/platform/davinci/dm644x_ccdc.c
+++ b/drivers/media/platform/davinci/dm644x_ccdc.c
@@ -130,9 +130,9 @@ static void ccdc_enable_vport(int flag)
* This function will configure the window size
* to be capture in CCDC reg
*/
-void ccdc_setwin(struct v4l2_rect *image_win,
- enum ccdc_frmfmt frm_fmt,
- int ppc)
+static void ccdc_setwin(struct v4l2_rect *image_win,
+ enum ccdc_frmfmt frm_fmt,
+ int ppc)
{
int horz_start, horz_nr_pixels;
int vert_start, vert_nr_lines;
@@ -291,7 +291,7 @@ static int ccdc_update_raw_params(struct ccdc_config_params_raw *raw_params)
dev_dbg(ccdc_cfg.dev, "\n copy_from_user failed");
return -EFAULT;
}
- config_params->fault_pxl.fpc_table_addr = (unsigned int)fpc_physaddr;
+ config_params->fault_pxl.fpc_table_addr = (unsigned long)fpc_physaddr;
return 0;
}
@@ -370,7 +370,7 @@ static int ccdc_set_params(void __user *params)
* ccdc_config_ycbcr()
* This function will configure CCDC for YCbCr video capture
*/
-void ccdc_config_ycbcr(void)
+static void ccdc_config_ycbcr(void)
{
struct ccdc_params_ycbcr *params = &ccdc_cfg.ycbcr;
u32 syn_mode;
@@ -506,7 +506,7 @@ static void ccdc_config_fpc(struct ccdc_fault_pixel *fpc)
/* Configure Fault pixel if needed */
regw(fpc->fpc_table_addr, CCDC_FPC_ADDR);
- dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FPC_ADDR...\n",
+ dev_dbg(ccdc_cfg.dev, "\nWriting 0x%lx to FPC_ADDR...\n",
(fpc->fpc_table_addr));
/* Write the FPC params with FPC disable */
val = fpc->fp_num & CCDC_FPC_FPC_NUM_MASK;
@@ -523,7 +523,7 @@ static void ccdc_config_fpc(struct ccdc_fault_pixel *fpc)
* ccdc_config_raw()
* This function will configure CCDC for Raw capture mode
*/
-void ccdc_config_raw(void)
+static void ccdc_config_raw(void)
{
struct ccdc_params_raw *params = &ccdc_cfg.bayer;
struct ccdc_config_params_raw *config_params =
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index ea7661a27479..de55f47a77db 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -125,7 +125,7 @@ static DEFINE_MUTEX(ccdc_lock);
/* ccdc configuration */
static struct ccdc_config *ccdc_cfg;
-const struct vpfe_standard vpfe_standards[] = {
+static const struct vpfe_standard vpfe_standards[] = {
{V4L2_STD_525_60, 720, 480, {11, 10}, 1},
{V4L2_STD_625_50, 720, 576, {54, 59}, 1},
};
@@ -442,11 +442,10 @@ static int vpfe_config_image_format(struct vpfe_device *vpfe_dev,
return ret;
/* Update the values of sizeimage and bytesperline */
- if (!ret) {
- pix->bytesperline = ccdc_dev->hw_ops.get_line_length();
- pix->sizeimage = pix->bytesperline * pix->height;
- }
- return ret;
+ pix->bytesperline = ccdc_dev->hw_ops.get_line_length();
+ pix->sizeimage = pix->bytesperline * pix->height;
+
+ return 0;
}
static int vpfe_initialize_device(struct vpfe_device *vpfe_dev)
@@ -943,12 +942,11 @@ static int vpfe_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *fmt)
{
struct vpfe_device *vpfe_dev = video_drvdata(file);
- int ret = 0;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_fmt_vid_cap\n");
/* Fill in the information about format */
*fmt = vpfe_dev->fmt;
- return ret;
+ return 0;
}
static int vpfe_enum_fmt_vid_cap(struct file *file, void *priv,
@@ -1914,7 +1912,7 @@ static int vpfe_probe(struct platform_device *pdev)
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
"trying to register vpfe device.\n");
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
- "video_dev=%x\n", (int)&vpfe_dev->video_dev);
+ "video_dev=%p\n", &vpfe_dev->video_dev);
vpfe_dev->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
ret = video_register_device(vpfe_dev->video_dev,
VFL_TYPE_GRABBER, -1);
diff --git a/drivers/media/platform/davinci/vpif.c b/drivers/media/platform/davinci/vpif.c
index cd08e5248387..3dad5bd7fe0a 100644
--- a/drivers/media/platform/davinci/vpif.c
+++ b/drivers/media/platform/davinci/vpif.c
@@ -38,6 +38,7 @@ MODULE_LICENSE("GPL");
#define VPIF_CH3_MAX_MODES 2
spinlock_t vpif_lock;
+EXPORT_SYMBOL_GPL(vpif_lock);
void __iomem *vpif_base;
EXPORT_SYMBOL_GPL(vpif_base);
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index b054b7eec53d..3ccb26ff43c8 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -213,8 +213,6 @@ static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
/* Remove buffer from the buffer queue */
list_del(&common->cur_frm->list);
spin_unlock_irqrestore(&common->irqlock, flags);
- /* Mark state of the current frame to active */
- common->cur_frm->vb.state = VB2_BUF_STATE_ACTIVE;
addr = vb2_dma_contig_plane_dma_addr(&common->cur_frm->vb, 0);
@@ -350,7 +348,6 @@ static void vpif_schedule_next_buffer(struct common_obj *common)
/* Remove that buffer from the buffer queue */
list_del(&common->next_frm->list);
spin_unlock(&common->irqlock);
- common->next_frm->vb.state = VB2_BUF_STATE_ACTIVE;
addr = vb2_dma_contig_plane_dma_addr(&common->next_frm->vb, 0);
/* Set top and bottom field addresses in VPIF registers */
@@ -373,7 +370,6 @@ static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
struct vpif_device *dev = &vpif_obj;
struct common_obj *common;
struct channel_obj *ch;
- enum v4l2_field field;
int channel_id = 0;
int fid = -1, i;
@@ -383,8 +379,6 @@ static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
ch = dev->dev[channel_id];
- field = ch->common[VPIF_VIDEO_INDEX].fmt.fmt.pix.field;
-
for (i = 0; i < VPIF_NUMBER_OF_OBJECTS; i++) {
common = &ch->common[i];
/* skip If streaming is not started in this channel */
@@ -533,7 +527,7 @@ static int vpif_update_std_info(struct channel_obj *ch)
*/
static void vpif_calculate_offsets(struct channel_obj *ch)
{
- unsigned int hpitch, vpitch, sizeimage;
+ unsigned int hpitch, sizeimage;
struct video_obj *vid_ch = &(ch->video);
struct vpif_params *vpifparams = &ch->vpifparams;
struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
@@ -552,7 +546,6 @@ static void vpif_calculate_offsets(struct channel_obj *ch)
sizeimage = common->fmt.fmt.pix.sizeimage;
hpitch = common->fmt.fmt.pix.bytesperline;
- vpitch = sizeimage / (hpitch * 2);
if ((V4L2_FIELD_NONE == vid_ch->buf_field) ||
(V4L2_FIELD_INTERLACED == vid_ch->buf_field)) {
@@ -1603,7 +1596,7 @@ static int vpif_suspend(struct device *dev)
ch = vpif_obj.dev[i];
common = &ch->common[VPIF_VIDEO_INDEX];
- if (!vb2_is_streaming(&common->buffer_queue))
+ if (!vb2_start_streaming_called(&common->buffer_queue))
continue;
mutex_lock(&common->lock);
@@ -1637,7 +1630,7 @@ static int vpif_resume(struct device *dev)
ch = vpif_obj.dev[i];
common = &ch->common[VPIF_VIDEO_INDEX];
- if (!vb2_is_streaming(&common->buffer_queue))
+ if (!vb2_start_streaming_called(&common->buffer_queue))
continue;
mutex_lock(&common->lock);
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index a03ec7381cfe..8d6ced56253c 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -196,8 +196,6 @@ static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
list_del(&common->cur_frm->list);
spin_unlock_irqrestore(&common->irqlock, flags);
- /* Mark state of the current frame to active */
- common->cur_frm->vb.state = VB2_BUF_STATE_ACTIVE;
addr = vb2_dma_contig_plane_dma_addr(&common->cur_frm->vb, 0);
common->set_addr((addr + common->ytop_off),
@@ -306,8 +304,6 @@ static void process_progressive_mode(struct common_obj *common)
/* Remove that buffer from the buffer queue */
list_del(&common->next_frm->list);
spin_unlock(&common->irqlock);
- /* Mark status of the buffer as active */
- common->next_frm->vb.state = VB2_BUF_STATE_ACTIVE;
/* Set top and bottom field addrs in VPIF registers */
addr = vb2_dma_contig_plane_dma_addr(&common->next_frm->vb, 0);
@@ -360,7 +356,6 @@ static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
struct vpif_device *dev = &vpif_obj;
struct channel_obj *ch;
struct common_obj *common;
- enum v4l2_field field;
int fid = -1, i;
int channel_id = 0;
@@ -369,7 +364,6 @@ static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
return IRQ_NONE;
ch = dev->dev[channel_id];
- field = ch->common[VPIF_VIDEO_INDEX].fmt.fmt.pix.field;
for (i = 0; i < VPIF_NUMOBJECTS; i++) {
common = &ch->common[i];
/* If streaming is started in this channel */
@@ -502,7 +496,7 @@ static void vpif_calculate_offsets(struct channel_obj *ch)
struct vpif_params *vpifparams = &ch->vpifparams;
enum v4l2_field field = common->fmt.fmt.pix.field;
struct video_obj *vid_ch = &ch->video;
- unsigned int hpitch, vpitch, sizeimage;
+ unsigned int hpitch, sizeimage;
if (V4L2_FIELD_ANY == common->fmt.fmt.pix.field) {
if (ch->vpifparams.std_info.frm_fmt)
@@ -516,7 +510,6 @@ static void vpif_calculate_offsets(struct channel_obj *ch)
sizeimage = common->fmt.fmt.pix.sizeimage;
hpitch = common->fmt.fmt.pix.bytesperline;
- vpitch = sizeimage / (hpitch * 2);
if ((V4L2_FIELD_NONE == vid_ch->buf_field) ||
(V4L2_FIELD_INTERLACED == vid_ch->buf_field)) {
common->ytop_off = 0;
@@ -813,17 +806,14 @@ static int vpif_set_output(struct vpif_display_config *vpif_cfg,
{
struct vpif_display_chan_config *chan_cfg =
&vpif_cfg->chan_config[ch->channel_id];
- struct vpif_subdev_info *subdev_info = NULL;
struct v4l2_subdev *sd = NULL;
u32 input = 0, output = 0;
int sd_index;
int ret;
sd_index = vpif_output_to_subdev(vpif_cfg, chan_cfg, index);
- if (sd_index >= 0) {
+ if (sd_index >= 0)
sd = vpif_obj.sd[sd_index];
- subdev_info = &vpif_cfg->subdevinfo[sd_index];
- }
if (sd) {
input = chan_cfg->outputs[index].input_route;
@@ -1210,8 +1200,8 @@ static int vpif_probe_complete(void)
INIT_LIST_HEAD(&common->dma_queue);
/* register video device */
- vpif_dbg(1, debug, "channel=%x,channel->video_dev=%x\n",
- (int)ch, (int)&ch->video_dev);
+ vpif_dbg(1, debug, "channel=%p,channel->video_dev=%p\n",
+ ch, &ch->video_dev);
/* Initialize the video_device structure */
vdev = ch->video_dev;
@@ -1410,7 +1400,7 @@ static int vpif_suspend(struct device *dev)
ch = vpif_obj.dev[i];
common = &ch->common[VPIF_VIDEO_INDEX];
- if (!vb2_is_streaming(&common->buffer_queue))
+ if (!vb2_start_streaming_called(&common->buffer_queue))
continue;
mutex_lock(&common->lock);
@@ -1442,7 +1432,7 @@ static int vpif_resume(struct device *dev)
ch = vpif_obj.dev[i];
common = &ch->common[VPIF_VIDEO_INDEX];
- if (!vb2_is_streaming(&common->buffer_queue))
+ if (!vb2_start_streaming_called(&common->buffer_queue))
continue;
mutex_lock(&common->lock);
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
index 9d0cc04d7ab7..b4c9f1d08968 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/exynos-gsc/gsc-core.c
@@ -852,8 +852,8 @@ int gsc_prepare_addr(struct gsc_ctx *ctx, struct vb2_buffer *vb,
(frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420M))
swap(addr->cb, addr->cr);
- pr_debug("ADDR: y= 0x%X cb= 0x%X cr= 0x%X ret= %d",
- addr->y, addr->cb, addr->cr, ret);
+ pr_debug("ADDR: y= %pad cb= %pad cr= %pad ret= %d",
+ &addr->y, &addr->cb, &addr->cr, ret);
return ret;
}
@@ -1086,7 +1086,7 @@ static int gsc_probe(struct platform_device *pdev)
else
gsc->id = pdev->id;
- if (gsc->id < 0 || gsc->id >= drv_data->num_entities) {
+ if (gsc->id >= drv_data->num_entities) {
dev_err(dev, "Invalid platform device id: %d\n", gsc->id);
return -EINVAL;
}
diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
index e434f1f03d7b..74e1de637e8f 100644
--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
+++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
@@ -362,7 +362,6 @@ static int gsc_m2m_reqbufs(struct file *file, void *fh,
{
struct gsc_ctx *ctx = fh_to_ctx(fh);
struct gsc_dev *gsc = ctx->gsc_dev;
- struct gsc_frame *frame;
u32 max_cnt;
max_cnt = (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) ?
@@ -376,8 +375,6 @@ static int gsc_m2m_reqbufs(struct file *file, void *fh,
gsc_ctx_state_lock_clear(GSC_DST_FMT, ctx);
}
- frame = ctx_get_frame(ctx, reqbufs->type);
-
return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
}
diff --git a/drivers/media/platform/exynos-gsc/gsc-regs.c b/drivers/media/platform/exynos-gsc/gsc-regs.c
index e22d147a6940..ce12a1100511 100644
--- a/drivers/media/platform/exynos-gsc/gsc-regs.c
+++ b/drivers/media/platform/exynos-gsc/gsc-regs.c
@@ -90,8 +90,8 @@ void gsc_hw_set_output_buf_masking(struct gsc_dev *dev, u32 shift,
void gsc_hw_set_input_addr(struct gsc_dev *dev, struct gsc_addr *addr,
int index)
{
- pr_debug("src_buf[%d]: 0x%X, cb: 0x%X, cr: 0x%X", index,
- addr->y, addr->cb, addr->cr);
+ pr_debug("src_buf[%d]: %pad, cb: %pad, cr: %pad", index,
+ &addr->y, &addr->cb, &addr->cr);
writel(addr->y, dev->regs + GSC_IN_BASE_ADDR_Y(index));
writel(addr->cb, dev->regs + GSC_IN_BASE_ADDR_CB(index));
writel(addr->cr, dev->regs + GSC_IN_BASE_ADDR_CR(index));
@@ -101,8 +101,8 @@ void gsc_hw_set_input_addr(struct gsc_dev *dev, struct gsc_addr *addr,
void gsc_hw_set_output_addr(struct gsc_dev *dev,
struct gsc_addr *addr, int index)
{
- pr_debug("dst_buf[%d]: 0x%X, cb: 0x%X, cr: 0x%X",
- index, addr->y, addr->cb, addr->cr);
+ pr_debug("dst_buf[%d]: %pad, cb: %pad, cr: %pad",
+ index, &addr->y, &addr->cb, &addr->cr);
writel(addr->y, dev->regs + GSC_OUT_BASE_ADDR_Y(index));
writel(addr->cb, dev->regs + GSC_OUT_BASE_ADDR_CB(index));
writel(addr->cr, dev->regs + GSC_OUT_BASE_ADDR_CR(index));
diff --git a/drivers/media/platform/exynos4-is/Kconfig b/drivers/media/platform/exynos4-is/Kconfig
index 5dcaa0a80540..77c951237744 100644
--- a/drivers/media/platform/exynos4-is/Kconfig
+++ b/drivers/media/platform/exynos4-is/Kconfig
@@ -2,7 +2,7 @@
config VIDEO_SAMSUNG_EXYNOS4_IS
bool "Samsung S5P/EXYNOS4 SoC series Camera Subsystem driver"
depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on (PLAT_S5P || ARCH_EXYNOS)
+ depends on (PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST)
depends on OF && COMMON_CLK
help
Say Y here to enable camera host interface devices for
@@ -16,6 +16,7 @@ config VIDEO_EXYNOS4_IS_COMMON
config VIDEO_S5P_FIMC
tristate "S5P/EXYNOS4 FIMC/CAMIF camera interface driver"
depends on I2C
+ depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
select MFD_SYSCON
@@ -43,6 +44,7 @@ if SOC_EXYNOS4212 || SOC_EXYNOS4412 || SOC_EXYNOS5250
config VIDEO_EXYNOS_FIMC_LITE
tristate "EXYNOS FIMC-LITE camera interface driver"
depends on I2C
+ depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
select VIDEO_EXYNOS4_IS_COMMON
help
@@ -55,6 +57,7 @@ endif
config VIDEO_EXYNOS4_FIMC_IS
tristate "EXYNOS4x12 FIMC-IS (Imaging Subsystem) driver"
+ depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
depends on OF
select FW_LOADER
diff --git a/drivers/media/platform/exynos4-is/fimc-is-errno.c b/drivers/media/platform/exynos4-is/fimc-is-errno.c
index e8519e151c1a..e050e63fe358 100644
--- a/drivers/media/platform/exynos4-is/fimc-is-errno.c
+++ b/drivers/media/platform/exynos4-is/fimc-is-errno.c
@@ -15,7 +15,7 @@
#include "fimc-is-errno.h"
-const char * const fimc_is_param_strerr(unsigned int error)
+const char *fimc_is_param_strerr(unsigned int error)
{
switch (error) {
case ERROR_COMMON_CMD:
@@ -146,7 +146,7 @@ const char * const fimc_is_param_strerr(unsigned int error)
}
}
-const char * const fimc_is_strerr(unsigned int error)
+const char *fimc_is_strerr(unsigned int error)
{
error &= ~IS_ERROR_TIME_OUT_FLAG;
diff --git a/drivers/media/platform/exynos4-is/fimc-is-errno.h b/drivers/media/platform/exynos4-is/fimc-is-errno.h
index 3de6f6da6f87..ef981e74513a 100644
--- a/drivers/media/platform/exynos4-is/fimc-is-errno.h
+++ b/drivers/media/platform/exynos4-is/fimc-is-errno.h
@@ -242,7 +242,7 @@ enum fimc_is_error {
ERROR_SCALER_FLIP = 521,
};
-const char * const fimc_is_strerr(unsigned int error);
-const char * const fimc_is_param_strerr(unsigned int error);
+const char *fimc_is_strerr(unsigned int error);
+const char *fimc_is_param_strerr(unsigned int error);
#endif /* FIMC_IS_ERR_H_ */
diff --git a/drivers/media/platform/exynos4-is/fimc-is-param.c b/drivers/media/platform/exynos4-is/fimc-is-param.c
index bf1465d1bf6d..72b9b436c5c0 100644
--- a/drivers/media/platform/exynos4-is/fimc-is-param.c
+++ b/drivers/media/platform/exynos4-is/fimc-is-param.c
@@ -667,7 +667,6 @@ void __is_set_fd_config_orientation_val(struct fimc_is *is, u32 val)
void fimc_is_set_initial_params(struct fimc_is *is)
{
struct global_param *global;
- struct sensor_param *sensor;
struct isp_param *isp;
struct drc_param *drc;
struct fd_param *fd;
@@ -676,7 +675,6 @@ void fimc_is_set_initial_params(struct fimc_is *is)
index = is->config_index;
global = &is->config[index].global;
- sensor = &is->config[index].sensor;
isp = &is->config[index].isp;
drc = &is->config[index].drc;
fd = &is->config[index].fd;
diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
index 5476dce3ad29..22162b2567da 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.c
+++ b/drivers/media/platform/exynos4-is/fimc-is.c
@@ -388,7 +388,7 @@ static void fimc_is_load_firmware(const struct firmware *fw, void *context)
mutex_lock(&is->lock);
if (fw->size < FIMC_IS_FW_SIZE_MIN || fw->size > FIMC_IS_FW_SIZE_MAX) {
- dev_err(dev, "wrong firmware size: %d\n", fw->size);
+ dev_err(dev, "wrong firmware size: %zu\n", fw->size);
goto done;
}
@@ -416,7 +416,7 @@ static void fimc_is_load_firmware(const struct firmware *fw, void *context)
dev_info(dev, "loaded firmware: %s, rev. %s\n",
is->fw.info, is->fw.version);
- dev_dbg(dev, "FW size: %d, paddr: %#x\n", fw->size, is->memory.paddr);
+ dev_dbg(dev, "FW size: %zu, paddr: %pad\n", fw->size, &is->memory.paddr);
is->is_shared_region->chip_id = 0xe4412;
is->is_shared_region->chip_rev_no = 1;
@@ -693,9 +693,9 @@ int fimc_is_hw_initialize(struct fimc_is *is)
return -EIO;
}
- pr_debug("shared region: %#x, parameter region: %#x\n",
- is->memory.paddr + FIMC_IS_SHARED_REGION_OFFSET,
- is->is_dma_p_region);
+ pr_debug("shared region: %pad, parameter region: %pad\n",
+ &is->memory.paddr + FIMC_IS_SHARED_REGION_OFFSET,
+ &is->is_dma_p_region);
is->setfile.sub_index = 0;
diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.c b/drivers/media/platform/exynos4-is/fimc-isp-video.c
index 93f9cf2ebcd6..76b6b4d14616 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp-video.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp-video.c
@@ -219,9 +219,9 @@ static void isp_video_capture_buffer_queue(struct vb2_buffer *vb)
ivb->dma_addr[i];
isp_dbg(2, &video->ve.vdev,
- "dma_buf %d (%d/%d/%d) addr: %#x\n",
- buf_index, ivb->index, i, vb->v4l2_buf.index,
- ivb->dma_addr[i]);
+ "dma_buf %pad (%d/%d/%d) addr: %pad\n",
+ &buf_index, ivb->index, i, vb->v4l2_buf.index,
+ &ivb->dma_addr[i]);
}
if (++video->buf_count < video->reqbufs_count)
@@ -313,7 +313,6 @@ static int isp_video_release(struct file *file)
struct fimc_is_video *ivc = &isp->video_capture;
struct media_entity *entity = &ivc->ve.vdev.entity;
struct media_device *mdev = entity->parent;
- int ret = 0;
mutex_lock(&isp->video_lock);
@@ -335,7 +334,7 @@ static int isp_video_release(struct file *file)
pm_runtime_put(&isp->pdev->dev);
mutex_unlock(&isp->video_lock);
- return ret;
+ return 0;
}
static const struct v4l2_file_operations isp_video_fops = {
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index 344718df5c62..54c49d5e7690 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -1098,8 +1098,10 @@ static int fimc_md_link_notify(struct media_link *link, unsigned int flags,
if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH) {
if (!(flags & MEDIA_LNK_FL_ENABLED))
ret = __fimc_md_modify_pipelines(sink, false);
+#if 0
else
- ; /* TODO: Link state change validation */
+ /* TODO: Link state change validation */
+#endif
/* After link activation */
} else if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
(link->flags & MEDIA_LNK_FL_ENABLED)) {
diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
index ae54ef5f535d..db6fd14d1936 100644
--- a/drivers/media/platform/exynos4-is/mipi-csis.c
+++ b/drivers/media/platform/exynos4-is/mipi-csis.c
@@ -25,6 +25,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/videodev2.h>
@@ -752,7 +753,7 @@ static int s5pcsis_parse_dt(struct platform_device *pdev,
v4l2_of_parse_endpoint(node, &endpoint);
state->index = endpoint.base.port - FIMC_INPUT_MIPI_CSI2_0;
- if (state->index < 0 || state->index >= CSIS_MAX_ENTITIES)
+ if (state->index >= CSIS_MAX_ENTITIES)
return -ENXIO;
/* Get MIPI CSI-2 bus configration from the endpoint node. */
diff --git a/drivers/media/platform/marvell-ccic/Kconfig b/drivers/media/platform/marvell-ccic/Kconfig
index bf739e3b3398..6265d36adceb 100644
--- a/drivers/media/platform/marvell-ccic/Kconfig
+++ b/drivers/media/platform/marvell-ccic/Kconfig
@@ -1,6 +1,7 @@
config VIDEO_CAFE_CCIC
tristate "Marvell 88ALP01 (Cafe) CMOS Camera Controller support"
depends on PCI && I2C && VIDEO_V4L2
+ depends on HAS_DMA
select VIDEO_OV7670
select VIDEOBUF2_VMALLOC
select VIDEOBUF2_DMA_CONTIG
@@ -12,6 +13,7 @@ config VIDEO_CAFE_CCIC
config VIDEO_MMP_CAMERA
tristate "Marvell Armada 610 integrated camera controller support"
depends on ARCH_MMP && I2C && VIDEO_V4L2
+ depends on HAS_DMA
select VIDEO_OV7670
select I2C_GPIO
select VIDEOBUF2_DMA_SG
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
index be4b51212106..7a86c77bffa0 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.c
+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
@@ -67,7 +67,7 @@ MODULE_PARM_DESC(dma_buf_size,
"parameters require larger buffers, an attempt to reallocate "
"will be made.");
#else /* MCAM_MODE_VMALLOC */
-static const bool alloc_bufs_at_read = 0;
+static const bool alloc_bufs_at_read;
static const int n_dma_bufs = 3; /* Used by S/G_PARM */
#endif /* MCAM_MODE_VMALLOC */
diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
index fa8f7cabe364..4971ff21f82b 100644
--- a/drivers/media/platform/mx2_emmaprp.c
+++ b/drivers/media/platform/mx2_emmaprp.c
@@ -27,7 +27,7 @@
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-dma-contig.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#define EMMAPRP_MODULE_NAME "mem2mem-emmaprp"
diff --git a/drivers/media/platform/omap/Kconfig b/drivers/media/platform/omap/Kconfig
index 37ad446b35b3..05de442d24e4 100644
--- a/drivers/media/platform/omap/Kconfig
+++ b/drivers/media/platform/omap/Kconfig
@@ -3,7 +3,7 @@ config VIDEO_OMAP2_VOUT_VRFB
config VIDEO_OMAP2_VOUT
tristate "OMAP2/OMAP3 V4L2-Display driver"
- depends on ARCH_OMAP2 || ARCH_OMAP3
+ depends on ARCH_OMAP2 || ARCH_OMAP3 || (COMPILE_TEST && HAS_MMU)
select VIDEOBUF_GEN
select VIDEOBUF_DMA_CONTIG
select OMAP2_DSS if HAS_IOMEM && ARCH_OMAP2PLUS
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index 2d177fa58471..64ab6fb06b9c 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -369,7 +369,7 @@ static int omapvid_setup_overlay(struct omap_vout_device *vout,
{
int ret = 0;
struct omap_overlay_info info;
- int cropheight, cropwidth, pixheight, pixwidth;
+ int cropheight, cropwidth, pixwidth;
if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0 &&
(outw != vout->pix.width || outh != vout->pix.height)) {
@@ -389,12 +389,10 @@ static int omapvid_setup_overlay(struct omap_vout_device *vout,
if (is_rotation_90_or_270(vout)) {
cropheight = vout->crop.width;
cropwidth = vout->crop.height;
- pixheight = vout->pix.width;
pixwidth = vout->pix.height;
} else {
cropheight = vout->crop.height;
cropwidth = vout->crop.width;
- pixheight = vout->pix.height;
pixwidth = vout->pix.width;
}
@@ -991,7 +989,7 @@ static int omap_vout_release(struct file *file)
mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN |
DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_VSYNC2;
omap_dispc_unregister_isr(omap_vout_isr, vout, mask);
- vout->streaming = 0;
+ vout->streaming = false;
videobuf_streamoff(q);
videobuf_queue_cancel(q);
@@ -1451,12 +1449,10 @@ static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *a)
}
case V4L2_CID_VFLIP:
{
- struct omap_overlay *ovl;
struct omapvideo_info *ovid;
unsigned int mirror = a->value;
ovid = &vout->vid_info;
- ovl = ovid->overlays[0];
mutex_lock(&vout->lock);
if (mirror && ovid->rotation_type == VOUT_ROT_NONE) {
@@ -1489,7 +1485,7 @@ static int vidioc_reqbufs(struct file *file, void *fh,
struct omap_vout_device *vout = fh;
struct videobuf_queue *q = &vout->vbq;
- if ((req->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) || (req->count < 0))
+ if (req->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
return -EINVAL;
/* if memory is not mmp or userptr
return error */
@@ -1648,7 +1644,7 @@ static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
vout->field_id = 0;
/* set flag here. Next QBUF will start DMA */
- vout->streaming = 1;
+ vout->streaming = true;
vout->first_int = 1;
@@ -1708,7 +1704,7 @@ static int vidioc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)
if (!vout->streaming)
return -EINVAL;
- vout->streaming = 0;
+ vout->streaming = false;
mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD
| DISPC_IRQ_VSYNC2;
@@ -1916,7 +1912,7 @@ static int __init omap_vout_setup_video_data(struct omap_vout_device *vout)
control[0].id = V4L2_CID_ROTATE;
control[0].value = 0;
vout->rotation = 0;
- vout->mirror = 0;
+ vout->mirror = false;
vout->control[2].id = V4L2_CID_HFLIP;
vout->control[2].value = 0;
if (vout->vid_info.rotation_type == VOUT_ROT_VRFB)
diff --git a/drivers/media/platform/omap/omap_vout_vrfb.c b/drivers/media/platform/omap/omap_vout_vrfb.c
index 62e7e5783ce8..aa39306afc73 100644
--- a/drivers/media/platform/omap/omap_vout_vrfb.c
+++ b/drivers/media/platform/omap/omap_vout_vrfb.c
@@ -148,7 +148,7 @@ int omap_vout_setup_vrfb_bufs(struct platform_device *pdev, int vid_num,
ret = -ENOMEM;
goto release_vrfb_ctx;
}
- vout->vrfb_static_allocation = 1;
+ vout->vrfb_static_allocation = true;
}
return 0;
@@ -336,7 +336,7 @@ void omap_vout_calculate_vrfb_offset(struct omap_vout_device *vout)
offset = vout->vrfb_context[0].yoffset *
vout->vrfb_context[0].bytespp;
temp_ps = ps / vr_ps;
- if (mirroring == 0) {
+ if (!mirroring) {
*cropped_offset = offset + line_length *
temp_ps * cleft + crop->top * temp_ps;
} else {
@@ -350,7 +350,7 @@ void omap_vout_calculate_vrfb_offset(struct omap_vout_device *vout)
vout->vrfb_context[0].bytespp) +
(vout->vrfb_context[0].xoffset *
vout->vrfb_context[0].bytespp));
- if (mirroring == 0) {
+ if (!mirroring) {
*cropped_offset = offset + (line_length * ps * ctop) +
(cleft / vr_ps) * ps;
@@ -364,7 +364,7 @@ void omap_vout_calculate_vrfb_offset(struct omap_vout_device *vout)
offset = MAX_PIXELS_PER_LINE * vout->vrfb_context[0].xoffset *
vout->vrfb_context[0].bytespp;
temp_ps = ps / vr_ps;
- if (mirroring == 0) {
+ if (!mirroring) {
*cropped_offset = offset + line_length *
temp_ps * crop->left + ctop * ps;
} else {
@@ -375,7 +375,7 @@ void omap_vout_calculate_vrfb_offset(struct omap_vout_device *vout)
}
break;
case dss_rotation_0_degree:
- if (mirroring == 0) {
+ if (!mirroring) {
*cropped_offset = (line_length * ps) *
crop->top + (crop->left / vr_ps) * ps;
} else {
diff --git a/drivers/media/platform/omap/omap_vout_vrfb.h b/drivers/media/platform/omap/omap_vout_vrfb.h
index ffde741e0590..4c2314839b48 100644
--- a/drivers/media/platform/omap/omap_vout_vrfb.h
+++ b/drivers/media/platform/omap/omap_vout_vrfb.h
@@ -23,18 +23,18 @@ int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
struct videobuf_buffer *vb);
void omap_vout_calculate_vrfb_offset(struct omap_vout_device *vout);
#else
-void omap_vout_free_vrfb_buffers(struct omap_vout_device *vout) { }
-int omap_vout_setup_vrfb_bufs(struct platform_device *pdev, int vid_num,
+static inline void omap_vout_free_vrfb_buffers(struct omap_vout_device *vout) { };
+static inline int omap_vout_setup_vrfb_bufs(struct platform_device *pdev, int vid_num,
u32 static_vrfb_allocation)
- { return 0; }
-void omap_vout_release_vrfb(struct omap_vout_device *vout) { }
-int omap_vout_vrfb_buffer_setup(struct omap_vout_device *vout,
+ { return 0; };
+static inline void omap_vout_release_vrfb(struct omap_vout_device *vout) { };
+static inline int omap_vout_vrfb_buffer_setup(struct omap_vout_device *vout,
unsigned int *count, unsigned int startindex)
- { return 0; }
-int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
+ { return 0; };
+static inline int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
struct videobuf_buffer *vb)
- { return 0; }
-void omap_vout_calculate_vrfb_offset(struct omap_vout_device *vout) { }
+ { return 0; };
+static inline void omap_vout_calculate_vrfb_offset(struct omap_vout_device *vout) { };
#endif
#endif
diff --git a/drivers/media/platform/omap3isp/cfa_coef_table.h b/drivers/media/platform/omap3isp/cfa_coef_table.h
index c84df0706f3e..e75b0eb2519b 100644
--- a/drivers/media/platform/omap3isp/cfa_coef_table.h
+++ b/drivers/media/platform/omap3isp/cfa_coef_table.h
@@ -11,16 +11,6 @@
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
{ 244, 0, 247, 0, 12, 27, 36, 247, 250, 0, 27, 0, 4, 250, 12, 244,
diff --git a/drivers/media/platform/omap3isp/gamma_table.h b/drivers/media/platform/omap3isp/gamma_table.h
index 78deebf7d965..3b507078016d 100644
--- a/drivers/media/platform/omap3isp/gamma_table.h
+++ b/drivers/media/platform/omap3isp/gamma_table.h
@@ -12,16 +12,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
0, 0, 1, 2, 3, 3, 4, 5, 6, 8, 10, 12, 14, 16, 18, 20,
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 2c7aa6720569..72265e58ca60 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -40,16 +40,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#include <asm/cacheflush.h>
@@ -999,16 +989,14 @@ static int isp_pipeline_disable(struct isp_pipeline *pipe)
video, s_stream, 0);
}
- v4l2_subdev_call(subdev, video, s_stream, 0);
+ ret = v4l2_subdev_call(subdev, video, s_stream, 0);
if (subdev == &isp->isp_res.subdev)
- ret = isp_pipeline_wait(isp, isp_pipeline_wait_resizer);
+ ret |= isp_pipeline_wait(isp, isp_pipeline_wait_resizer);
else if (subdev == &isp->isp_prev.subdev)
- ret = isp_pipeline_wait(isp, isp_pipeline_wait_preview);
+ ret |= isp_pipeline_wait(isp, isp_pipeline_wait_preview);
else if (subdev == &isp->isp_ccdc.subdev)
- ret = isp_pipeline_wait(isp, isp_pipeline_wait_ccdc);
- else
- ret = 0;
+ ret |= isp_pipeline_wait(isp, isp_pipeline_wait_ccdc);
/* Handle stop failures. An entity that fails to stop can
* usually just be restarted. Flag the stop failure nonetheless
diff --git a/drivers/media/platform/omap3isp/isp.h b/drivers/media/platform/omap3isp/isp.h
index 2c314eea1252..cfdfc8714b6b 100644
--- a/drivers/media/platform/omap3isp/isp.h
+++ b/drivers/media/platform/omap3isp/isp.h
@@ -12,16 +12,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#ifndef OMAP3_ISP_CORE_H
diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c
index 9f727d20f06d..81a9dc053d58 100644
--- a/drivers/media/platform/omap3isp/ispccdc.c
+++ b/drivers/media/platform/omap3isp/ispccdc.c
@@ -12,16 +12,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#include <linux/module.h>
@@ -491,14 +481,13 @@ done:
static inline int ccdc_lsc_is_configured(struct isp_ccdc_device *ccdc)
{
unsigned long flags;
+ int ret;
spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
- if (ccdc->lsc.active) {
- spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);
- return 1;
- }
+ ret = ccdc->lsc.active != NULL;
spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);
- return 0;
+
+ return ret;
}
static int ccdc_lsc_enable(struct isp_ccdc_device *ccdc)
@@ -818,29 +807,48 @@ static void ccdc_config_vp(struct isp_ccdc_device *ccdc)
struct isp_pipeline *pipe = to_isp_pipeline(&ccdc->subdev.entity);
struct isp_device *isp = to_isp_device(ccdc);
const struct isp_format_info *info;
+ struct v4l2_mbus_framefmt *format;
unsigned long l3_ick = pipe->l3_ick;
unsigned int max_div = isp->revision == ISP_REVISION_15_0 ? 64 : 8;
unsigned int div = 0;
- u32 fmtcfg_vp;
+ u32 fmtcfg = ISPCCDC_FMTCFG_VPEN;
+
+ format = &ccdc->formats[CCDC_PAD_SOURCE_VP];
+
+ if (!format->code) {
+ /* Disable the video port when the input format isn't supported.
+ * This is indicated by a pixel code set to 0.
+ */
+ isp_reg_writel(isp, 0, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMTCFG);
+ return;
+ }
+
+ isp_reg_writel(isp, (0 << ISPCCDC_FMT_HORZ_FMTSPH_SHIFT) |
+ (format->width << ISPCCDC_FMT_HORZ_FMTLNH_SHIFT),
+ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_HORZ);
+ isp_reg_writel(isp, (0 << ISPCCDC_FMT_VERT_FMTSLV_SHIFT) |
+ ((format->height + 1) << ISPCCDC_FMT_VERT_FMTLNV_SHIFT),
+ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_VERT);
- fmtcfg_vp = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMTCFG)
- & ~(ISPCCDC_FMTCFG_VPIN_MASK | ISPCCDC_FMTCFG_VPIF_FRQ_MASK);
+ isp_reg_writel(isp, (format->width << ISPCCDC_VP_OUT_HORZ_NUM_SHIFT) |
+ (format->height << ISPCCDC_VP_OUT_VERT_NUM_SHIFT),
+ OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VP_OUT);
info = omap3isp_video_format_info(ccdc->formats[CCDC_PAD_SINK].code);
switch (info->width) {
case 8:
case 10:
- fmtcfg_vp |= ISPCCDC_FMTCFG_VPIN_9_0;
+ fmtcfg |= ISPCCDC_FMTCFG_VPIN_9_0;
break;
case 11:
- fmtcfg_vp |= ISPCCDC_FMTCFG_VPIN_10_1;
+ fmtcfg |= ISPCCDC_FMTCFG_VPIN_10_1;
break;
case 12:
- fmtcfg_vp |= ISPCCDC_FMTCFG_VPIN_11_2;
+ fmtcfg |= ISPCCDC_FMTCFG_VPIN_11_2;
break;
case 13:
- fmtcfg_vp |= ISPCCDC_FMTCFG_VPIN_12_3;
+ fmtcfg |= ISPCCDC_FMTCFG_VPIN_12_3;
break;
}
@@ -850,75 +858,59 @@ static void ccdc_config_vp(struct isp_ccdc_device *ccdc)
div = l3_ick / pipe->external_rate;
div = clamp(div, 2U, max_div);
- fmtcfg_vp |= (div - 2) << ISPCCDC_FMTCFG_VPIF_FRQ_SHIFT;
+ fmtcfg |= (div - 2) << ISPCCDC_FMTCFG_VPIF_FRQ_SHIFT;
- isp_reg_writel(isp, fmtcfg_vp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMTCFG);
-}
-
-/*
- * ccdc_enable_vp - Enable Video Port.
- * @ccdc: Pointer to ISP CCDC device.
- * @enable: 0 Disables VP, 1 Enables VP
- *
- * This is needed for outputting image to Preview, H3A and HIST ISP submodules.
- */
-static void ccdc_enable_vp(struct isp_ccdc_device *ccdc, u8 enable)
-{
- struct isp_device *isp = to_isp_device(ccdc);
-
- isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMTCFG,
- ISPCCDC_FMTCFG_VPEN, enable ? ISPCCDC_FMTCFG_VPEN : 0);
+ isp_reg_writel(isp, fmtcfg, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMTCFG);
}
/*
* ccdc_config_outlineoffset - Configure memory saving output line offset
* @ccdc: Pointer to ISP CCDC device.
- * @offset: Address offset to start a new line. Must be twice the
- * Output width and aligned on 32 byte boundary
- * @oddeven: Specifies the odd/even line pattern to be chosen to store the
- * output.
- * @numlines: Set the value 0-3 for +1-4lines, 4-7 for -1-4lines.
+ * @bpl: Number of bytes per line when stored in memory.
+ * @field: Field order when storing interlaced formats in memory.
+ *
+ * Configure the offsets for the line output control:
+ *
+ * - The horizontal line offset is defined as the number of bytes between the
+ * start of two consecutive lines in memory. Set it to the given bytes per
+ * line value.
+ *
+ * - The field offset value is defined as the number of lines to offset the
+ * start of the field identified by FID = 1. Set it to one.
*
- * - Configures the output line offset when stored in memory
- * - Sets the odd/even line pattern to store the output
- * (EVENEVEN (1), ODDEVEN (2), EVENODD (3), ODDODD (4))
- * - Configures the number of even and odd line fields in case of rearranging
- * the lines.
+ * - The line offset values are defined as the number of lines (as defined by
+ * the horizontal line offset) between the start of two consecutive lines for
+ * all combinations of odd/even lines in odd/even fields. When interleaving
+ * fields, set them all to two lines, and to one line otherwise (see the
+ * worked example after this function).
*/
static void ccdc_config_outlineoffset(struct isp_ccdc_device *ccdc,
- u32 offset, u8 oddeven, u8 numlines)
+ unsigned int bpl,
+ enum v4l2_field field)
{
struct isp_device *isp = to_isp_device(ccdc);
+ u32 sdofst = 0;
- isp_reg_writel(isp, offset & 0xffff,
- OMAP3_ISP_IOMEM_CCDC, ISPCCDC_HSIZE_OFF);
+ isp_reg_writel(isp, bpl & 0xffff, OMAP3_ISP_IOMEM_CCDC,
+ ISPCCDC_HSIZE_OFF);
- isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST,
- ISPCCDC_SDOFST_FINV);
-
- isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST,
- ISPCCDC_SDOFST_FOFST_4L);
-
- switch (oddeven) {
- case EVENEVEN:
- isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST,
- (numlines & 0x7) << ISPCCDC_SDOFST_LOFST0_SHIFT);
- break;
- case ODDEVEN:
- isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST,
- (numlines & 0x7) << ISPCCDC_SDOFST_LOFST1_SHIFT);
- break;
- case EVENODD:
- isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST,
- (numlines & 0x7) << ISPCCDC_SDOFST_LOFST2_SHIFT);
- break;
- case ODDODD:
- isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST,
- (numlines & 0x7) << ISPCCDC_SDOFST_LOFST3_SHIFT);
+ switch (field) {
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ /* When interleaving fields in memory, offset field one by one
+ * line and set the line offsets to two lines.
+ */
+ sdofst |= (1 << ISPCCDC_SDOFST_LOFST0_SHIFT)
+ | (1 << ISPCCDC_SDOFST_LOFST1_SHIFT)
+ | (1 << ISPCCDC_SDOFST_LOFST2_SHIFT)
+ | (1 << ISPCCDC_SDOFST_LOFST3_SHIFT);
break;
+
default:
+ /* In all other cases set the line offsets to one line. */
break;
}
+
+ isp_reg_writel(isp, sdofst, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST);
}
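A worked example of the offsets described in the comment above, under assumed sizes: a 720-pixel-wide YUYV line stored in memory gives bpl = 720 * 2 = 1440 bytes, and interleaving fields programs each LOFSTn field with 1, which the hardware interprets as a two-line step between consecutive lines of the same field.

#include <stdio.h>

/* Shift values as defined for ISPCCDC_SDOFST in ispreg.h. */
#define LOFST3_SHIFT	0
#define LOFST2_SHIFT	3
#define LOFST1_SHIFT	6
#define LOFST0_SHIFT	9

int main(void)
{
	unsigned int bpl = 720 * 2;	/* assumed YUYV line, 2 bytes per pixel */
	unsigned int sdofst = (1 << LOFST0_SHIFT) | (1 << LOFST1_SHIFT)
			    | (1 << LOFST2_SHIFT) | (1 << LOFST3_SHIFT);

	/* Prints HSIZE_OFF = 1440, SDOFST = 0x249 for interleaved storage. */
	printf("HSIZE_OFF = %u, SDOFST = 0x%03x\n", bpl, sdofst);
	return 0;
}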
/*
@@ -981,10 +973,16 @@ static void ccdc_config_sync_if(struct isp_ccdc_device *ccdc,
if (format->code == V4L2_MBUS_FMT_YUYV8_2X8 ||
format->code == V4L2_MBUS_FMT_UYVY8_2X8) {
- /* The bridge is enabled for YUV8 formats. Configure the input
- * mode accordingly.
+ /* According to the OMAP3 TRM the input mode only affects SYNC
+ * mode; enabling BT.656 mode should take precedence. However,
+ * in practice setting the input mode to YCbCr data on 8 bits
+ * seems to be required in BT.656 mode. In SYNC mode set it to
+ * YCbCr on 16 bits as the bridge is enabled in that case.
*/
- syn_mode |= ISPCCDC_SYN_MODE_INPMOD_YCBCR16;
+ if (ccdc->bt656)
+ syn_mode |= ISPCCDC_SYN_MODE_INPMOD_YCBCR8;
+ else
+ syn_mode |= ISPCCDC_SYN_MODE_INPMOD_YCBCR16;
}
switch (data_size) {
@@ -1008,9 +1006,15 @@ static void ccdc_config_sync_if(struct isp_ccdc_device *ccdc,
if (pdata && pdata->hs_pol)
syn_mode |= ISPCCDC_SYN_MODE_HDPOL;
- if (pdata && pdata->vs_pol)
+ /* The polarity of the vertical sync signal output by the BT.656
+ * decoder is not documented and seems to be active low.
+ */
+ if ((pdata && pdata->vs_pol) || ccdc->bt656)
syn_mode |= ISPCCDC_SYN_MODE_VDPOL;
+ if (pdata && pdata->fld_pol)
+ syn_mode |= ISPCCDC_SYN_MODE_FLDPOL;
+
isp_reg_writel(isp, syn_mode, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE);
/* The CCDC_CFG.Y8POS bit is used in YCbCr8 input mode only. The
@@ -1023,8 +1027,16 @@ static void ccdc_config_sync_if(struct isp_ccdc_device *ccdc,
isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG,
ISPCCDC_CFG_Y8POS);
- isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_REC656IF,
- ISPCCDC_REC656IF_R656ON);
+ /* Enable or disable BT.656 mode, including error correction for the
+ * synchronization codes.
+ */
+ if (ccdc->bt656)
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_REC656IF,
+ ISPCCDC_REC656IF_R656ON | ISPCCDC_REC656IF_ECCFVH);
+ else
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_REC656IF,
+ ISPCCDC_REC656IF_R656ON | ISPCCDC_REC656IF_ECCFVH);
+
}
/* CCDC formats descriptions */
@@ -1115,17 +1127,33 @@ static void ccdc_configure(struct isp_ccdc_device *ccdc)
unsigned long flags;
unsigned int bridge;
unsigned int shift;
+ unsigned int nph;
+ unsigned int sph;
u32 syn_mode;
u32 ccdc_pattern;
+ ccdc->bt656 = false;
+ ccdc->fields = 0;
+
pad = media_entity_remote_pad(&ccdc->pads[CCDC_PAD_SINK]);
sensor = media_entity_to_v4l2_subdev(pad->entity);
- if (ccdc->input == CCDC_INPUT_PARALLEL)
+ if (ccdc->input == CCDC_INPUT_PARALLEL) {
+ struct v4l2_mbus_config cfg;
+ int ret;
+
+ ret = v4l2_subdev_call(sensor, video, g_mbus_config, &cfg);
+ if (!ret)
+ ccdc->bt656 = cfg.type == V4L2_MBUS_BT656;
+
pdata = &((struct isp_v4l2_subdevs_group *)sensor->host_priv)
->bus.parallel;
+ }
+
+ /* CCDC_PAD_SINK */
+ format = &ccdc->formats[CCDC_PAD_SINK];
/* Compute the lane shifter shift value and enable the bridge when the
- * input format is YUV.
+ * input format is a non-BT.656 YUV variant.
*/
fmt_src.pad = pad->index;
fmt_src.which = V4L2_SUBDEV_FORMAT_ACTIVE;
@@ -1134,12 +1162,13 @@ static void ccdc_configure(struct isp_ccdc_device *ccdc)
depth_in = fmt_info->width;
}
- fmt_info = omap3isp_video_format_info
- (isp->isp_ccdc.formats[CCDC_PAD_SINK].code);
+ fmt_info = omap3isp_video_format_info(format->code);
depth_out = fmt_info->width;
shift = depth_in - depth_out;
- if (fmt_info->code == V4L2_MBUS_FMT_YUYV8_2X8)
+ if (ccdc->bt656)
+ bridge = ISPCTRL_PAR_BRIDGE_DISABLE;
+ else if (fmt_info->code == V4L2_MBUS_FMT_YUYV8_2X8)
bridge = ISPCTRL_PAR_BRIDGE_LENDIAN;
else if (fmt_info->code == V4L2_MBUS_FMT_UYVY8_2X8)
bridge = ISPCTRL_PAR_BRIDGE_BENDIAN;
@@ -1148,6 +1177,7 @@ static void ccdc_configure(struct isp_ccdc_device *ccdc)
omap3isp_configure_bridge(isp, ccdc->input, pdata, shift, bridge);
+ /* Configure the sync interface. */
ccdc_config_sync_if(ccdc, pdata, depth_out);
syn_mode = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE);
@@ -1167,9 +1197,6 @@ static void ccdc_configure(struct isp_ccdc_device *ccdc)
else
syn_mode &= ~ISPCCDC_SYN_MODE_SDR2RSZ;
- /* CCDC_PAD_SINK */
- format = &ccdc->formats[CCDC_PAD_SINK];
-
/* Mosaic filter */
switch (format->code) {
case V4L2_MBUS_FMT_SRGGB10_1X10:
@@ -1202,16 +1229,40 @@ static void ccdc_configure(struct isp_ccdc_device *ccdc)
format = &ccdc->formats[CCDC_PAD_SOURCE_OF];
crop = &ccdc->crop;
- isp_reg_writel(isp, (crop->left << ISPCCDC_HORZ_INFO_SPH_SHIFT) |
- ((crop->width - 1) << ISPCCDC_HORZ_INFO_NPH_SHIFT),
+ /* The horizontal coordinates are expressed in pixel clock cycles. We
+ * need two cycles per pixel in BT.656 mode, and one cycle per pixel in
+ * SYNC mode regardless of the format as the bridge is enabled for YUV
+ * formats in that case.
+ */
+ if (ccdc->bt656) {
+ sph = crop->left * 2;
+ nph = crop->width * 2 - 1;
+ } else {
+ sph = crop->left;
+ nph = crop->width - 1;
+ }
+
+ isp_reg_writel(isp, (sph << ISPCCDC_HORZ_INFO_SPH_SHIFT) |
+ (nph << ISPCCDC_HORZ_INFO_NPH_SHIFT),
OMAP3_ISP_IOMEM_CCDC, ISPCCDC_HORZ_INFO);
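As a numeric check with assumed crop values of left = 16 and width = 720: SYNC mode programs sph = 16 and nph = 719, while BT.656 mode doubles the pixel clock cycle counts to sph = 32 and nph = 1439.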
- isp_reg_writel(isp, crop->top << ISPCCDC_VERT_START_SLV0_SHIFT,
+ isp_reg_writel(isp, (crop->top << ISPCCDC_VERT_START_SLV0_SHIFT) |
+ (crop->top << ISPCCDC_VERT_START_SLV1_SHIFT),
OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VERT_START);
isp_reg_writel(isp, (crop->height - 1)
<< ISPCCDC_VERT_LINES_NLV_SHIFT,
OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VERT_LINES);
- ccdc_config_outlineoffset(ccdc, ccdc->video_out.bpl_value, 0, 0);
+ ccdc_config_outlineoffset(ccdc, ccdc->video_out.bpl_value,
+ format->field);
+
+ /* When interleaving fields enable processing of the field input signal.
+ * This will cause the line output control module to apply the field
+ * offset to field 1.
+ */
+ if (ccdc->formats[CCDC_PAD_SINK].field == V4L2_FIELD_ALTERNATE &&
+ (format->field == V4L2_FIELD_INTERLACED_TB ||
+ format->field == V4L2_FIELD_INTERLACED_BT))
+ syn_mode |= ISPCCDC_SYN_MODE_FLDMODE;
/* The CCDC outputs data in UYVY order by default. Swap bytes to get
* YUYV.
@@ -1223,8 +1274,11 @@ static void ccdc_configure(struct isp_ccdc_device *ccdc)
isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG,
ISPCCDC_CFG_BSWD);
- /* Use PACK8 mode for 1byte per pixel formats. */
- if (omap3isp_video_format_info(format->code)->width <= 8)
+ /* Use PACK8 mode for 1byte per pixel formats. Check for BT.656 mode
+ * explicitly as the driver reports 1X16 instead of 2X8 at the OF pad
+ * for simplicity.
+ */
+ if (omap3isp_video_format_info(format->code)->width <= 8 || ccdc->bt656)
syn_mode |= ISPCCDC_SYN_MODE_PACK8;
else
syn_mode &= ~ISPCCDC_SYN_MODE_PACK8;
@@ -1232,18 +1286,7 @@ static void ccdc_configure(struct isp_ccdc_device *ccdc)
isp_reg_writel(isp, syn_mode, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE);
/* CCDC_PAD_SOURCE_VP */
- format = &ccdc->formats[CCDC_PAD_SOURCE_VP];
-
- isp_reg_writel(isp, (0 << ISPCCDC_FMT_HORZ_FMTSPH_SHIFT) |
- (format->width << ISPCCDC_FMT_HORZ_FMTLNH_SHIFT),
- OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_HORZ);
- isp_reg_writel(isp, (0 << ISPCCDC_FMT_VERT_FMTSLV_SHIFT) |
- ((format->height + 1) << ISPCCDC_FMT_VERT_FMTLNV_SHIFT),
- OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_VERT);
-
- isp_reg_writel(isp, (format->width << ISPCCDC_VP_OUT_HORZ_NUM_SHIFT) |
- (format->height << ISPCCDC_VP_OUT_VERT_NUM_SHIFT),
- OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VP_OUT);
+ ccdc_config_vp(ccdc);
/* Lens shading correction. */
spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
@@ -1277,6 +1320,8 @@ static void __ccdc_enable(struct isp_ccdc_device *ccdc, int enable)
isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_PCR,
ISPCCDC_PCR_EN, enable ? ISPCCDC_PCR_EN : 0);
+
+ ccdc->running = enable;
}
static int ccdc_disable(struct isp_ccdc_device *ccdc)
@@ -1287,6 +1332,8 @@ static int ccdc_disable(struct isp_ccdc_device *ccdc)
spin_lock_irqsave(&ccdc->lock, flags);
if (ccdc->state == ISP_PIPELINE_STREAM_CONTINUOUS)
ccdc->stopping = CCDC_STOP_REQUEST;
+ if (!ccdc->running)
+ ccdc->stopping = CCDC_STOP_FINISHED;
spin_unlock_irqrestore(&ccdc->lock, flags);
ret = wait_event_timeout(ccdc->wait,
@@ -1369,14 +1416,14 @@ static int ccdc_sbl_wait_idle(struct isp_ccdc_device *ccdc,
return -EBUSY;
}
-/* __ccdc_handle_stopping - Handle CCDC and/or LSC stopping sequence
+/* ccdc_handle_stopping - Handle CCDC and/or LSC stopping sequence
* @ccdc: Pointer to ISP CCDC device.
* @event: The event that triggered the handler
*
* Return 1 when the event and stopping request combination is satisfied,
* zero otherwise.
*/
-static int __ccdc_handle_stopping(struct isp_ccdc_device *ccdc, u32 event)
+static int ccdc_handle_stopping(struct isp_ccdc_device *ccdc, u32 event)
{
int rval = 0;
@@ -1458,7 +1505,7 @@ static void ccdc_lsc_isr(struct isp_ccdc_device *ccdc, u32 events)
if (ccdc->lsc.state == LSC_STATE_STOPPING)
ccdc->lsc.state = LSC_STATE_STOPPED;
- if (__ccdc_handle_stopping(ccdc, CCDC_EVENT_LSC_DONE))
+ if (ccdc_handle_stopping(ccdc, CCDC_EVENT_LSC_DONE))
goto done;
if (ccdc->lsc.state != LSC_STATE_RECONFIG)
@@ -1486,12 +1533,59 @@ done:
spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);
}
+/*
+ * Check whether the CCDC has captured all fields necessary to complete the
+ * buffer.
+ */
+static bool ccdc_has_all_fields(struct isp_ccdc_device *ccdc)
+{
+ struct isp_pipeline *pipe = to_isp_pipeline(&ccdc->subdev.entity);
+ struct isp_device *isp = to_isp_device(ccdc);
+ enum v4l2_field of_field = ccdc->formats[CCDC_PAD_SOURCE_OF].field;
+ enum v4l2_field field;
+
+ /* When the input is progressive fields don't matter. */
+ if (of_field == V4L2_FIELD_NONE)
+ return true;
+
+ /* Read the current field identifier. */
+ field = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE)
+ & ISPCCDC_SYN_MODE_FLDSTAT
+ ? V4L2_FIELD_BOTTOM : V4L2_FIELD_TOP;
+
+ /* When capturing fields in alternate order, just store the current field
+ * identifier in the pipeline.
+ */
+ if (of_field == V4L2_FIELD_ALTERNATE) {
+ pipe->field = field;
+ return true;
+ }
+
+ /* The format is interlaced. Make sure we've captured both fields. */
+ ccdc->fields |= field == V4L2_FIELD_BOTTOM
+ ? CCDC_FIELD_BOTTOM : CCDC_FIELD_TOP;
+
+ if (ccdc->fields != CCDC_FIELD_BOTH)
+ return false;
+
+ /* Verify that the field just captured corresponds to the last field
+ * needed based on the desired field order.
+ */
+ if ((of_field == V4L2_FIELD_INTERLACED_TB && field == V4L2_FIELD_TOP) ||
+ (of_field == V4L2_FIELD_INTERLACED_BT && field == V4L2_FIELD_BOTTOM))
+ return false;
+
+ /* The buffer can be completed, reset the fields for the next buffer. */
+ ccdc->fields = 0;
+
+ return true;
+}
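A compact standalone walk-through of the field accumulation above for the V4L2_FIELD_INTERLACED_TB case; the helper below is an illustration of the logic, not driver code.

#include <stdbool.h>
#include <stdio.h>

#define FIELD_TOP	1
#define FIELD_BOTTOM	2
#define FIELD_BOTH	3

static unsigned int fields;

/* Top-bottom order: the frame completes on the bottom field, once both
 * fields have been captured.
 */
static bool has_all_fields_tb(unsigned int field)
{
	fields |= field;
	if (fields != FIELD_BOTH)
		return false;
	if (field == FIELD_TOP)
		return false;
	fields = 0;
	return true;
}

int main(void)
{
	printf("%d\n", has_all_fields_tb(FIELD_TOP));	/* 0: only top seen */
	printf("%d\n", has_all_fields_tb(FIELD_BOTTOM));/* 1: frame complete */
	return 0;
}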
+
static int ccdc_isr_buffer(struct isp_ccdc_device *ccdc)
{
struct isp_pipeline *pipe = to_isp_pipeline(&ccdc->subdev.entity);
struct isp_device *isp = to_isp_device(ccdc);
struct isp_buffer *buffer;
- int restart = 0;
/* The CCDC generates VD0 interrupts even when disabled (the datasheet
* doesn't explicitly state if that's supposed to happen or not, so it
@@ -1500,30 +1594,31 @@ static int ccdc_isr_buffer(struct isp_ccdc_device *ccdc)
* would thus not be enough, we need to handle the situation explicitly.
*/
if (list_empty(&ccdc->video_out.dmaqueue))
- goto done;
+ return 0;
/* We're in continuous mode, and memory writes were disabled due to a
* buffer underrun. Reenable them now that we have a buffer. The buffer
* address has been set in ccdc_video_queue.
*/
if (ccdc->state == ISP_PIPELINE_STREAM_CONTINUOUS && ccdc->underrun) {
- restart = 1;
ccdc->underrun = 0;
- goto done;
+ return 1;
}
+ /* Wait for the CCDC to become idle. */
if (ccdc_sbl_wait_idle(ccdc, 1000)) {
dev_info(isp->dev, "CCDC won't become idle!\n");
isp->crashed |= 1U << ccdc->subdev.entity.id;
omap3isp_pipeline_cancel_stream(pipe);
- goto done;
+ return 0;
}
+ if (!ccdc_has_all_fields(ccdc))
+ return 1;
+
buffer = omap3isp_video_buffer_next(&ccdc->video_out);
- if (buffer != NULL) {
+ if (buffer != NULL)
ccdc_set_outaddr(ccdc, buffer->dma);
- restart = 1;
- }
pipe->state |= ISP_PIPELINE_IDLE_OUTPUT;
@@ -1532,8 +1627,7 @@ static int ccdc_isr_buffer(struct isp_ccdc_device *ccdc)
omap3isp_pipeline_set_stream(pipe,
ISP_PIPELINE_STREAM_SINGLESHOT);
-done:
- return restart;
+ return buffer != NULL;
}
/*
@@ -1547,11 +1641,38 @@ static void ccdc_vd0_isr(struct isp_ccdc_device *ccdc)
unsigned long flags;
int restart = 0;
+ /* In BT.656 mode the CCDC doesn't generate an HS/VS interrupt. We thus
+ * need to increment the frame counter here.
+ */
+ if (ccdc->bt656) {
+ struct isp_pipeline *pipe =
+ to_isp_pipeline(&ccdc->subdev.entity);
+
+ atomic_inc(&pipe->frame_number);
+ }
+
+ /* Emulate a VD1 interrupt for BT.656 mode, as we can't stop the CCDC in
+ * the VD1 interrupt handler in that mode without risking a CCDC stall
+ * if a short frame is received.
+ */
+ if (ccdc->bt656) {
+ spin_lock_irqsave(&ccdc->lock, flags);
+ if (ccdc->state == ISP_PIPELINE_STREAM_CONTINUOUS &&
+ ccdc->output & CCDC_OUTPUT_MEMORY) {
+ if (ccdc->lsc.state != LSC_STATE_STOPPED)
+ __ccdc_lsc_enable(ccdc, 0);
+ __ccdc_enable(ccdc, 0);
+ }
+ ccdc_handle_stopping(ccdc, CCDC_EVENT_VD1);
+ spin_unlock_irqrestore(&ccdc->lock, flags);
+ }
+
if (ccdc->output & CCDC_OUTPUT_MEMORY)
restart = ccdc_isr_buffer(ccdc);
spin_lock_irqsave(&ccdc->lock, flags);
- if (__ccdc_handle_stopping(ccdc, CCDC_EVENT_VD0)) {
+
+ if (ccdc_handle_stopping(ccdc, CCDC_EVENT_VD0)) {
spin_unlock_irqrestore(&ccdc->lock, flags);
return;
}
@@ -1572,6 +1693,18 @@ static void ccdc_vd1_isr(struct isp_ccdc_device *ccdc)
{
unsigned long flags;
+ /* In BT.656 mode the synchronization signals are generated by the CCDC
+ * from the embedded sync codes. The VD0 and VD1 interrupts are thus
+ * only triggered when the CCDC is enabled, unlike external sync mode
+ * where the line counter runs even when the CCDC is stopped. We can't
+ * disable the CCDC at VD1 time, as no VD0 interrupt would be generated
+ * for a short frame, which would result in the CCDC being stopped and
+ * no VD interrupt generated anymore. The CCDC is stopped from the VD0
+ * interrupt handler instead for BT.656.
+ */
+ if (ccdc->bt656)
+ return;
+
spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
/*
@@ -1601,7 +1734,7 @@ static void ccdc_vd1_isr(struct isp_ccdc_device *ccdc)
break;
}
- if (__ccdc_handle_stopping(ccdc, CCDC_EVENT_VD1))
+ if (ccdc_handle_stopping(ccdc, CCDC_EVENT_VD1))
goto done;
if (ccdc->lsc.request == NULL)
@@ -1656,6 +1789,8 @@ int omap3isp_ccdc_isr(struct isp_ccdc_device *ccdc, u32 events)
static int ccdc_video_queue(struct isp_video *video, struct isp_buffer *buffer)
{
struct isp_ccdc_device *ccdc = &video->isp->isp_ccdc;
+ unsigned long flags;
+ bool restart = false;
if (!(ccdc->output & CCDC_OUTPUT_MEMORY))
return -ENODEV;
@@ -1664,9 +1799,20 @@ static int ccdc_video_queue(struct isp_video *video, struct isp_buffer *buffer)
/* We now have a buffer queued on the output, restart the pipeline
* on the next CCDC interrupt if running in continuous mode (or when
- * starting the stream).
+ * starting the stream) in external sync mode, or immediately in BT.656
+ * sync mode as no CCDC interrupt is generated when the CCDC is stopped
+ * in that case.
*/
- ccdc->underrun = 1;
+ spin_lock_irqsave(&ccdc->lock, flags);
+ if (ccdc->state == ISP_PIPELINE_STREAM_CONTINUOUS && !ccdc->running &&
+ ccdc->bt656)
+ restart = true;
+ else
+ ccdc->underrun = 1;
+ spin_unlock_irqrestore(&ccdc->lock, flags);
+
+ if (restart)
+ ccdc_enable(ccdc);
return 0;
}
@@ -1753,11 +1899,6 @@ static int ccdc_set_stream(struct v4l2_subdev *sd, int enable)
ccdc_configure(ccdc);
- /* TODO: Don't configure the video port if all of its output
- * links are inactive.
- */
- ccdc_config_vp(ccdc);
- ccdc_enable_vp(ccdc, 1);
ccdc_print_status(ccdc);
}
@@ -1830,6 +1971,7 @@ ccdc_try_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
unsigned int width = fmt->width;
unsigned int height = fmt->height;
struct v4l2_rect *crop;
+ enum v4l2_field field;
unsigned int i;
switch (pad) {
@@ -1846,14 +1988,24 @@ ccdc_try_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
/* Clamp the input size. */
fmt->width = clamp_t(u32, width, 32, 4096);
fmt->height = clamp_t(u32, height, 32, 4096);
+
+ /* Default to progressive field order. */
+ if (fmt->field == V4L2_FIELD_ANY)
+ fmt->field = V4L2_FIELD_NONE;
+
break;
case CCDC_PAD_SOURCE_OF:
pixelcode = fmt->code;
+ field = fmt->field;
*fmt = *__ccdc_get_format(ccdc, fh, CCDC_PAD_SINK, which);
- /* YUV formats are converted from 2X8 to 1X16 by the bridge and
- * can be byte-swapped.
+ /* In SYNC mode the bridge converts YUV formats from 2X8 to
+ * 1X16. In BT.656 no such conversion occurs. As we don't know
+ * at this point whether the source will use SYNC or BT.656 mode,
+ * let's pretend the conversion always occurs. The CCDC will be
+ * configured to pack bytes in BT.656, hiding the inaccuracy.
+ * In all cases bytes can be swapped.
*/
if (fmt->code == V4L2_MBUS_FMT_YUYV8_2X8 ||
fmt->code == V4L2_MBUS_FMT_UYVY8_2X8) {
@@ -1874,6 +2026,17 @@ ccdc_try_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
crop = __ccdc_get_crop(ccdc, fh, which);
fmt->width = crop->width;
fmt->height = crop->height;
+
+ /* When input format is interlaced with alternating fields the
+ * CCDC can interleave the fields.
+ */
+ if (fmt->field == V4L2_FIELD_ALTERNATE &&
+ (field == V4L2_FIELD_INTERLACED_TB ||
+ field == V4L2_FIELD_INTERLACED_BT)) {
+ fmt->field = field;
+ fmt->height *= 2;
+ }
+
break;
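For instance, with assumed sizes, a sink format of 720x240 using V4L2_FIELD_ALTERNATE, a full-field crop of 720x240 and a request for V4L2_FIELD_INTERLACED_TB on the OF pad yields a 720x480 INTERLACED_TB output format, the two fields being interleaved in the captured buffer.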
case CCDC_PAD_SOURCE_VP:
@@ -1901,7 +2064,6 @@ ccdc_try_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
* stored on 2 bytes.
*/
fmt->colorspace = V4L2_COLORSPACE_SRGB;
- fmt->field = V4L2_FIELD_NONE;
}
/*
diff --git a/drivers/media/platform/omap3isp/ispccdc.h b/drivers/media/platform/omap3isp/ispccdc.h
index f65061602c71..3440a7097940 100644
--- a/drivers/media/platform/omap3isp/ispccdc.h
+++ b/drivers/media/platform/omap3isp/ispccdc.h
@@ -12,16 +12,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#ifndef OMAP3_ISP_CCDC_H
@@ -103,6 +93,10 @@ struct ispccdc_lsc {
#define CCDC_PAD_SOURCE_VP 2
#define CCDC_PADS_NUM 3
+#define CCDC_FIELD_TOP 1
+#define CCDC_FIELD_BOTTOM 2
+#define CCDC_FIELD_BOTH 3
+
/*
* struct isp_ccdc_device - Structure for the CCDC module to store its own
* information
@@ -123,11 +117,14 @@ struct ispccdc_lsc {
* @lsc: Lens shading compensation configuration
* @update: Bitmask of controls to update during the next interrupt
* @shadow_update: Controls update in progress by userspace
+ * @bt656: Whether the input interface uses BT.656 synchronization
+ * @fields: The fields (CCDC_FIELD_*) stored in the current buffer
* @underrun: A buffer underrun occurred and a new buffer has been queued
* @state: Streaming state
* @lock: Serializes shadow_update with interrupt handler
* @wait: Wait queue used to stop the module
* @stopping: Stopping state
+ * @running: Is the CCDC hardware running
* @ioctl_lock: Serializes ioctl calls and LSC requests freeing
*/
struct isp_ccdc_device {
@@ -151,11 +148,15 @@ struct isp_ccdc_device {
unsigned int update;
unsigned int shadow_update;
+ bool bt656;
+ unsigned int fields;
+
unsigned int underrun:1;
enum isp_pipeline_stream_state state;
spinlock_t lock;
wait_queue_head_t wait;
unsigned int stopping;
+ bool running;
struct mutex ioctl_lock;
};
diff --git a/drivers/media/platform/omap3isp/ispccp2.c b/drivers/media/platform/omap3isp/ispccp2.c
index f3801db9095c..9cb49b3c04bd 100644
--- a/drivers/media/platform/omap3isp/ispccp2.c
+++ b/drivers/media/platform/omap3isp/ispccp2.c
@@ -12,16 +12,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#include <linux/delay.h>
diff --git a/drivers/media/platform/omap3isp/ispccp2.h b/drivers/media/platform/omap3isp/ispccp2.h
index 76d65f4576ef..4662bffa79e3 100644
--- a/drivers/media/platform/omap3isp/ispccp2.h
+++ b/drivers/media/platform/omap3isp/ispccp2.h
@@ -12,16 +12,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#ifndef OMAP3_ISP_CCP2_H
diff --git a/drivers/media/platform/omap3isp/ispcsi2.c b/drivers/media/platform/omap3isp/ispcsi2.c
index 5a2e47e58b84..6530b255f103 100644
--- a/drivers/media/platform/omap3isp/ispcsi2.c
+++ b/drivers/media/platform/omap3isp/ispcsi2.c
@@ -12,16 +12,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#include <linux/delay.h>
#include <media/v4l2-common.h>
diff --git a/drivers/media/platform/omap3isp/ispcsi2.h b/drivers/media/platform/omap3isp/ispcsi2.h
index c57729b7e86e..453ed62fe394 100644
--- a/drivers/media/platform/omap3isp/ispcsi2.h
+++ b/drivers/media/platform/omap3isp/ispcsi2.h
@@ -12,16 +12,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#ifndef OMAP3_ISP_CSI2_H
diff --git a/drivers/media/platform/omap3isp/ispcsiphy.c b/drivers/media/platform/omap3isp/ispcsiphy.c
index c09de32f986a..e033f2237a72 100644
--- a/drivers/media/platform/omap3isp/ispcsiphy.c
+++ b/drivers/media/platform/omap3isp/ispcsiphy.c
@@ -12,16 +12,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#include <linux/delay.h>
diff --git a/drivers/media/platform/omap3isp/ispcsiphy.h b/drivers/media/platform/omap3isp/ispcsiphy.h
index 14551fd77697..e17c88beab92 100644
--- a/drivers/media/platform/omap3isp/ispcsiphy.h
+++ b/drivers/media/platform/omap3isp/ispcsiphy.h
@@ -12,16 +12,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#ifndef OMAP3_ISP_CSI_PHY_H
diff --git a/drivers/media/platform/omap3isp/isph3a.h b/drivers/media/platform/omap3isp/isph3a.h
index fb09fd4ca755..e5b28d0f3b0f 100644
--- a/drivers/media/platform/omap3isp/isph3a.h
+++ b/drivers/media/platform/omap3isp/isph3a.h
@@ -13,16 +13,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#ifndef OMAP3_ISP_H3A_H
diff --git a/drivers/media/platform/omap3isp/isph3a_aewb.c b/drivers/media/platform/omap3isp/isph3a_aewb.c
index d6811ce263eb..b208c5417146 100644
--- a/drivers/media/platform/omap3isp/isph3a_aewb.c
+++ b/drivers/media/platform/omap3isp/isph3a_aewb.c
@@ -13,16 +13,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#include <linux/slab.h>
diff --git a/drivers/media/platform/omap3isp/isph3a_af.c b/drivers/media/platform/omap3isp/isph3a_af.c
index 6fc960cd30f5..8a83e195f3e3 100644
--- a/drivers/media/platform/omap3isp/isph3a_af.c
+++ b/drivers/media/platform/omap3isp/isph3a_af.c
@@ -13,16 +13,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
/* Linux specific include files */
diff --git a/drivers/media/platform/omap3isp/isphist.c b/drivers/media/platform/omap3isp/isphist.c
index 06a5f8164eaa..ce822c34c843 100644
--- a/drivers/media/platform/omap3isp/isphist.c
+++ b/drivers/media/platform/omap3isp/isphist.c
@@ -13,16 +13,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#include <linux/delay.h>
diff --git a/drivers/media/platform/omap3isp/isphist.h b/drivers/media/platform/omap3isp/isphist.h
index 0b2a38ec94c4..3b5415517dcd 100644
--- a/drivers/media/platform/omap3isp/isphist.h
+++ b/drivers/media/platform/omap3isp/isphist.h
@@ -13,16 +13,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#ifndef OMAP3_ISP_HIST_H
diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c
index 720809b07e75..605f57ef0a49 100644
--- a/drivers/media/platform/omap3isp/isppreview.c
+++ b/drivers/media/platform/omap3isp/isppreview.c
@@ -12,16 +12,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#include <linux/device.h>
diff --git a/drivers/media/platform/omap3isp/isppreview.h b/drivers/media/platform/omap3isp/isppreview.h
index f66923407f8c..16fdc03a3d43 100644
--- a/drivers/media/platform/omap3isp/isppreview.h
+++ b/drivers/media/platform/omap3isp/isppreview.h
@@ -12,16 +12,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#ifndef OMAP3_ISP_PREVIEW_H
diff --git a/drivers/media/platform/omap3isp/ispreg.h b/drivers/media/platform/omap3isp/ispreg.h
index b7d90e6fb01d..b5ea8da0b904 100644
--- a/drivers/media/platform/omap3isp/ispreg.h
+++ b/drivers/media/platform/omap3isp/ispreg.h
@@ -12,16 +12,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#ifndef OMAP3_ISP_REG_H
@@ -740,17 +730,13 @@
#define ISPCCDC_HSIZE_OFF_SHIFT 0
-#define ISPCCDC_SDOFST_FINV (1 << 14)
-#define ISPCCDC_SDOFST_FOFST_1L 0
-#define ISPCCDC_SDOFST_FOFST_4L (3 << 12)
+#define ISPCCDC_SDOFST_FIINV (1 << 14)
+#define ISPCCDC_SDOFST_FOFST_SHIFT 12
+#define ISPCCDC_SDOFST_FOFST_MASK (3 << 12)
#define ISPCCDC_SDOFST_LOFST3_SHIFT 0
#define ISPCCDC_SDOFST_LOFST2_SHIFT 3
#define ISPCCDC_SDOFST_LOFST1_SHIFT 6
#define ISPCCDC_SDOFST_LOFST0_SHIFT 9
-#define EVENEVEN 1
-#define ODDEVEN 2
-#define EVENODD 3
-#define ODDODD 4
#define ISPCCDC_CLAMP_OBGAIN_SHIFT 0
#define ISPCCDC_CLAMP_OBST_SHIFT 10
diff --git a/drivers/media/platform/omap3isp/ispresizer.c b/drivers/media/platform/omap3isp/ispresizer.c
index 6f077c2377db..05d1ace57451 100644
--- a/drivers/media/platform/omap3isp/ispresizer.c
+++ b/drivers/media/platform/omap3isp/ispresizer.c
@@ -12,16 +12,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#include <linux/device.h>
@@ -239,7 +229,7 @@ static void resizer_set_phase(struct isp_res_device *res, u32 h_phase,
u32 v_phase)
{
struct isp_device *isp = to_isp_device(res);
- u32 rgval = 0;
+ u32 rgval;
rgval = isp_reg_readl(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT) &
~(ISPRSZ_CNT_HSTPH_MASK | ISPRSZ_CNT_VSTPH_MASK);
@@ -275,7 +265,7 @@ static void resizer_set_luma(struct isp_res_device *res,
struct resizer_luma_yenh *luma)
{
struct isp_device *isp = to_isp_device(res);
- u32 rgval = 0;
+ u32 rgval;
rgval = (luma->algo << ISPRSZ_YENH_ALGO_SHIFT)
& ISPRSZ_YENH_ALGO_MASK;
@@ -322,7 +312,7 @@ static void resizer_set_ratio(struct isp_res_device *res,
{
struct isp_device *isp = to_isp_device(res);
const u16 *h_filter, *v_filter;
- u32 rgval = 0;
+ u32 rgval;
rgval = isp_reg_readl(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT) &
~(ISPRSZ_CNT_HRSZ_MASK | ISPRSZ_CNT_VRSZ_MASK);
@@ -365,9 +355,8 @@ static void resizer_set_output_size(struct isp_res_device *res,
u32 width, u32 height)
{
struct isp_device *isp = to_isp_device(res);
- u32 rgval = 0;
+ u32 rgval;
- dev_dbg(isp->dev, "Output size[w/h]: %dx%d\n", width, height);
rgval = (width << ISPRSZ_OUT_SIZE_HORZ_SHIFT)
& ISPRSZ_OUT_SIZE_HORZ_MASK;
rgval |= (height << ISPRSZ_OUT_SIZE_VERT_SHIFT)
@@ -409,7 +398,7 @@ static void resizer_set_output_offset(struct isp_res_device *res, u32 offset)
static void resizer_set_start(struct isp_res_device *res, u32 left, u32 top)
{
struct isp_device *isp = to_isp_device(res);
- u32 rgval = 0;
+ u32 rgval;
rgval = (left << ISPRSZ_IN_START_HORZ_ST_SHIFT)
& ISPRSZ_IN_START_HORZ_ST_MASK;
@@ -429,9 +418,7 @@ static void resizer_set_input_size(struct isp_res_device *res,
u32 width, u32 height)
{
struct isp_device *isp = to_isp_device(res);
- u32 rgval = 0;
-
- dev_dbg(isp->dev, "Input size[w/h]: %dx%d\n", width, height);
+ u32 rgval;
rgval = (width << ISPRSZ_IN_SIZE_HORZ_SHIFT)
& ISPRSZ_IN_SIZE_HORZ_MASK;
@@ -1075,10 +1062,13 @@ static void resizer_isr_buffer(struct isp_res_device *res)
void omap3isp_resizer_isr(struct isp_res_device *res)
{
struct v4l2_mbus_framefmt *informat, *outformat;
+ unsigned long flags;
if (omap3isp_module_sync_is_stopping(&res->wait, &res->stopping))
return;
+ spin_lock_irqsave(&res->lock, flags);
+
if (res->applycrop) {
outformat = __resizer_get_format(res, NULL, RESZ_PAD_SOURCE,
V4L2_SUBDEV_FORMAT_ACTIVE);
@@ -1088,6 +1078,8 @@ void omap3isp_resizer_isr(struct isp_res_device *res)
res->applycrop = 0;
}
+ spin_unlock_irqrestore(&res->lock, flags);
+
resizer_isr_buffer(res);
}
@@ -1290,8 +1282,10 @@ static int resizer_set_selection(struct v4l2_subdev *sd,
{
struct isp_res_device *res = v4l2_get_subdevdata(sd);
struct isp_device *isp = to_isp_device(res);
- struct v4l2_mbus_framefmt *format_sink, *format_source;
+ const struct v4l2_mbus_framefmt *format_sink;
+ struct v4l2_mbus_framefmt format_source;
struct resizer_ratio ratio;
+ unsigned long flags;
if (sel->target != V4L2_SEL_TGT_CROP ||
sel->pad != RESZ_PAD_SINK)
@@ -1299,16 +1293,14 @@ static int resizer_set_selection(struct v4l2_subdev *sd,
format_sink = __resizer_get_format(res, fh, RESZ_PAD_SINK,
sel->which);
- format_source = __resizer_get_format(res, fh, RESZ_PAD_SOURCE,
- sel->which);
-
- dev_dbg(isp->dev, "%s: L=%d,T=%d,W=%d,H=%d,which=%d\n", __func__,
- sel->r.left, sel->r.top, sel->r.width, sel->r.height,
- sel->which);
+ format_source = *__resizer_get_format(res, fh, RESZ_PAD_SOURCE,
+ sel->which);
- dev_dbg(isp->dev, "%s: input=%dx%d, output=%dx%d\n", __func__,
+ dev_dbg(isp->dev, "%s(%s): req %ux%u -> (%d,%d)/%ux%u -> %ux%u\n",
+ __func__, sel->which == V4L2_SUBDEV_FORMAT_TRY ? "try" : "act",
format_sink->width, format_sink->height,
- format_source->width, format_source->height);
+ sel->r.left, sel->r.top, sel->r.width, sel->r.height,
+ format_source.width, format_source.height);
/* Clamp the crop rectangle to the bounds, and then mangle it further to
* fulfill the TRM equations. Store the clamped but otherwise unmangled
@@ -1318,23 +1310,39 @@ static int resizer_set_selection(struct v4l2_subdev *sd,
* smaller input crop rectangle every time the output size is set if we
* stored the mangled rectangle.
*/
- resizer_try_crop(format_sink, format_source, &sel->r);
+ resizer_try_crop(format_sink, &format_source, &sel->r);
*__resizer_get_crop(res, fh, sel->which) = sel->r;
- resizer_calc_ratios(res, &sel->r, format_source, &ratio);
+ resizer_calc_ratios(res, &sel->r, &format_source, &ratio);
- if (sel->which == V4L2_SUBDEV_FORMAT_TRY)
+ dev_dbg(isp->dev, "%s(%s): got %ux%u -> (%d,%d)/%ux%u -> %ux%u\n",
+ __func__, sel->which == V4L2_SUBDEV_FORMAT_TRY ? "try" : "act",
+ format_sink->width, format_sink->height,
+ sel->r.left, sel->r.top, sel->r.width, sel->r.height,
+ format_source.width, format_source.height);
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *__resizer_get_format(res, fh, RESZ_PAD_SOURCE, sel->which) =
+ format_source;
return 0;
+ }
+
+ /* Update the source format, resizing ratios and crop rectangle. If
+ * streaming is on, the IRQ handler will reprogram the resizer after the
+ * current frame. We thus need to protect against race conditions.
+ */
+ spin_lock_irqsave(&res->lock, flags);
+
+ *__resizer_get_format(res, fh, RESZ_PAD_SOURCE, sel->which) =
+ format_source;
res->ratio = ratio;
res->crop.active = sel->r;
- /*
- * set_selection can be called while streaming is on. In this case the
- * crop values will be set in the next IRQ.
- */
if (res->state != ISP_PIPELINE_STREAM_STOPPED)
res->applycrop = 1;
+ spin_unlock_irqrestore(&res->lock, flags);
+
return 0;
}
@@ -1781,6 +1789,8 @@ int omap3isp_resizer_init(struct isp_device *isp)
init_waitqueue_head(&res->wait);
atomic_set(&res->stopping, 0);
+ spin_lock_init(&res->lock);
+
return resizer_init_entities(res);
}
diff --git a/drivers/media/platform/omap3isp/ispresizer.h b/drivers/media/platform/omap3isp/ispresizer.h
index 9b01e9047c15..5414542912e2 100644
--- a/drivers/media/platform/omap3isp/ispresizer.h
+++ b/drivers/media/platform/omap3isp/ispresizer.h
@@ -12,21 +12,12 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#ifndef OMAP3_ISP_RESIZER_H
#define OMAP3_ISP_RESIZER_H
+#include <linux/spinlock.h>
#include <linux/types.h>
/*
@@ -96,6 +87,7 @@ enum resizer_input_entity {
/*
* struct isp_res_device - OMAP3 ISP resizer module
+ * @lock: Protects formats and crop rectangles between set_selection and IRQ
* @crop.request: Crop rectangle requested by the user
* @crop.active: Active crop rectangle (based on hardware requirements)
*/
@@ -116,6 +108,7 @@ struct isp_res_device {
enum isp_pipeline_stream_state state;
wait_queue_head_t wait;
atomic_t stopping;
+ spinlock_t lock;
struct {
struct v4l2_rect request;
diff --git a/drivers/media/platform/omap3isp/ispstat.c b/drivers/media/platform/omap3isp/ispstat.c
index e6cbc1eaf4ca..a94e8340508f 100644
--- a/drivers/media/platform/omap3isp/ispstat.c
+++ b/drivers/media/platform/omap3isp/ispstat.c
@@ -13,16 +13,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#include <linux/dma-mapping.h>
diff --git a/drivers/media/platform/omap3isp/ispstat.h b/drivers/media/platform/omap3isp/ispstat.h
index 58d6ac7cb664..b32b29677e2c 100644
--- a/drivers/media/platform/omap3isp/ispstat.h
+++ b/drivers/media/platform/omap3isp/ispstat.h
@@ -13,16 +13,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#ifndef OMAP3_ISP_STAT_H
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index e36bac26476c..bc38c88c7bd9 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -11,16 +11,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#include <asm/cacheflush.h>
@@ -319,10 +309,11 @@ isp_video_check_format(struct isp_video *video, struct isp_video_fh *vfh)
vfh->format.fmt.pix.height != format.fmt.pix.height ||
vfh->format.fmt.pix.width != format.fmt.pix.width ||
vfh->format.fmt.pix.bytesperline != format.fmt.pix.bytesperline ||
- vfh->format.fmt.pix.sizeimage != format.fmt.pix.sizeimage)
+ vfh->format.fmt.pix.sizeimage != format.fmt.pix.sizeimage ||
+ vfh->format.fmt.pix.field != format.fmt.pix.field)
return -EINVAL;
- return ret;
+ return 0;
}
/* -----------------------------------------------------------------------------
@@ -491,6 +482,11 @@ struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
else
buf->vb.v4l2_buf.sequence = atomic_read(&pipe->frame_number);
+ if (pipe->field != V4L2_FIELD_NONE)
+ buf->vb.v4l2_buf.sequence /= 2;
+
+ buf->vb.v4l2_buf.field = pipe->field;
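In these field modes the pipeline frame counter advances once per captured field rather than once per frame, so the sequence number is halved here to keep the reported buffer sequence advancing once per full frame: fields 0 and 1 report sequence 0, fields 2 and 3 report sequence 1, and so on.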
+
/* Report pipeline errors to userspace on the capture device side. */
if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->error) {
state = VB2_BUF_STATE_ERROR;
@@ -641,7 +637,40 @@ isp_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
if (format->type != video->type)
return -EINVAL;
- mutex_lock(&video->mutex);
+ /* Replace unsupported field orders with sane defaults. */
+ switch (format->fmt.pix.field) {
+ case V4L2_FIELD_NONE:
+ /* Progressive is supported everywhere. */
+ break;
+ case V4L2_FIELD_ALTERNATE:
+ /* ALTERNATE is not supported on output nodes. */
+ if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ format->fmt.pix.field = V4L2_FIELD_NONE;
+ break;
+ case V4L2_FIELD_INTERLACED:
+ /* The ISP has no concept of video standard, so select the
+ * top-bottom order when the unqualified interlaced order is
+ * requested.
+ */
+ format->fmt.pix.field = V4L2_FIELD_INTERLACED_TB;
+ /* Fall-through */
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ /* Interlaced orders are only supported at the CCDC output. */
+ if (video != &video->isp->isp_ccdc.video_out)
+ format->fmt.pix.field = V4L2_FIELD_NONE;
+ break;
+ case V4L2_FIELD_TOP:
+ case V4L2_FIELD_BOTTOM:
+ case V4L2_FIELD_SEQ_TB:
+ case V4L2_FIELD_SEQ_BT:
+ default:
+ /* All other field orders are currently unsupported; default to
+ * progressive.
+ */
+ format->fmt.pix.field = V4L2_FIELD_NONE;
+ break;
+ }
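A small userspace sketch of how these replacement rules behave on the CCDC capture node; the device node path and the frame size are assumptions for illustration.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_format fmt;
	int fd = open("/dev/video2", O_RDWR);	/* assumed CCDC output node */

	if (fd < 0)
		return 1;

	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	fmt.fmt.pix.width = 720;
	fmt.fmt.pix.height = 480;
	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
	fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;

	/* The driver adjusts the request; INTERLACED becomes INTERLACED_TB. */
	if (ioctl(fd, VIDIOC_S_FMT, &fmt) == 0)
		printf("field selected by the driver: %u\n", fmt.fmt.pix.field);

	return 0;
}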
/* Fill the bytesperline and sizeimage fields by converting to media bus
* format and back to pixel format.
@@ -649,9 +678,10 @@ isp_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
isp_video_pix_to_mbus(&format->fmt.pix, &fmt);
isp_video_mbus_to_pix(video, &fmt, &format->fmt.pix);
+ mutex_lock(&video->mutex);
vfh->format = *format;
-
mutex_unlock(&video->mutex);
+
return 0;
}
@@ -1039,6 +1069,7 @@ isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
video->queue = &vfh->queue;
INIT_LIST_HEAD(&video->dmaqueue);
atomic_set(&pipe->frame_number, -1);
+ pipe->field = vfh->format.fmt.pix.field;
mutex_lock(&video->queue_lock);
ret = vb2_streamon(&vfh->queue, type);
diff --git a/drivers/media/platform/omap3isp/ispvideo.h b/drivers/media/platform/omap3isp/ispvideo.h
index 7d2e82122ecd..0b7efedc3da9 100644
--- a/drivers/media/platform/omap3isp/ispvideo.h
+++ b/drivers/media/platform/omap3isp/ispvideo.h
@@ -11,16 +11,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
#ifndef OMAP3_ISP_VIDEO_H
@@ -88,6 +78,7 @@ enum isp_pipeline_state {
/*
* struct isp_pipeline - An ISP hardware pipeline
+ * @field: The field being processed by the pipeline
* @error: A hardware error occurred during capture
* @entities: Bitmask of entities in the pipeline (indexed by entity ID)
*/
@@ -101,6 +92,7 @@ struct isp_pipeline {
u32 entities;
unsigned long l3_ick;
unsigned int max_rate;
+ enum v4l2_field field;
atomic_t frame_number;
bool do_propagation; /* of frame number */
bool error;
diff --git a/drivers/media/platform/omap3isp/luma_enhance_table.h b/drivers/media/platform/omap3isp/luma_enhance_table.h
index 098b45e2280f..81c5b1566469 100644
--- a/drivers/media/platform/omap3isp/luma_enhance_table.h
+++ b/drivers/media/platform/omap3isp/luma_enhance_table.h
@@ -12,16 +12,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
1047552, 1047552, 1047552, 1047552, 1047552, 1047552, 1047552, 1047552,
diff --git a/drivers/media/platform/omap3isp/noise_filter_table.h b/drivers/media/platform/omap3isp/noise_filter_table.h
index d50451a4a242..5073f9847937 100644
--- a/drivers/media/platform/omap3isp/noise_filter_table.h
+++ b/drivers/media/platform/omap3isp/noise_filter_table.h
@@ -12,16 +12,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
*/
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c
index f33641384e15..4f81b4c9d113 100644
--- a/drivers/media/platform/s3c-camif/camif-capture.c
+++ b/drivers/media/platform/s3c-camif/camif-capture.c
@@ -280,8 +280,8 @@ static int camif_prepare_addr(struct camif_vp *vp, struct vb2_buffer *vb,
return -EINVAL;
}
- pr_debug("DMA address: y: %#x cb: %#x cr: %#x\n",
- paddr->y, paddr->cb, paddr->cr);
+ pr_debug("DMA address: y: %pad cb: %pad cr: %pad\n",
+ &paddr->y, &paddr->cb, &paddr->cr);
return 0;
}
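The %pad specifier used above is the kernel's printk conversion for dma_addr_t and takes a pointer to the value, which keeps the format string correct whether dma_addr_t is 32 or 64 bits wide. A minimal sketch of the pattern:

#include <linux/printk.h>
#include <linux/types.h>

/* Print a DMA address portably, whatever the width of dma_addr_t. */
static void print_dma_addr(dma_addr_t addr)
{
	pr_debug("DMA address: %pad\n", &addr);
}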
diff --git a/drivers/media/platform/s3c-camif/camif-regs.c b/drivers/media/platform/s3c-camif/camif-regs.c
index ebf5b184cce4..6e0c9988a191 100644
--- a/drivers/media/platform/s3c-camif/camif-regs.c
+++ b/drivers/media/platform/s3c-camif/camif-regs.c
@@ -214,8 +214,8 @@ void camif_hw_set_output_addr(struct camif_vp *vp,
paddr->cr);
}
- pr_debug("dst_buf[%d]: %#X, cb: %#X, cr: %#X\n",
- i, paddr->y, paddr->cb, paddr->cr);
+ pr_debug("dst_buf[%d]: %pad, cb: %pad, cr: %pad\n",
+ i, &paddr->y, &paddr->cb, &paddr->cr);
}
static void camif_hw_set_out_dma_size(struct camif_vp *vp)
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index 357af1ebaeda..d79e214ce8ce 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -490,14 +490,13 @@ static void job_abort(void *prv)
{
struct g2d_ctx *ctx = prv;
struct g2d_dev *dev = ctx->dev;
- int ret;
if (dev->curr == NULL) /* No job currently running */
return;
- ret = wait_event_timeout(dev->irq_queue,
- dev->curr == NULL,
- msecs_to_jiffies(G2D_TIMEOUT));
+ wait_event_timeout(dev->irq_queue,
+ dev->curr == NULL,
+ msecs_to_jiffies(G2D_TIMEOUT));
}
static void device_run(void *prv)
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index e66acbc2a82d..e525a7c8d885 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -729,7 +729,7 @@ static inline void exynos4_jpeg_set_qtbl_chr(void __iomem *regs, int quality)
ARRAY_SIZE(qtbl_chrominance[quality]));
}
-void exynos4_jpeg_set_huff_tbl(void __iomem *base)
+static void exynos4_jpeg_set_huff_tbl(void __iomem *base)
{
exynos4_jpeg_set_tbl(base, hdctbl0, EXYNOS4_HUFF_TBL_HDCLL,
ARRAY_SIZE(hdctbl0));
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.c b/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.c
index d26e1f846553..e8c2cad93962 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos3250.c
@@ -233,6 +233,7 @@ void exynos3250_jpeg_set_x(void __iomem *regs, unsigned int x)
writel(reg, regs + EXYNOS3250_JPGX);
}
+#if 0 /* Currently unused */
unsigned int exynos3250_jpeg_get_y(void __iomem *regs)
{
return readl(regs + EXYNOS3250_JPGY);
@@ -242,6 +243,7 @@ unsigned int exynos3250_jpeg_get_x(void __iomem *regs)
{
return readl(regs + EXYNOS3250_JPGX);
}
+#endif
void exynos3250_jpeg_interrupts_enable(void __iomem *regs)
{
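Note on the #if 0 block above: exynos3250_jpeg_get_x()/get_y() currently have no callers, and the #if 0 keeps the register accessors in the tree without compiling dead code. If they are expected to gain callers soon, an alternative (not what this patch does) would be to keep them built but file-local and annotated, for example:

	static unsigned int __maybe_unused exynos3250_jpeg_get_y(void __iomem *regs)
	{
		return readl(regs + EXYNOS3250_JPGY);
	}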
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c b/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c
index da8d6a1a984f..ab6d6f43c96f 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c
@@ -23,7 +23,7 @@ void exynos4_jpeg_sw_reset(void __iomem *base)
reg = readl(base + EXYNOS4_JPEG_CNTL_REG);
writel(reg & ~EXYNOS4_SOFT_RESET_HI, base + EXYNOS4_JPEG_CNTL_REG);
- ndelay(100000);
+ udelay(100);
writel(reg | EXYNOS4_SOFT_RESET_HI, base + EXYNOS4_JPEG_CNTL_REG);
}
@@ -151,9 +151,6 @@ void exynos4_jpeg_set_enc_out_fmt(void __iomem *base, unsigned int out_fmt)
void exynos4_jpeg_set_interrupt(void __iomem *base)
{
- unsigned int reg;
-
- reg = readl(base + EXYNOS4_INT_EN_REG) & ~EXYNOS4_INT_EN_MASK;
writel(EXYNOS4_INT_EN_ALL, base + EXYNOS4_INT_EN_REG);
}
@@ -185,7 +182,7 @@ void exynos4_jpeg_set_huf_table_enable(void __iomem *base, int value)
writel(reg | EXYNOS4_HUF_TBL_EN,
base + EXYNOS4_JPEG_CNTL_REG);
else
- writel(reg | ~EXYNOS4_HUF_TBL_EN,
+ writel(reg & ~EXYNOS4_HUF_TBL_EN,
base + EXYNOS4_JPEG_CNTL_REG);
}
@@ -196,9 +193,9 @@ void exynos4_jpeg_set_sys_int_enable(void __iomem *base, int value)
reg = readl(base + EXYNOS4_JPEG_CNTL_REG) & ~(EXYNOS4_SYS_INT_EN);
if (value == 1)
- writel(EXYNOS4_SYS_INT_EN, base + EXYNOS4_JPEG_CNTL_REG);
+ writel(reg | EXYNOS4_SYS_INT_EN, base + EXYNOS4_JPEG_CNTL_REG);
else
- writel(~EXYNOS4_SYS_INT_EN, base + EXYNOS4_JPEG_CNTL_REG);
+ writel(reg & ~EXYNOS4_SYS_INT_EN, base + EXYNOS4_JPEG_CNTL_REG);
}
void exynos4_jpeg_set_stream_buf_address(void __iomem *base,
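Note on the jpeg-hw-exynos4.c hunks: ndelay(100000) is replaced by the equivalent but more conventional udelay(100), and the enable helpers are corrected so that clearing a flag uses reg & ~BIT; the old reg | ~BIT set every bit except the intended one. The canonical read-modify-write shape, with placeholder register and bit names:

	static void set_ctrl_bit(void __iomem *base, bool enable)
	{
		u32 reg = readl(base + CTRL_REG);	/* CTRL_REG, ENABLE_BIT are placeholders */

		if (enable)
			writel(reg | ENABLE_BIT, base + CTRL_REG);	/* set only that bit */
		else
			writel(reg & ~ENABLE_BIT, base + CTRL_REG);	/* clear only that bit */
	}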
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c b/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c
index 52407d790726..e3b8e67e005f 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c
@@ -324,11 +324,9 @@ int s5p_jpeg_stream_stat_ok(void __iomem *regs)
void s5p_jpeg_clear_int(void __iomem *regs)
{
- unsigned long reg;
-
- reg = readl(regs + S5P_JPGINTST);
+ readl(regs + S5P_JPGINTST);
writel(S5P_INT_RELEASE, regs + S5P_JPGCOM);
- reg = readl(regs + S5P_JPGOPR);
+ readl(regs + S5P_JPGOPR);
}
unsigned int s5p_jpeg_compressed_size(void __iomem *regs)
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index d35b0418ab37..165bc86c5962 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -37,8 +37,8 @@
#define S5P_MFC_DEC_NAME "s5p-mfc-dec"
#define S5P_MFC_ENC_NAME "s5p-mfc-enc"
-int debug;
-module_param(debug, int, S_IRUGO | S_IWUSR);
+int mfc_debug_level;
+module_param_named(debug, mfc_debug_level, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level - higher value produces more verbose messages");
/* Helper functions for interrupt processing */
@@ -150,10 +150,10 @@ static void s5p_mfc_watchdog_worker(struct work_struct *work)
if (!ctx)
continue;
ctx->state = MFCINST_ERROR;
- s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, &ctx->dst_queue,
- &ctx->vq_dst);
- s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, &ctx->src_queue,
- &ctx->vq_src);
+ s5p_mfc_hw_call_void(dev->mfc_ops, cleanup_queue,
+ &ctx->dst_queue, &ctx->vq_dst);
+ s5p_mfc_hw_call_void(dev->mfc_ops, cleanup_queue,
+ &ctx->src_queue, &ctx->vq_src);
clear_work_bit(ctx);
wake_up_ctx(ctx, S5P_MFC_R2H_CMD_ERR_RET, 0);
}
@@ -264,7 +264,12 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
unsigned int frame_type;
dspl_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev);
- frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_disp_frame_type, ctx);
+ if (IS_MFCV6_PLUS(dev))
+ frame_type = s5p_mfc_hw_call(dev->mfc_ops,
+ get_disp_frame_type, ctx);
+ else
+ frame_type = s5p_mfc_hw_call(dev->mfc_ops,
+ get_dec_frame_type, dev);
/* If frame is same as previous then skip and do not dequeue */
if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED) {
@@ -327,12 +332,12 @@ static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx,
if (res_change == S5P_FIMV_RES_INCREASE ||
res_change == S5P_FIMV_RES_DECREASE) {
ctx->state = MFCINST_RES_CHANGE_INIT;
- s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
+ s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
wake_up_ctx(ctx, reason, err);
if (test_and_clear_bit(0, &dev->hw_lock) == 0)
BUG();
s5p_mfc_clock_off();
- s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+ s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
return;
}
if (ctx->dpb_flush_flag)
@@ -400,7 +405,7 @@ leave_handle_frame:
if ((ctx->src_queue_cnt == 0 && ctx->state != MFCINST_FINISHING)
|| ctx->dst_queue_cnt < ctx->pb_count)
clear_work_bit(ctx);
- s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
+ s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
wake_up_ctx(ctx, reason, err);
if (test_and_clear_bit(0, &dev->hw_lock) == 0)
BUG();
@@ -409,7 +414,7 @@ leave_handle_frame:
if (test_bit(0, &dev->enter_suspend))
wake_up_dev(dev, reason, err);
else
- s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+ s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
}
/* Error handling for interrupt */
@@ -435,10 +440,10 @@ static void s5p_mfc_handle_error(struct s5p_mfc_dev *dev,
ctx->state = MFCINST_ERROR;
/* Mark all dst buffers as having an error */
spin_lock_irqsave(&dev->irqlock, flags);
- s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue,
+ s5p_mfc_hw_call_void(dev->mfc_ops, cleanup_queue,
&ctx->dst_queue, &ctx->vq_dst);
/* Mark all src buffers as having an error */
- s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue,
+ s5p_mfc_hw_call_void(dev->mfc_ops, cleanup_queue,
&ctx->src_queue, &ctx->vq_src);
spin_unlock_irqrestore(&dev->irqlock, flags);
wake_up_ctx(ctx, reason, err);
@@ -452,7 +457,7 @@ static void s5p_mfc_handle_error(struct s5p_mfc_dev *dev,
}
if (test_and_clear_bit(0, &dev->hw_lock) == 0)
BUG();
- s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
+ s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
s5p_mfc_clock_off();
wake_up_dev(dev, reason, err);
return;
@@ -476,7 +481,7 @@ static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
ctx->img_height = s5p_mfc_hw_call(dev->mfc_ops, get_img_height,
dev);
- s5p_mfc_hw_call(dev->mfc_ops, dec_calc_dpb_size, ctx);
+ s5p_mfc_hw_call_void(dev->mfc_ops, dec_calc_dpb_size, ctx);
ctx->pb_count = s5p_mfc_hw_call(dev->mfc_ops, get_dpb_count,
dev);
@@ -503,12 +508,12 @@ static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
ctx->head_processed = 1;
}
}
- s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
+ s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
clear_work_bit(ctx);
if (test_and_clear_bit(0, &dev->hw_lock) == 0)
BUG();
s5p_mfc_clock_off();
- s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+ s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
wake_up_ctx(ctx, reason, err);
}
@@ -523,7 +528,7 @@ static void s5p_mfc_handle_init_buffers(struct s5p_mfc_ctx *ctx,
if (ctx == NULL)
return;
dev = ctx->dev;
- s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
+ s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
ctx->int_type = reason;
ctx->int_err = err;
ctx->int_cond = 1;
@@ -550,7 +555,7 @@ static void s5p_mfc_handle_init_buffers(struct s5p_mfc_ctx *ctx,
s5p_mfc_clock_off();
wake_up(&ctx->queue);
- s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+ s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
} else {
if (test_and_clear_bit(0, &dev->hw_lock) == 0)
BUG();
@@ -591,7 +596,7 @@ static void s5p_mfc_handle_stream_complete(struct s5p_mfc_ctx *ctx,
s5p_mfc_clock_off();
wake_up(&ctx->queue);
- s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+ s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
}
/* Interrupt processing */
@@ -628,12 +633,12 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv)
if (ctx->c_ops->post_frame_start) {
if (ctx->c_ops->post_frame_start(ctx))
mfc_err("post_frame_start() failed\n");
- s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
+ s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
wake_up_ctx(ctx, reason, err);
if (test_and_clear_bit(0, &dev->hw_lock) == 0)
BUG();
s5p_mfc_clock_off();
- s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+ s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
} else {
s5p_mfc_handle_frame(ctx, reason, err);
}
@@ -663,7 +668,7 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv)
case S5P_MFC_R2H_CMD_WAKEUP_RET:
if (ctx)
clear_work_bit(ctx);
- s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
+ s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
wake_up_dev(dev, reason, err);
clear_bit(0, &dev->hw_lock);
clear_bit(0, &dev->enter_suspend);
@@ -685,12 +690,12 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv)
default:
mfc_debug(2, "Unknown int reason\n");
- s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
+ s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
}
mfc_debug_leave();
return IRQ_HANDLED;
irq_cleanup_hw:
- s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
+ s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
ctx->int_type = reason;
ctx->int_err = err;
ctx->int_cond = 1;
@@ -699,7 +704,7 @@ irq_cleanup_hw:
s5p_mfc_clock_off();
- s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+ s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
mfc_debug(2, "Exit via irq_cleanup_hw\n");
return IRQ_HANDLED;
}
@@ -1311,11 +1316,9 @@ static int s5p_mfc_runtime_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
- int pre_power;
if (!m_dev->alloc_ctx)
return 0;
- pre_power = atomic_read(&m_dev->pm.power);
atomic_set(&m_dev->pm.power, 1);
return 0;
}
@@ -1328,20 +1331,20 @@ static const struct dev_pm_ops s5p_mfc_pm_ops = {
NULL)
};
-struct s5p_mfc_buf_size_v5 mfc_buf_size_v5 = {
+static struct s5p_mfc_buf_size_v5 mfc_buf_size_v5 = {
.h264_ctx = MFC_H264_CTX_BUF_SIZE,
.non_h264_ctx = MFC_CTX_BUF_SIZE,
.dsc = DESC_BUF_SIZE,
.shm = SHARED_BUF_SIZE,
};
-struct s5p_mfc_buf_size buf_size_v5 = {
+static struct s5p_mfc_buf_size buf_size_v5 = {
.fw = MAX_FW_SIZE,
.cpb = MAX_CPB_SIZE,
.priv = &mfc_buf_size_v5,
};
-struct s5p_mfc_buf_align mfc_buf_align_v5 = {
+static struct s5p_mfc_buf_align mfc_buf_align_v5 = {
.base = MFC_BASE_ALIGN_ORDER,
};
@@ -1354,7 +1357,7 @@ static struct s5p_mfc_variant mfc_drvdata_v5 = {
.fw_name[0] = "s5p-mfc.fw",
};
-struct s5p_mfc_buf_size_v6 mfc_buf_size_v6 = {
+static struct s5p_mfc_buf_size_v6 mfc_buf_size_v6 = {
.dev_ctx = MFC_CTX_BUF_SIZE_V6,
.h264_dec_ctx = MFC_H264_DEC_CTX_BUF_SIZE_V6,
.other_dec_ctx = MFC_OTHER_DEC_CTX_BUF_SIZE_V6,
@@ -1362,13 +1365,13 @@ struct s5p_mfc_buf_size_v6 mfc_buf_size_v6 = {
.other_enc_ctx = MFC_OTHER_ENC_CTX_BUF_SIZE_V6,
};
-struct s5p_mfc_buf_size buf_size_v6 = {
+static struct s5p_mfc_buf_size buf_size_v6 = {
.fw = MAX_FW_SIZE_V6,
.cpb = MAX_CPB_SIZE_V6,
.priv = &mfc_buf_size_v6,
};
-struct s5p_mfc_buf_align mfc_buf_align_v6 = {
+static struct s5p_mfc_buf_align mfc_buf_align_v6 = {
.base = 0,
};
@@ -1386,7 +1389,7 @@ static struct s5p_mfc_variant mfc_drvdata_v6 = {
.fw_name[1] = "s5p-mfc-v6-v2.fw",
};
-struct s5p_mfc_buf_size_v6 mfc_buf_size_v7 = {
+static struct s5p_mfc_buf_size_v6 mfc_buf_size_v7 = {
.dev_ctx = MFC_CTX_BUF_SIZE_V7,
.h264_dec_ctx = MFC_H264_DEC_CTX_BUF_SIZE_V7,
.other_dec_ctx = MFC_OTHER_DEC_CTX_BUF_SIZE_V7,
@@ -1394,13 +1397,13 @@ struct s5p_mfc_buf_size_v6 mfc_buf_size_v7 = {
.other_enc_ctx = MFC_OTHER_ENC_CTX_BUF_SIZE_V7,
};
-struct s5p_mfc_buf_size buf_size_v7 = {
+static struct s5p_mfc_buf_size buf_size_v7 = {
.fw = MAX_FW_SIZE_V7,
.cpb = MAX_CPB_SIZE_V7,
.priv = &mfc_buf_size_v7,
};
-struct s5p_mfc_buf_align mfc_buf_align_v7 = {
+static struct s5p_mfc_buf_align mfc_buf_align_v7 = {
.base = 0,
};
@@ -1413,7 +1416,7 @@ static struct s5p_mfc_variant mfc_drvdata_v7 = {
.fw_name[0] = "s5p-mfc-v7.fw",
};
-struct s5p_mfc_buf_size_v6 mfc_buf_size_v8 = {
+static struct s5p_mfc_buf_size_v6 mfc_buf_size_v8 = {
.dev_ctx = MFC_CTX_BUF_SIZE_V8,
.h264_dec_ctx = MFC_H264_DEC_CTX_BUF_SIZE_V8,
.other_dec_ctx = MFC_OTHER_DEC_CTX_BUF_SIZE_V8,
@@ -1421,13 +1424,13 @@ struct s5p_mfc_buf_size_v6 mfc_buf_size_v8 = {
.other_enc_ctx = MFC_OTHER_ENC_CTX_BUF_SIZE_V8,
};
-struct s5p_mfc_buf_size buf_size_v8 = {
+static struct s5p_mfc_buf_size buf_size_v8 = {
.fw = MAX_FW_SIZE_V8,
.cpb = MAX_CPB_SIZE_V8,
.priv = &mfc_buf_size_v8,
};
-struct s5p_mfc_buf_align mfc_buf_align_v8 = {
+static struct s5p_mfc_buf_align mfc_buf_align_v8 = {
.base = 0,
};
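Note on the s5p_mfc.c hunks above: the global debug variable becomes mfc_debug_level to avoid a very generic symbol in the kernel-wide namespace (it stays non-static because s5p_mfc_debug.h references it via extern), while module_param_named() keeps the user-visible parameter name, so an existing s5p_mfc.debug= setting continues to work. The same file routes MFC v5 hardware through get_dec_frame_type(), keeping get_disp_frame_type() for v6+, and converts void ops to the s5p_mfc_hw_call_void() helper introduced in s5p_mfc_common.h further down. Generic form of the parameter rename, with an illustrative driver prefix:

	int mydrv_debug_level;	/* non-static only when other files need it via extern */
	module_param_named(debug, mydrv_debug_level, int, S_IRUGO | S_IWUSR);
	MODULE_PARM_DESC(debug, "Debug level - higher value produces more verbose messages");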
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c
index 9a6efd6c1329..8c4739ca16d6 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c
@@ -14,6 +14,7 @@
#include "s5p_mfc_cmd.h"
#include "s5p_mfc_common.h"
#include "s5p_mfc_debug.h"
+#include "s5p_mfc_cmd_v5.h"
/* This function is used to send a command to the MFC */
static int s5p_mfc_cmd_host2risc_v5(struct s5p_mfc_dev *dev, int cmd,
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c
index ec1a5947ed7d..f17609669b96 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c
@@ -16,6 +16,7 @@
#include "s5p_mfc_debug.h"
#include "s5p_mfc_intr.h"
#include "s5p_mfc_opr.h"
+#include "s5p_mfc_cmd_v6.h"
static int s5p_mfc_cmd_host2risc_v6(struct s5p_mfc_dev *dev, int cmd,
struct s5p_mfc_cmd_args *args)
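Note on the cmd_v5.c/cmd_v6.c includes, and on the new static qualifiers elsewhere in this series (exynos4_jpeg_set_huff_tbl(), vidioc_decoder_cmd(), vidioc_encoder_cmd(), the mfc_buf_size/mfc_buf_align tables): both address missing-prototype warnings. A function with external linkage should be declared in a header that its own .c file includes, so the compiler checks the definition against the prototype; purely file-local helpers are simply made static. Sketch with illustrative names:

	/* foo.h */
	int foo_do_thing(int x);

	/* foo.c */
	#include "foo.h"		/* definition is checked against the declaration */

	static int foo_scale(int x)	/* file-local helper, needs no header prototype */
	{
		return x * 2;
	}

	int foo_do_thing(int x)
	{
		return foo_scale(x);
	}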
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
index 01816ffb384b..3e41ca1293ed 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
@@ -698,6 +698,12 @@ struct mfc_control {
#define s5p_mfc_hw_call(f, op, args...) \
((f && f->op) ? f->op(args) : -ENODEV)
+#define s5p_mfc_hw_call_void(f, op, args...) \
+do { \
+ if (f && f->op) \
+ f->op(args); \
+} while (0)
+
#define fh_to_ctx(__fh) container_of(__fh, struct s5p_mfc_ctx, fh)
#define ctrl_to_ctx(__ctrl) \
container_of((__ctrl)->handler, struct s5p_mfc_ctx, ctrl_handler)
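Note on the new macro above: s5p_mfc_hw_call() expands to a conditional expression whose fallback value is -ENODEV, which has no meaning when the op returns void; s5p_mfc_hw_call_void() is the statement form for those call sites, and the rest of the series converts them. Usage, taken from the converted call sites:

	/* void op: nothing to propagate */
	s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);

	/* value-returning op: keep the original macro so -ENODEV can be returned */
	ctx->pb_count = s5p_mfc_hw_call(dev->mfc_ops, get_dpb_count, dev);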
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
index ca9f78922832..0c885a8a0e9f 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
@@ -21,6 +21,7 @@
#include "s5p_mfc_intr.h"
#include "s5p_mfc_opr.h"
#include "s5p_mfc_pm.h"
+#include "s5p_mfc_ctrl.h"
/* Allocate memory for firmware */
int s5p_mfc_alloc_firmware(struct s5p_mfc_dev *dev)
@@ -188,12 +189,12 @@ static inline void s5p_mfc_init_memctrl(struct s5p_mfc_dev *dev)
{
if (IS_MFCV6_PLUS(dev)) {
mfc_write(dev, dev->bank1, S5P_FIMV_RISC_BASE_ADDRESS_V6);
- mfc_debug(2, "Base Address : %08x\n", dev->bank1);
+ mfc_debug(2, "Base Address : %pad\n", &dev->bank1);
} else {
mfc_write(dev, dev->bank1, S5P_FIMV_MC_DRAMBASE_ADR_A);
mfc_write(dev, dev->bank2, S5P_FIMV_MC_DRAMBASE_ADR_B);
- mfc_debug(2, "Bank1: %08x, Bank2: %08x\n",
- dev->bank1, dev->bank2);
+ mfc_debug(2, "Bank1: %pad, Bank2: %pad\n",
+ &dev->bank1, &dev->bank2);
}
}
@@ -257,9 +258,9 @@ int s5p_mfc_init_hw(struct s5p_mfc_dev *dev)
s5p_mfc_clock_off();
return ret;
}
- mfc_debug(2, "Ok, now will write a command to init the system\n");
+ mfc_debug(2, "Ok, now will wait for completion of hardware init\n");
if (s5p_mfc_wait_for_done_dev(dev, S5P_MFC_R2H_CMD_SYS_INIT_RET)) {
- mfc_err("Failed to load firmware\n");
+ mfc_err("Failed to init hardware\n");
s5p_mfc_reset(dev);
s5p_mfc_clock_off();
return -EIO;
@@ -293,7 +294,7 @@ void s5p_mfc_deinit_hw(struct s5p_mfc_dev *dev)
s5p_mfc_clock_on();
s5p_mfc_reset(dev);
- s5p_mfc_hw_call(dev->mfc_ops, release_dev_context_buffer, dev);
+ s5p_mfc_hw_call_void(dev->mfc_ops, release_dev_context_buffer, dev);
s5p_mfc_clock_off();
}
@@ -396,7 +397,7 @@ int s5p_mfc_open_mfc_inst(struct s5p_mfc_dev *dev, struct s5p_mfc_ctx *ctx)
set_work_bit_irqsave(ctx);
s5p_mfc_clean_ctx_int_flags(ctx);
- s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+ s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
if (s5p_mfc_wait_for_done_ctx(ctx,
S5P_MFC_R2H_CMD_OPEN_INSTANCE_RET, 0)) {
/* Error or timeout */
@@ -410,9 +411,9 @@ int s5p_mfc_open_mfc_inst(struct s5p_mfc_dev *dev, struct s5p_mfc_ctx *ctx)
err_free_desc_buf:
if (ctx->type == MFCINST_DECODER)
- s5p_mfc_hw_call(dev->mfc_ops, release_dec_desc_buffer, ctx);
+ s5p_mfc_hw_call_void(dev->mfc_ops, release_dec_desc_buffer, ctx);
err_free_inst_buf:
- s5p_mfc_hw_call(dev->mfc_ops, release_instance_buffer, ctx);
+ s5p_mfc_hw_call_void(dev->mfc_ops, release_instance_buffer, ctx);
err:
return ret;
}
@@ -422,17 +423,17 @@ void s5p_mfc_close_mfc_inst(struct s5p_mfc_dev *dev, struct s5p_mfc_ctx *ctx)
ctx->state = MFCINST_RETURN_INST;
set_work_bit_irqsave(ctx);
s5p_mfc_clean_ctx_int_flags(ctx);
- s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+ s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
/* Wait until instance is returned or timeout occurred */
if (s5p_mfc_wait_for_done_ctx(ctx,
S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET, 0))
mfc_err("Err returning instance\n");
/* Free resources */
- s5p_mfc_hw_call(dev->mfc_ops, release_codec_buffers, ctx);
- s5p_mfc_hw_call(dev->mfc_ops, release_instance_buffer, ctx);
+ s5p_mfc_hw_call_void(dev->mfc_ops, release_codec_buffers, ctx);
+ s5p_mfc_hw_call_void(dev->mfc_ops, release_instance_buffer, ctx);
if (ctx->type == MFCINST_DECODER)
- s5p_mfc_hw_call(dev->mfc_ops, release_dec_desc_buffer, ctx);
+ s5p_mfc_hw_call_void(dev->mfc_ops, release_dec_desc_buffer, ctx);
ctx->inst_no = MFC_NO_INSTANCE_SET;
ctx->state = MFCINST_FREE;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h b/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h
index 8e608f5aa0d7..5936923c631c 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h
@@ -1,5 +1,5 @@
/*
- * drivers/media/platform/samsung/mfc5/s5p_mfc_debug.h
+ * drivers/media/platform/s5p-mfc/s5p_mfc_debug.h
*
* Header file for Samsung MFC (Multi Function Codec - FIMV) driver
* This file contains debug macros
@@ -18,11 +18,11 @@
#define DEBUG
#ifdef DEBUG
-extern int debug;
+extern int mfc_debug_level;
#define mfc_debug(level, fmt, args...) \
do { \
- if (debug >= level) \
+ if (mfc_debug_level >= level) \
printk(KERN_DEBUG "%s:%d: " fmt, \
__func__, __LINE__, ##args); \
} while (0)
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
index 9103258b7df3..a98fe023deaf 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
@@ -283,17 +283,13 @@ static int vidioc_querycap(struct file *file, void *priv,
/* Enumerate format */
static int vidioc_enum_fmt(struct file *file, struct v4l2_fmtdesc *f,
- bool mplane, bool out)
+ bool out)
{
struct s5p_mfc_dev *dev = video_drvdata(file);
struct s5p_mfc_fmt *fmt;
int i, j = 0;
for (i = 0; i < ARRAY_SIZE(formats); ++i) {
- if (mplane && formats[i].num_planes == 1)
- continue;
- else if (!mplane && formats[i].num_planes > 1)
- continue;
if (out && formats[i].type != MFC_FMT_DEC)
continue;
else if (!out && formats[i].type != MFC_FMT_RAW)
@@ -313,28 +309,16 @@ static int vidioc_enum_fmt(struct file *file, struct v4l2_fmtdesc *f,
return 0;
}
-static int vidioc_enum_fmt_vid_cap(struct file *file, void *pirv,
- struct v4l2_fmtdesc *f)
-{
- return vidioc_enum_fmt(file, f, false, false);
-}
-
static int vidioc_enum_fmt_vid_cap_mplane(struct file *file, void *pirv,
struct v4l2_fmtdesc *f)
{
- return vidioc_enum_fmt(file, f, true, false);
-}
-
-static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
-{
- return vidioc_enum_fmt(file, f, false, true);
+ return vidioc_enum_fmt(file, f, false);
}
static int vidioc_enum_fmt_vid_out_mplane(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- return vidioc_enum_fmt(file, f, true, true);
+ return vidioc_enum_fmt(file, f, true);
}
/* Get format */
@@ -543,7 +527,7 @@ static int reqbufs_capture(struct s5p_mfc_dev *dev, struct s5p_mfc_ctx *ctx,
ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
if (ret)
goto out;
- s5p_mfc_hw_call(dev->mfc_ops, release_codec_buffers, ctx);
+ s5p_mfc_hw_call_void(dev->mfc_ops, release_codec_buffers, ctx);
ctx->dst_bufs_cnt = 0;
} else if (ctx->capture_state == QUEUE_FREE) {
WARN_ON(ctx->dst_bufs_cnt != 0);
@@ -571,7 +555,7 @@ static int reqbufs_capture(struct s5p_mfc_dev *dev, struct s5p_mfc_ctx *ctx,
if (s5p_mfc_ctx_ready(ctx))
set_work_bit_irqsave(ctx);
- s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+ s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
s5p_mfc_wait_for_done_ctx(ctx, S5P_MFC_R2H_CMD_INIT_BUFFERS_RET,
0);
} else {
@@ -823,8 +807,8 @@ static int vidioc_g_crop(struct file *file, void *priv,
return 0;
}
-int vidioc_decoder_cmd(struct file *file, void *priv,
- struct v4l2_decoder_cmd *cmd)
+static int vidioc_decoder_cmd(struct file *file, void *priv,
+ struct v4l2_decoder_cmd *cmd)
{
struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
struct s5p_mfc_dev *dev = ctx->dev;
@@ -846,7 +830,7 @@ int vidioc_decoder_cmd(struct file *file, void *priv,
if (s5p_mfc_ctx_ready(ctx))
set_work_bit_irqsave(ctx);
spin_unlock_irqrestore(&dev->irqlock, flags);
- s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+ s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
} else {
mfc_err("EOS: marking last buffer of stream");
buf = list_entry(ctx->src_queue.prev,
@@ -881,9 +865,7 @@ static int vidioc_subscribe_event(struct v4l2_fh *fh,
/* v4l2_ioctl_ops */
static const struct v4l2_ioctl_ops s5p_mfc_dec_ioctl_ops = {
.vidioc_querycap = vidioc_querycap,
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
.vidioc_enum_fmt_vid_cap_mplane = vidioc_enum_fmt_vid_cap_mplane,
- .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
.vidioc_enum_fmt_vid_out_mplane = vidioc_enum_fmt_vid_out_mplane,
.vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt,
.vidioc_g_fmt_vid_out_mplane = vidioc_g_fmt,
@@ -990,7 +972,7 @@ static int s5p_mfc_buf_init(struct vb2_buffer *vb)
if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
if (ctx->capture_state == QUEUE_BUFS_MMAPED)
return 0;
- for (i = 0; i <= ctx->src_fmt->num_planes ; i++) {
+ for (i = 0; i < ctx->dst_fmt->num_planes; i++) {
if (IS_ERR_OR_NULL(ERR_PTR(
vb2_dma_contig_plane_dma_addr(vb, i)))) {
mfc_err("Plane mem not allocated\n");
@@ -1044,7 +1026,7 @@ static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count)
/* If context is ready then dev = work->data;schedule it to run */
if (s5p_mfc_ctx_ready(ctx))
set_work_bit_irqsave(ctx);
- s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+ s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
return 0;
}
@@ -1065,8 +1047,8 @@ static void s5p_mfc_stop_streaming(struct vb2_queue *q)
}
if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
spin_lock_irqsave(&dev->irqlock, flags);
- s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, &ctx->dst_queue,
- &ctx->vq_dst);
+ s5p_mfc_hw_call_void(dev->mfc_ops, cleanup_queue,
+ &ctx->dst_queue, &ctx->vq_dst);
INIT_LIST_HEAD(&ctx->dst_queue);
ctx->dst_queue_cnt = 0;
ctx->dpb_flush_flag = 1;
@@ -1076,7 +1058,7 @@ static void s5p_mfc_stop_streaming(struct vb2_queue *q)
ctx->state = MFCINST_FLUSH;
set_work_bit_irqsave(ctx);
s5p_mfc_clean_ctx_int_flags(ctx);
- s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+ s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
if (s5p_mfc_wait_for_done_ctx(ctx,
S5P_MFC_R2H_CMD_DPB_FLUSH_RET, 0))
mfc_err("Err flushing buffers\n");
@@ -1084,8 +1066,8 @@ static void s5p_mfc_stop_streaming(struct vb2_queue *q)
}
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
spin_lock_irqsave(&dev->irqlock, flags);
- s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, &ctx->src_queue,
- &ctx->vq_src);
+ s5p_mfc_hw_call_void(dev->mfc_ops, cleanup_queue,
+ &ctx->src_queue, &ctx->vq_src);
INIT_LIST_HEAD(&ctx->src_queue);
ctx->src_queue_cnt = 0;
spin_unlock_irqrestore(&dev->irqlock, flags);
@@ -1124,7 +1106,7 @@ static void s5p_mfc_buf_queue(struct vb2_buffer *vb)
}
if (s5p_mfc_ctx_ready(ctx))
set_work_bit_irqsave(ctx);
- s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+ s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
}
static struct vb2_ops s5p_mfc_dec_qops = {
@@ -1220,7 +1202,7 @@ void s5p_mfc_dec_init(struct s5p_mfc_ctx *ctx)
else
f.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_NV12MT;
ctx->dst_fmt = find_format(&f, MFC_FMT_RAW);
- mfc_debug(2, "Default src_fmt is %x, dest_fmt is %x\n",
- (unsigned int)ctx->src_fmt, (unsigned int)ctx->dst_fmt);
+ mfc_debug(2, "Default src_fmt is %p, dest_fmt is %p\n",
+ ctx->src_fmt, ctx->dst_fmt);
}
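Note on the s5p_mfc_dec.c hunks: besides the mechanical _void conversions, the single-plane enum_fmt ioctls are dropped (the driver only exposes multiplanar queues, so the mplane argument became redundant), the default format pointers are printed with %p instead of being cast to unsigned int, and s5p_mfc_buf_init() now iterates the capture planes with a strict bound on the capture format; the old i <= ctx->src_fmt->num_planes read one element past the array and consulted the output format. The corrected check in isolation:

	for (i = 0; i < ctx->dst_fmt->num_planes; i++) {
		if (IS_ERR_OR_NULL(ERR_PTR(
				vb2_dma_contig_plane_dma_addr(vb, i)))) {
			mfc_err("Plane mem not allocated\n");
			return -EINVAL;
		}
	}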
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index d26b2484ca10..a904a1c7bb21 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -739,14 +739,11 @@ static int s5p_mfc_ctx_ready(struct s5p_mfc_ctx *ctx)
static void cleanup_ref_queue(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_buf *mb_entry;
- unsigned long mb_y_addr, mb_c_addr;
/* move buffers in ref queue to src queue */
while (!list_empty(&ctx->ref_queue)) {
mb_entry = list_entry((&ctx->ref_queue)->next,
struct s5p_mfc_buf, list);
- mb_y_addr = vb2_dma_contig_plane_dma_addr(mb_entry->b, 0);
- mb_c_addr = vb2_dma_contig_plane_dma_addr(mb_entry->b, 1);
list_del(&mb_entry->list);
ctx->ref_queue_cnt--;
list_add_tail(&mb_entry->list, &ctx->src_queue);
@@ -770,7 +767,7 @@ static int enc_pre_seq_start(struct s5p_mfc_ctx *ctx)
dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0);
dst_size = vb2_plane_size(dst_mb->b, 0);
- s5p_mfc_hw_call(dev->mfc_ops, set_enc_stream_buffer, ctx, dst_addr,
+ s5p_mfc_hw_call_void(dev->mfc_ops, set_enc_stream_buffer, ctx, dst_addr,
dst_size);
spin_unlock_irqrestore(&dev->irqlock, flags);
return 0;
@@ -803,7 +800,7 @@ static int enc_post_seq_start(struct s5p_mfc_ctx *ctx)
ctx->state = MFCINST_RUNNING;
if (s5p_mfc_ctx_ready(ctx))
set_work_bit_irqsave(ctx);
- s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+ s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
} else {
enc_pb_count = s5p_mfc_hw_call(dev->mfc_ops,
get_enc_dpb_count, dev);
@@ -828,15 +825,15 @@ static int enc_pre_frame_start(struct s5p_mfc_ctx *ctx)
src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
src_y_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 0);
src_c_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 1);
- s5p_mfc_hw_call(dev->mfc_ops, set_enc_frame_buffer, ctx, src_y_addr,
- src_c_addr);
+ s5p_mfc_hw_call_void(dev->mfc_ops, set_enc_frame_buffer, ctx,
+ src_y_addr, src_c_addr);
spin_unlock_irqrestore(&dev->irqlock, flags);
spin_lock_irqsave(&dev->irqlock, flags);
dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0);
dst_size = vb2_plane_size(dst_mb->b, 0);
- s5p_mfc_hw_call(dev->mfc_ops, set_enc_stream_buffer, ctx, dst_addr,
+ s5p_mfc_hw_call_void(dev->mfc_ops, set_enc_stream_buffer, ctx, dst_addr,
dst_size);
spin_unlock_irqrestore(&dev->irqlock, flags);
@@ -861,7 +858,7 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx)
mfc_read(dev, S5P_FIMV_ENC_SI_PIC_CNT));
spin_lock_irqsave(&dev->irqlock, flags);
if (slice_type >= 0) {
- s5p_mfc_hw_call(dev->mfc_ops, get_enc_frame_buffer, ctx,
+ s5p_mfc_hw_call_void(dev->mfc_ops, get_enc_frame_buffer, ctx,
&enc_y_addr, &enc_c_addr);
list_for_each_entry(mb_entry, &ctx->src_queue, list) {
mb_y_addr = vb2_dma_contig_plane_dma_addr(mb_entry->b, 0);
@@ -954,17 +951,13 @@ static int vidioc_querycap(struct file *file, void *priv,
}
static int vidioc_enum_fmt(struct file *file, struct v4l2_fmtdesc *f,
- bool mplane, bool out)
+ bool out)
{
struct s5p_mfc_dev *dev = video_drvdata(file);
struct s5p_mfc_fmt *fmt;
int i, j = 0;
for (i = 0; i < ARRAY_SIZE(formats); ++i) {
- if (mplane && formats[i].num_planes == 1)
- continue;
- else if (!mplane && formats[i].num_planes > 1)
- continue;
if (out && formats[i].type != MFC_FMT_RAW)
continue;
else if (!out && formats[i].type != MFC_FMT_ENC)
@@ -984,28 +977,16 @@ static int vidioc_enum_fmt(struct file *file, struct v4l2_fmtdesc *f,
return -EINVAL;
}
-static int vidioc_enum_fmt_vid_cap(struct file *file, void *pirv,
- struct v4l2_fmtdesc *f)
-{
- return vidioc_enum_fmt(file, f, false, false);
-}
-
static int vidioc_enum_fmt_vid_cap_mplane(struct file *file, void *pirv,
struct v4l2_fmtdesc *f)
{
- return vidioc_enum_fmt(file, f, true, false);
-}
-
-static int vidioc_enum_fmt_vid_out(struct file *file, void *prov,
- struct v4l2_fmtdesc *f)
-{
- return vidioc_enum_fmt(file, f, false, true);
+ return vidioc_enum_fmt(file, f, false);
}
static int vidioc_enum_fmt_vid_out_mplane(struct file *file, void *prov,
struct v4l2_fmtdesc *f)
{
- return vidioc_enum_fmt(file, f, true, true);
+ return vidioc_enum_fmt(file, f, true);
}
static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
@@ -1127,7 +1108,7 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
pix_fmt_mp->width, pix_fmt_mp->height,
ctx->img_width, ctx->img_height);
- s5p_mfc_hw_call(dev->mfc_ops, enc_calc_src_size, ctx);
+ s5p_mfc_hw_call_void(dev->mfc_ops, enc_calc_src_size, ctx);
pix_fmt_mp->plane_fmt[0].sizeimage = ctx->luma_size;
pix_fmt_mp->plane_fmt[0].bytesperline = ctx->buf_width;
pix_fmt_mp->plane_fmt[1].sizeimage = ctx->chroma_size;
@@ -1681,8 +1662,8 @@ static int vidioc_g_parm(struct file *file, void *priv,
return 0;
}
-int vidioc_encoder_cmd(struct file *file, void *priv,
- struct v4l2_encoder_cmd *cmd)
+static int vidioc_encoder_cmd(struct file *file, void *priv,
+ struct v4l2_encoder_cmd *cmd)
{
struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
struct s5p_mfc_dev *dev = ctx->dev;
@@ -1704,7 +1685,7 @@ int vidioc_encoder_cmd(struct file *file, void *priv,
if (s5p_mfc_ctx_ready(ctx))
set_work_bit_irqsave(ctx);
spin_unlock_irqrestore(&dev->irqlock, flags);
- s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+ s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
} else {
mfc_debug(2, "EOS: marking last buffer of stream\n");
buf = list_entry(ctx->src_queue.prev,
@@ -1736,9 +1717,7 @@ static int vidioc_subscribe_event(struct v4l2_fh *fh,
static const struct v4l2_ioctl_ops s5p_mfc_enc_ioctl_ops = {
.vidioc_querycap = vidioc_querycap,
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
.vidioc_enum_fmt_vid_cap_mplane = vidioc_enum_fmt_vid_cap_mplane,
- .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
.vidioc_enum_fmt_vid_out_mplane = vidioc_enum_fmt_vid_out_mplane,
.vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt,
.vidioc_g_fmt_vid_out_mplane = vidioc_g_fmt,
@@ -1771,13 +1750,13 @@ static int check_vb_with_fmt(struct s5p_mfc_fmt *fmt, struct vb2_buffer *vb)
return -EINVAL;
}
for (i = 0; i < fmt->num_planes; i++) {
- if (!vb2_dma_contig_plane_dma_addr(vb, i)) {
+ dma_addr_t dma = vb2_dma_contig_plane_dma_addr(vb, i);
+ if (!dma) {
mfc_err("failed to get plane cookie\n");
return -EINVAL;
}
- mfc_debug(2, "index: %d, plane[%d] cookie: 0x%08zx\n",
- vb->v4l2_buf.index, i,
- vb2_dma_contig_plane_dma_addr(vb, i));
+ mfc_debug(2, "index: %d, plane[%d] cookie: %pad\n",
+ vb->v4l2_buf.index, i, &dma);
}
return 0;
}
@@ -1897,7 +1876,7 @@ static int s5p_mfc_buf_prepare(struct vb2_buffer *vb)
ret = check_vb_with_fmt(ctx->dst_fmt, vb);
if (ret < 0)
return ret;
- mfc_debug(2, "plane size: %ld, dst size: %d\n",
+ mfc_debug(2, "plane size: %ld, dst size: %zu\n",
vb2_plane_size(vb, 0), ctx->enc_dst_buf_size);
if (vb2_plane_size(vb, 0) < ctx->enc_dst_buf_size) {
mfc_err("plane size is too small for capture\n");
@@ -1948,7 +1927,7 @@ static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count)
/* If context is ready then dev = work->data;schedule it to run */
if (s5p_mfc_ctx_ready(ctx))
set_work_bit_irqsave(ctx);
- s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+ s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
return 0;
}
@@ -1969,14 +1948,14 @@ static void s5p_mfc_stop_streaming(struct vb2_queue *q)
ctx->state = MFCINST_FINISHED;
spin_lock_irqsave(&dev->irqlock, flags);
if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
- s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, &ctx->dst_queue,
- &ctx->vq_dst);
+ s5p_mfc_hw_call_void(dev->mfc_ops, cleanup_queue,
+ &ctx->dst_queue, &ctx->vq_dst);
INIT_LIST_HEAD(&ctx->dst_queue);
ctx->dst_queue_cnt = 0;
}
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
cleanup_ref_queue(ctx);
- s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, &ctx->src_queue,
+ s5p_mfc_hw_call_void(dev->mfc_ops, cleanup_queue, &ctx->src_queue,
&ctx->vq_src);
INIT_LIST_HEAD(&ctx->src_queue);
ctx->src_queue_cnt = 0;
@@ -2017,7 +1996,7 @@ static void s5p_mfc_buf_queue(struct vb2_buffer *vb)
}
if (s5p_mfc_ctx_ready(ctx))
set_work_bit_irqsave(ctx);
- s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+ s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
}
static struct vb2_ops s5p_mfc_enc_qops = {
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
index c9a227428e6a..00a1d8b2a8c2 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
@@ -41,7 +41,7 @@ int s5p_mfc_alloc_priv_buf(struct device *dev,
struct s5p_mfc_priv_buf *b)
{
- mfc_debug(3, "Allocating priv: %d\n", b->size);
+ mfc_debug(3, "Allocating priv: %zu\n", b->size);
b->virt = dma_alloc_coherent(dev, b->size, &b->dma, GFP_KERNEL);
@@ -50,7 +50,7 @@ int s5p_mfc_alloc_priv_buf(struct device *dev,
return -ENOMEM;
}
- mfc_debug(3, "Allocated addr %p %08x\n", b->virt, b->dma);
+ mfc_debug(3, "Allocated addr %p %pad\n", b->virt, &b->dma);
return 0;
}
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h
index 7a7ad32ee608..de2b8c69daa5 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h
@@ -20,254 +20,254 @@
struct s5p_mfc_regs {
/* codec common registers */
- void *risc_on;
- void *risc2host_int;
- void *host2risc_int;
- void *risc_base_address;
- void *mfc_reset;
- void *host2risc_command;
- void *risc2host_command;
- void *mfc_bus_reset_ctrl;
- void *firmware_version;
- void *instance_id;
- void *codec_type;
- void *context_mem_addr;
- void *context_mem_size;
- void *pixel_format;
- void *metadata_enable;
- void *mfc_version;
- void *dbg_info_enable;
- void *dbg_buffer_addr;
- void *dbg_buffer_size;
- void *hed_control;
- void *mfc_timeout_value;
- void *hed_shared_mem_addr;
- void *dis_shared_mem_addr;/* only v7 */
- void *ret_instance_id;
- void *error_code;
- void *dbg_buffer_output_size;
- void *metadata_status;
- void *metadata_addr_mb_info;
- void *metadata_size_mb_info;
- void *dbg_info_stage_counter;
+ volatile void __iomem *risc_on;
+ volatile void __iomem *risc2host_int;
+ volatile void __iomem *host2risc_int;
+ volatile void __iomem *risc_base_address;
+ volatile void __iomem *mfc_reset;
+ volatile void __iomem *host2risc_command;
+ volatile void __iomem *risc2host_command;
+ volatile void __iomem *mfc_bus_reset_ctrl;
+ volatile void __iomem *firmware_version;
+ volatile void __iomem *instance_id;
+ volatile void __iomem *codec_type;
+ volatile void __iomem *context_mem_addr;
+ volatile void __iomem *context_mem_size;
+ volatile void __iomem *pixel_format;
+ volatile void __iomem *metadata_enable;
+ volatile void __iomem *mfc_version;
+ volatile void __iomem *dbg_info_enable;
+ volatile void __iomem *dbg_buffer_addr;
+ volatile void __iomem *dbg_buffer_size;
+ volatile void __iomem *hed_control;
+ volatile void __iomem *mfc_timeout_value;
+ volatile void __iomem *hed_shared_mem_addr;
+ volatile void __iomem *dis_shared_mem_addr;/* only v7 */
+ volatile void __iomem *ret_instance_id;
+ volatile void __iomem *error_code;
+ volatile void __iomem *dbg_buffer_output_size;
+ volatile void __iomem *metadata_status;
+ volatile void __iomem *metadata_addr_mb_info;
+ volatile void __iomem *metadata_size_mb_info;
+ volatile void __iomem *dbg_info_stage_counter;
/* decoder registers */
- void *d_crc_ctrl;
- void *d_dec_options;
- void *d_display_delay;
- void *d_set_frame_width;
- void *d_set_frame_height;
- void *d_sei_enable;
- void *d_min_num_dpb;
- void *d_min_first_plane_dpb_size;
- void *d_min_second_plane_dpb_size;
- void *d_min_third_plane_dpb_size;/* only v8 */
- void *d_min_num_mv;
- void *d_mvc_num_views;
- void *d_min_num_dis;/* only v7 */
- void *d_min_first_dis_size;/* only v7 */
- void *d_min_second_dis_size;/* only v7 */
- void *d_min_third_dis_size;/* only v7 */
- void *d_post_filter_luma_dpb0;/* v7 and v8 */
- void *d_post_filter_luma_dpb1;/* v7 and v8 */
- void *d_post_filter_luma_dpb2;/* only v7 */
- void *d_post_filter_chroma_dpb0;/* v7 and v8 */
- void *d_post_filter_chroma_dpb1;/* v7 and v8 */
- void *d_post_filter_chroma_dpb2;/* only v7 */
- void *d_num_dpb;
- void *d_num_mv;
- void *d_init_buffer_options;
- void *d_first_plane_dpb_stride_size;/* only v8 */
- void *d_second_plane_dpb_stride_size;/* only v8 */
- void *d_third_plane_dpb_stride_size;/* only v8 */
- void *d_first_plane_dpb_size;
- void *d_second_plane_dpb_size;
- void *d_third_plane_dpb_size;/* only v8 */
- void *d_mv_buffer_size;
- void *d_first_plane_dpb;
- void *d_second_plane_dpb;
- void *d_third_plane_dpb;
- void *d_mv_buffer;
- void *d_scratch_buffer_addr;
- void *d_scratch_buffer_size;
- void *d_metadata_buffer_addr;
- void *d_metadata_buffer_size;
- void *d_nal_start_options;/* v7 and v8 */
- void *d_cpb_buffer_addr;
- void *d_cpb_buffer_size;
- void *d_available_dpb_flag_upper;
- void *d_available_dpb_flag_lower;
- void *d_cpb_buffer_offset;
- void *d_slice_if_enable;
- void *d_picture_tag;
- void *d_stream_data_size;
- void *d_dynamic_dpb_flag_upper;/* v7 and v8 */
- void *d_dynamic_dpb_flag_lower;/* v7 and v8 */
- void *d_display_frame_width;
- void *d_display_frame_height;
- void *d_display_status;
- void *d_display_first_plane_addr;
- void *d_display_second_plane_addr;
- void *d_display_third_plane_addr;/* only v8 */
- void *d_display_frame_type;
- void *d_display_crop_info1;
- void *d_display_crop_info2;
- void *d_display_picture_profile;
- void *d_display_luma_crc;/* v7 and v8 */
- void *d_display_chroma0_crc;/* v7 and v8 */
- void *d_display_chroma1_crc;/* only v8 */
- void *d_display_luma_crc_top;/* only v6 */
- void *d_display_chroma_crc_top;/* only v6 */
- void *d_display_luma_crc_bot;/* only v6 */
- void *d_display_chroma_crc_bot;/* only v6 */
- void *d_display_aspect_ratio;
- void *d_display_extended_ar;
- void *d_decoded_frame_width;
- void *d_decoded_frame_height;
- void *d_decoded_status;
- void *d_decoded_first_plane_addr;
- void *d_decoded_second_plane_addr;
- void *d_decoded_third_plane_addr;/* only v8 */
- void *d_decoded_frame_type;
- void *d_decoded_crop_info1;
- void *d_decoded_crop_info2;
- void *d_decoded_picture_profile;
- void *d_decoded_nal_size;
- void *d_decoded_luma_crc;
- void *d_decoded_chroma0_crc;
- void *d_decoded_chroma1_crc;/* only v8 */
- void *d_ret_picture_tag_top;
- void *d_ret_picture_tag_bot;
- void *d_ret_picture_time_top;
- void *d_ret_picture_time_bot;
- void *d_chroma_format;
- void *d_vc1_info;/* v7 and v8 */
- void *d_mpeg4_info;
- void *d_h264_info;
- void *d_metadata_addr_concealed_mb;
- void *d_metadata_size_concealed_mb;
- void *d_metadata_addr_vc1_param;
- void *d_metadata_size_vc1_param;
- void *d_metadata_addr_sei_nal;
- void *d_metadata_size_sei_nal;
- void *d_metadata_addr_vui;
- void *d_metadata_size_vui;
- void *d_metadata_addr_mvcvui;/* v7 and v8 */
- void *d_metadata_size_mvcvui;/* v7 and v8 */
- void *d_mvc_view_id;
- void *d_frame_pack_sei_avail;
- void *d_frame_pack_arrgment_id;
- void *d_frame_pack_sei_info;
- void *d_frame_pack_grid_pos;
- void *d_display_recovery_sei_info;/* v7 and v8 */
- void *d_decoded_recovery_sei_info;/* v7 and v8 */
- void *d_display_first_addr;/* only v7 */
- void *d_display_second_addr;/* only v7 */
- void *d_display_third_addr;/* only v7 */
- void *d_decoded_first_addr;/* only v7 */
- void *d_decoded_second_addr;/* only v7 */
- void *d_decoded_third_addr;/* only v7 */
- void *d_used_dpb_flag_upper;/* v7 and v8 */
- void *d_used_dpb_flag_lower;/* v7 and v8 */
+ volatile void __iomem *d_crc_ctrl;
+ volatile void __iomem *d_dec_options;
+ volatile void __iomem *d_display_delay;
+ volatile void __iomem *d_set_frame_width;
+ volatile void __iomem *d_set_frame_height;
+ volatile void __iomem *d_sei_enable;
+ volatile void __iomem *d_min_num_dpb;
+ volatile void __iomem *d_min_first_plane_dpb_size;
+ volatile void __iomem *d_min_second_plane_dpb_size;
+ volatile void __iomem *d_min_third_plane_dpb_size;/* only v8 */
+ volatile void __iomem *d_min_num_mv;
+ volatile void __iomem *d_mvc_num_views;
+ volatile void __iomem *d_min_num_dis;/* only v7 */
+ volatile void __iomem *d_min_first_dis_size;/* only v7 */
+ volatile void __iomem *d_min_second_dis_size;/* only v7 */
+ volatile void __iomem *d_min_third_dis_size;/* only v7 */
+ volatile void __iomem *d_post_filter_luma_dpb0;/* v7 and v8 */
+ volatile void __iomem *d_post_filter_luma_dpb1;/* v7 and v8 */
+ volatile void __iomem *d_post_filter_luma_dpb2;/* only v7 */
+ volatile void __iomem *d_post_filter_chroma_dpb0;/* v7 and v8 */
+ volatile void __iomem *d_post_filter_chroma_dpb1;/* v7 and v8 */
+ volatile void __iomem *d_post_filter_chroma_dpb2;/* only v7 */
+ volatile void __iomem *d_num_dpb;
+ volatile void __iomem *d_num_mv;
+ volatile void __iomem *d_init_buffer_options;
+ volatile void __iomem *d_first_plane_dpb_stride_size;/* only v8 */
+ volatile void __iomem *d_second_plane_dpb_stride_size;/* only v8 */
+ volatile void __iomem *d_third_plane_dpb_stride_size;/* only v8 */
+ volatile void __iomem *d_first_plane_dpb_size;
+ volatile void __iomem *d_second_plane_dpb_size;
+ volatile void __iomem *d_third_plane_dpb_size;/* only v8 */
+ volatile void __iomem *d_mv_buffer_size;
+ volatile void __iomem *d_first_plane_dpb;
+ volatile void __iomem *d_second_plane_dpb;
+ volatile void __iomem *d_third_plane_dpb;
+ volatile void __iomem *d_mv_buffer;
+ volatile void __iomem *d_scratch_buffer_addr;
+ volatile void __iomem *d_scratch_buffer_size;
+ volatile void __iomem *d_metadata_buffer_addr;
+ volatile void __iomem *d_metadata_buffer_size;
+ volatile void __iomem *d_nal_start_options;/* v7 and v8 */
+ volatile void __iomem *d_cpb_buffer_addr;
+ volatile void __iomem *d_cpb_buffer_size;
+ volatile void __iomem *d_available_dpb_flag_upper;
+ volatile void __iomem *d_available_dpb_flag_lower;
+ volatile void __iomem *d_cpb_buffer_offset;
+ volatile void __iomem *d_slice_if_enable;
+ volatile void __iomem *d_picture_tag;
+ volatile void __iomem *d_stream_data_size;
+ volatile void __iomem *d_dynamic_dpb_flag_upper;/* v7 and v8 */
+ volatile void __iomem *d_dynamic_dpb_flag_lower;/* v7 and v8 */
+ volatile void __iomem *d_display_frame_width;
+ volatile void __iomem *d_display_frame_height;
+ volatile void __iomem *d_display_status;
+ volatile void __iomem *d_display_first_plane_addr;
+ volatile void __iomem *d_display_second_plane_addr;
+ volatile void __iomem *d_display_third_plane_addr;/* only v8 */
+ volatile void __iomem *d_display_frame_type;
+ volatile void __iomem *d_display_crop_info1;
+ volatile void __iomem *d_display_crop_info2;
+ volatile void __iomem *d_display_picture_profile;
+ volatile void __iomem *d_display_luma_crc;/* v7 and v8 */
+ volatile void __iomem *d_display_chroma0_crc;/* v7 and v8 */
+ volatile void __iomem *d_display_chroma1_crc;/* only v8 */
+ volatile void __iomem *d_display_luma_crc_top;/* only v6 */
+ volatile void __iomem *d_display_chroma_crc_top;/* only v6 */
+ volatile void __iomem *d_display_luma_crc_bot;/* only v6 */
+ volatile void __iomem *d_display_chroma_crc_bot;/* only v6 */
+ volatile void __iomem *d_display_aspect_ratio;
+ volatile void __iomem *d_display_extended_ar;
+ volatile void __iomem *d_decoded_frame_width;
+ volatile void __iomem *d_decoded_frame_height;
+ volatile void __iomem *d_decoded_status;
+ volatile void __iomem *d_decoded_first_plane_addr;
+ volatile void __iomem *d_decoded_second_plane_addr;
+ volatile void __iomem *d_decoded_third_plane_addr;/* only v8 */
+ volatile void __iomem *d_decoded_frame_type;
+ volatile void __iomem *d_decoded_crop_info1;
+ volatile void __iomem *d_decoded_crop_info2;
+ volatile void __iomem *d_decoded_picture_profile;
+ volatile void __iomem *d_decoded_nal_size;
+ volatile void __iomem *d_decoded_luma_crc;
+ volatile void __iomem *d_decoded_chroma0_crc;
+ volatile void __iomem *d_decoded_chroma1_crc;/* only v8 */
+ volatile void __iomem *d_ret_picture_tag_top;
+ volatile void __iomem *d_ret_picture_tag_bot;
+ volatile void __iomem *d_ret_picture_time_top;
+ volatile void __iomem *d_ret_picture_time_bot;
+ volatile void __iomem *d_chroma_format;
+ volatile void __iomem *d_vc1_info;/* v7 and v8 */
+ volatile void __iomem *d_mpeg4_info;
+ volatile void __iomem *d_h264_info;
+ volatile void __iomem *d_metadata_addr_concealed_mb;
+ volatile void __iomem *d_metadata_size_concealed_mb;
+ volatile void __iomem *d_metadata_addr_vc1_param;
+ volatile void __iomem *d_metadata_size_vc1_param;
+ volatile void __iomem *d_metadata_addr_sei_nal;
+ volatile void __iomem *d_metadata_size_sei_nal;
+ volatile void __iomem *d_metadata_addr_vui;
+ volatile void __iomem *d_metadata_size_vui;
+ volatile void __iomem *d_metadata_addr_mvcvui;/* v7 and v8 */
+ volatile void __iomem *d_metadata_size_mvcvui;/* v7 and v8 */
+ volatile void __iomem *d_mvc_view_id;
+ volatile void __iomem *d_frame_pack_sei_avail;
+ volatile void __iomem *d_frame_pack_arrgment_id;
+ volatile void __iomem *d_frame_pack_sei_info;
+ volatile void __iomem *d_frame_pack_grid_pos;
+ volatile void __iomem *d_display_recovery_sei_info;/* v7 and v8 */
+ volatile void __iomem *d_decoded_recovery_sei_info;/* v7 and v8 */
+ volatile void __iomem *d_display_first_addr;/* only v7 */
+ volatile void __iomem *d_display_second_addr;/* only v7 */
+ volatile void __iomem *d_display_third_addr;/* only v7 */
+ volatile void __iomem *d_decoded_first_addr;/* only v7 */
+ volatile void __iomem *d_decoded_second_addr;/* only v7 */
+ volatile void __iomem *d_decoded_third_addr;/* only v7 */
+ volatile void __iomem *d_used_dpb_flag_upper;/* v7 and v8 */
+ volatile void __iomem *d_used_dpb_flag_lower;/* v7 and v8 */
/* encoder registers */
- void *e_frame_width;
- void *e_frame_height;
- void *e_cropped_frame_width;
- void *e_cropped_frame_height;
- void *e_frame_crop_offset;
- void *e_enc_options;
- void *e_picture_profile;
- void *e_vbv_buffer_size;
- void *e_vbv_init_delay;
- void *e_fixed_picture_qp;
- void *e_rc_config;
- void *e_rc_qp_bound;
- void *e_rc_qp_bound_pb;/* v7 and v8 */
- void *e_rc_mode;
- void *e_mb_rc_config;
- void *e_padding_ctrl;
- void *e_air_threshold;
- void *e_mv_hor_range;
- void *e_mv_ver_range;
- void *e_num_dpb;
- void *e_luma_dpb;
- void *e_chroma_dpb;
- void *e_me_buffer;
- void *e_scratch_buffer_addr;
- void *e_scratch_buffer_size;
- void *e_tmv_buffer0;
- void *e_tmv_buffer1;
- void *e_ir_buffer_addr;/* v7 and v8 */
- void *e_source_first_plane_addr;
- void *e_source_second_plane_addr;
- void *e_source_third_plane_addr;/* v7 and v8 */
- void *e_source_first_plane_stride;/* v7 and v8 */
- void *e_source_second_plane_stride;/* v7 and v8 */
- void *e_source_third_plane_stride;/* v7 and v8 */
- void *e_stream_buffer_addr;
- void *e_stream_buffer_size;
- void *e_roi_buffer_addr;
- void *e_param_change;
- void *e_ir_size;
- void *e_gop_config;
- void *e_mslice_mode;
- void *e_mslice_size_mb;
- void *e_mslice_size_bits;
- void *e_frame_insertion;
- void *e_rc_frame_rate;
- void *e_rc_bit_rate;
- void *e_rc_roi_ctrl;
- void *e_picture_tag;
- void *e_bit_count_enable;
- void *e_max_bit_count;
- void *e_min_bit_count;
- void *e_metadata_buffer_addr;
- void *e_metadata_buffer_size;
- void *e_encoded_source_first_plane_addr;
- void *e_encoded_source_second_plane_addr;
- void *e_encoded_source_third_plane_addr;/* v7 and v8 */
- void *e_stream_size;
- void *e_slice_type;
- void *e_picture_count;
- void *e_ret_picture_tag;
- void *e_stream_buffer_write_pointer; /* only v6 */
- void *e_recon_luma_dpb_addr;
- void *e_recon_chroma_dpb_addr;
- void *e_metadata_addr_enc_slice;
- void *e_metadata_size_enc_slice;
- void *e_mpeg4_options;
- void *e_mpeg4_hec_period;
- void *e_aspect_ratio;
- void *e_extended_sar;
- void *e_h264_options;
- void *e_h264_options_2;/* v7 and v8 */
- void *e_h264_lf_alpha_offset;
- void *e_h264_lf_beta_offset;
- void *e_h264_i_period;
- void *e_h264_fmo_slice_grp_map_type;
- void *e_h264_fmo_num_slice_grp_minus1;
- void *e_h264_fmo_slice_grp_change_dir;
- void *e_h264_fmo_slice_grp_change_rate_minus1;
- void *e_h264_fmo_run_length_minus1_0;
- void *e_h264_aso_slice_order_0;
- void *e_h264_chroma_qp_offset;
- void *e_h264_num_t_layer;
- void *e_h264_hierarchical_qp_layer0;
- void *e_h264_frame_packing_sei_info;
- void *e_h264_nal_control;/* v7 and v8 */
- void *e_mvc_frame_qp_view1;
- void *e_mvc_rc_bit_rate_view1;
- void *e_mvc_rc_qbound_view1;
- void *e_mvc_rc_mode_view1;
- void *e_mvc_inter_view_prediction_on;
- void *e_vp8_options;/* v7 and v8 */
- void *e_vp8_filter_options;/* v7 and v8 */
- void *e_vp8_golden_frame_option;/* v7 and v8 */
- void *e_vp8_num_t_layer;/* v7 and v8 */
- void *e_vp8_hierarchical_qp_layer0;/* v7 and v8 */
- void *e_vp8_hierarchical_qp_layer1;/* v7 and v8 */
- void *e_vp8_hierarchical_qp_layer2;/* v7 and v8 */
+ volatile void __iomem *e_frame_width;
+ volatile void __iomem *e_frame_height;
+ volatile void __iomem *e_cropped_frame_width;
+ volatile void __iomem *e_cropped_frame_height;
+ volatile void __iomem *e_frame_crop_offset;
+ volatile void __iomem *e_enc_options;
+ volatile void __iomem *e_picture_profile;
+ volatile void __iomem *e_vbv_buffer_size;
+ volatile void __iomem *e_vbv_init_delay;
+ volatile void __iomem *e_fixed_picture_qp;
+ volatile void __iomem *e_rc_config;
+ volatile void __iomem *e_rc_qp_bound;
+ volatile void __iomem *e_rc_qp_bound_pb;/* v7 and v8 */
+ volatile void __iomem *e_rc_mode;
+ volatile void __iomem *e_mb_rc_config;
+ volatile void __iomem *e_padding_ctrl;
+ volatile void __iomem *e_air_threshold;
+ volatile void __iomem *e_mv_hor_range;
+ volatile void __iomem *e_mv_ver_range;
+ volatile void __iomem *e_num_dpb;
+ volatile void __iomem *e_luma_dpb;
+ volatile void __iomem *e_chroma_dpb;
+ volatile void __iomem *e_me_buffer;
+ volatile void __iomem *e_scratch_buffer_addr;
+ volatile void __iomem *e_scratch_buffer_size;
+ volatile void __iomem *e_tmv_buffer0;
+ volatile void __iomem *e_tmv_buffer1;
+ volatile void __iomem *e_ir_buffer_addr;/* v7 and v8 */
+ volatile void __iomem *e_source_first_plane_addr;
+ volatile void __iomem *e_source_second_plane_addr;
+ volatile void __iomem *e_source_third_plane_addr;/* v7 and v8 */
+ volatile void __iomem *e_source_first_plane_stride;/* v7 and v8 */
+ volatile void __iomem *e_source_second_plane_stride;/* v7 and v8 */
+ volatile void __iomem *e_source_third_plane_stride;/* v7 and v8 */
+ volatile void __iomem *e_stream_buffer_addr;
+ volatile void __iomem *e_stream_buffer_size;
+ volatile void __iomem *e_roi_buffer_addr;
+ volatile void __iomem *e_param_change;
+ volatile void __iomem *e_ir_size;
+ volatile void __iomem *e_gop_config;
+ volatile void __iomem *e_mslice_mode;
+ volatile void __iomem *e_mslice_size_mb;
+ volatile void __iomem *e_mslice_size_bits;
+ volatile void __iomem *e_frame_insertion;
+ volatile void __iomem *e_rc_frame_rate;
+ volatile void __iomem *e_rc_bit_rate;
+ volatile void __iomem *e_rc_roi_ctrl;
+ volatile void __iomem *e_picture_tag;
+ volatile void __iomem *e_bit_count_enable;
+ volatile void __iomem *e_max_bit_count;
+ volatile void __iomem *e_min_bit_count;
+ volatile void __iomem *e_metadata_buffer_addr;
+ volatile void __iomem *e_metadata_buffer_size;
+ volatile void __iomem *e_encoded_source_first_plane_addr;
+ volatile void __iomem *e_encoded_source_second_plane_addr;
+ volatile void __iomem *e_encoded_source_third_plane_addr;/* v7 and v8 */
+ volatile void __iomem *e_stream_size;
+ volatile void __iomem *e_slice_type;
+ volatile void __iomem *e_picture_count;
+ volatile void __iomem *e_ret_picture_tag;
+ volatile void __iomem *e_stream_buffer_write_pointer; /* only v6 */
+ volatile void __iomem *e_recon_luma_dpb_addr;
+ volatile void __iomem *e_recon_chroma_dpb_addr;
+ volatile void __iomem *e_metadata_addr_enc_slice;
+ volatile void __iomem *e_metadata_size_enc_slice;
+ volatile void __iomem *e_mpeg4_options;
+ volatile void __iomem *e_mpeg4_hec_period;
+ volatile void __iomem *e_aspect_ratio;
+ volatile void __iomem *e_extended_sar;
+ volatile void __iomem *e_h264_options;
+ volatile void __iomem *e_h264_options_2;/* v7 and v8 */
+ volatile void __iomem *e_h264_lf_alpha_offset;
+ volatile void __iomem *e_h264_lf_beta_offset;
+ volatile void __iomem *e_h264_i_period;
+ volatile void __iomem *e_h264_fmo_slice_grp_map_type;
+ volatile void __iomem *e_h264_fmo_num_slice_grp_minus1;
+ volatile void __iomem *e_h264_fmo_slice_grp_change_dir;
+ volatile void __iomem *e_h264_fmo_slice_grp_change_rate_minus1;
+ volatile void __iomem *e_h264_fmo_run_length_minus1_0;
+ volatile void __iomem *e_h264_aso_slice_order_0;
+ volatile void __iomem *e_h264_chroma_qp_offset;
+ volatile void __iomem *e_h264_num_t_layer;
+ volatile void __iomem *e_h264_hierarchical_qp_layer0;
+ volatile void __iomem *e_h264_frame_packing_sei_info;
+ volatile void __iomem *e_h264_nal_control;/* v7 and v8 */
+ volatile void __iomem *e_mvc_frame_qp_view1;
+ volatile void __iomem *e_mvc_rc_bit_rate_view1;
+ volatile void __iomem *e_mvc_rc_qbound_view1;
+ volatile void __iomem *e_mvc_rc_mode_view1;
+ volatile void __iomem *e_mvc_inter_view_prediction_on;
+ volatile void __iomem *e_vp8_options;/* v7 and v8 */
+ volatile void __iomem *e_vp8_filter_options;/* v7 and v8 */
+ volatile void __iomem *e_vp8_golden_frame_option;/* v7 and v8 */
+ volatile void __iomem *e_vp8_num_t_layer;/* v7 and v8 */
+ volatile void __iomem *e_vp8_hierarchical_qp_layer0;/* v7 and v8 */
+ volatile void __iomem *e_vp8_hierarchical_qp_layer1;/* v7 and v8 */
+ volatile void __iomem *e_vp8_hierarchical_qp_layer2;/* v7 and v8 */
};
struct s5p_mfc_hw_ops {
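Note on the struct s5p_mfc_regs hunk above: every member changes from a bare void * to volatile void __iomem *. The __iomem annotation lets sparse verify that these MMIO cookies are only accessed through readl()/writel() (matching the casts added to s5p_mfc_read_info_v5()/s5p_mfc_write_info_v5() below); the added volatile qualifier appears to be there to match the qualifiers at the existing call sites, since readl()/writel() already treat their argument as volatile. Typical access through such a field, with an illustrative bit value:

	static void mfc_kick_risc(const struct s5p_mfc_regs *regs)
	{
		u32 status = readl(regs->d_display_status);	/* never dereference __iomem directly */

		if (!(status & 0x1))				/* 0x1 is illustrative only */
			writel(0x1, regs->risc_on);
	}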
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
index 58ec7bb26ebc..7cf07963187d 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
@@ -228,6 +228,7 @@ static int s5p_mfc_alloc_instance_buffer_v5(struct s5p_mfc_ctx *ctx)
ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &ctx->shm);
if (ret) {
mfc_err("Failed to allocate shared memory buffer\n");
+ s5p_mfc_release_priv_buf(dev->mem_dev_l, &ctx->ctx);
return ret;
}
@@ -262,7 +263,7 @@ static void s5p_mfc_release_dev_context_buffer_v5(struct s5p_mfc_dev *dev)
static void s5p_mfc_write_info_v5(struct s5p_mfc_ctx *ctx, unsigned int data,
unsigned int ofs)
{
- writel(data, (ctx->shm.virt + ofs));
+ writel(data, (volatile void __iomem *)(ctx->shm.virt + ofs));
wmb();
}
@@ -270,7 +271,7 @@ static unsigned int s5p_mfc_read_info_v5(struct s5p_mfc_ctx *ctx,
unsigned int ofs)
{
rmb();
- return readl(ctx->shm.virt + ofs);
+ return readl((volatile void __iomem *)(ctx->shm.virt + ofs));
}
static void s5p_mfc_dec_calc_dpb_size_v5(struct s5p_mfc_ctx *ctx)
@@ -377,7 +378,7 @@ static int s5p_mfc_set_dec_stream_buffer_v5(struct s5p_mfc_ctx *ctx,
/* Set decoding frame buffer */
static int s5p_mfc_set_dec_frame_buffer_v5(struct s5p_mfc_ctx *ctx)
{
- unsigned int frame_size, i;
+ unsigned int frame_size_lu, i;
unsigned int frame_size_ch, frame_size_mv;
struct s5p_mfc_dev *dev = ctx->dev;
unsigned int dpb;
@@ -465,23 +466,23 @@ static int s5p_mfc_set_dec_frame_buffer_v5(struct s5p_mfc_ctx *ctx)
ctx->codec_mode);
return -EINVAL;
}
- frame_size = ctx->luma_size;
+ frame_size_lu = ctx->luma_size;
frame_size_ch = ctx->chroma_size;
frame_size_mv = ctx->mv_size;
- mfc_debug(2, "Frm size: %d ch: %d mv: %d\n", frame_size, frame_size_ch,
+ mfc_debug(2, "Frm size: %d ch: %d mv: %d\n", frame_size_lu, frame_size_ch,
frame_size_mv);
for (i = 0; i < ctx->total_dpb_count; i++) {
/* Bank2 */
- mfc_debug(2, "Luma %d: %x\n", i,
+ mfc_debug(2, "Luma %d: %zx\n", i,
ctx->dst_bufs[i].cookie.raw.luma);
mfc_write(dev, OFFSETB(ctx->dst_bufs[i].cookie.raw.luma),
S5P_FIMV_DEC_LUMA_ADR + i * 4);
- mfc_debug(2, "\tChroma %d: %x\n", i,
+ mfc_debug(2, "\tChroma %d: %zx\n", i,
ctx->dst_bufs[i].cookie.raw.chroma);
mfc_write(dev, OFFSETA(ctx->dst_bufs[i].cookie.raw.chroma),
S5P_FIMV_DEC_CHROMA_ADR + i * 4);
if (ctx->codec_mode == S5P_MFC_CODEC_H264_DEC) {
- mfc_debug(2, "\tBuf2: %x, size: %d\n",
+ mfc_debug(2, "\tBuf2: %zx, size: %d\n",
buf_addr2, buf_size2);
mfc_write(dev, OFFSETB(buf_addr2),
S5P_FIMV_H264_MV_ADR + i * 4);
@@ -489,14 +490,14 @@ static int s5p_mfc_set_dec_frame_buffer_v5(struct s5p_mfc_ctx *ctx)
buf_size2 -= frame_size_mv;
}
}
- mfc_debug(2, "Buf1: %u, buf_size1: %d\n", buf_addr1, buf_size1);
+ mfc_debug(2, "Buf1: %zu, buf_size1: %d\n", buf_addr1, buf_size1);
mfc_debug(2, "Buf 1/2 size after: %d/%d (frames %d)\n",
buf_size1, buf_size2, ctx->total_dpb_count);
if (buf_size1 < 0 || buf_size2 < 0) {
mfc_debug(2, "Not enough memory has been allocated\n");
return -ENOMEM;
}
- s5p_mfc_write_info_v5(ctx, frame_size, ALLOC_LUMA_DPB_SIZE);
+ s5p_mfc_write_info_v5(ctx, frame_size_lu, ALLOC_LUMA_DPB_SIZE);
s5p_mfc_write_info_v5(ctx, frame_size_ch, ALLOC_CHROMA_DPB_SIZE);
if (ctx->codec_mode == S5P_MFC_CODEC_H264_DEC)
s5p_mfc_write_info_v5(ctx, frame_size_mv, ALLOC_MV_SIZE);
@@ -566,7 +567,7 @@ static int s5p_mfc_set_enc_ref_buffer_v5(struct s5p_mfc_ctx *ctx)
enc_ref_c_size = ALIGN(guard_width * guard_height,
S5P_FIMV_NV12MT_SALIGN);
}
- mfc_debug(2, "buf_size1: %d, buf_size2: %d\n", buf_size1, buf_size2);
+ mfc_debug(2, "buf_size1: %zu, buf_size2: %zu\n", buf_size1, buf_size2);
switch (ctx->codec_mode) {
case S5P_MFC_CODEC_H264_ENC:
for (i = 0; i < 2; i++) {
@@ -605,7 +606,7 @@ static int s5p_mfc_set_enc_ref_buffer_v5(struct s5p_mfc_ctx *ctx)
S5P_FIMV_H264_NBOR_INFO_ADR);
buf_addr1 += S5P_FIMV_ENC_NBORINFO_SIZE;
buf_size1 -= S5P_FIMV_ENC_NBORINFO_SIZE;
- mfc_debug(2, "buf_size1: %d, buf_size2: %d\n",
+ mfc_debug(2, "buf_size1: %zu, buf_size2: %zu\n",
buf_size1, buf_size2);
break;
case S5P_MFC_CODEC_MPEG4_ENC:
@@ -636,7 +637,7 @@ static int s5p_mfc_set_enc_ref_buffer_v5(struct s5p_mfc_ctx *ctx)
S5P_FIMV_MPEG4_ACDC_COEF_ADR);
buf_addr1 += S5P_FIMV_ENC_ACDCCOEF_SIZE;
buf_size1 -= S5P_FIMV_ENC_ACDCCOEF_SIZE;
- mfc_debug(2, "buf_size1: %d, buf_size2: %d\n",
+ mfc_debug(2, "buf_size1: %zu, buf_size2: %zu\n",
buf_size1, buf_size2);
break;
case S5P_MFC_CODEC_H263_ENC:
@@ -662,7 +663,7 @@ static int s5p_mfc_set_enc_ref_buffer_v5(struct s5p_mfc_ctx *ctx)
mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_ACDC_COEF_ADR);
buf_addr1 += S5P_FIMV_ENC_ACDCCOEF_SIZE;
buf_size1 -= S5P_FIMV_ENC_ACDCCOEF_SIZE;
- mfc_debug(2, "buf_size1: %d, buf_size2: %d\n",
+ mfc_debug(2, "buf_size1: %zu, buf_size2: %zu\n",
buf_size1, buf_size2);
break;
default:
@@ -1186,7 +1187,6 @@ static int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx, int last_frame)
struct s5p_mfc_dev *dev = ctx->dev;
struct s5p_mfc_buf *temp_vb;
unsigned long flags;
- unsigned int index;
if (ctx->state == MFCINST_FINISHING) {
last_frame = MFC_DEC_LAST_FRAME;
@@ -1211,7 +1211,6 @@ static int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx, int last_frame)
vb2_dma_contig_plane_dma_addr(temp_vb->b, 0),
ctx->consumed_stream, temp_vb->b->v4l2_planes[0].bytesused);
spin_unlock_irqrestore(&dev->irqlock, flags);
- index = temp_vb->b->v4l2_buf.index;
dev->curr_ctx = ctx->num;
s5p_mfc_clean_ctx_int_flags(ctx);
if (temp_vb->b->v4l2_planes[0].bytesused == 0) {
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
index c1c12f8d8f68..8798b14bacce 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
@@ -43,11 +43,6 @@
} while (0)
#endif /* S5P_MFC_DEBUG_REGWRITE */
-#define READL(reg) \
- (WARN_ON_ONCE(!(reg)) ? 0 : readl(reg))
-#define WRITEL(data, reg) \
- (WARN_ON_ONCE(!(reg)) ? 0 : writel((data), (reg)))
-
#define IS_MFCV6_V2(dev) (!IS_MFCV7_PLUS(dev) && dev->fw_ver == MFC_FW_V2)
/* Allocate temporary buffers for decoding */
@@ -105,7 +100,7 @@ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
mb_width, mb_height),
S5P_FIMV_ME_BUFFER_ALIGN_V6);
- mfc_debug(2, "recon luma size: %d chroma size: %d\n",
+ mfc_debug(2, "recon luma size: %zu chroma size: %zu\n",
ctx->luma_dpb_size, ctx->chroma_dpb_size);
} else {
return -EINVAL;
@@ -416,10 +411,10 @@ static int s5p_mfc_set_dec_stream_buffer_v6(struct s5p_mfc_ctx *ctx,
mfc_debug(2, "inst_no: %d, buf_addr: 0x%08x,\n"
"buf_size: 0x%08x (%d)\n",
ctx->inst_no, buf_addr, strm_size, strm_size);
- WRITEL(strm_size, mfc_regs->d_stream_data_size);
- WRITEL(buf_addr, mfc_regs->d_cpb_buffer_addr);
- WRITEL(buf_size->cpb, mfc_regs->d_cpb_buffer_size);
- WRITEL(start_num_byte, mfc_regs->d_cpb_buffer_offset);
+ writel(strm_size, mfc_regs->d_stream_data_size);
+ writel(buf_addr, mfc_regs->d_cpb_buffer_addr);
+ writel(buf_size->cpb, mfc_regs->d_cpb_buffer_size);
+ writel(start_num_byte, mfc_regs->d_cpb_buffer_offset);
mfc_debug_leave();
return 0;
@@ -443,17 +438,17 @@ static int s5p_mfc_set_dec_frame_buffer_v6(struct s5p_mfc_ctx *ctx)
mfc_debug(2, "Total DPB COUNT: %d\n", ctx->total_dpb_count);
mfc_debug(2, "Setting display delay to %d\n", ctx->display_delay);
- WRITEL(ctx->total_dpb_count, mfc_regs->d_num_dpb);
- WRITEL(ctx->luma_size, mfc_regs->d_first_plane_dpb_size);
- WRITEL(ctx->chroma_size, mfc_regs->d_second_plane_dpb_size);
+ writel(ctx->total_dpb_count, mfc_regs->d_num_dpb);
+ writel(ctx->luma_size, mfc_regs->d_first_plane_dpb_size);
+ writel(ctx->chroma_size, mfc_regs->d_second_plane_dpb_size);
- WRITEL(buf_addr1, mfc_regs->d_scratch_buffer_addr);
- WRITEL(ctx->scratch_buf_size, mfc_regs->d_scratch_buffer_size);
+ writel(buf_addr1, mfc_regs->d_scratch_buffer_addr);
+ writel(ctx->scratch_buf_size, mfc_regs->d_scratch_buffer_size);
if (IS_MFCV8(dev)) {
- WRITEL(ctx->img_width,
+ writel(ctx->img_width,
mfc_regs->d_first_plane_dpb_stride_size);
- WRITEL(ctx->img_width,
+ writel(ctx->img_width,
mfc_regs->d_second_plane_dpb_stride_size);
}
@@ -462,8 +457,8 @@ static int s5p_mfc_set_dec_frame_buffer_v6(struct s5p_mfc_ctx *ctx)
if (ctx->codec_mode == S5P_FIMV_CODEC_H264_DEC ||
ctx->codec_mode == S5P_FIMV_CODEC_H264_MVC_DEC){
- WRITEL(ctx->mv_size, mfc_regs->d_mv_buffer_size);
- WRITEL(ctx->mv_count, mfc_regs->d_num_mv);
+ writel(ctx->mv_size, mfc_regs->d_mv_buffer_size);
+ writel(ctx->mv_count, mfc_regs->d_num_mv);
}
frame_size = ctx->luma_size;
@@ -474,13 +469,13 @@ static int s5p_mfc_set_dec_frame_buffer_v6(struct s5p_mfc_ctx *ctx)
for (i = 0; i < ctx->total_dpb_count; i++) {
/* Bank2 */
- mfc_debug(2, "Luma %d: %x\n", i,
+ mfc_debug(2, "Luma %d: %zx\n", i,
ctx->dst_bufs[i].cookie.raw.luma);
- WRITEL(ctx->dst_bufs[i].cookie.raw.luma,
+ writel(ctx->dst_bufs[i].cookie.raw.luma,
mfc_regs->d_first_plane_dpb + i * 4);
- mfc_debug(2, "\tChroma %d: %x\n", i,
+ mfc_debug(2, "\tChroma %d: %zx\n", i,
ctx->dst_bufs[i].cookie.raw.chroma);
- WRITEL(ctx->dst_bufs[i].cookie.raw.chroma,
+ writel(ctx->dst_bufs[i].cookie.raw.chroma,
mfc_regs->d_second_plane_dpb + i * 4);
}
if (ctx->codec_mode == S5P_MFC_CODEC_H264_DEC ||
@@ -492,23 +487,23 @@ static int s5p_mfc_set_dec_frame_buffer_v6(struct s5p_mfc_ctx *ctx)
align_gap = buf_addr1 - align_gap;
buf_size1 -= align_gap;
- mfc_debug(2, "\tBuf1: %x, size: %d\n",
+ mfc_debug(2, "\tBuf1: %zx, size: %d\n",
buf_addr1, buf_size1);
- WRITEL(buf_addr1, mfc_regs->d_mv_buffer + i * 4);
+ writel(buf_addr1, mfc_regs->d_mv_buffer + i * 4);
buf_addr1 += frame_size_mv;
buf_size1 -= frame_size_mv;
}
}
- mfc_debug(2, "Buf1: %u, buf_size1: %d (frames %d)\n",
+ mfc_debug(2, "Buf1: %zu, buf_size1: %d (frames %d)\n",
buf_addr1, buf_size1, ctx->total_dpb_count);
if (buf_size1 < 0) {
mfc_debug(2, "Not enough memory has been allocated.\n");
return -ENOMEM;
}
- WRITEL(ctx->inst_no, mfc_regs->instance_id);
- s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev,
+ writel(ctx->inst_no, mfc_regs->instance_id);
+ s5p_mfc_hw_call_void(dev->mfc_cmds, cmd_host2risc, dev,
S5P_FIMV_CH_INIT_BUFS_V6, NULL);
mfc_debug(2, "After setting buffers.\n");
@@ -522,8 +517,8 @@ static int s5p_mfc_set_enc_stream_buffer_v6(struct s5p_mfc_ctx *ctx,
struct s5p_mfc_dev *dev = ctx->dev;
const struct s5p_mfc_regs *mfc_regs = dev->mfc_regs;
- WRITEL(addr, mfc_regs->e_stream_buffer_addr); /* 16B align */
- WRITEL(size, mfc_regs->e_stream_buffer_size);
+ writel(addr, mfc_regs->e_stream_buffer_addr); /* 16B align */
+ writel(size, mfc_regs->e_stream_buffer_size);
mfc_debug(2, "stream buf addr: 0x%08lx, size: 0x%d\n",
addr, size);
@@ -537,8 +532,8 @@ static void s5p_mfc_set_enc_frame_buffer_v6(struct s5p_mfc_ctx *ctx,
struct s5p_mfc_dev *dev = ctx->dev;
const struct s5p_mfc_regs *mfc_regs = dev->mfc_regs;
- WRITEL(y_addr, mfc_regs->e_source_first_plane_addr);
- WRITEL(c_addr, mfc_regs->e_source_second_plane_addr);
+ writel(y_addr, mfc_regs->e_source_first_plane_addr);
+ writel(c_addr, mfc_regs->e_source_second_plane_addr);
mfc_debug(2, "enc src y buf addr: 0x%08lx\n", y_addr);
mfc_debug(2, "enc src c buf addr: 0x%08lx\n", c_addr);
@@ -551,11 +546,11 @@ static void s5p_mfc_get_enc_frame_buffer_v6(struct s5p_mfc_ctx *ctx,
const struct s5p_mfc_regs *mfc_regs = dev->mfc_regs;
unsigned long enc_recon_y_addr, enc_recon_c_addr;
- *y_addr = READL(mfc_regs->e_encoded_source_first_plane_addr);
- *c_addr = READL(mfc_regs->e_encoded_source_second_plane_addr);
+ *y_addr = readl(mfc_regs->e_encoded_source_first_plane_addr);
+ *c_addr = readl(mfc_regs->e_encoded_source_second_plane_addr);
- enc_recon_y_addr = READL(mfc_regs->e_recon_luma_dpb_addr);
- enc_recon_c_addr = READL(mfc_regs->e_recon_chroma_dpb_addr);
+ enc_recon_y_addr = readl(mfc_regs->e_recon_luma_dpb_addr);
+ enc_recon_c_addr = readl(mfc_regs->e_recon_chroma_dpb_addr);
mfc_debug(2, "recon y addr: 0x%08lx\n", enc_recon_y_addr);
mfc_debug(2, "recon c addr: 0x%08lx\n", enc_recon_c_addr);
@@ -577,36 +572,36 @@ static int s5p_mfc_set_enc_ref_buffer_v6(struct s5p_mfc_ctx *ctx)
mfc_debug(2, "Buf1: %p (%d)\n", (void *)buf_addr1, buf_size1);
for (i = 0; i < ctx->pb_count; i++) {
- WRITEL(buf_addr1, mfc_regs->e_luma_dpb + (4 * i));
+ writel(buf_addr1, mfc_regs->e_luma_dpb + (4 * i));
buf_addr1 += ctx->luma_dpb_size;
- WRITEL(buf_addr1, mfc_regs->e_chroma_dpb + (4 * i));
+ writel(buf_addr1, mfc_regs->e_chroma_dpb + (4 * i));
buf_addr1 += ctx->chroma_dpb_size;
- WRITEL(buf_addr1, mfc_regs->e_me_buffer + (4 * i));
+ writel(buf_addr1, mfc_regs->e_me_buffer + (4 * i));
buf_addr1 += ctx->me_buffer_size;
buf_size1 -= (ctx->luma_dpb_size + ctx->chroma_dpb_size +
ctx->me_buffer_size);
}
- WRITEL(buf_addr1, mfc_regs->e_scratch_buffer_addr);
- WRITEL(ctx->scratch_buf_size, mfc_regs->e_scratch_buffer_size);
+ writel(buf_addr1, mfc_regs->e_scratch_buffer_addr);
+ writel(ctx->scratch_buf_size, mfc_regs->e_scratch_buffer_size);
buf_addr1 += ctx->scratch_buf_size;
buf_size1 -= ctx->scratch_buf_size;
- WRITEL(buf_addr1, mfc_regs->e_tmv_buffer0);
+ writel(buf_addr1, mfc_regs->e_tmv_buffer0);
buf_addr1 += ctx->tmv_buffer_size >> 1;
- WRITEL(buf_addr1, mfc_regs->e_tmv_buffer1);
+ writel(buf_addr1, mfc_regs->e_tmv_buffer1);
buf_addr1 += ctx->tmv_buffer_size >> 1;
buf_size1 -= ctx->tmv_buffer_size;
- mfc_debug(2, "Buf1: %u, buf_size1: %d (ref frames %d)\n",
+ mfc_debug(2, "Buf1: %zu, buf_size1: %d (ref frames %d)\n",
buf_addr1, buf_size1, ctx->pb_count);
if (buf_size1 < 0) {
mfc_debug(2, "Not enough memory has been allocated.\n");
return -ENOMEM;
}
- WRITEL(ctx->inst_no, mfc_regs->instance_id);
- s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev,
+ writel(ctx->inst_no, mfc_regs->instance_id);
+ s5p_mfc_hw_call_void(dev->mfc_cmds, cmd_host2risc, dev,
S5P_FIMV_CH_INIT_BUFS_V6, NULL);
mfc_debug_leave();
@@ -621,15 +616,15 @@ static int s5p_mfc_set_slice_mode(struct s5p_mfc_ctx *ctx)
/* multi-slice control */
/* multi-slice MB number or bit size */
- WRITEL(ctx->slice_mode, mfc_regs->e_mslice_mode);
+ writel(ctx->slice_mode, mfc_regs->e_mslice_mode);
if (ctx->slice_mode == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB) {
- WRITEL(ctx->slice_size.mb, mfc_regs->e_mslice_size_mb);
+ writel(ctx->slice_size.mb, mfc_regs->e_mslice_size_mb);
} else if (ctx->slice_mode ==
V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES) {
- WRITEL(ctx->slice_size.bits, mfc_regs->e_mslice_size_bits);
+ writel(ctx->slice_size.bits, mfc_regs->e_mslice_size_bits);
} else {
- WRITEL(0x0, mfc_regs->e_mslice_size_mb);
- WRITEL(0x0, mfc_regs->e_mslice_size_bits);
+ writel(0x0, mfc_regs->e_mslice_size_mb);
+ writel(0x0, mfc_regs->e_mslice_size_bits);
}
return 0;
@@ -645,21 +640,21 @@ static int s5p_mfc_set_enc_params(struct s5p_mfc_ctx *ctx)
mfc_debug_enter();
/* width */
- WRITEL(ctx->img_width, mfc_regs->e_frame_width); /* 16 align */
+ writel(ctx->img_width, mfc_regs->e_frame_width); /* 16 align */
/* height */
- WRITEL(ctx->img_height, mfc_regs->e_frame_height); /* 16 align */
+ writel(ctx->img_height, mfc_regs->e_frame_height); /* 16 align */
/* cropped width */
- WRITEL(ctx->img_width, mfc_regs->e_cropped_frame_width);
+ writel(ctx->img_width, mfc_regs->e_cropped_frame_width);
/* cropped height */
- WRITEL(ctx->img_height, mfc_regs->e_cropped_frame_height);
+ writel(ctx->img_height, mfc_regs->e_cropped_frame_height);
/* cropped offset */
- WRITEL(0x0, mfc_regs->e_frame_crop_offset);
+ writel(0x0, mfc_regs->e_frame_crop_offset);
/* pictype : IDR period */
reg = 0;
reg |= p->gop_size & 0xFFFF;
- WRITEL(reg, mfc_regs->e_gop_config);
+ writel(reg, mfc_regs->e_gop_config);
/* multi-slice control */
/* multi-slice MB number or bit size */
@@ -667,65 +662,65 @@ static int s5p_mfc_set_enc_params(struct s5p_mfc_ctx *ctx)
reg = 0;
if (p->slice_mode == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB) {
reg |= (0x1 << 3);
- WRITEL(reg, mfc_regs->e_enc_options);
+ writel(reg, mfc_regs->e_enc_options);
ctx->slice_size.mb = p->slice_mb;
} else if (p->slice_mode == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES) {
reg |= (0x1 << 3);
- WRITEL(reg, mfc_regs->e_enc_options);
+ writel(reg, mfc_regs->e_enc_options);
ctx->slice_size.bits = p->slice_bit;
} else {
reg &= ~(0x1 << 3);
- WRITEL(reg, mfc_regs->e_enc_options);
+ writel(reg, mfc_regs->e_enc_options);
}
s5p_mfc_set_slice_mode(ctx);
/* cyclic intra refresh */
- WRITEL(p->intra_refresh_mb, mfc_regs->e_ir_size);
- reg = READL(mfc_regs->e_enc_options);
+ writel(p->intra_refresh_mb, mfc_regs->e_ir_size);
+ reg = readl(mfc_regs->e_enc_options);
if (p->intra_refresh_mb == 0)
reg &= ~(0x1 << 4);
else
reg |= (0x1 << 4);
- WRITEL(reg, mfc_regs->e_enc_options);
+ writel(reg, mfc_regs->e_enc_options);
/* 'NON_REFERENCE_STORE_ENABLE' for debugging */
- reg = READL(mfc_regs->e_enc_options);
+ reg = readl(mfc_regs->e_enc_options);
reg &= ~(0x1 << 9);
- WRITEL(reg, mfc_regs->e_enc_options);
+ writel(reg, mfc_regs->e_enc_options);
/* memory structure cur. frame */
if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12M) {
/* 0: Linear, 1: 2D tiled*/
- reg = READL(mfc_regs->e_enc_options);
+ reg = readl(mfc_regs->e_enc_options);
reg &= ~(0x1 << 7);
- WRITEL(reg, mfc_regs->e_enc_options);
+ writel(reg, mfc_regs->e_enc_options);
/* 0: NV12(CbCr), 1: NV21(CrCb) */
- WRITEL(0x0, mfc_regs->pixel_format);
+ writel(0x0, mfc_regs->pixel_format);
} else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV21M) {
/* 0: Linear, 1: 2D tiled*/
- reg = READL(mfc_regs->e_enc_options);
+ reg = readl(mfc_regs->e_enc_options);
reg &= ~(0x1 << 7);
- WRITEL(reg, mfc_regs->e_enc_options);
+ writel(reg, mfc_regs->e_enc_options);
/* 0: NV12(CbCr), 1: NV21(CrCb) */
- WRITEL(0x1, mfc_regs->pixel_format);
+ writel(0x1, mfc_regs->pixel_format);
} else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12MT_16X16) {
/* 0: Linear, 1: 2D tiled*/
- reg = READL(mfc_regs->e_enc_options);
+ reg = readl(mfc_regs->e_enc_options);
reg |= (0x1 << 7);
- WRITEL(reg, mfc_regs->e_enc_options);
+ writel(reg, mfc_regs->e_enc_options);
/* 0: NV12(CbCr), 1: NV21(CrCb) */
- WRITEL(0x0, mfc_regs->pixel_format);
+ writel(0x0, mfc_regs->pixel_format);
}
/* memory structure recon. frame */
/* 0: Linear, 1: 2D tiled */
- reg = READL(mfc_regs->e_enc_options);
+ reg = readl(mfc_regs->e_enc_options);
reg |= (0x1 << 8);
- WRITEL(reg, mfc_regs->e_enc_options);
+ writel(reg, mfc_regs->e_enc_options);
/* padding control & value */
- WRITEL(0x0, mfc_regs->e_padding_ctrl);
+ writel(0x0, mfc_regs->e_padding_ctrl);
if (p->pad) {
reg = 0;
/** enable */
@@ -736,64 +731,64 @@ static int s5p_mfc_set_enc_params(struct s5p_mfc_ctx *ctx)
reg |= ((p->pad_cb & 0xFF) << 8);
/** y value */
reg |= p->pad_luma & 0xFF;
- WRITEL(reg, mfc_regs->e_padding_ctrl);
+ writel(reg, mfc_regs->e_padding_ctrl);
}
/* rate control config. */
reg = 0;
/* frame-level rate control */
reg |= ((p->rc_frame & 0x1) << 9);
- WRITEL(reg, mfc_regs->e_rc_config);
+ writel(reg, mfc_regs->e_rc_config);
/* bit rate */
if (p->rc_frame)
- WRITEL(p->rc_bitrate,
+ writel(p->rc_bitrate,
mfc_regs->e_rc_bit_rate);
else
- WRITEL(1, mfc_regs->e_rc_bit_rate);
+ writel(1, mfc_regs->e_rc_bit_rate);
/* reaction coefficient */
if (p->rc_frame) {
if (p->rc_reaction_coeff < TIGHT_CBR_MAX) /* tight CBR */
- WRITEL(1, mfc_regs->e_rc_mode);
+ writel(1, mfc_regs->e_rc_mode);
else /* loose CBR */
- WRITEL(2, mfc_regs->e_rc_mode);
+ writel(2, mfc_regs->e_rc_mode);
}
/* seq header ctrl */
- reg = READL(mfc_regs->e_enc_options);
+ reg = readl(mfc_regs->e_enc_options);
reg &= ~(0x1 << 2);
reg |= ((p->seq_hdr_mode & 0x1) << 2);
/* frame skip mode */
reg &= ~(0x3);
reg |= (p->frame_skip_mode & 0x3);
- WRITEL(reg, mfc_regs->e_enc_options);
+ writel(reg, mfc_regs->e_enc_options);
/* 'DROP_CONTROL_ENABLE', disable */
- reg = READL(mfc_regs->e_rc_config);
+ reg = readl(mfc_regs->e_rc_config);
reg &= ~(0x1 << 10);
- WRITEL(reg, mfc_regs->e_rc_config);
+ writel(reg, mfc_regs->e_rc_config);
/* setting for MV range [16, 256] */
reg = (p->mv_h_range & S5P_FIMV_E_MV_RANGE_V6_MASK);
- WRITEL(reg, mfc_regs->e_mv_hor_range);
+ writel(reg, mfc_regs->e_mv_hor_range);
reg = (p->mv_v_range & S5P_FIMV_E_MV_RANGE_V6_MASK);
- WRITEL(reg, mfc_regs->e_mv_ver_range);
+ writel(reg, mfc_regs->e_mv_ver_range);
- WRITEL(0x0, mfc_regs->e_frame_insertion);
- WRITEL(0x0, mfc_regs->e_roi_buffer_addr);
- WRITEL(0x0, mfc_regs->e_param_change);
- WRITEL(0x0, mfc_regs->e_rc_roi_ctrl);
- WRITEL(0x0, mfc_regs->e_picture_tag);
+ writel(0x0, mfc_regs->e_frame_insertion);
+ writel(0x0, mfc_regs->e_roi_buffer_addr);
+ writel(0x0, mfc_regs->e_param_change);
+ writel(0x0, mfc_regs->e_rc_roi_ctrl);
+ writel(0x0, mfc_regs->e_picture_tag);
- WRITEL(0x0, mfc_regs->e_bit_count_enable);
- WRITEL(0x0, mfc_regs->e_max_bit_count);
- WRITEL(0x0, mfc_regs->e_min_bit_count);
+ writel(0x0, mfc_regs->e_bit_count_enable);
+ writel(0x0, mfc_regs->e_max_bit_count);
+ writel(0x0, mfc_regs->e_min_bit_count);
- WRITEL(0x0, mfc_regs->e_metadata_buffer_addr);
- WRITEL(0x0, mfc_regs->e_metadata_buffer_size);
+ writel(0x0, mfc_regs->e_metadata_buffer_addr);
+ writel(0x0, mfc_regs->e_metadata_buffer_size);
mfc_debug_leave();
@@ -814,10 +809,10 @@ static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
s5p_mfc_set_enc_params(ctx);
/* pictype : number of B */
- reg = READL(mfc_regs->e_gop_config);
+ reg = readl(mfc_regs->e_gop_config);
reg &= ~(0x3 << 16);
reg |= ((p->num_b_frame & 0x3) << 16);
- WRITEL(reg, mfc_regs->e_gop_config);
+ writel(reg, mfc_regs->e_gop_config);
/* profile & level */
reg = 0;
@@ -825,19 +820,19 @@ static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
reg |= ((p_h264->level & 0xFF) << 8);
/** profile - 0 ~ 3 */
reg |= p_h264->profile & 0x3F;
- WRITEL(reg, mfc_regs->e_picture_profile);
+ writel(reg, mfc_regs->e_picture_profile);
/* rate control config. */
- reg = READL(mfc_regs->e_rc_config);
+ reg = readl(mfc_regs->e_rc_config);
/** macroblock level rate control */
reg &= ~(0x1 << 8);
reg |= ((p->rc_mb & 0x1) << 8);
- WRITEL(reg, mfc_regs->e_rc_config);
+ writel(reg, mfc_regs->e_rc_config);
/** frame QP */
reg &= ~(0x3F);
reg |= p_h264->rc_frame_qp & 0x3F;
- WRITEL(reg, mfc_regs->e_rc_config);
+ writel(reg, mfc_regs->e_rc_config);
/* max & min value of QP */
reg = 0;
@@ -845,16 +840,16 @@ static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
reg |= ((p_h264->rc_max_qp & 0x3F) << 8);
/** min QP */
reg |= p_h264->rc_min_qp & 0x3F;
- WRITEL(reg, mfc_regs->e_rc_qp_bound);
+ writel(reg, mfc_regs->e_rc_qp_bound);
/* other QPs */
- WRITEL(0x0, mfc_regs->e_fixed_picture_qp);
+ writel(0x0, mfc_regs->e_fixed_picture_qp);
if (!p->rc_frame && !p->rc_mb) {
reg = 0;
reg |= ((p_h264->rc_b_frame_qp & 0x3F) << 16);
reg |= ((p_h264->rc_p_frame_qp & 0x3F) << 8);
reg |= p_h264->rc_frame_qp & 0x3F;
- WRITEL(reg, mfc_regs->e_fixed_picture_qp);
+ writel(reg, mfc_regs->e_fixed_picture_qp);
}
/* frame rate */
@@ -862,38 +857,38 @@ static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
reg = 0;
reg |= ((p->rc_framerate_num & 0xFFFF) << 16);
reg |= p->rc_framerate_denom & 0xFFFF;
- WRITEL(reg, mfc_regs->e_rc_frame_rate);
+ writel(reg, mfc_regs->e_rc_frame_rate);
}
/* vbv buffer size */
if (p->frame_skip_mode ==
V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
- WRITEL(p_h264->cpb_size & 0xFFFF,
+ writel(p_h264->cpb_size & 0xFFFF,
mfc_regs->e_vbv_buffer_size);
if (p->rc_frame)
- WRITEL(p->vbv_delay, mfc_regs->e_vbv_init_delay);
+ writel(p->vbv_delay, mfc_regs->e_vbv_init_delay);
}
/* interlace */
reg = 0;
reg |= ((p_h264->interlace & 0x1) << 3);
- WRITEL(reg, mfc_regs->e_h264_options);
+ writel(reg, mfc_regs->e_h264_options);
/* height */
if (p_h264->interlace) {
- WRITEL(ctx->img_height >> 1,
+ writel(ctx->img_height >> 1,
mfc_regs->e_frame_height); /* 32 align */
/* cropped height */
- WRITEL(ctx->img_height >> 1,
+ writel(ctx->img_height >> 1,
mfc_regs->e_cropped_frame_height);
}
/* loop filter ctrl */
- reg = READL(mfc_regs->e_h264_options);
+ reg = readl(mfc_regs->e_h264_options);
reg &= ~(0x3 << 1);
reg |= ((p_h264->loop_filter_mode & 0x3) << 1);
- WRITEL(reg, mfc_regs->e_h264_options);
+ writel(reg, mfc_regs->e_h264_options);
/* loopfilter alpha offset */
if (p_h264->loop_filter_alpha < 0) {
@@ -903,7 +898,7 @@ static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
reg = 0x00;
reg |= (p_h264->loop_filter_alpha & 0xF);
}
- WRITEL(reg, mfc_regs->e_h264_lf_alpha_offset);
+ writel(reg, mfc_regs->e_h264_lf_alpha_offset);
/* loopfilter beta offset */
if (p_h264->loop_filter_beta < 0) {
@@ -913,28 +908,28 @@ static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
reg = 0x00;
reg |= (p_h264->loop_filter_beta & 0xF);
}
- WRITEL(reg, mfc_regs->e_h264_lf_beta_offset);
+ writel(reg, mfc_regs->e_h264_lf_beta_offset);
/* entropy coding mode */
- reg = READL(mfc_regs->e_h264_options);
+ reg = readl(mfc_regs->e_h264_options);
reg &= ~(0x1);
reg |= p_h264->entropy_mode & 0x1;
- WRITEL(reg, mfc_regs->e_h264_options);
+ writel(reg, mfc_regs->e_h264_options);
/* number of ref. picture */
- reg = READL(mfc_regs->e_h264_options);
+ reg = readl(mfc_regs->e_h264_options);
reg &= ~(0x1 << 7);
reg |= (((p_h264->num_ref_pic_4p - 1) & 0x1) << 7);
- WRITEL(reg, mfc_regs->e_h264_options);
+ writel(reg, mfc_regs->e_h264_options);
/* 8x8 transform enable */
- reg = READL(mfc_regs->e_h264_options);
+ reg = readl(mfc_regs->e_h264_options);
reg &= ~(0x3 << 12);
reg |= ((p_h264->_8x8_transform & 0x3) << 12);
- WRITEL(reg, mfc_regs->e_h264_options);
+ writel(reg, mfc_regs->e_h264_options);
/* macroblock adaptive scaling features */
- WRITEL(0x0, mfc_regs->e_mb_rc_config);
+ writel(0x0, mfc_regs->e_mb_rc_config);
if (p->rc_mb) {
reg = 0;
/** dark region */
@@ -945,95 +940,95 @@ static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
reg |= ((p_h264->rc_mb_static & 0x1) << 1);
/** high activity region */
reg |= p_h264->rc_mb_activity & 0x1;
- WRITEL(reg, mfc_regs->e_mb_rc_config);
+ writel(reg, mfc_regs->e_mb_rc_config);
}
/* aspect ratio VUI */
- READL(mfc_regs->e_h264_options);
+ readl(mfc_regs->e_h264_options);
reg &= ~(0x1 << 5);
reg |= ((p_h264->vui_sar & 0x1) << 5);
- WRITEL(reg, mfc_regs->e_h264_options);
+ writel(reg, mfc_regs->e_h264_options);
- WRITEL(0x0, mfc_regs->e_aspect_ratio);
- WRITEL(0x0, mfc_regs->e_extended_sar);
+ writel(0x0, mfc_regs->e_aspect_ratio);
+ writel(0x0, mfc_regs->e_extended_sar);
if (p_h264->vui_sar) {
/* aspect ration IDC */
reg = 0;
reg |= p_h264->vui_sar_idc & 0xFF;
- WRITEL(reg, mfc_regs->e_aspect_ratio);
+ writel(reg, mfc_regs->e_aspect_ratio);
if (p_h264->vui_sar_idc == 0xFF) {
/* extended SAR */
reg = 0;
reg |= (p_h264->vui_ext_sar_width & 0xFFFF) << 16;
reg |= p_h264->vui_ext_sar_height & 0xFFFF;
- WRITEL(reg, mfc_regs->e_extended_sar);
+ writel(reg, mfc_regs->e_extended_sar);
}
}
/* intra picture period for H.264 open GOP */
/* control */
- READL(mfc_regs->e_h264_options);
+ readl(mfc_regs->e_h264_options);
reg &= ~(0x1 << 4);
reg |= ((p_h264->open_gop & 0x1) << 4);
- WRITEL(reg, mfc_regs->e_h264_options);
+ writel(reg, mfc_regs->e_h264_options);
/* value */
- WRITEL(0x0, mfc_regs->e_h264_i_period);
+ writel(0x0, mfc_regs->e_h264_i_period);
if (p_h264->open_gop) {
reg = 0;
reg |= p_h264->open_gop_size & 0xFFFF;
- WRITEL(reg, mfc_regs->e_h264_i_period);
+ writel(reg, mfc_regs->e_h264_i_period);
}
/* 'WEIGHTED_BI_PREDICTION' for B is disable */
- READL(mfc_regs->e_h264_options);
+ readl(mfc_regs->e_h264_options);
reg &= ~(0x3 << 9);
- WRITEL(reg, mfc_regs->e_h264_options);
+ writel(reg, mfc_regs->e_h264_options);
/* 'CONSTRAINED_INTRA_PRED_ENABLE' is disable */
- READL(mfc_regs->e_h264_options);
+ readl(mfc_regs->e_h264_options);
reg &= ~(0x1 << 14);
- WRITEL(reg, mfc_regs->e_h264_options);
+ writel(reg, mfc_regs->e_h264_options);
/* ASO */
- READL(mfc_regs->e_h264_options);
+ readl(mfc_regs->e_h264_options);
reg &= ~(0x1 << 6);
reg |= ((p_h264->aso & 0x1) << 6);
- WRITEL(reg, mfc_regs->e_h264_options);
+ writel(reg, mfc_regs->e_h264_options);
/* hier qp enable */
- READL(mfc_regs->e_h264_options);
+ readl(mfc_regs->e_h264_options);
reg &= ~(0x1 << 8);
reg |= ((p_h264->open_gop & 0x1) << 8);
- WRITEL(reg, mfc_regs->e_h264_options);
+ writel(reg, mfc_regs->e_h264_options);
reg = 0;
if (p_h264->hier_qp && p_h264->hier_qp_layer) {
reg |= (p_h264->hier_qp_type & 0x1) << 0x3;
reg |= p_h264->hier_qp_layer & 0x7;
- WRITEL(reg, mfc_regs->e_h264_num_t_layer);
+ writel(reg, mfc_regs->e_h264_num_t_layer);
/* QP value for each layer */
for (i = 0; i < p_h264->hier_qp_layer &&
i < ARRAY_SIZE(p_h264->hier_qp_layer_qp); i++) {
- WRITEL(p_h264->hier_qp_layer_qp[i],
+ writel(p_h264->hier_qp_layer_qp[i],
mfc_regs->e_h264_hierarchical_qp_layer0
+ i * 4);
}
}
/* number of coding layer should be zero when hierarchical is disable */
- WRITEL(reg, mfc_regs->e_h264_num_t_layer);
+ writel(reg, mfc_regs->e_h264_num_t_layer);
/* frame packing SEI generation */
- READL(mfc_regs->e_h264_options);
+ readl(mfc_regs->e_h264_options);
reg &= ~(0x1 << 25);
reg |= ((p_h264->sei_frame_packing & 0x1) << 25);
- WRITEL(reg, mfc_regs->e_h264_options);
+ writel(reg, mfc_regs->e_h264_options);
if (p_h264->sei_frame_packing) {
reg = 0;
/** current frame0 flag */
reg |= ((p_h264->sei_fp_curr_frame_0 & 0x1) << 2);
/** arrangement type */
reg |= p_h264->sei_fp_arrangement_type & 0x3;
- WRITEL(reg, mfc_regs->e_h264_frame_packing_sei_info);
+ writel(reg, mfc_regs->e_h264_frame_packing_sei_info);
}
if (p_h264->fmo) {
@@ -1042,7 +1037,7 @@ static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
if (p_h264->fmo_slice_grp > 4)
p_h264->fmo_slice_grp = 4;
for (i = 0; i < (p_h264->fmo_slice_grp & 0xF); i++)
- WRITEL(p_h264->fmo_run_len[i] - 1,
+ writel(p_h264->fmo_run_len[i] - 1,
mfc_regs->e_h264_fmo_run_length_minus1_0
+ i * 4);
break;
@@ -1054,10 +1049,10 @@ static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
case V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_WIPE_SCAN:
if (p_h264->fmo_slice_grp > 2)
p_h264->fmo_slice_grp = 2;
- WRITEL(p_h264->fmo_chg_dir & 0x1,
+ writel(p_h264->fmo_chg_dir & 0x1,
mfc_regs->e_h264_fmo_slice_grp_change_dir);
/* the valid range is 0 ~ number of macroblocks -1 */
- WRITEL(p_h264->fmo_chg_rate,
+ writel(p_h264->fmo_chg_rate,
mfc_regs->e_h264_fmo_slice_grp_change_rate_minus1);
break;
default:
@@ -1068,12 +1063,12 @@ static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
break;
}
- WRITEL(p_h264->fmo_map_type,
+ writel(p_h264->fmo_map_type,
mfc_regs->e_h264_fmo_slice_grp_map_type);
- WRITEL(p_h264->fmo_slice_grp - 1,
+ writel(p_h264->fmo_slice_grp - 1,
mfc_regs->e_h264_fmo_num_slice_grp_minus1);
} else {
- WRITEL(0, mfc_regs->e_h264_fmo_num_slice_grp_minus1);
+ writel(0, mfc_regs->e_h264_fmo_num_slice_grp_minus1);
}
mfc_debug_leave();
@@ -1094,10 +1089,10 @@ static int s5p_mfc_set_enc_params_mpeg4(struct s5p_mfc_ctx *ctx)
s5p_mfc_set_enc_params(ctx);
/* pictype : number of B */
- reg = READL(mfc_regs->e_gop_config);
+ reg = readl(mfc_regs->e_gop_config);
reg &= ~(0x3 << 16);
reg |= ((p->num_b_frame & 0x3) << 16);
- WRITEL(reg, mfc_regs->e_gop_config);
+ writel(reg, mfc_regs->e_gop_config);
/* profile & level */
reg = 0;
@@ -1105,19 +1100,19 @@ static int s5p_mfc_set_enc_params_mpeg4(struct s5p_mfc_ctx *ctx)
reg |= ((p_mpeg4->level & 0xFF) << 8);
/** profile - 0 ~ 1 */
reg |= p_mpeg4->profile & 0x3F;
- WRITEL(reg, mfc_regs->e_picture_profile);
+ writel(reg, mfc_regs->e_picture_profile);
/* rate control config. */
- reg = READL(mfc_regs->e_rc_config);
+ reg = readl(mfc_regs->e_rc_config);
/** macroblock level rate control */
reg &= ~(0x1 << 8);
reg |= ((p->rc_mb & 0x1) << 8);
- WRITEL(reg, mfc_regs->e_rc_config);
+ writel(reg, mfc_regs->e_rc_config);
/** frame QP */
reg &= ~(0x3F);
reg |= p_mpeg4->rc_frame_qp & 0x3F;
- WRITEL(reg, mfc_regs->e_rc_config);
+ writel(reg, mfc_regs->e_rc_config);
/* max & min value of QP */
reg = 0;
@@ -1125,16 +1120,16 @@ static int s5p_mfc_set_enc_params_mpeg4(struct s5p_mfc_ctx *ctx)
reg |= ((p_mpeg4->rc_max_qp & 0x3F) << 8);
/** min QP */
reg |= p_mpeg4->rc_min_qp & 0x3F;
- WRITEL(reg, mfc_regs->e_rc_qp_bound);
+ writel(reg, mfc_regs->e_rc_qp_bound);
/* other QPs */
- WRITEL(0x0, mfc_regs->e_fixed_picture_qp);
+ writel(0x0, mfc_regs->e_fixed_picture_qp);
if (!p->rc_frame && !p->rc_mb) {
reg = 0;
reg |= ((p_mpeg4->rc_b_frame_qp & 0x3F) << 16);
reg |= ((p_mpeg4->rc_p_frame_qp & 0x3F) << 8);
reg |= p_mpeg4->rc_frame_qp & 0x3F;
- WRITEL(reg, mfc_regs->e_fixed_picture_qp);
+ writel(reg, mfc_regs->e_fixed_picture_qp);
}
/* frame rate */
@@ -1142,21 +1137,21 @@ static int s5p_mfc_set_enc_params_mpeg4(struct s5p_mfc_ctx *ctx)
reg = 0;
reg |= ((p->rc_framerate_num & 0xFFFF) << 16);
reg |= p->rc_framerate_denom & 0xFFFF;
- WRITEL(reg, mfc_regs->e_rc_frame_rate);
+ writel(reg, mfc_regs->e_rc_frame_rate);
}
/* vbv buffer size */
if (p->frame_skip_mode ==
V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
- WRITEL(p->vbv_size & 0xFFFF, mfc_regs->e_vbv_buffer_size);
+ writel(p->vbv_size & 0xFFFF, mfc_regs->e_vbv_buffer_size);
if (p->rc_frame)
- WRITEL(p->vbv_delay, mfc_regs->e_vbv_init_delay);
+ writel(p->vbv_delay, mfc_regs->e_vbv_init_delay);
}
/* Disable HEC */
- WRITEL(0x0, mfc_regs->e_mpeg4_options);
- WRITEL(0x0, mfc_regs->e_mpeg4_hec_period);
+ writel(0x0, mfc_regs->e_mpeg4_options);
+ writel(0x0, mfc_regs->e_mpeg4_hec_period);
mfc_debug_leave();
@@ -1179,19 +1174,19 @@ static int s5p_mfc_set_enc_params_h263(struct s5p_mfc_ctx *ctx)
reg = 0;
/** profile */
reg |= (0x1 << 4);
- WRITEL(reg, mfc_regs->e_picture_profile);
+ writel(reg, mfc_regs->e_picture_profile);
/* rate control config. */
- reg = READL(mfc_regs->e_rc_config);
+ reg = readl(mfc_regs->e_rc_config);
/** macroblock level rate control */
reg &= ~(0x1 << 8);
reg |= ((p->rc_mb & 0x1) << 8);
- WRITEL(reg, mfc_regs->e_rc_config);
+ writel(reg, mfc_regs->e_rc_config);
/** frame QP */
reg &= ~(0x3F);
reg |= p_h263->rc_frame_qp & 0x3F;
- WRITEL(reg, mfc_regs->e_rc_config);
+ writel(reg, mfc_regs->e_rc_config);
/* max & min value of QP */
reg = 0;
@@ -1199,16 +1194,16 @@ static int s5p_mfc_set_enc_params_h263(struct s5p_mfc_ctx *ctx)
reg |= ((p_h263->rc_max_qp & 0x3F) << 8);
/** min QP */
reg |= p_h263->rc_min_qp & 0x3F;
- WRITEL(reg, mfc_regs->e_rc_qp_bound);
+ writel(reg, mfc_regs->e_rc_qp_bound);
/* other QPs */
- WRITEL(0x0, mfc_regs->e_fixed_picture_qp);
+ writel(0x0, mfc_regs->e_fixed_picture_qp);
if (!p->rc_frame && !p->rc_mb) {
reg = 0;
reg |= ((p_h263->rc_b_frame_qp & 0x3F) << 16);
reg |= ((p_h263->rc_p_frame_qp & 0x3F) << 8);
reg |= p_h263->rc_frame_qp & 0x3F;
- WRITEL(reg, mfc_regs->e_fixed_picture_qp);
+ writel(reg, mfc_regs->e_fixed_picture_qp);
}
/* frame rate */
@@ -1216,16 +1211,16 @@ static int s5p_mfc_set_enc_params_h263(struct s5p_mfc_ctx *ctx)
reg = 0;
reg |= ((p->rc_framerate_num & 0xFFFF) << 16);
reg |= p->rc_framerate_denom & 0xFFFF;
- WRITEL(reg, mfc_regs->e_rc_frame_rate);
+ writel(reg, mfc_regs->e_rc_frame_rate);
}
/* vbv buffer size */
if (p->frame_skip_mode ==
V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
- WRITEL(p->vbv_size & 0xFFFF, mfc_regs->e_vbv_buffer_size);
+ writel(p->vbv_size & 0xFFFF, mfc_regs->e_vbv_buffer_size);
if (p->rc_frame)
- WRITEL(p->vbv_delay, mfc_regs->e_vbv_init_delay);
+ writel(p->vbv_delay, mfc_regs->e_vbv_init_delay);
}
mfc_debug_leave();
@@ -1247,57 +1242,57 @@ static int s5p_mfc_set_enc_params_vp8(struct s5p_mfc_ctx *ctx)
s5p_mfc_set_enc_params(ctx);
/* pictype : number of B */
- reg = READL(mfc_regs->e_gop_config);
+ reg = readl(mfc_regs->e_gop_config);
reg &= ~(0x3 << 16);
reg |= ((p->num_b_frame & 0x3) << 16);
- WRITEL(reg, mfc_regs->e_gop_config);
+ writel(reg, mfc_regs->e_gop_config);
/* profile - 0 ~ 3 */
reg = p_vp8->profile & 0x3;
- WRITEL(reg, mfc_regs->e_picture_profile);
+ writel(reg, mfc_regs->e_picture_profile);
/* rate control config. */
- reg = READL(mfc_regs->e_rc_config);
+ reg = readl(mfc_regs->e_rc_config);
/** macroblock level rate control */
reg &= ~(0x1 << 8);
reg |= ((p->rc_mb & 0x1) << 8);
- WRITEL(reg, mfc_regs->e_rc_config);
+ writel(reg, mfc_regs->e_rc_config);
/* frame rate */
if (p->rc_frame && p->rc_framerate_num && p->rc_framerate_denom) {
reg = 0;
reg |= ((p->rc_framerate_num & 0xFFFF) << 16);
reg |= p->rc_framerate_denom & 0xFFFF;
- WRITEL(reg, mfc_regs->e_rc_frame_rate);
+ writel(reg, mfc_regs->e_rc_frame_rate);
}
/* frame QP */
reg &= ~(0x7F);
reg |= p_vp8->rc_frame_qp & 0x7F;
- WRITEL(reg, mfc_regs->e_rc_config);
+ writel(reg, mfc_regs->e_rc_config);
/* other QPs */
- WRITEL(0x0, mfc_regs->e_fixed_picture_qp);
+ writel(0x0, mfc_regs->e_fixed_picture_qp);
if (!p->rc_frame && !p->rc_mb) {
reg = 0;
reg |= ((p_vp8->rc_p_frame_qp & 0x7F) << 8);
reg |= p_vp8->rc_frame_qp & 0x7F;
- WRITEL(reg, mfc_regs->e_fixed_picture_qp);
+ writel(reg, mfc_regs->e_fixed_picture_qp);
}
/* max QP */
reg = ((p_vp8->rc_max_qp & 0x7F) << 8);
/* min QP */
reg |= p_vp8->rc_min_qp & 0x7F;
- WRITEL(reg, mfc_regs->e_rc_qp_bound);
+ writel(reg, mfc_regs->e_rc_qp_bound);
/* vbv buffer size */
if (p->frame_skip_mode ==
V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
- WRITEL(p->vbv_size & 0xFFFF, mfc_regs->e_vbv_buffer_size);
+ writel(p->vbv_size & 0xFFFF, mfc_regs->e_vbv_buffer_size);
if (p->rc_frame)
- WRITEL(p->vbv_delay, mfc_regs->e_vbv_init_delay);
+ writel(p->vbv_delay, mfc_regs->e_vbv_init_delay);
}
/* VP8 specific params */
@@ -1319,7 +1314,7 @@ static int s5p_mfc_set_enc_params_vp8(struct s5p_mfc_ctx *ctx)
}
reg |= (val & 0xF) << 3;
reg |= (p_vp8->num_ref & 0x2);
- WRITEL(reg, mfc_regs->e_vp8_options);
+ writel(reg, mfc_regs->e_vp8_options);
mfc_debug_leave();
@@ -1338,9 +1333,9 @@ static int s5p_mfc_init_decode_v6(struct s5p_mfc_ctx *ctx)
mfc_debug(2, "InstNo: %d/%d\n", ctx->inst_no,
S5P_FIMV_CH_SEQ_HEADER_V6);
mfc_debug(2, "BUFs: %08x %08x %08x\n",
- READL(mfc_regs->d_cpb_buffer_addr),
- READL(mfc_regs->d_cpb_buffer_addr),
- READL(mfc_regs->d_cpb_buffer_addr));
+ readl(mfc_regs->d_cpb_buffer_addr),
+ readl(mfc_regs->d_cpb_buffer_addr),
+ readl(mfc_regs->d_cpb_buffer_addr));
/* FMO_ASO_CTRL - 0: Enable, 1: Disable */
reg |= (fmo_aso_ctrl << S5P_FIMV_D_OPT_FMO_ASO_CTRL_MASK_V6);
@@ -1351,11 +1346,11 @@ static int s5p_mfc_init_decode_v6(struct s5p_mfc_ctx *ctx)
* set to negative value. */
if (ctx->display_delay >= 0) {
reg |= (0x1 << S5P_FIMV_D_OPT_DDELAY_EN_SHIFT_V6);
- WRITEL(ctx->display_delay, mfc_regs->d_display_delay);
+ writel(ctx->display_delay, mfc_regs->d_display_delay);
}
if (IS_MFCV7_PLUS(dev) || IS_MFCV6_V2(dev)) {
- WRITEL(reg, mfc_regs->d_dec_options);
+ writel(reg, mfc_regs->d_dec_options);
reg = 0;
}
@@ -1370,22 +1365,22 @@ static int s5p_mfc_init_decode_v6(struct s5p_mfc_ctx *ctx)
reg |= (0x1 << S5P_FIMV_D_OPT_TILE_MODE_SHIFT_V6);
if (IS_MFCV7_PLUS(dev) || IS_MFCV6_V2(dev))
- WRITEL(reg, mfc_regs->d_init_buffer_options);
+ writel(reg, mfc_regs->d_init_buffer_options);
else
- WRITEL(reg, mfc_regs->d_dec_options);
+ writel(reg, mfc_regs->d_dec_options);
/* 0: NV12(CbCr), 1: NV21(CrCb) */
if (ctx->dst_fmt->fourcc == V4L2_PIX_FMT_NV21M)
- WRITEL(0x1, mfc_regs->pixel_format);
+ writel(0x1, mfc_regs->pixel_format);
else
- WRITEL(0x0, mfc_regs->pixel_format);
+ writel(0x0, mfc_regs->pixel_format);
/* sei parse */
- WRITEL(ctx->sei_fp_parse & 0x1, mfc_regs->d_sei_enable);
+ writel(ctx->sei_fp_parse & 0x1, mfc_regs->d_sei_enable);
- WRITEL(ctx->inst_no, mfc_regs->instance_id);
- s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev,
+ writel(ctx->inst_no, mfc_regs->instance_id);
+ s5p_mfc_hw_call_void(dev->mfc_cmds, cmd_host2risc, dev,
S5P_FIMV_CH_SEQ_HEADER_V6, NULL);
mfc_debug_leave();
@@ -1400,8 +1395,8 @@ static inline void s5p_mfc_set_flush(struct s5p_mfc_ctx *ctx, int flush)
if (flush) {
dev->curr_ctx = ctx->num;
s5p_mfc_clean_ctx_int_flags(ctx);
- WRITEL(ctx->inst_no, mfc_regs->instance_id);
- s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev,
+ writel(ctx->inst_no, mfc_regs->instance_id);
+ s5p_mfc_hw_call_void(dev->mfc_cmds, cmd_host2risc, dev,
S5P_FIMV_H2R_CMD_FLUSH_V6, NULL);
}
}
@@ -1413,19 +1408,19 @@ static int s5p_mfc_decode_one_frame_v6(struct s5p_mfc_ctx *ctx,
struct s5p_mfc_dev *dev = ctx->dev;
const struct s5p_mfc_regs *mfc_regs = dev->mfc_regs;
- WRITEL(ctx->dec_dst_flag, mfc_regs->d_available_dpb_flag_lower);
- WRITEL(ctx->slice_interface & 0x1, mfc_regs->d_slice_if_enable);
+ writel(ctx->dec_dst_flag, mfc_regs->d_available_dpb_flag_lower);
+ writel(ctx->slice_interface & 0x1, mfc_regs->d_slice_if_enable);
- WRITEL(ctx->inst_no, mfc_regs->instance_id);
+ writel(ctx->inst_no, mfc_regs->instance_id);
/* Issue different commands to instance basing on whether it
* is the last frame or not. */
switch (last_frame) {
case 0:
- s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev,
+ s5p_mfc_hw_call_void(dev->mfc_cmds, cmd_host2risc, dev,
S5P_FIMV_CH_FRAME_START_V6, NULL);
break;
case 1:
- s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev,
+ s5p_mfc_hw_call_void(dev->mfc_cmds, cmd_host2risc, dev,
S5P_FIMV_CH_LAST_FRAME_V6, NULL);
break;
default:
@@ -1458,12 +1453,12 @@ static int s5p_mfc_init_encode_v6(struct s5p_mfc_ctx *ctx)
/* Set stride lengths for v7 & above */
if (IS_MFCV7_PLUS(dev)) {
- WRITEL(ctx->img_width, mfc_regs->e_source_first_plane_stride);
- WRITEL(ctx->img_width, mfc_regs->e_source_second_plane_stride);
+ writel(ctx->img_width, mfc_regs->e_source_first_plane_stride);
+ writel(ctx->img_width, mfc_regs->e_source_second_plane_stride);
}
- WRITEL(ctx->inst_no, mfc_regs->instance_id);
- s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev,
+ writel(ctx->inst_no, mfc_regs->instance_id);
+ s5p_mfc_hw_call_void(dev->mfc_cmds, cmd_host2risc, dev,
S5P_FIMV_CH_SEQ_HEADER_V6, NULL);
return 0;
@@ -1479,7 +1474,7 @@ static int s5p_mfc_h264_set_aso_slice_order_v6(struct s5p_mfc_ctx *ctx)
if (p_h264->aso) {
for (i = 0; i < ARRAY_SIZE(p_h264->aso_slice_order); i++) {
- WRITEL(p_h264->aso_slice_order[i],
+ writel(p_h264->aso_slice_order[i],
mfc_regs->e_h264_aso_slice_order_0 + i * 4);
}
}
@@ -1501,8 +1496,8 @@ static int s5p_mfc_encode_one_frame_v6(struct s5p_mfc_ctx *ctx)
s5p_mfc_set_slice_mode(ctx);
- WRITEL(ctx->inst_no, mfc_regs->instance_id);
- s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev,
+ writel(ctx->inst_no, mfc_regs->instance_id);
+ s5p_mfc_hw_call_void(dev->mfc_cmds, cmd_host2risc, dev,
S5P_FIMV_CH_FRAME_START_V6, NULL);
mfc_debug(2, "--\n");
@@ -1877,15 +1872,15 @@ static void s5p_mfc_cleanup_queue_v6(struct list_head *lh, struct vb2_queue *vq)
static void s5p_mfc_clear_int_flags_v6(struct s5p_mfc_dev *dev)
{
const struct s5p_mfc_regs *mfc_regs = dev->mfc_regs;
- WRITEL(0, mfc_regs->risc2host_command);
- WRITEL(0, mfc_regs->risc2host_int);
+ writel(0, mfc_regs->risc2host_command);
+ writel(0, mfc_regs->risc2host_int);
}
static void s5p_mfc_write_info_v6(struct s5p_mfc_ctx *ctx, unsigned int data,
unsigned int ofs)
{
s5p_mfc_clock_on();
- WRITEL(data, (void *)ofs);
+ writel(data, (volatile void __iomem *)((unsigned long)ofs));
s5p_mfc_clock_off();
}
@@ -1895,7 +1890,7 @@ s5p_mfc_read_info_v6(struct s5p_mfc_ctx *ctx, unsigned int ofs)
int ret;
s5p_mfc_clock_on();
- ret = READL((void *)ofs);
+ ret = readl((volatile void __iomem *)((unsigned long)ofs));
s5p_mfc_clock_off();
return ret;
@@ -1903,51 +1898,51 @@ s5p_mfc_read_info_v6(struct s5p_mfc_ctx *ctx, unsigned int ofs)
static int s5p_mfc_get_dspl_y_adr_v6(struct s5p_mfc_dev *dev)
{
- return READL(dev->mfc_regs->d_display_first_plane_addr);
+ return readl(dev->mfc_regs->d_display_first_plane_addr);
}
static int s5p_mfc_get_dec_y_adr_v6(struct s5p_mfc_dev *dev)
{
- return READL(dev->mfc_regs->d_decoded_first_plane_addr);
+ return readl(dev->mfc_regs->d_decoded_first_plane_addr);
}
static int s5p_mfc_get_dspl_status_v6(struct s5p_mfc_dev *dev)
{
- return READL(dev->mfc_regs->d_display_status);
+ return readl(dev->mfc_regs->d_display_status);
}
static int s5p_mfc_get_dec_status_v6(struct s5p_mfc_dev *dev)
{
- return READL(dev->mfc_regs->d_decoded_status);
+ return readl(dev->mfc_regs->d_decoded_status);
}
static int s5p_mfc_get_dec_frame_type_v6(struct s5p_mfc_dev *dev)
{
- return READL(dev->mfc_regs->d_decoded_frame_type) &
+ return readl(dev->mfc_regs->d_decoded_frame_type) &
S5P_FIMV_DECODE_FRAME_MASK_V6;
}
static int s5p_mfc_get_disp_frame_type_v6(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_dev *dev = ctx->dev;
- return READL(dev->mfc_regs->d_display_frame_type) &
+ return readl(dev->mfc_regs->d_display_frame_type) &
S5P_FIMV_DECODE_FRAME_MASK_V6;
}
static int s5p_mfc_get_consumed_stream_v6(struct s5p_mfc_dev *dev)
{
- return READL(dev->mfc_regs->d_decoded_nal_size);
+ return readl(dev->mfc_regs->d_decoded_nal_size);
}
static int s5p_mfc_get_int_reason_v6(struct s5p_mfc_dev *dev)
{
- return READL(dev->mfc_regs->risc2host_command) &
+ return readl(dev->mfc_regs->risc2host_command) &
S5P_FIMV_RISC2HOST_CMD_MASK;
}
static int s5p_mfc_get_int_err_v6(struct s5p_mfc_dev *dev)
{
- return READL(dev->mfc_regs->error_code);
+ return readl(dev->mfc_regs->error_code);
}
static int s5p_mfc_err_dec_v6(unsigned int err)
@@ -1962,87 +1957,87 @@ static int s5p_mfc_err_dspl_v6(unsigned int err)
static int s5p_mfc_get_img_width_v6(struct s5p_mfc_dev *dev)
{
- return READL(dev->mfc_regs->d_display_frame_width);
+ return readl(dev->mfc_regs->d_display_frame_width);
}
static int s5p_mfc_get_img_height_v6(struct s5p_mfc_dev *dev)
{
- return READL(dev->mfc_regs->d_display_frame_height);
+ return readl(dev->mfc_regs->d_display_frame_height);
}
static int s5p_mfc_get_dpb_count_v6(struct s5p_mfc_dev *dev)
{
- return READL(dev->mfc_regs->d_min_num_dpb);
+ return readl(dev->mfc_regs->d_min_num_dpb);
}
static int s5p_mfc_get_mv_count_v6(struct s5p_mfc_dev *dev)
{
- return READL(dev->mfc_regs->d_min_num_mv);
+ return readl(dev->mfc_regs->d_min_num_mv);
}
static int s5p_mfc_get_inst_no_v6(struct s5p_mfc_dev *dev)
{
- return READL(dev->mfc_regs->ret_instance_id);
+ return readl(dev->mfc_regs->ret_instance_id);
}
static int s5p_mfc_get_enc_dpb_count_v6(struct s5p_mfc_dev *dev)
{
- return READL(dev->mfc_regs->e_num_dpb);
+ return readl(dev->mfc_regs->e_num_dpb);
}
static int s5p_mfc_get_enc_strm_size_v6(struct s5p_mfc_dev *dev)
{
- return READL(dev->mfc_regs->e_stream_size);
+ return readl(dev->mfc_regs->e_stream_size);
}
static int s5p_mfc_get_enc_slice_type_v6(struct s5p_mfc_dev *dev)
{
- return READL(dev->mfc_regs->e_slice_type);
+ return readl(dev->mfc_regs->e_slice_type);
}
static int s5p_mfc_get_enc_pic_count_v6(struct s5p_mfc_dev *dev)
{
- return READL(dev->mfc_regs->e_picture_count);
+ return readl(dev->mfc_regs->e_picture_count);
}
static int s5p_mfc_get_sei_avail_status_v6(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_dev *dev = ctx->dev;
- return READL(dev->mfc_regs->d_frame_pack_sei_avail);
+ return readl(dev->mfc_regs->d_frame_pack_sei_avail);
}
static int s5p_mfc_get_mvc_num_views_v6(struct s5p_mfc_dev *dev)
{
- return READL(dev->mfc_regs->d_mvc_num_views);
+ return readl(dev->mfc_regs->d_mvc_num_views);
}
static int s5p_mfc_get_mvc_view_id_v6(struct s5p_mfc_dev *dev)
{
- return READL(dev->mfc_regs->d_mvc_view_id);
+ return readl(dev->mfc_regs->d_mvc_view_id);
}
static unsigned int s5p_mfc_get_pic_type_top_v6(struct s5p_mfc_ctx *ctx)
{
return s5p_mfc_read_info_v6(ctx,
- (unsigned int) ctx->dev->mfc_regs->d_ret_picture_tag_top);
+ (__force unsigned long) ctx->dev->mfc_regs->d_ret_picture_tag_top);
}
static unsigned int s5p_mfc_get_pic_type_bot_v6(struct s5p_mfc_ctx *ctx)
{
return s5p_mfc_read_info_v6(ctx,
- (unsigned int) ctx->dev->mfc_regs->d_ret_picture_tag_bot);
+ (__force unsigned long) ctx->dev->mfc_regs->d_ret_picture_tag_bot);
}
static unsigned int s5p_mfc_get_crop_info_h_v6(struct s5p_mfc_ctx *ctx)
{
return s5p_mfc_read_info_v6(ctx,
- (unsigned int) ctx->dev->mfc_regs->d_display_crop_info1);
+ (__force unsigned long) ctx->dev->mfc_regs->d_display_crop_info1);
}
static unsigned int s5p_mfc_get_crop_info_v_v6(struct s5p_mfc_ctx *ctx)
{
return s5p_mfc_read_info_v6(ctx,
- (unsigned int) ctx->dev->mfc_regs->d_display_crop_info2);
+ (__force unsigned long) ctx->dev->mfc_regs->d_display_crop_info2);
}
static struct s5p_mfc_regs mfc_regs;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
index b6a8be97a96c..826c48945bf5 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
@@ -21,7 +21,7 @@
#include "s5p_mfc_pm.h"
#define MFC_GATE_CLK_NAME "mfc"
-#define MFC_SCLK_NAME "sclk-mfc"
+#define MFC_SCLK_NAME "sclk_mfc"
#define MFC_SCLK_RATE (200 * 1000000)
#define CLK_DEBUG
diff --git a/drivers/media/platform/s5p-tv/Kconfig b/drivers/media/platform/s5p-tv/Kconfig
index 369a4c191e18..a9d56f8936b4 100644
--- a/drivers/media/platform/s5p-tv/Kconfig
+++ b/drivers/media/platform/s5p-tv/Kconfig
@@ -8,7 +8,8 @@
config VIDEO_SAMSUNG_S5P_TV
bool "Samsung TV driver for S5P platform"
- depends on (PLAT_S5P || ARCH_EXYNOS) && PM_RUNTIME
+ depends on PM_RUNTIME
+ depends on PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST
default n
---help---
Say Y here to enable selecting the TV output devices for
@@ -70,6 +71,7 @@ config VIDEO_SAMSUNG_S5P_MIXER
tristate "Samsung Mixer and Video Processor Driver"
depends on VIDEO_DEV && VIDEO_V4L2
depends on VIDEO_SAMSUNG_S5P_TV
+ depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
help
Say Y here if you want support for the Mixer in Samsung S5P SoCs.
diff --git a/drivers/media/platform/s5p-tv/hdmi_drv.c b/drivers/media/platform/s5p-tv/hdmi_drv.c
index 754740f4b671..37c8bd694c5f 100644
--- a/drivers/media/platform/s5p-tv/hdmi_drv.c
+++ b/drivers/media/platform/s5p-tv/hdmi_drv.c
@@ -615,7 +615,7 @@ static int hdmi_s_power(struct v4l2_subdev *sd, int on)
else
ret = pm_runtime_put_sync(hdev->dev);
/* only values < 0 indicate errors */
- return IS_ERR_VALUE(ret) ? ret : 0;
+ return ret < 0 ? ret : 0;
}
static int hdmi_s_dv_timings(struct v4l2_subdev *sd,
diff --git a/drivers/media/platform/s5p-tv/sdo_drv.c b/drivers/media/platform/s5p-tv/sdo_drv.c
index 5a7c3796f22e..72cf892dd008 100644
--- a/drivers/media/platform/s5p-tv/sdo_drv.c
+++ b/drivers/media/platform/s5p-tv/sdo_drv.c
@@ -190,7 +190,7 @@ static int sdo_s_power(struct v4l2_subdev *sd, int on)
ret = pm_runtime_put_sync(dev);
/* only values < 0 indicate errors */
- return IS_ERR_VALUE(ret) ? ret : 0;
+ return ret < 0 ? ret : 0;
}
static int sdo_streamon(struct sdo_device *sdev)
diff --git a/drivers/media/platform/s5p-tv/sii9234_drv.c b/drivers/media/platform/s5p-tv/sii9234_drv.c
index 3dd762e5b67e..db8c17bb4aaa 100644
--- a/drivers/media/platform/s5p-tv/sii9234_drv.c
+++ b/drivers/media/platform/s5p-tv/sii9234_drv.c
@@ -289,7 +289,7 @@ static int sii9234_s_power(struct v4l2_subdev *sd, int on)
else
ret = pm_runtime_put(&ctx->client->dev);
/* only values < 0 indicate errors */
- return IS_ERR_VALUE(ret) ? ret : 0;
+ return ret < 0 ? ret : 0;
}
static int sii9234_s_stream(struct v4l2_subdev *sd, int enable)
diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c
index 8dc279d4d561..be3b3bc71a0f 100644
--- a/drivers/media/platform/sh_veu.c
+++ b/drivers/media/platform/sh_veu.c
@@ -26,6 +26,7 @@
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-image-sizes.h>
#include <media/videobuf2-dma-contig.h>
#define VEU_STR 0x00 /* start register */
@@ -135,9 +136,6 @@ enum sh_veu_fmt_idx {
SH_VEU_FMT_RGB24,
};
-#define VGA_WIDTH 640
-#define VGA_HEIGHT 480
-
#define DEFAULT_IN_WIDTH VGA_WIDTH
#define DEFAULT_IN_HEIGHT VGA_HEIGHT
#define DEFAULT_IN_FMTIDX SH_VEU_FMT_NV12
diff --git a/drivers/media/platform/soc_camera/Kconfig b/drivers/media/platform/soc_camera/Kconfig
index 6540847f4e1d..f2776cd415ca 100644
--- a/drivers/media/platform/soc_camera/Kconfig
+++ b/drivers/media/platform/soc_camera/Kconfig
@@ -20,6 +20,8 @@ config SOC_CAMERA_PLATFORM
config VIDEO_MX3
tristate "i.MX3x Camera Sensor Interface driver"
depends on VIDEO_DEV && MX3_IPU && SOC_CAMERA
+ depends on MX3_IPU || COMPILE_TEST
+ depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
---help---
This is a v4l2 driver for the i.MX3x Camera Sensor Interface
@@ -35,6 +37,7 @@ config VIDEO_RCAR_VIN
tristate "R-Car Video Input (VIN) support"
depends on VIDEO_DEV && SOC_CAMERA
depends on ARCH_SHMOBILE || COMPILE_TEST
+ depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
select SOC_CAMERA_SCALE_CROP
---help---
@@ -51,6 +54,7 @@ config VIDEO_SH_MOBILE_CEU
tristate "SuperH Mobile CEU Interface driver"
depends on VIDEO_DEV && SOC_CAMERA && HAS_DMA && HAVE_CLK
depends on ARCH_SHMOBILE || SUPERH || COMPILE_TEST
+ depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
select SOC_CAMERA_SCALE_CROP
---help---
@@ -58,7 +62,9 @@ config VIDEO_SH_MOBILE_CEU
config VIDEO_OMAP1
tristate "OMAP1 Camera Interface driver"
- depends on VIDEO_DEV && ARCH_OMAP1 && SOC_CAMERA
+ depends on VIDEO_DEV && SOC_CAMERA
+ depends on ARCH_OMAP1
+ depends on HAS_DMA
select VIDEOBUF_DMA_CONTIG
select VIDEOBUF_DMA_SG
---help---
@@ -66,14 +72,18 @@ config VIDEO_OMAP1
config VIDEO_MX2
tristate "i.MX27 Camera Sensor Interface driver"
- depends on VIDEO_DEV && SOC_CAMERA && SOC_IMX27
+ depends on VIDEO_DEV && SOC_CAMERA
+ depends on SOC_IMX27 || COMPILE_TEST
+ depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
---help---
This is a v4l2 driver for the i.MX27 Camera Sensor Interface
config VIDEO_ATMEL_ISI
tristate "ATMEL Image Sensor Interface (ISI) support"
- depends on VIDEO_DEV && SOC_CAMERA && ARCH_AT91
+ depends on VIDEO_DEV && SOC_CAMERA
+ depends on ARCH_AT91 || COMPILE_TEST
+ depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
---help---
This module makes the ATMEL Image Sensor Interface available
diff --git a/drivers/media/platform/soc_camera/atmel-isi.c b/drivers/media/platform/soc_camera/atmel-isi.c
index 3408b045b3f1..c5291b001057 100644
--- a/drivers/media/platform/soc_camera/atmel-isi.c
+++ b/drivers/media/platform/soc_camera/atmel-isi.c
@@ -54,7 +54,7 @@ static void set_dma_ctrl(struct fbd *fb_desc, u32 ctrl)
struct isi_dma_desc {
struct list_head list;
struct fbd *p_fbd;
- u32 fbd_phys;
+ dma_addr_t fbd_phys;
};
/* Frame buffer data */
@@ -75,7 +75,7 @@ struct atmel_isi {
/* Allocate descriptors for dma buffer use */
struct fbd *p_fb_descriptors;
- u32 fb_descriptors_phys;
+ dma_addr_t fb_descriptors_phys;
struct list_head dma_desc_head;
struct isi_dma_desc dma_desc[MAX_BUFFER_NUM];
@@ -169,7 +169,7 @@ static irqreturn_t atmel_isi_handle_streaming(struct atmel_isi *isi)
isi->active = list_entry(isi->video_buffer_list.next,
struct frame_buffer, list);
isi_writel(isi, ISI_DMA_C_DSCR,
- isi->active->p_dma_desc->fbd_phys);
+ (u32)isi->active->p_dma_desc->fbd_phys);
isi_writel(isi, ISI_DMA_C_CTRL,
ISI_DMA_CTRL_FETCH | ISI_DMA_CTRL_DONE);
isi_writel(isi, ISI_DMA_CHER, ISI_DMA_CHSR_C_CH);
@@ -346,7 +346,7 @@ static void start_dma(struct atmel_isi *isi, struct frame_buffer *buffer)
return;
}
- isi_writel(isi, ISI_DMA_C_DSCR, buffer->p_dma_desc->fbd_phys);
+ isi_writel(isi, ISI_DMA_C_DSCR, (u32)buffer->p_dma_desc->fbd_phys);
isi_writel(isi, ISI_DMA_C_CTRL, ISI_DMA_CTRL_FETCH | ISI_DMA_CTRL_DONE);
isi_writel(isi, ISI_DMA_CHER, ISI_DMA_CHSR_C_CH);
@@ -384,7 +384,6 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct atmel_isi *isi = ici->priv;
- u32 sr = 0;
int ret;
/* Reset ISI */
@@ -394,11 +393,11 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
return ret;
}
/* Disable all interrupts */
- isi_writel(isi, ISI_INTDIS, ~0UL);
+ isi_writel(isi, ISI_INTDIS, (u32)~0UL);
spin_lock_irq(&isi->lock);
/* Clear any pending interrupt */
- sr = isi_readl(isi, ISI_STATUS);
+ isi_readl(isi, ISI_STATUS);
if (count)
start_dma(isi, isi->active);
diff --git a/drivers/media/platform/soc_camera/mx2_camera.c b/drivers/media/platform/soc_camera/mx2_camera.c
index b40bc2e5ba47..2347612a4cc1 100644
--- a/drivers/media/platform/soc_camera/mx2_camera.c
+++ b/drivers/media/platform/soc_camera/mx2_camera.c
@@ -809,10 +809,9 @@ static int mx2_camera_init_videobuf(struct vb2_queue *q,
static int mx27_camera_emma_prp_reset(struct mx2_camera_dev *pcdev)
{
- u32 cntl;
int count = 0;
- cntl = readl(pcdev->base_emma + PRP_CNTL);
+ readl(pcdev->base_emma + PRP_CNTL);
writel(PRP_CNTL_SWRST, pcdev->base_emma + PRP_CNTL);
while (count++ < 100) {
if (!(readl(pcdev->base_emma + PRP_CNTL) & PRP_CNTL_SWRST))
@@ -1003,7 +1002,7 @@ static int mx2_emmaprp_resize(struct mx2_camera_dev *pcdev,
struct v4l2_mbus_framefmt *mf_in,
struct v4l2_pix_format *pix_out, bool apply)
{
- int num, den;
+ unsigned int num, den;
unsigned long m;
int i, dir;
diff --git a/drivers/media/platform/soc_camera/pxa_camera.c b/drivers/media/platform/soc_camera/pxa_camera.c
index 64dc80ccd6f9..66178fc9f9eb 100644
--- a/drivers/media/platform/soc_camera/pxa_camera.c
+++ b/drivers/media/platform/soc_camera/pxa_camera.c
@@ -1694,7 +1694,7 @@ static int pxa_camera_pdata_from_dt(struct device *dev,
break;
default:
break;
- };
+ }
if (ep.bus.parallel.flags & V4L2_MBUS_MASTER)
pcdev->platform_flags |= PXA_CAMERA_MASTER;
diff --git a/drivers/media/platform/soc_camera/rcar_vin.c b/drivers/media/platform/soc_camera/rcar_vin.c
index 85d579f65f52..20defcb8b31b 100644
--- a/drivers/media/platform/soc_camera/rcar_vin.c
+++ b/drivers/media/platform/soc_camera/rcar_vin.c
@@ -981,7 +981,7 @@ static int rcar_vin_get_formats(struct soc_camera_device *icd, unsigned int idx,
if (shift == 3) {
dev_err(dev,
- "Failed to configure the client below %ux%x\n",
+ "Failed to configure the client below %ux%u\n",
mf.width, mf.height);
return -EIO;
}
@@ -1502,7 +1502,7 @@ static int rcar_vin_probe(struct platform_device *pdev)
} else {
priv->ici.nr = of_alias_get_id(pdev->dev.of_node, "vin");
priv->chip = (enum chip_id)match->data;
- };
+ }
spin_lock_init(&priv->lock);
INIT_LIST_HEAD(&priv->capture);
diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
index f4308fed5431..8e61b976da19 100644
--- a/drivers/media/platform/soc_camera/soc_camera.c
+++ b/drivers/media/platform/soc_camera/soc_camera.c
@@ -437,6 +437,22 @@ static int soc_camera_prepare_buf(struct file *file, void *priv,
return vb2_prepare_buf(&icd->vb2_vidq, b);
}
+static int soc_camera_expbuf(struct file *file, void *priv,
+ struct v4l2_exportbuffer *p)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
+ if (icd->streamer != file)
+ return -EBUSY;
+
+ /* videobuf2 only */
+ if (ici->ops->init_videobuf)
+ return -EINVAL;
+ else
+ return vb2_expbuf(&icd->vb2_vidq, p);
+}
+
/* Always entered with .host_lock held */
static int soc_camera_init_user_formats(struct soc_camera_device *icd)
{
@@ -1347,13 +1363,11 @@ static int soc_camera_i2c_init(struct soc_camera_device *icd,
return -ENODEV;
}
- ssdd = kzalloc(sizeof(*ssdd), GFP_KERNEL);
+ ssdd = kmemdup(&sdesc->subdev_desc, sizeof(*ssdd), GFP_KERNEL);
if (!ssdd) {
ret = -ENOMEM;
goto ealloc;
}
-
- memcpy(ssdd, &sdesc->subdev_desc, sizeof(*ssdd));
/*
* In synchronous case we request regulators ourselves in
* soc_camera_pdrv_probe(), make sure the subdevice driver doesn't try
@@ -2085,6 +2099,7 @@ static const struct v4l2_ioctl_ops soc_camera_ioctl_ops = {
.vidioc_dqbuf = soc_camera_dqbuf,
.vidioc_create_bufs = soc_camera_create_bufs,
.vidioc_prepare_buf = soc_camera_prepare_buf,
+ .vidioc_expbuf = soc_camera_expbuf,
.vidioc_streamon = soc_camera_streamon,
.vidioc_streamoff = soc_camera_streamoff,
.vidioc_cropcap = soc_camera_cropcap,
diff --git a/drivers/media/platform/ti-vpe/vpdma.c b/drivers/media/platform/ti-vpe/vpdma.c
index a51a01359805..3e2e3a33e6ed 100644
--- a/drivers/media/platform/ti-vpe/vpdma.c
+++ b/drivers/media/platform/ti-vpe/vpdma.c
@@ -329,7 +329,7 @@ int vpdma_alloc_desc_buf(struct vpdma_buf *buf, size_t size)
if (!buf->addr)
return -ENOMEM;
- WARN_ON((u32) buf->addr & VPDMA_DESC_ALIGN);
+ WARN_ON(((unsigned long)buf->addr & VPDMA_DESC_ALIGN) != 0);
return 0;
}
@@ -584,7 +584,7 @@ static void dump_dtd(struct vpdma_dtd *dtd)
pr_debug("word1: line_length = %d, xfer_height = %d\n",
dtd_get_line_length(dtd), dtd_get_xfer_height(dtd));
- pr_debug("word2: start_addr = 0x%08x\n", dtd->start_addr);
+ pr_debug("word2: start_addr = %pad\n", &dtd->start_addr);
pr_debug("word3: pkt_type = %d, mode = %d, dir = %d, chan = %d, "
"pri = %d, next_chan = %d\n", dtd_get_pkt_type(dtd),
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
index 972f43f69206..9a081c291159 100644
--- a/drivers/media/platform/ti-vpe/vpe.c
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -31,6 +31,7 @@
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <linux/log2.h>
+#include <linux/sizes.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ctrls.h>
@@ -138,12 +139,12 @@ struct vpe_dei_regs {
* default expert DEI register values, unlikely to be modified.
*/
static const struct vpe_dei_regs dei_regs = {
- 0x020C0804u,
- 0x0118100Fu,
- 0x08040200u,
- 0x1010100Cu,
- 0x10101010u,
- 0x10101010u,
+ .mdt_spacial_freq_thr_reg = 0x020C0804u,
+ .edi_config_reg = 0x0118100Fu,
+ .edi_lut_reg0 = 0x08040200u,
+ .edi_lut_reg1 = 0x1010100Cu,
+ .edi_lut_reg2 = 0x10101010u,
+ .edi_lut_reg3 = 0x10101010u,
};
/*
@@ -834,10 +835,10 @@ static int set_srcdst_params(struct vpe_ctx *ctx)
VPDMA_STRIDE_ALIGN);
mv_buf_size = bytes_per_line * s_q_data->height;
- ctx->deinterlacing = 1;
+ ctx->deinterlacing = true;
src_h <<= 1;
} else {
- ctx->deinterlacing = 0;
+ ctx->deinterlacing = false;
mv_buf_size = 0;
}
@@ -2343,8 +2344,7 @@ v4l2_dev_unreg:
static int vpe_remove(struct platform_device *pdev)
{
- struct vpe_dev *dev =
- (struct vpe_dev *) platform_get_drvdata(pdev);
+ struct vpe_dev *dev = platform_get_drvdata(pdev);
v4l2_info(&dev->v4l2_dev, "Removing " VPE_MODULE_NAME);
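The dei_regs table above gains designated initializers, tying each magic value to its field name instead of to the field order of struct vpe_dei_regs. The same idiom in a minimal sketch, with a hypothetical struct and illustrative values:

        static const struct foo_regs defaults = {
                .config_reg = 0x0118100F,       /* field order no longer matters */
                .lut_reg0   = 0x08040200,
        };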
diff --git a/drivers/media/platform/via-camera.c b/drivers/media/platform/via-camera.c
index b4f9d03636e3..ae6870cb8339 100644
--- a/drivers/media/platform/via-camera.c
+++ b/drivers/media/platform/via-camera.c
@@ -18,6 +18,7 @@
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-ctrls.h>
+#include <media/v4l2-image-sizes.h>
#include <media/ov7670.h>
#include <media/videobuf-dma-sg.h>
#include <linux/delay.h>
@@ -49,14 +50,6 @@ MODULE_PARM_DESC(override_serial,
"to force-enable the camera.");
/*
- * Basic window sizes.
- */
-#define VGA_WIDTH 640
-#define VGA_HEIGHT 480
-#define QCIF_WIDTH 176
-#define QCIF_HEIGHT 144
-
-/*
* The structure describing our camera.
*/
enum viacam_opstate { S_IDLE = 0, S_RUNNING = 1 };
@@ -89,7 +82,7 @@ struct via_camera {
* live in frame buffer memory, so we don't call them "DMA".
*/
unsigned int cb_offsets[3]; /* offsets into fb mem */
- u8 *cb_addrs[3]; /* Kernel-space addresses */
+ u8 __iomem *cb_addrs[3]; /* Kernel-space addresses */
int n_cap_bufs; /* How many are we using? */
int next_buf;
struct videobuf_queue vb_queue;
@@ -1283,7 +1276,7 @@ static bool viacam_serial_is_enabled(void)
VIACAM_SERIAL_CREG, &cbyte);
if ((cbyte & VIACAM_SERIAL_BIT) == 0)
return false; /* Not enabled */
- if (override_serial == 0) {
+ if (!override_serial) {
printk(KERN_NOTICE "Via camera: serial port is enabled, " \
"refusing to load.\n");
printk(KERN_NOTICE "Specify override_serial=1 to force " \
diff --git a/drivers/media/platform/vivi.c b/drivers/media/platform/vivi.c
deleted file mode 100644
index 80333714ffa7..000000000000
--- a/drivers/media/platform/vivi.c
+++ /dev/null
@@ -1,1542 +0,0 @@
-/*
- * Virtual Video driver - This code emulates a real video device with v4l2 api
- *
- * Copyright (c) 2006 by:
- * Mauro Carvalho Chehab <mchehab--a.t--infradead.org>
- * Ted Walther <ted--a.t--enumera.com>
- * John Sokol <sokol--a.t--videotechnology.com>
- * http://v4l.videotechnology.com/
- *
- * Conversion to videobuf2 by Pawel Osciak & Marek Szyprowski
- * Copyright (c) 2010 Samsung Electronics
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the BSD Licence, GNU General Public License
- * as published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version
- */
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/font.h>
-#include <linux/mutex.h>
-#include <linux/videodev2.h>
-#include <linux/kthread.h>
-#include <linux/freezer.h>
-#include <media/videobuf2-vmalloc.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-ioctl.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-fh.h>
-#include <media/v4l2-event.h>
-#include <media/v4l2-common.h>
-
-#define VIVI_MODULE_NAME "vivi"
-
-/* Maximum allowed frame rate
- *
- * Vivi will allow setting timeperframe in [1/FPS_MAX - FPS_MAX/1] range.
- *
- * Ideally FPS_MAX should be infinity, i.e. practically UINT_MAX, but that
- * might hit application errors when they manipulate these values.
- *
- * Besides, for tpf < 1ms image-generation logic should be changed, to avoid
- * producing frames with equal content.
- */
-#define FPS_MAX 1000
-
-#define MAX_WIDTH 1920
-#define MAX_HEIGHT 1200
-
-#define VIVI_VERSION "0.8.1"
-
-MODULE_DESCRIPTION("Video Technology Magazine Virtual Video Capture Board");
-MODULE_AUTHOR("Mauro Carvalho Chehab, Ted Walther and John Sokol");
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(VIVI_VERSION);
-
-static unsigned video_nr = -1;
-module_param(video_nr, uint, 0644);
-MODULE_PARM_DESC(video_nr, "videoX start number, -1 is autodetect");
-
-static unsigned n_devs = 1;
-module_param(n_devs, uint, 0644);
-MODULE_PARM_DESC(n_devs, "number of video devices to create");
-
-static unsigned debug;
-module_param(debug, uint, 0644);
-MODULE_PARM_DESC(debug, "activates debug info");
-
-/* Global font descriptor */
-static const u8 *font8x16;
-
-/* timeperframe: min/max and default */
-static const struct v4l2_fract
- tpf_min = {.numerator = 1, .denominator = FPS_MAX},
- tpf_max = {.numerator = FPS_MAX, .denominator = 1},
- tpf_default = {.numerator = 1001, .denominator = 30000}; /* NTSC */
-
-#define dprintk(dev, level, fmt, arg...) \
- v4l2_dbg(level, debug, &dev->v4l2_dev, fmt, ## arg)
-
-/* ------------------------------------------------------------------
- Basic structures
- ------------------------------------------------------------------*/
-
-struct vivi_fmt {
- const char *name;
- u32 fourcc; /* v4l2 format id */
- u8 depth;
- bool is_yuv;
-};
-
-static const struct vivi_fmt formats[] = {
- {
- .name = "4:2:2, packed, YUYV",
- .fourcc = V4L2_PIX_FMT_YUYV,
- .depth = 16,
- .is_yuv = true,
- },
- {
- .name = "4:2:2, packed, UYVY",
- .fourcc = V4L2_PIX_FMT_UYVY,
- .depth = 16,
- .is_yuv = true,
- },
- {
- .name = "4:2:2, packed, YVYU",
- .fourcc = V4L2_PIX_FMT_YVYU,
- .depth = 16,
- .is_yuv = true,
- },
- {
- .name = "4:2:2, packed, VYUY",
- .fourcc = V4L2_PIX_FMT_VYUY,
- .depth = 16,
- .is_yuv = true,
- },
- {
- .name = "RGB565 (LE)",
- .fourcc = V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */
- .depth = 16,
- },
- {
- .name = "RGB565 (BE)",
- .fourcc = V4L2_PIX_FMT_RGB565X, /* rrrrrggg gggbbbbb */
- .depth = 16,
- },
- {
- .name = "RGB555 (LE)",
- .fourcc = V4L2_PIX_FMT_RGB555, /* gggbbbbb arrrrrgg */
- .depth = 16,
- },
- {
- .name = "RGB555 (BE)",
- .fourcc = V4L2_PIX_FMT_RGB555X, /* arrrrrgg gggbbbbb */
- .depth = 16,
- },
- {
- .name = "RGB24 (LE)",
- .fourcc = V4L2_PIX_FMT_RGB24, /* rgb */
- .depth = 24,
- },
- {
- .name = "RGB24 (BE)",
- .fourcc = V4L2_PIX_FMT_BGR24, /* bgr */
- .depth = 24,
- },
- {
- .name = "RGB32 (LE)",
- .fourcc = V4L2_PIX_FMT_RGB32, /* argb */
- .depth = 32,
- },
- {
- .name = "RGB32 (BE)",
- .fourcc = V4L2_PIX_FMT_BGR32, /* bgra */
- .depth = 32,
- },
-};
-
-static const struct vivi_fmt *__get_format(u32 pixelformat)
-{
- const struct vivi_fmt *fmt;
- unsigned int k;
-
- for (k = 0; k < ARRAY_SIZE(formats); k++) {
- fmt = &formats[k];
- if (fmt->fourcc == pixelformat)
- break;
- }
-
- if (k == ARRAY_SIZE(formats))
- return NULL;
-
- return &formats[k];
-}
-
-static const struct vivi_fmt *get_format(struct v4l2_format *f)
-{
- return __get_format(f->fmt.pix.pixelformat);
-}
-
-/* buffer for one video frame */
-struct vivi_buffer {
- /* common v4l buffer stuff -- must be first */
- struct vb2_buffer vb;
- struct list_head list;
-};
-
-struct vivi_dmaqueue {
- struct list_head active;
-
- /* thread for generating video stream*/
- struct task_struct *kthread;
- wait_queue_head_t wq;
- /* Counters to control fps rate */
- int frame;
- int ini_jiffies;
-};
-
-static LIST_HEAD(vivi_devlist);
-
-struct vivi_dev {
- struct list_head vivi_devlist;
- struct v4l2_device v4l2_dev;
- struct v4l2_ctrl_handler ctrl_handler;
- struct video_device vdev;
-
- /* controls */
- struct v4l2_ctrl *brightness;
- struct v4l2_ctrl *contrast;
- struct v4l2_ctrl *saturation;
- struct v4l2_ctrl *hue;
- struct {
- /* autogain/gain cluster */
- struct v4l2_ctrl *autogain;
- struct v4l2_ctrl *gain;
- };
- struct v4l2_ctrl *volume;
- struct v4l2_ctrl *alpha;
- struct v4l2_ctrl *button;
- struct v4l2_ctrl *boolean;
- struct v4l2_ctrl *int32;
- struct v4l2_ctrl *int64;
- struct v4l2_ctrl *menu;
- struct v4l2_ctrl *string;
- struct v4l2_ctrl *bitmask;
- struct v4l2_ctrl *int_menu;
-
- spinlock_t slock;
- struct mutex mutex;
-
- struct vivi_dmaqueue vidq;
-
- /* Several counters */
- unsigned ms;
- unsigned long jiffies;
- unsigned button_pressed;
-
- int mv_count; /* Controls bars movement */
-
- /* Input Number */
- int input;
-
- /* video capture */
- const struct vivi_fmt *fmt;
- struct v4l2_fract timeperframe;
- unsigned int width, height;
- struct vb2_queue vb_vidq;
- unsigned int seq_count;
-
- u8 bars[9][3];
- u8 line[MAX_WIDTH * 8] __attribute__((__aligned__(4)));
- unsigned int pixelsize;
- u8 alpha_component;
- u32 textfg, textbg;
-};
-
-/* ------------------------------------------------------------------
- DMA and thread functions
- ------------------------------------------------------------------*/
-
-/* Bars and Colors should match positions */
-
-enum colors {
- WHITE,
- AMBER,
- CYAN,
- GREEN,
- MAGENTA,
- RED,
- BLUE,
- BLACK,
- TEXT_BLACK,
-};
-
-/* R G B */
-#define COLOR_WHITE {204, 204, 204}
-#define COLOR_AMBER {208, 208, 0}
-#define COLOR_CYAN { 0, 206, 206}
-#define COLOR_GREEN { 0, 239, 0}
-#define COLOR_MAGENTA {239, 0, 239}
-#define COLOR_RED {205, 0, 0}
-#define COLOR_BLUE { 0, 0, 255}
-#define COLOR_BLACK { 0, 0, 0}
-
-struct bar_std {
- u8 bar[9][3];
-};
-
-/* Maximum number of bars are 10 - otherwise, the input print code
- should be modified */
-static const struct bar_std bars[] = {
- { /* Standard ITU-R color bar sequence */
- { COLOR_WHITE, COLOR_AMBER, COLOR_CYAN, COLOR_GREEN,
- COLOR_MAGENTA, COLOR_RED, COLOR_BLUE, COLOR_BLACK, COLOR_BLACK }
- }, {
- { COLOR_WHITE, COLOR_AMBER, COLOR_BLACK, COLOR_WHITE,
- COLOR_AMBER, COLOR_BLACK, COLOR_WHITE, COLOR_AMBER, COLOR_BLACK }
- }, {
- { COLOR_WHITE, COLOR_CYAN, COLOR_BLACK, COLOR_WHITE,
- COLOR_CYAN, COLOR_BLACK, COLOR_WHITE, COLOR_CYAN, COLOR_BLACK }
- }, {
- { COLOR_WHITE, COLOR_GREEN, COLOR_BLACK, COLOR_WHITE,
- COLOR_GREEN, COLOR_BLACK, COLOR_WHITE, COLOR_GREEN, COLOR_BLACK }
- },
-};
-
-#define NUM_INPUTS ARRAY_SIZE(bars)
-
-#define TO_Y(r, g, b) \
- (((16829 * r + 33039 * g + 6416 * b + 32768) >> 16) + 16)
-/* RGB to V(Cr) Color transform */
-#define TO_V(r, g, b) \
- (((28784 * r - 24103 * g - 4681 * b + 32768) >> 16) + 128)
-/* RGB to U(Cb) Color transform */
-#define TO_U(r, g, b) \
- (((-9714 * r - 19070 * g + 28784 * b + 32768) >> 16) + 128)
-
-/* precalculate color bar values to speed up rendering */
-static void precalculate_bars(struct vivi_dev *dev)
-{
- u8 r, g, b;
- int k, is_yuv;
-
- for (k = 0; k < 9; k++) {
- r = bars[dev->input].bar[k][0];
- g = bars[dev->input].bar[k][1];
- b = bars[dev->input].bar[k][2];
- is_yuv = dev->fmt->is_yuv;
-
- switch (dev->fmt->fourcc) {
- case V4L2_PIX_FMT_RGB565:
- case V4L2_PIX_FMT_RGB565X:
- r >>= 3;
- g >>= 2;
- b >>= 3;
- break;
- case V4L2_PIX_FMT_RGB555:
- case V4L2_PIX_FMT_RGB555X:
- r >>= 3;
- g >>= 3;
- b >>= 3;
- break;
- case V4L2_PIX_FMT_YUYV:
- case V4L2_PIX_FMT_UYVY:
- case V4L2_PIX_FMT_YVYU:
- case V4L2_PIX_FMT_VYUY:
- case V4L2_PIX_FMT_RGB24:
- case V4L2_PIX_FMT_BGR24:
- case V4L2_PIX_FMT_RGB32:
- case V4L2_PIX_FMT_BGR32:
- break;
- }
-
- if (is_yuv) {
- dev->bars[k][0] = TO_Y(r, g, b); /* Luma */
- dev->bars[k][1] = TO_U(r, g, b); /* Cb */
- dev->bars[k][2] = TO_V(r, g, b); /* Cr */
- } else {
- dev->bars[k][0] = r;
- dev->bars[k][1] = g;
- dev->bars[k][2] = b;
- }
- }
-}
-
-/* 'odd' is true for pixels 1, 3, 5, etc. and false for pixels 0, 2, 4, etc. */
-static void gen_twopix(struct vivi_dev *dev, u8 *buf, int colorpos, bool odd)
-{
- u8 r_y, g_u, b_v;
- u8 alpha = dev->alpha_component;
- int color;
- u8 *p;
-
- r_y = dev->bars[colorpos][0]; /* R or precalculated Y */
- g_u = dev->bars[colorpos][1]; /* G or precalculated U */
- b_v = dev->bars[colorpos][2]; /* B or precalculated V */
-
- for (color = 0; color < dev->pixelsize; color++) {
- p = buf + color;
-
- switch (dev->fmt->fourcc) {
- case V4L2_PIX_FMT_YUYV:
- switch (color) {
- case 0:
- *p = r_y;
- break;
- case 1:
- *p = odd ? b_v : g_u;
- break;
- }
- break;
- case V4L2_PIX_FMT_UYVY:
- switch (color) {
- case 0:
- *p = odd ? b_v : g_u;
- break;
- case 1:
- *p = r_y;
- break;
- }
- break;
- case V4L2_PIX_FMT_YVYU:
- switch (color) {
- case 0:
- *p = r_y;
- break;
- case 1:
- *p = odd ? g_u : b_v;
- break;
- }
- break;
- case V4L2_PIX_FMT_VYUY:
- switch (color) {
- case 0:
- *p = odd ? g_u : b_v;
- break;
- case 1:
- *p = r_y;
- break;
- }
- break;
- case V4L2_PIX_FMT_RGB565:
- switch (color) {
- case 0:
- *p = (g_u << 5) | b_v;
- break;
- case 1:
- *p = (r_y << 3) | (g_u >> 3);
- break;
- }
- break;
- case V4L2_PIX_FMT_RGB565X:
- switch (color) {
- case 0:
- *p = (r_y << 3) | (g_u >> 3);
- break;
- case 1:
- *p = (g_u << 5) | b_v;
- break;
- }
- break;
- case V4L2_PIX_FMT_RGB555:
- switch (color) {
- case 0:
- *p = (g_u << 5) | b_v;
- break;
- case 1:
- *p = (alpha & 0x80) | (r_y << 2) | (g_u >> 3);
- break;
- }
- break;
- case V4L2_PIX_FMT_RGB555X:
- switch (color) {
- case 0:
- *p = (alpha & 0x80) | (r_y << 2) | (g_u >> 3);
- break;
- case 1:
- *p = (g_u << 5) | b_v;
- break;
- }
- break;
- case V4L2_PIX_FMT_RGB24:
- switch (color) {
- case 0:
- *p = r_y;
- break;
- case 1:
- *p = g_u;
- break;
- case 2:
- *p = b_v;
- break;
- }
- break;
- case V4L2_PIX_FMT_BGR24:
- switch (color) {
- case 0:
- *p = b_v;
- break;
- case 1:
- *p = g_u;
- break;
- case 2:
- *p = r_y;
- break;
- }
- break;
- case V4L2_PIX_FMT_RGB32:
- switch (color) {
- case 0:
- *p = alpha;
- break;
- case 1:
- *p = r_y;
- break;
- case 2:
- *p = g_u;
- break;
- case 3:
- *p = b_v;
- break;
- }
- break;
- case V4L2_PIX_FMT_BGR32:
- switch (color) {
- case 0:
- *p = b_v;
- break;
- case 1:
- *p = g_u;
- break;
- case 2:
- *p = r_y;
- break;
- case 3:
- *p = alpha;
- break;
- }
- break;
- }
- }
-}
-
-static void precalculate_line(struct vivi_dev *dev)
-{
- unsigned pixsize = dev->pixelsize;
- unsigned pixsize2 = 2*pixsize;
- int colorpos;
- u8 *pos;
-
- for (colorpos = 0; colorpos < 16; ++colorpos) {
- u8 pix[8];
- int wstart = colorpos * dev->width / 8;
- int wend = (colorpos+1) * dev->width / 8;
- int w;
-
- gen_twopix(dev, &pix[0], colorpos % 8, 0);
- gen_twopix(dev, &pix[pixsize], colorpos % 8, 1);
-
- for (w = wstart/2*2, pos = dev->line + w*pixsize; w < wend; w += 2, pos += pixsize2)
- memcpy(pos, pix, pixsize2);
- }
-}
-
-/* need this to do rgb24 rendering */
-typedef struct { u16 __; u8 _; } __attribute__((packed)) x24;
-
-static void gen_text(struct vivi_dev *dev, char *basep,
- int y, int x, char *text)
-{
- int line;
- unsigned int width = dev->width;
-
- /* Checks if it is possible to show string */
- if (y + 16 >= dev->height || x + strlen(text) * 8 >= width)
- return;
-
- /* Print stream time */
-#define PRINTSTR(PIXTYPE) do { \
- PIXTYPE fg; \
- PIXTYPE bg; \
- memcpy(&fg, &dev->textfg, sizeof(PIXTYPE)); \
- memcpy(&bg, &dev->textbg, sizeof(PIXTYPE)); \
- \
- for (line = 0; line < 16; line++) { \
- PIXTYPE *pos = (PIXTYPE *)( basep + ((y + line) * width + x) * sizeof(PIXTYPE) ); \
- u8 *s; \
- \
- for (s = text; *s; s++) { \
- u8 chr = font8x16[*s * 16 + line]; \
- \
- pos[0] = (chr & (0x01 << 7) ? fg : bg); \
- pos[1] = (chr & (0x01 << 6) ? fg : bg); \
- pos[2] = (chr & (0x01 << 5) ? fg : bg); \
- pos[3] = (chr & (0x01 << 4) ? fg : bg); \
- pos[4] = (chr & (0x01 << 3) ? fg : bg); \
- pos[5] = (chr & (0x01 << 2) ? fg : bg); \
- pos[6] = (chr & (0x01 << 1) ? fg : bg); \
- pos[7] = (chr & (0x01 << 0) ? fg : bg); \
- \
- pos += 8; \
- } \
- } \
-} while (0)
-
- switch (dev->pixelsize) {
- case 2:
- PRINTSTR(u16); break;
- case 4:
- PRINTSTR(u32); break;
- case 3:
- PRINTSTR(x24); break;
- }
-}
-
-static void vivi_fillbuff(struct vivi_dev *dev, struct vivi_buffer *buf)
-{
- int stride = dev->width * dev->pixelsize;
- int hmax = dev->height;
- void *vbuf = vb2_plane_vaddr(&buf->vb, 0);
- unsigned ms;
- char str[100];
- int h, line = 1;
- u8 *linestart;
- s32 gain;
-
- if (!vbuf)
- return;
-
- linestart = dev->line + (dev->mv_count % dev->width) * dev->pixelsize;
-
- for (h = 0; h < hmax; h++)
- memcpy(vbuf + h * stride, linestart, stride);
-
- /* Updates stream time */
-
- gen_twopix(dev, (u8 *)&dev->textbg, TEXT_BLACK, /*odd=*/ 0);
- gen_twopix(dev, (u8 *)&dev->textfg, WHITE, /*odd=*/ 0);
-
- dev->ms += jiffies_to_msecs(jiffies - dev->jiffies);
- dev->jiffies = jiffies;
- ms = dev->ms;
- snprintf(str, sizeof(str), " %02d:%02d:%02d:%03d ",
- (ms / (60 * 60 * 1000)) % 24,
- (ms / (60 * 1000)) % 60,
- (ms / 1000) % 60,
- ms % 1000);
- gen_text(dev, vbuf, line++ * 16, 16, str);
- snprintf(str, sizeof(str), " %dx%d, input %d ",
- dev->width, dev->height, dev->input);
- gen_text(dev, vbuf, line++ * 16, 16, str);
-
- gain = v4l2_ctrl_g_ctrl(dev->gain);
- mutex_lock(dev->ctrl_handler.lock);
- snprintf(str, sizeof(str), " brightness %3d, contrast %3d, saturation %3d, hue %d ",
- dev->brightness->cur.val,
- dev->contrast->cur.val,
- dev->saturation->cur.val,
- dev->hue->cur.val);
- gen_text(dev, vbuf, line++ * 16, 16, str);
- snprintf(str, sizeof(str), " autogain %d, gain %3d, volume %3d, alpha 0x%02x ",
- dev->autogain->cur.val, gain, dev->volume->cur.val,
- dev->alpha->cur.val);
- gen_text(dev, vbuf, line++ * 16, 16, str);
- snprintf(str, sizeof(str), " int32 %d, int64 %lld, bitmask %08x ",
- dev->int32->cur.val,
- *dev->int64->p_cur.p_s64,
- dev->bitmask->cur.val);
- gen_text(dev, vbuf, line++ * 16, 16, str);
- snprintf(str, sizeof(str), " boolean %d, menu %s, string \"%s\" ",
- dev->boolean->cur.val,
- dev->menu->qmenu[dev->menu->cur.val],
- dev->string->p_cur.p_char);
- gen_text(dev, vbuf, line++ * 16, 16, str);
- snprintf(str, sizeof(str), " integer_menu %lld, value %d ",
- dev->int_menu->qmenu_int[dev->int_menu->cur.val],
- dev->int_menu->cur.val);
- gen_text(dev, vbuf, line++ * 16, 16, str);
- mutex_unlock(dev->ctrl_handler.lock);
- if (dev->button_pressed) {
- dev->button_pressed--;
- snprintf(str, sizeof(str), " button pressed!");
- gen_text(dev, vbuf, line++ * 16, 16, str);
- }
-
- dev->mv_count += 2;
-
- buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
- buf->vb.v4l2_buf.sequence = dev->seq_count++;
- v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
-}
-
-static void vivi_thread_tick(struct vivi_dev *dev)
-{
- struct vivi_dmaqueue *dma_q = &dev->vidq;
- struct vivi_buffer *buf;
- unsigned long flags = 0;
-
- dprintk(dev, 1, "Thread tick\n");
-
- spin_lock_irqsave(&dev->slock, flags);
- if (list_empty(&dma_q->active)) {
- dprintk(dev, 1, "No active queue to serve\n");
- spin_unlock_irqrestore(&dev->slock, flags);
- return;
- }
-
- buf = list_entry(dma_q->active.next, struct vivi_buffer, list);
- list_del(&buf->list);
- spin_unlock_irqrestore(&dev->slock, flags);
-
- v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
-
- /* Fill buffer */
- vivi_fillbuff(dev, buf);
- dprintk(dev, 1, "filled buffer %p\n", buf);
-
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
- dprintk(dev, 2, "[%p/%d] done\n", buf, buf->vb.v4l2_buf.index);
-}
-
-#define frames_to_ms(dev, frames) \
- ((frames * dev->timeperframe.numerator * 1000) / dev->timeperframe.denominator)
-
-static void vivi_sleep(struct vivi_dev *dev)
-{
- struct vivi_dmaqueue *dma_q = &dev->vidq;
- int timeout;
- DECLARE_WAITQUEUE(wait, current);
-
- dprintk(dev, 1, "%s dma_q=0x%08lx\n", __func__,
- (unsigned long)dma_q);
-
- add_wait_queue(&dma_q->wq, &wait);
- if (kthread_should_stop())
- goto stop_task;
-
- /* Calculate time to wake up */
- timeout = msecs_to_jiffies(frames_to_ms(dev, 1));
-
- vivi_thread_tick(dev);
-
- schedule_timeout_interruptible(timeout);
-
-stop_task:
- remove_wait_queue(&dma_q->wq, &wait);
- try_to_freeze();
-}
-
-static int vivi_thread(void *data)
-{
- struct vivi_dev *dev = data;
-
- dprintk(dev, 1, "thread started\n");
-
- set_freezable();
-
- for (;;) {
- vivi_sleep(dev);
-
- if (kthread_should_stop())
- break;
- }
- dprintk(dev, 1, "thread: exit\n");
- return 0;
-}
-
-static int vivi_start_generating(struct vivi_dev *dev)
-{
- struct vivi_dmaqueue *dma_q = &dev->vidq;
-
- dprintk(dev, 1, "%s\n", __func__);
-
- /* Resets frame counters */
- dev->ms = 0;
- dev->mv_count = 0;
- dev->jiffies = jiffies;
-
- dma_q->frame = 0;
- dma_q->ini_jiffies = jiffies;
- dma_q->kthread = kthread_run(vivi_thread, dev, "%s",
- dev->v4l2_dev.name);
-
- if (IS_ERR(dma_q->kthread)) {
- v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
- return PTR_ERR(dma_q->kthread);
- }
- /* Wakes thread */
- wake_up_interruptible(&dma_q->wq);
-
- dprintk(dev, 1, "returning from %s\n", __func__);
- return 0;
-}
-
-static void vivi_stop_generating(struct vivi_dev *dev)
-{
- struct vivi_dmaqueue *dma_q = &dev->vidq;
-
- dprintk(dev, 1, "%s\n", __func__);
-
- /* shutdown control thread */
- if (dma_q->kthread) {
- kthread_stop(dma_q->kthread);
- dma_q->kthread = NULL;
- }
-
- /*
- * Typical driver might need to wait here until dma engine stops.
- * In this case we can abort immediately, so it's just a noop.
- */
-
- /* Release all active buffers */
- while (!list_empty(&dma_q->active)) {
- struct vivi_buffer *buf;
- buf = list_entry(dma_q->active.next, struct vivi_buffer, list);
- list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
- dprintk(dev, 2, "[%p/%d] done\n", buf, buf->vb.v4l2_buf.index);
- }
-}
-/* ------------------------------------------------------------------
- Videobuf operations
- ------------------------------------------------------------------*/
-static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
- unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], void *alloc_ctxs[])
-{
- struct vivi_dev *dev = vb2_get_drv_priv(vq);
- unsigned long size;
-
- size = dev->width * dev->height * dev->pixelsize;
- if (fmt) {
- if (fmt->fmt.pix.sizeimage < size)
- return -EINVAL;
- size = fmt->fmt.pix.sizeimage;
- /* check against insane over 8K resolution buffers */
- if (size > 7680 * 4320 * dev->pixelsize)
- return -EINVAL;
- }
-
- *nplanes = 1;
-
- sizes[0] = size;
-
- /*
- * videobuf2-vmalloc allocator is context-less so no need to set
- * alloc_ctxs array.
- */
-
- dprintk(dev, 1, "%s, count=%d, size=%ld\n", __func__,
- *nbuffers, size);
-
- return 0;
-}
-
-static int buffer_prepare(struct vb2_buffer *vb)
-{
- struct vivi_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
- struct vivi_buffer *buf = container_of(vb, struct vivi_buffer, vb);
- unsigned long size;
-
- dprintk(dev, 1, "%s, field=%d\n", __func__, vb->v4l2_buf.field);
-
- BUG_ON(NULL == dev->fmt);
-
- /*
- * These properties only change when queue is idle, see s_fmt.
- * The below checks should not be performed here, on each
- * buffer_prepare (i.e. on each qbuf). Most of the code in this function
- * should thus be moved to buffer_init and s_fmt.
- */
- if (dev->width < 48 || dev->width > MAX_WIDTH ||
- dev->height < 32 || dev->height > MAX_HEIGHT)
- return -EINVAL;
-
- size = dev->width * dev->height * dev->pixelsize;
- if (vb2_plane_size(vb, 0) < size) {
- dprintk(dev, 1, "%s data will not fit into plane (%lu < %lu)\n",
- __func__, vb2_plane_size(vb, 0), size);
- return -EINVAL;
- }
-
- vb2_set_plane_payload(&buf->vb, 0, size);
-
- precalculate_bars(dev);
- precalculate_line(dev);
-
- return 0;
-}
-
-static void buffer_queue(struct vb2_buffer *vb)
-{
- struct vivi_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
- struct vivi_buffer *buf = container_of(vb, struct vivi_buffer, vb);
- struct vivi_dmaqueue *vidq = &dev->vidq;
- unsigned long flags = 0;
-
- dprintk(dev, 1, "%s\n", __func__);
-
- spin_lock_irqsave(&dev->slock, flags);
- list_add_tail(&buf->list, &vidq->active);
- spin_unlock_irqrestore(&dev->slock, flags);
-}
-
-static int start_streaming(struct vb2_queue *vq, unsigned int count)
-{
- struct vivi_dev *dev = vb2_get_drv_priv(vq);
- int err;
-
- dprintk(dev, 1, "%s\n", __func__);
- dev->seq_count = 0;
- err = vivi_start_generating(dev);
- if (err) {
- struct vivi_buffer *buf, *tmp;
-
- list_for_each_entry_safe(buf, tmp, &dev->vidq.active, list) {
- list_del(&buf->list);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
- }
- }
- return err;
-}
-
-/* abort streaming and wait for last buffer */
-static void stop_streaming(struct vb2_queue *vq)
-{
- struct vivi_dev *dev = vb2_get_drv_priv(vq);
- dprintk(dev, 1, "%s\n", __func__);
- vivi_stop_generating(dev);
-}
-
-static void vivi_lock(struct vb2_queue *vq)
-{
- struct vivi_dev *dev = vb2_get_drv_priv(vq);
- mutex_lock(&dev->mutex);
-}
-
-static void vivi_unlock(struct vb2_queue *vq)
-{
- struct vivi_dev *dev = vb2_get_drv_priv(vq);
- mutex_unlock(&dev->mutex);
-}
-
-
-static const struct vb2_ops vivi_video_qops = {
- .queue_setup = queue_setup,
- .buf_prepare = buffer_prepare,
- .buf_queue = buffer_queue,
- .start_streaming = start_streaming,
- .stop_streaming = stop_streaming,
- .wait_prepare = vivi_unlock,
- .wait_finish = vivi_lock,
-};
-
-/* ------------------------------------------------------------------
- IOCTL vidioc handling
- ------------------------------------------------------------------*/
-static int vidioc_querycap(struct file *file, void *priv,
- struct v4l2_capability *cap)
-{
- struct vivi_dev *dev = video_drvdata(file);
-
- strcpy(cap->driver, "vivi");
- strcpy(cap->card, "vivi");
- snprintf(cap->bus_info, sizeof(cap->bus_info),
- "platform:%s", dev->v4l2_dev.name);
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
- V4L2_CAP_READWRITE;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
- return 0;
-}
-
-static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
-{
- const struct vivi_fmt *fmt;
-
- if (f->index >= ARRAY_SIZE(formats))
- return -EINVAL;
-
- fmt = &formats[f->index];
-
- strlcpy(f->description, fmt->name, sizeof(f->description));
- f->pixelformat = fmt->fourcc;
- return 0;
-}
-
-static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct vivi_dev *dev = video_drvdata(file);
-
- f->fmt.pix.width = dev->width;
- f->fmt.pix.height = dev->height;
- f->fmt.pix.field = V4L2_FIELD_INTERLACED;
- f->fmt.pix.pixelformat = dev->fmt->fourcc;
- f->fmt.pix.bytesperline =
- (f->fmt.pix.width * dev->fmt->depth) >> 3;
- f->fmt.pix.sizeimage =
- f->fmt.pix.height * f->fmt.pix.bytesperline;
- if (dev->fmt->is_yuv)
- f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
- else
- f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
- return 0;
-}
-
-static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct vivi_dev *dev = video_drvdata(file);
- const struct vivi_fmt *fmt;
-
- fmt = get_format(f);
- if (!fmt) {
- dprintk(dev, 1, "Fourcc format (0x%08x) unknown.\n",
- f->fmt.pix.pixelformat);
- f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
- fmt = get_format(f);
- }
-
- f->fmt.pix.field = V4L2_FIELD_INTERLACED;
- v4l_bound_align_image(&f->fmt.pix.width, 48, MAX_WIDTH, 2,
- &f->fmt.pix.height, 32, MAX_HEIGHT, 0, 0);
- f->fmt.pix.bytesperline =
- (f->fmt.pix.width * fmt->depth) >> 3;
- f->fmt.pix.sizeimage =
- f->fmt.pix.height * f->fmt.pix.bytesperline;
- if (fmt->is_yuv)
- f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
- else
- f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
- return 0;
-}
-
-static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct vivi_dev *dev = video_drvdata(file);
- struct vb2_queue *q = &dev->vb_vidq;
-
- int ret = vidioc_try_fmt_vid_cap(file, priv, f);
- if (ret < 0)
- return ret;
-
- if (vb2_is_busy(q)) {
- dprintk(dev, 1, "%s device busy\n", __func__);
- return -EBUSY;
- }
-
- dev->fmt = get_format(f);
- dev->pixelsize = dev->fmt->depth / 8;
- dev->width = f->fmt.pix.width;
- dev->height = f->fmt.pix.height;
-
- return 0;
-}
-
-static int vidioc_enum_framesizes(struct file *file, void *fh,
- struct v4l2_frmsizeenum *fsize)
-{
- static const struct v4l2_frmsize_stepwise sizes = {
- 48, MAX_WIDTH, 4,
- 32, MAX_HEIGHT, 1
- };
- int i;
-
- if (fsize->index)
- return -EINVAL;
- for (i = 0; i < ARRAY_SIZE(formats); i++)
- if (formats[i].fourcc == fsize->pixel_format)
- break;
- if (i == ARRAY_SIZE(formats))
- return -EINVAL;
- fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
- fsize->stepwise = sizes;
- return 0;
-}
-
-/* only one input in this sample driver */
-static int vidioc_enum_input(struct file *file, void *priv,
- struct v4l2_input *inp)
-{
- if (inp->index >= NUM_INPUTS)
- return -EINVAL;
-
- inp->type = V4L2_INPUT_TYPE_CAMERA;
- sprintf(inp->name, "Camera %u", inp->index);
- return 0;
-}
-
-static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
-{
- struct vivi_dev *dev = video_drvdata(file);
-
- *i = dev->input;
- return 0;
-}
-
-static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
-{
- struct vivi_dev *dev = video_drvdata(file);
-
- if (i >= NUM_INPUTS)
- return -EINVAL;
-
- if (i == dev->input)
- return 0;
-
- dev->input = i;
- /*
- * Modify the brightness range depending on the input.
- * This makes it easy to use vivi to test if applications can
- * handle control range modifications and is also how this is
- * typically used in practice as different inputs may be hooked
- * up to different receivers with different control ranges.
- */
- v4l2_ctrl_modify_range(dev->brightness,
- 128 * i, 255 + 128 * i, 1, 127 + 128 * i);
- precalculate_bars(dev);
- precalculate_line(dev);
- return 0;
-}
-
-/* timeperframe is arbitrary and continuous */
-static int vidioc_enum_frameintervals(struct file *file, void *priv,
- struct v4l2_frmivalenum *fival)
-{
- const struct vivi_fmt *fmt;
-
- if (fival->index)
- return -EINVAL;
-
- fmt = __get_format(fival->pixel_format);
- if (!fmt)
- return -EINVAL;
-
- /* check for valid width/height */
- if (fival->width < 48 || fival->width > MAX_WIDTH || (fival->width & 3))
- return -EINVAL;
- if (fival->height < 32 || fival->height > MAX_HEIGHT)
- return -EINVAL;
-
- fival->type = V4L2_FRMIVAL_TYPE_CONTINUOUS;
-
- /* fill in stepwise (step=1.0 is required by V4L2 spec) */
- fival->stepwise.min = tpf_min;
- fival->stepwise.max = tpf_max;
- fival->stepwise.step = (struct v4l2_fract) {1, 1};
-
- return 0;
-}
-
-static int vidioc_g_parm(struct file *file, void *priv,
- struct v4l2_streamparm *parm)
-{
- struct vivi_dev *dev = video_drvdata(file);
-
- if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
-
- parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
- parm->parm.capture.timeperframe = dev->timeperframe;
- parm->parm.capture.readbuffers = 1;
- return 0;
-}
-
-#define FRACT_CMP(a, OP, b) \
- ((u64)(a).numerator * (b).denominator OP (u64)(b).numerator * (a).denominator)
-
-static int vidioc_s_parm(struct file *file, void *priv,
- struct v4l2_streamparm *parm)
-{
- struct vivi_dev *dev = video_drvdata(file);
- struct v4l2_fract tpf;
-
- if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
-
- tpf = parm->parm.capture.timeperframe;
-
- /* tpf: {*, 0} resets timing; clip to [min, max]*/
- tpf = tpf.denominator ? tpf : tpf_default;
- tpf = FRACT_CMP(tpf, <, tpf_min) ? tpf_min : tpf;
- tpf = FRACT_CMP(tpf, >, tpf_max) ? tpf_max : tpf;
-
- dev->timeperframe = tpf;
- parm->parm.capture.timeperframe = tpf;
- parm->parm.capture.readbuffers = 1;
- return 0;
-}
-
-/* --- controls ---------------------------------------------- */
-
-static int vivi_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct vivi_dev *dev = container_of(ctrl->handler, struct vivi_dev, ctrl_handler);
-
- if (ctrl == dev->autogain)
- dev->gain->val = jiffies & 0xff;
- return 0;
-}
-
-static int vivi_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct vivi_dev *dev = container_of(ctrl->handler, struct vivi_dev, ctrl_handler);
-
- switch (ctrl->id) {
- case V4L2_CID_ALPHA_COMPONENT:
- dev->alpha_component = ctrl->val;
- break;
- default:
- if (ctrl == dev->button)
- dev->button_pressed = 30;
- break;
- }
- return 0;
-}
-
-/* ------------------------------------------------------------------
- File operations for the device
- ------------------------------------------------------------------*/
-
-static const struct v4l2_ctrl_ops vivi_ctrl_ops = {
- .g_volatile_ctrl = vivi_g_volatile_ctrl,
- .s_ctrl = vivi_s_ctrl,
-};
-
-#define VIVI_CID_CUSTOM_BASE (V4L2_CID_USER_BASE | 0xf000)
-
-static const struct v4l2_ctrl_config vivi_ctrl_button = {
- .ops = &vivi_ctrl_ops,
- .id = VIVI_CID_CUSTOM_BASE + 0,
- .name = "Button",
- .type = V4L2_CTRL_TYPE_BUTTON,
-};
-
-static const struct v4l2_ctrl_config vivi_ctrl_boolean = {
- .ops = &vivi_ctrl_ops,
- .id = VIVI_CID_CUSTOM_BASE + 1,
- .name = "Boolean",
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .min = 0,
- .max = 1,
- .step = 1,
- .def = 1,
-};
-
-static const struct v4l2_ctrl_config vivi_ctrl_int32 = {
- .ops = &vivi_ctrl_ops,
- .id = VIVI_CID_CUSTOM_BASE + 2,
- .name = "Integer 32 Bits",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = -0x80000000LL,
- .max = 0x7fffffff,
- .step = 1,
-};
-
-static const struct v4l2_ctrl_config vivi_ctrl_int64 = {
- .ops = &vivi_ctrl_ops,
- .id = VIVI_CID_CUSTOM_BASE + 3,
- .name = "Integer 64 Bits",
- .type = V4L2_CTRL_TYPE_INTEGER64,
- .min = LLONG_MIN,
- .max = LLONG_MAX,
- .step = 1,
-};
-
-static const char * const vivi_ctrl_menu_strings[] = {
- "Menu Item 0 (Skipped)",
- "Menu Item 1",
- "Menu Item 2 (Skipped)",
- "Menu Item 3",
- "Menu Item 4",
- "Menu Item 5 (Skipped)",
- NULL,
-};
-
-static const struct v4l2_ctrl_config vivi_ctrl_menu = {
- .ops = &vivi_ctrl_ops,
- .id = VIVI_CID_CUSTOM_BASE + 4,
- .name = "Menu",
- .type = V4L2_CTRL_TYPE_MENU,
- .min = 1,
- .max = 4,
- .def = 3,
- .menu_skip_mask = 0x04,
- .qmenu = vivi_ctrl_menu_strings,
-};
-
-static const struct v4l2_ctrl_config vivi_ctrl_string = {
- .ops = &vivi_ctrl_ops,
- .id = VIVI_CID_CUSTOM_BASE + 5,
- .name = "String",
- .type = V4L2_CTRL_TYPE_STRING,
- .min = 2,
- .max = 4,
- .step = 1,
-};
-
-static const struct v4l2_ctrl_config vivi_ctrl_bitmask = {
- .ops = &vivi_ctrl_ops,
- .id = VIVI_CID_CUSTOM_BASE + 6,
- .name = "Bitmask",
- .type = V4L2_CTRL_TYPE_BITMASK,
- .def = 0x80002000,
- .min = 0,
- .max = 0x80402010,
- .step = 0,
-};
-
-static const s64 vivi_ctrl_int_menu_values[] = {
- 1, 1, 2, 3, 5, 8, 13, 21, 42,
-};
-
-static const struct v4l2_ctrl_config vivi_ctrl_int_menu = {
- .ops = &vivi_ctrl_ops,
- .id = VIVI_CID_CUSTOM_BASE + 7,
- .name = "Integer menu",
- .type = V4L2_CTRL_TYPE_INTEGER_MENU,
- .min = 1,
- .max = 8,
- .def = 4,
- .menu_skip_mask = 0x02,
- .qmenu_int = vivi_ctrl_int_menu_values,
-};
-
-static const struct v4l2_file_operations vivi_fops = {
- .owner = THIS_MODULE,
- .open = v4l2_fh_open,
- .release = vb2_fop_release,
- .read = vb2_fop_read,
- .poll = vb2_fop_poll,
- .unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */
- .mmap = vb2_fop_mmap,
-};
-
-static const struct v4l2_ioctl_ops vivi_ioctl_ops = {
- .vidioc_querycap = vidioc_querycap,
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
- .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_enum_framesizes = vidioc_enum_framesizes,
- .vidioc_reqbufs = vb2_ioctl_reqbufs,
- .vidioc_create_bufs = vb2_ioctl_create_bufs,
- .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
- .vidioc_querybuf = vb2_ioctl_querybuf,
- .vidioc_qbuf = vb2_ioctl_qbuf,
- .vidioc_dqbuf = vb2_ioctl_dqbuf,
- .vidioc_enum_input = vidioc_enum_input,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
- .vidioc_enum_frameintervals = vidioc_enum_frameintervals,
- .vidioc_g_parm = vidioc_g_parm,
- .vidioc_s_parm = vidioc_s_parm,
- .vidioc_streamon = vb2_ioctl_streamon,
- .vidioc_streamoff = vb2_ioctl_streamoff,
- .vidioc_log_status = v4l2_ctrl_log_status,
- .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
- .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
-};
-
-static const struct video_device vivi_template = {
- .name = "vivi",
- .fops = &vivi_fops,
- .ioctl_ops = &vivi_ioctl_ops,
- .release = video_device_release_empty,
-};
-
-/* -----------------------------------------------------------------
- Initialization and module stuff
- ------------------------------------------------------------------*/
-
-static int vivi_release(void)
-{
- struct vivi_dev *dev;
- struct list_head *list;
-
- while (!list_empty(&vivi_devlist)) {
- list = vivi_devlist.next;
- list_del(list);
- dev = list_entry(list, struct vivi_dev, vivi_devlist);
-
- v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
- video_device_node_name(&dev->vdev));
- video_unregister_device(&dev->vdev);
- v4l2_device_unregister(&dev->v4l2_dev);
- v4l2_ctrl_handler_free(&dev->ctrl_handler);
- kfree(dev);
- }
-
- return 0;
-}
-
-static int __init vivi_create_instance(int inst)
-{
- struct vivi_dev *dev;
- struct video_device *vfd;
- struct v4l2_ctrl_handler *hdl;
- struct vb2_queue *q;
- int ret;
-
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
-
- snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name),
- "%s-%03d", VIVI_MODULE_NAME, inst);
- ret = v4l2_device_register(NULL, &dev->v4l2_dev);
- if (ret)
- goto free_dev;
-
- dev->fmt = &formats[0];
- dev->timeperframe = tpf_default;
- dev->width = 640;
- dev->height = 480;
- dev->pixelsize = dev->fmt->depth / 8;
- hdl = &dev->ctrl_handler;
- v4l2_ctrl_handler_init(hdl, 11);
- dev->volume = v4l2_ctrl_new_std(hdl, &vivi_ctrl_ops,
- V4L2_CID_AUDIO_VOLUME, 0, 255, 1, 200);
- dev->brightness = v4l2_ctrl_new_std(hdl, &vivi_ctrl_ops,
- V4L2_CID_BRIGHTNESS, 0, 255, 1, 127);
- dev->contrast = v4l2_ctrl_new_std(hdl, &vivi_ctrl_ops,
- V4L2_CID_CONTRAST, 0, 255, 1, 16);
- dev->saturation = v4l2_ctrl_new_std(hdl, &vivi_ctrl_ops,
- V4L2_CID_SATURATION, 0, 255, 1, 127);
- dev->hue = v4l2_ctrl_new_std(hdl, &vivi_ctrl_ops,
- V4L2_CID_HUE, -128, 127, 1, 0);
- dev->autogain = v4l2_ctrl_new_std(hdl, &vivi_ctrl_ops,
- V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
- dev->gain = v4l2_ctrl_new_std(hdl, &vivi_ctrl_ops,
- V4L2_CID_GAIN, 0, 255, 1, 100);
- dev->alpha = v4l2_ctrl_new_std(hdl, &vivi_ctrl_ops,
- V4L2_CID_ALPHA_COMPONENT, 0, 255, 1, 0);
- dev->button = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_button, NULL);
- dev->int32 = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_int32, NULL);
- dev->int64 = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_int64, NULL);
- dev->boolean = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_boolean, NULL);
- dev->menu = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_menu, NULL);
- dev->string = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_string, NULL);
- dev->bitmask = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_bitmask, NULL);
- dev->int_menu = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_int_menu, NULL);
- if (hdl->error) {
- ret = hdl->error;
- goto unreg_dev;
- }
- v4l2_ctrl_auto_cluster(2, &dev->autogain, 0, true);
- dev->v4l2_dev.ctrl_handler = hdl;
-
- /* initialize locks */
- spin_lock_init(&dev->slock);
-
- /* initialize queue */
- q = &dev->vb_vidq;
- q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
- q->drv_priv = dev;
- q->buf_struct_size = sizeof(struct vivi_buffer);
- q->ops = &vivi_video_qops;
- q->mem_ops = &vb2_vmalloc_memops;
- q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
-
- ret = vb2_queue_init(q);
- if (ret)
- goto unreg_dev;
-
- mutex_init(&dev->mutex);
-
- /* init video dma queues */
- INIT_LIST_HEAD(&dev->vidq.active);
- init_waitqueue_head(&dev->vidq.wq);
-
- vfd = &dev->vdev;
- *vfd = vivi_template;
- vfd->debug = debug;
- vfd->v4l2_dev = &dev->v4l2_dev;
- vfd->queue = q;
-
- /*
- * Provide a mutex to v4l2 core. It will be used to protect
- * all fops and v4l2 ioctls.
- */
- vfd->lock = &dev->mutex;
- video_set_drvdata(vfd, dev);
-
- ret = video_register_device(vfd, VFL_TYPE_GRABBER, video_nr);
- if (ret < 0)
- goto unreg_dev;
-
- /* Now that everything is fine, let's add it to device list */
- list_add_tail(&dev->vivi_devlist, &vivi_devlist);
-
- v4l2_info(&dev->v4l2_dev, "V4L2 device registered as %s\n",
- video_device_node_name(vfd));
- return 0;
-
-unreg_dev:
- v4l2_ctrl_handler_free(hdl);
- v4l2_device_unregister(&dev->v4l2_dev);
-free_dev:
- kfree(dev);
- return ret;
-}
-
-/* This routine allocates from 1 to n_devs virtual drivers.
-
- The real maximum number of virtual drivers will depend on how many drivers
- will succeed. This is limited to the maximum number of devices that
- videodev supports, which is equal to VIDEO_NUM_DEVICES.
- */
-static int __init vivi_init(void)
-{
- const struct font_desc *font = find_font("VGA8x16");
- int ret = 0, i;
-
- if (font == NULL) {
- printk(KERN_ERR "vivi: could not find font\n");
- return -ENODEV;
- }
- font8x16 = font->data;
-
- if (n_devs <= 0)
- n_devs = 1;
-
- for (i = 0; i < n_devs; i++) {
- ret = vivi_create_instance(i);
- if (ret) {
- /* If some instantiations succeeded, keep driver */
- if (i)
- ret = 0;
- break;
- }
- }
-
- if (ret < 0) {
- printk(KERN_ERR "vivi: error %d while loading driver\n", ret);
- return ret;
- }
-
- printk(KERN_INFO "Video Technology Magazine Virtual Video "
- "Capture Board ver %s successfully loaded.\n",
- VIVI_VERSION);
-
- /* n_devs will reflect the actual number of allocated devices */
- n_devs = i;
-
- return ret;
-}
-
-static void __exit vivi_exit(void)
-{
- vivi_release();
-}
-
-module_init(vivi_init);
-module_exit(vivi_exit);
diff --git a/drivers/media/platform/vivid/Kconfig b/drivers/media/platform/vivid/Kconfig
new file mode 100644
index 000000000000..d71139a2ae00
--- /dev/null
+++ b/drivers/media/platform/vivid/Kconfig
@@ -0,0 +1,19 @@
+config VIDEO_VIVID
+ tristate "Virtual Video Test Driver"
+ depends on VIDEO_DEV && VIDEO_V4L2 && !SPARC32 && !SPARC64
+ select FONT_SUPPORT
+ select FONT_8x16
+ select VIDEOBUF2_VMALLOC
+ default n
+ ---help---
+ Enables a virtual video driver. This driver emulates a webcam,
+ TV, S-Video and HDMI capture hardware, including VBI support for
+ the SDTV inputs. Also video output, VBI output, radio receivers,
+ transmitters and software defined radio capture is emulated.
+
+ It is highly configurable and is ideal for testing applications.
+ Error injection is supported to test rare errors that are hard
+ to reproduce in real hardware.
+
+ Say Y here if you want to test video apps or debug V4L devices.
+ When in doubt, say N.
diff --git a/drivers/media/platform/vivid/Makefile b/drivers/media/platform/vivid/Makefile
new file mode 100644
index 000000000000..756fc12851df
--- /dev/null
+++ b/drivers/media/platform/vivid/Makefile
@@ -0,0 +1,6 @@
+vivid-objs := vivid-core.o vivid-ctrls.o vivid-vid-common.o vivid-vbi-gen.o \
+ vivid-vid-cap.o vivid-vid-out.o vivid-kthread-cap.o vivid-kthread-out.o \
+ vivid-radio-rx.o vivid-radio-tx.o vivid-radio-common.o \
+ vivid-rds-gen.o vivid-sdr-cap.o vivid-vbi-cap.o vivid-vbi-out.o \
+ vivid-osd.o vivid-tpg.o vivid-tpg-colors.o
+obj-$(CONFIG_VIDEO_VIVID) += vivid.o
diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c
new file mode 100644
index 000000000000..2c61a62ab48b
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-core.c
@@ -0,0 +1,1390 @@
+/*
+ * vivid-core.c - A Virtual Video Test Driver, core initialization
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/font.h>
+#include <linux/mutex.h>
+#include <linux/videodev2.h>
+#include <linux/v4l2-dv-timings.h>
+#include <media/videobuf2-vmalloc.h>
+#include <media/v4l2-dv-timings.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
+
+#include "vivid-core.h"
+#include "vivid-vid-common.h"
+#include "vivid-vid-cap.h"
+#include "vivid-vid-out.h"
+#include "vivid-radio-common.h"
+#include "vivid-radio-rx.h"
+#include "vivid-radio-tx.h"
+#include "vivid-sdr-cap.h"
+#include "vivid-vbi-cap.h"
+#include "vivid-vbi-out.h"
+#include "vivid-osd.h"
+#include "vivid-ctrls.h"
+
+#define VIVID_MODULE_NAME "vivid"
+
+/* The maximum number of vivid devices */
+#define VIVID_MAX_DEVS 64
+
+MODULE_DESCRIPTION("Virtual Video Test Driver");
+MODULE_AUTHOR("Hans Verkuil");
+MODULE_LICENSE("GPL");
+
+static unsigned n_devs = 1;
+module_param(n_devs, uint, 0444);
+MODULE_PARM_DESC(n_devs, " number of driver instances to create");
+
+static int vid_cap_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
+module_param_array(vid_cap_nr, int, NULL, 0444);
+MODULE_PARM_DESC(vid_cap_nr, " videoX start number, -1 is autodetect");
+
+static int vid_out_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
+module_param_array(vid_out_nr, int, NULL, 0444);
+MODULE_PARM_DESC(vid_out_nr, " videoX start number, -1 is autodetect");
+
+static int vbi_cap_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
+module_param_array(vbi_cap_nr, int, NULL, 0444);
+MODULE_PARM_DESC(vbi_cap_nr, " vbiX start number, -1 is autodetect");
+
+static int vbi_out_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
+module_param_array(vbi_out_nr, int, NULL, 0444);
+MODULE_PARM_DESC(vbi_out_nr, " vbiX start number, -1 is autodetect");
+
+static int sdr_cap_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
+module_param_array(sdr_cap_nr, int, NULL, 0444);
+MODULE_PARM_DESC(sdr_cap_nr, " swradioX start number, -1 is autodetect");
+
+static int radio_rx_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
+module_param_array(radio_rx_nr, int, NULL, 0444);
+MODULE_PARM_DESC(radio_rx_nr, " radioX start number, -1 is autodetect");
+
+static int radio_tx_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
+module_param_array(radio_tx_nr, int, NULL, 0444);
+MODULE_PARM_DESC(radio_tx_nr, " radioX start number, -1 is autodetect");
+
+static int ccs_cap_mode[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
+module_param_array(ccs_cap_mode, int, NULL, 0444);
+MODULE_PARM_DESC(ccs_cap_mode, " capture crop/compose/scale mode:\n"
+ "\t\t bit 0=crop, 1=compose, 2=scale,\n"
+ "\t\t -1=user-controlled (default)");
+
+static int ccs_out_mode[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
+module_param_array(ccs_out_mode, int, NULL, 0444);
+MODULE_PARM_DESC(ccs_out_mode, " output crop/compose/scale mode:\n"
+ "\t\t bit 0=crop, 1=compose, 2=scale,\n"
+ "\t\t -1=user-controlled (default)");
+
+static unsigned multiplanar[VIVID_MAX_DEVS];
+module_param_array(multiplanar, uint, NULL, 0444);
+MODULE_PARM_DESC(multiplanar, " 0 (default) is alternating single and multiplanar devices,\n"
+ "\t\t 1 is single planar devices,\n"
+ "\t\t 2 is multiplanar devices");
+
+/* Default: video + vbi-cap (raw and sliced) + radio rx + radio tx + sdr + vbi-out + vid-out */
+static unsigned node_types[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 0x1d3d };
+module_param_array(node_types, uint, NULL, 0444);
+MODULE_PARM_DESC(node_types, " node types, default is 0x1d3d. Bitmask with the following meaning:\n"
+ "\t\t bit 0: Video Capture node\n"
+ "\t\t bit 2-3: VBI Capture node: 0 = none, 1 = raw vbi, 2 = sliced vbi, 3 = both\n"
+ "\t\t bit 4: Radio Receiver node\n"
+ "\t\t bit 5: Software Defined Radio Receiver node\n"
+ "\t\t bit 8: Video Output node\n"
+ "\t\t bit 10-11: VBI Output node: 0 = none, 1 = raw vbi, 2 = sliced vbi, 3 = both\n"
+ "\t\t bit 12: Radio Transmitter node\n"
+ "\t\t bit 16: Framebuffer for testing overlays");
+
+/* Default: 4 inputs */
+static unsigned num_inputs[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 4 };
+module_param_array(num_inputs, uint, NULL, 0444);
+MODULE_PARM_DESC(num_inputs, " number of inputs, default is 4");
+
+/* Default: input 0 = WEBCAM, 1 = TV, 2 = SVID, 3 = HDMI */
+static unsigned input_types[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 0xe4 };
+module_param_array(input_types, uint, NULL, 0444);
+MODULE_PARM_DESC(input_types, " input types, default is 0xe4. Two bits per input,\n"
+ "\t\t bits 0-1 == input 0, bits 31-30 == input 15.\n"
+ "\t\t Type 0 == webcam, 1 == TV, 2 == S-Video, 3 == HDMI");
+
+/* Default: 2 outputs */
+static unsigned num_outputs[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 2 };
+module_param_array(num_outputs, uint, NULL, 0444);
+MODULE_PARM_DESC(num_outputs, " number of outputs, default is 2");
+
+/* Default: output 0 = SVID, 1 = HDMI */
+static unsigned output_types[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 2 };
+module_param_array(output_types, uint, NULL, 0444);
+MODULE_PARM_DESC(output_types, " output types, default is 0x02. One bit per output,\n"
+ "\t\t bit 0 == output 0, bit 15 == output 15.\n"
+ "\t\t Type 0 == S-Video, 1 == HDMI");
+
+unsigned vivid_debug;
+module_param(vivid_debug, uint, 0644);
+MODULE_PARM_DESC(vivid_debug, " activates debug info");
+
+static bool no_error_inj;
+module_param(no_error_inj, bool, 0444);
+MODULE_PARM_DESC(no_error_inj, " if set disable the error injecting controls");
+
+static struct vivid_dev *vivid_devs[VIVID_MAX_DEVS];
+
+const struct v4l2_rect vivid_min_rect = {
+ 0, 0, MIN_WIDTH, MIN_HEIGHT
+};
+
+const struct v4l2_rect vivid_max_rect = {
+ 0, 0, MAX_WIDTH * MAX_ZOOM, MAX_HEIGHT * MAX_ZOOM
+};
+
+static const u8 vivid_hdmi_edid[256] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x63, 0x3a, 0xaa, 0x55, 0x00, 0x00, 0x00, 0x00,
+ 0x0a, 0x18, 0x01, 0x03, 0x80, 0x10, 0x09, 0x78,
+ 0x0e, 0x00, 0xb2, 0xa0, 0x57, 0x49, 0x9b, 0x26,
+ 0x10, 0x48, 0x4f, 0x2f, 0xcf, 0x00, 0x31, 0x59,
+ 0x45, 0x59, 0x81, 0x80, 0x81, 0x40, 0x90, 0x40,
+ 0x95, 0x00, 0xa9, 0x40, 0xb3, 0x00, 0x02, 0x3a,
+ 0x80, 0x18, 0x71, 0x38, 0x2d, 0x40, 0x58, 0x2c,
+ 0x46, 0x00, 0x10, 0x09, 0x00, 0x00, 0x00, 0x1e,
+ 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x55, 0x18,
+ 0x5e, 0x11, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc, 0x00, 'v',
+ '4', 'l', '2', '-', 'h', 'd', 'm', 'i',
+ 0x0a, 0x0a, 0x0a, 0x0a, 0x00, 0x00, 0x00, 0x10,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xf0,
+
+ 0x02, 0x03, 0x1a, 0xc0, 0x48, 0xa2, 0x10, 0x04,
+ 0x02, 0x01, 0x21, 0x14, 0x13, 0x23, 0x09, 0x07,
+ 0x07, 0x65, 0x03, 0x0c, 0x00, 0x10, 0x00, 0xe2,
+ 0x00, 0x2a, 0x01, 0x1d, 0x00, 0x80, 0x51, 0xd0,
+ 0x1c, 0x20, 0x40, 0x80, 0x35, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x1e, 0x8c, 0x0a, 0xd0, 0x8a,
+ 0x20, 0xe0, 0x2d, 0x10, 0x10, 0x3e, 0x96, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd7
+};
+
+void vivid_lock(struct vb2_queue *vq)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+
+ mutex_lock(&dev->mutex);
+}
+
+void vivid_unlock(struct vb2_queue *vq)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+
+ mutex_unlock(&dev->mutex);
+}
+
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct video_device *vdev = video_devdata(file);
+
+ strcpy(cap->driver, "vivid");
+ strcpy(cap->card, "vivid");
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", dev->v4l2_dev.name);
+
+ if (vdev->vfl_type == VFL_TYPE_GRABBER && vdev->vfl_dir == VFL_DIR_RX)
+ cap->device_caps = dev->vid_cap_caps;
+ if (vdev->vfl_type == VFL_TYPE_GRABBER && vdev->vfl_dir == VFL_DIR_TX)
+ cap->device_caps = dev->vid_out_caps;
+ else if (vdev->vfl_type == VFL_TYPE_VBI && vdev->vfl_dir == VFL_DIR_RX)
+ cap->device_caps = dev->vbi_cap_caps;
+ else if (vdev->vfl_type == VFL_TYPE_VBI && vdev->vfl_dir == VFL_DIR_TX)
+ cap->device_caps = dev->vbi_out_caps;
+ else if (vdev->vfl_type == VFL_TYPE_SDR)
+ cap->device_caps = dev->sdr_cap_caps;
+ else if (vdev->vfl_type == VFL_TYPE_RADIO && vdev->vfl_dir == VFL_DIR_RX)
+ cap->device_caps = dev->radio_rx_caps;
+ else if (vdev->vfl_type == VFL_TYPE_RADIO && vdev->vfl_dir == VFL_DIR_TX)
+ cap->device_caps = dev->radio_tx_caps;
+ cap->capabilities = dev->vid_cap_caps | dev->vid_out_caps |
+ dev->vbi_cap_caps | dev->vbi_out_caps |
+ dev->radio_rx_caps | dev->radio_tx_caps |
+ dev->sdr_cap_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+static int vidioc_s_hw_freq_seek(struct file *file, void *fh, const struct v4l2_hw_freq_seek *a)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_type == VFL_TYPE_RADIO)
+ return vivid_radio_rx_s_hw_freq_seek(file, fh, a);
+ return -ENOTTY;
+}
+
+static int vidioc_enum_freq_bands(struct file *file, void *fh, struct v4l2_frequency_band *band)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_type == VFL_TYPE_RADIO)
+ return vivid_radio_rx_enum_freq_bands(file, fh, band);
+ if (vdev->vfl_type == VFL_TYPE_SDR)
+ return vivid_sdr_enum_freq_bands(file, fh, band);
+ return -ENOTTY;
+}
+
+static int vidioc_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_type == VFL_TYPE_RADIO)
+ return vivid_radio_rx_g_tuner(file, fh, vt);
+ if (vdev->vfl_type == VFL_TYPE_SDR)
+ return vivid_sdr_g_tuner(file, fh, vt);
+ return vivid_video_g_tuner(file, fh, vt);
+}
+
+static int vidioc_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_type == VFL_TYPE_RADIO)
+ return vivid_radio_rx_s_tuner(file, fh, vt);
+ if (vdev->vfl_type == VFL_TYPE_SDR)
+ return vivid_sdr_s_tuner(file, fh, vt);
+ return vivid_video_s_tuner(file, fh, vt);
+}
+
+static int vidioc_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_type == VFL_TYPE_RADIO)
+ return vivid_radio_g_frequency(file,
+ vdev->vfl_dir == VFL_DIR_RX ?
+ &dev->radio_rx_freq : &dev->radio_tx_freq, vf);
+ if (vdev->vfl_type == VFL_TYPE_SDR)
+ return vivid_sdr_g_frequency(file, fh, vf);
+ return vivid_video_g_frequency(file, fh, vf);
+}
+
+static int vidioc_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_type == VFL_TYPE_RADIO)
+ return vivid_radio_s_frequency(file,
+ vdev->vfl_dir == VFL_DIR_RX ?
+ &dev->radio_rx_freq : &dev->radio_tx_freq, vf);
+ if (vdev->vfl_type == VFL_TYPE_SDR)
+ return vivid_sdr_s_frequency(file, fh, vf);
+ return vivid_video_s_frequency(file, fh, vf);
+}
+
+static int vidioc_overlay(struct file *file, void *fh, unsigned i)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_dir == VFL_DIR_RX)
+ return vivid_vid_cap_overlay(file, fh, i);
+ return vivid_vid_out_overlay(file, fh, i);
+}
+
+static int vidioc_g_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *a)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_dir == VFL_DIR_RX)
+ return vivid_vid_cap_g_fbuf(file, fh, a);
+ return vivid_vid_out_g_fbuf(file, fh, a);
+}
+
+static int vidioc_s_fbuf(struct file *file, void *fh, const struct v4l2_framebuffer *a)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_dir == VFL_DIR_RX)
+ return vivid_vid_cap_s_fbuf(file, fh, a);
+ return vivid_vid_out_s_fbuf(file, fh, a);
+}
+
+static int vidioc_s_std(struct file *file, void *fh, v4l2_std_id id)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_dir == VFL_DIR_RX)
+ return vivid_vid_cap_s_std(file, fh, id);
+ return vivid_vid_out_s_std(file, fh, id);
+}
+
+static int vidioc_s_dv_timings(struct file *file, void *fh, struct v4l2_dv_timings *timings)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_dir == VFL_DIR_RX)
+ return vivid_vid_cap_s_dv_timings(file, fh, timings);
+ return vivid_vid_out_s_dv_timings(file, fh, timings);
+}
+
+static int vidioc_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cc)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_dir == VFL_DIR_RX)
+ return vivid_vid_cap_cropcap(file, fh, cc);
+ return vivid_vid_out_cropcap(file, fh, cc);
+}
+
+static int vidioc_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *sel)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_dir == VFL_DIR_RX)
+ return vivid_vid_cap_g_selection(file, fh, sel);
+ return vivid_vid_out_g_selection(file, fh, sel);
+}
+
+static int vidioc_s_selection(struct file *file, void *fh,
+ struct v4l2_selection *sel)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_dir == VFL_DIR_RX)
+ return vivid_vid_cap_s_selection(file, fh, sel);
+ return vivid_vid_out_s_selection(file, fh, sel);
+}
+
+static int vidioc_g_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *parm)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_dir == VFL_DIR_RX)
+ return vivid_vid_cap_g_parm(file, fh, parm);
+ return vivid_vid_out_g_parm(file, fh, parm);
+}
+
+static int vidioc_s_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *parm)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_dir == VFL_DIR_RX)
+ return vivid_vid_cap_s_parm(file, fh, parm);
+ return vivid_vid_out_g_parm(file, fh, parm);
+}
+
+static ssize_t vivid_radio_read(struct file *file, char __user *buf,
+ size_t size, loff_t *offset)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_dir == VFL_DIR_TX)
+ return -EINVAL;
+ return vivid_radio_rx_read(file, buf, size, offset);
+}
+
+static ssize_t vivid_radio_write(struct file *file, const char __user *buf,
+ size_t size, loff_t *offset)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_dir == VFL_DIR_RX)
+ return -EINVAL;
+ return vivid_radio_tx_write(file, buf, size, offset);
+}
+
+static unsigned int vivid_radio_poll(struct file *file, struct poll_table_struct *wait)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_dir == VFL_DIR_RX)
+ return vivid_radio_rx_poll(file, wait);
+ return vivid_radio_tx_poll(file, wait);
+}
+
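+/*
+ * Helpers for the disconnect/reconnect error injection: a device node is in
+ * use if it has open file handles, and "last user" means that only one of
+ * the seven device nodes still has open file handles.
+ */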
+static bool vivid_is_in_use(struct video_device *vdev)
+{
+ unsigned long flags;
+ bool res;
+
+ spin_lock_irqsave(&vdev->fh_lock, flags);
+ res = !list_empty(&vdev->fh_list);
+ spin_unlock_irqrestore(&vdev->fh_lock, flags);
+ return res;
+}
+
+static bool vivid_is_last_user(struct vivid_dev *dev)
+{
+ unsigned uses = vivid_is_in_use(&dev->vid_cap_dev) +
+ vivid_is_in_use(&dev->vid_out_dev) +
+ vivid_is_in_use(&dev->vbi_cap_dev) +
+ vivid_is_in_use(&dev->vbi_out_dev) +
+ vivid_is_in_use(&dev->sdr_cap_dev) +
+ vivid_is_in_use(&dev->radio_rx_dev) +
+ vivid_is_in_use(&dev->radio_tx_dev);
+
+ return uses == 1;
+}
+
+static int vivid_fop_release(struct file *file)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct video_device *vdev = video_devdata(file);
+
+ mutex_lock(&dev->mutex);
+ if (!no_error_inj && v4l2_fh_is_singular_file(file) &&
+ !video_is_registered(vdev) && vivid_is_last_user(dev)) {
+ /*
+ * I am the last user of this driver, and a disconnect
+ * was forced (since this video_device is unregistered),
+ * so re-register all video_device nodes.
+ */
+ v4l2_info(&dev->v4l2_dev, "reconnect\n");
+ set_bit(V4L2_FL_REGISTERED, &dev->vid_cap_dev.flags);
+ set_bit(V4L2_FL_REGISTERED, &dev->vid_out_dev.flags);
+ set_bit(V4L2_FL_REGISTERED, &dev->vbi_cap_dev.flags);
+ set_bit(V4L2_FL_REGISTERED, &dev->vbi_out_dev.flags);
+ set_bit(V4L2_FL_REGISTERED, &dev->sdr_cap_dev.flags);
+ set_bit(V4L2_FL_REGISTERED, &dev->radio_rx_dev.flags);
+ set_bit(V4L2_FL_REGISTERED, &dev->radio_tx_dev.flags);
+ }
+ mutex_unlock(&dev->mutex);
+ if (file->private_data == dev->overlay_cap_owner)
+ dev->overlay_cap_owner = NULL;
+ if (file->private_data == dev->radio_rx_rds_owner) {
+ dev->radio_rx_rds_last_block = 0;
+ dev->radio_rx_rds_owner = NULL;
+ }
+ if (file->private_data == dev->radio_tx_rds_owner) {
+ dev->radio_tx_rds_last_block = 0;
+ dev->radio_tx_rds_owner = NULL;
+ }
+ if (vdev->queue)
+ return vb2_fop_release(file);
+ return v4l2_fh_release(file);
+}
+
+static const struct v4l2_file_operations vivid_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = vivid_fop_release,
+ .read = vb2_fop_read,
+ .write = vb2_fop_write,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+};
+
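+/*
+ * The radio nodes do not use videobuf2, so they have their own read, write
+ * and poll handlers and no mmap support.
+ */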
+static const struct v4l2_file_operations vivid_radio_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = vivid_fop_release,
+ .read = vivid_radio_read,
+ .write = vivid_radio_write,
+ .poll = vivid_radio_poll,
+ .unlocked_ioctl = video_ioctl2,
+};
+
+static const struct v4l2_ioctl_ops vivid_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_cap_mplane = vidioc_enum_fmt_vid_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt_vid_cap_mplane,
+ .vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt_vid_cap_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = vidioc_s_fmt_vid_cap_mplane,
+
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid,
+ .vidioc_g_fmt_vid_out = vidioc_g_fmt_vid_out,
+ .vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out,
+ .vidioc_enum_fmt_vid_out_mplane = vidioc_enum_fmt_vid_mplane,
+ .vidioc_g_fmt_vid_out_mplane = vidioc_g_fmt_vid_out_mplane,
+ .vidioc_try_fmt_vid_out_mplane = vidioc_try_fmt_vid_out_mplane,
+ .vidioc_s_fmt_vid_out_mplane = vidioc_s_fmt_vid_out_mplane,
+
+ .vidioc_g_selection = vidioc_g_selection,
+ .vidioc_s_selection = vidioc_s_selection,
+ .vidioc_cropcap = vidioc_cropcap,
+
+ .vidioc_g_fmt_vbi_cap = vidioc_g_fmt_vbi_cap,
+ .vidioc_try_fmt_vbi_cap = vidioc_g_fmt_vbi_cap,
+ .vidioc_s_fmt_vbi_cap = vidioc_s_fmt_vbi_cap,
+
+ .vidioc_g_fmt_sliced_vbi_cap = vidioc_g_fmt_sliced_vbi_cap,
+ .vidioc_try_fmt_sliced_vbi_cap = vidioc_try_fmt_sliced_vbi_cap,
+ .vidioc_s_fmt_sliced_vbi_cap = vidioc_s_fmt_sliced_vbi_cap,
+ .vidioc_g_sliced_vbi_cap = vidioc_g_sliced_vbi_cap,
+
+ .vidioc_g_fmt_vbi_out = vidioc_g_fmt_vbi_out,
+ .vidioc_try_fmt_vbi_out = vidioc_g_fmt_vbi_out,
+ .vidioc_s_fmt_vbi_out = vidioc_s_fmt_vbi_out,
+
+ .vidioc_g_fmt_sliced_vbi_out = vidioc_g_fmt_sliced_vbi_out,
+ .vidioc_try_fmt_sliced_vbi_out = vidioc_try_fmt_sliced_vbi_out,
+ .vidioc_s_fmt_sliced_vbi_out = vidioc_s_fmt_sliced_vbi_out,
+
+ .vidioc_enum_fmt_sdr_cap = vidioc_enum_fmt_sdr_cap,
+ .vidioc_g_fmt_sdr_cap = vidioc_g_fmt_sdr_cap,
+ .vidioc_try_fmt_sdr_cap = vidioc_g_fmt_sdr_cap,
+ .vidioc_s_fmt_sdr_cap = vidioc_g_fmt_sdr_cap,
+
+ .vidioc_overlay = vidioc_overlay,
+ .vidioc_enum_framesizes = vidioc_enum_framesizes,
+ .vidioc_enum_frameintervals = vidioc_enum_frameintervals,
+ .vidioc_g_parm = vidioc_g_parm,
+ .vidioc_s_parm = vidioc_s_parm,
+
+ .vidioc_enum_fmt_vid_overlay = vidioc_enum_fmt_vid_overlay,
+ .vidioc_g_fmt_vid_overlay = vidioc_g_fmt_vid_overlay,
+ .vidioc_try_fmt_vid_overlay = vidioc_try_fmt_vid_overlay,
+ .vidioc_s_fmt_vid_overlay = vidioc_s_fmt_vid_overlay,
+ .vidioc_g_fmt_vid_out_overlay = vidioc_g_fmt_vid_out_overlay,
+ .vidioc_try_fmt_vid_out_overlay = vidioc_try_fmt_vid_out_overlay,
+ .vidioc_s_fmt_vid_out_overlay = vidioc_s_fmt_vid_out_overlay,
+ .vidioc_g_fbuf = vidioc_g_fbuf,
+ .vidioc_s_fbuf = vidioc_s_fbuf,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+/* Not yet .vidioc_expbuf = vb2_ioctl_expbuf,*/
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_enum_input = vidioc_enum_input,
+ .vidioc_g_input = vidioc_g_input,
+ .vidioc_s_input = vidioc_s_input,
+ .vidioc_s_audio = vidioc_s_audio,
+ .vidioc_g_audio = vidioc_g_audio,
+ .vidioc_enumaudio = vidioc_enumaudio,
+ .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_g_frequency = vidioc_g_frequency,
+ .vidioc_s_tuner = vidioc_s_tuner,
+ .vidioc_g_tuner = vidioc_g_tuner,
+ .vidioc_s_modulator = vidioc_s_modulator,
+ .vidioc_g_modulator = vidioc_g_modulator,
+ .vidioc_s_hw_freq_seek = vidioc_s_hw_freq_seek,
+ .vidioc_enum_freq_bands = vidioc_enum_freq_bands,
+
+ .vidioc_enum_output = vidioc_enum_output,
+ .vidioc_g_output = vidioc_g_output,
+ .vidioc_s_output = vidioc_s_output,
+ .vidioc_s_audout = vidioc_s_audout,
+ .vidioc_g_audout = vidioc_g_audout,
+ .vidioc_enumaudout = vidioc_enumaudout,
+
+ .vidioc_querystd = vidioc_querystd,
+ .vidioc_g_std = vidioc_g_std,
+ .vidioc_s_std = vidioc_s_std,
+ .vidioc_s_dv_timings = vidioc_s_dv_timings,
+ .vidioc_g_dv_timings = vidioc_g_dv_timings,
+ .vidioc_query_dv_timings = vidioc_query_dv_timings,
+ .vidioc_enum_dv_timings = vidioc_enum_dv_timings,
+ .vidioc_dv_timings_cap = vidioc_dv_timings_cap,
+ .vidioc_g_edid = vidioc_g_edid,
+ .vidioc_s_edid = vidioc_s_edid,
+
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = vidioc_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/* -----------------------------------------------------------------
+ Initialization and module stuff
+ ------------------------------------------------------------------*/
+
+static int __init vivid_create_instance(int inst)
+{
+ static const struct v4l2_dv_timings def_dv_timings =
+ V4L2_DV_BT_CEA_1280X720P60;
+ unsigned in_type_counter[4] = { 0, 0, 0, 0 };
+ unsigned out_type_counter[4] = { 0, 0, 0, 0 };
+ int ccs_cap = ccs_cap_mode[inst];
+ int ccs_out = ccs_out_mode[inst];
+ bool has_tuner;
+ bool has_modulator;
+ struct vivid_dev *dev;
+ struct video_device *vfd;
+ struct vb2_queue *q;
+ unsigned node_type = node_types[inst];
+ v4l2_std_id tvnorms_cap = 0, tvnorms_out = 0;
+ int ret;
+ int i;
+
+ /* allocate main vivid state structure */
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->inst = inst;
+
+ /* register v4l2_device */
+ snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name),
+ "%s-%03d", VIVID_MODULE_NAME, inst);
+ ret = v4l2_device_register(NULL, &dev->v4l2_dev);
+ if (ret)
+ goto free_dev;
+
+ /* start detecting feature set */
+
+ /* do we use single- or multi-planar? */
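+ /*
+ * multiplanar module option: 0 alternates single/multi per instance,
+ * 1 selects the single-planar API, anything higher selects multi-planar.
+ */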
+ if (multiplanar[inst] == 0)
+ dev->multiplanar = inst & 1;
+ else
+ dev->multiplanar = multiplanar[inst] > 1;
+ v4l2_info(&dev->v4l2_dev, "using %splanar format API\n",
+ dev->multiplanar ? "multi" : "single ");
+
+ /* how many inputs do we have and of what type? */
+ dev->num_inputs = num_inputs[inst];
+ if (dev->num_inputs < 1)
+ dev->num_inputs = 1;
+ if (dev->num_inputs >= MAX_INPUTS)
+ dev->num_inputs = MAX_INPUTS;
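+ /* input_types packs two bits per input: 0 webcam, 1 TV, 2 S-Video, 3 HDMI */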
+ for (i = 0; i < dev->num_inputs; i++) {
+ dev->input_type[i] = (input_types[inst] >> (i * 2)) & 0x3;
+ dev->input_name_counter[i] = in_type_counter[dev->input_type[i]]++;
+ }
+ dev->has_audio_inputs = in_type_counter[TV] && in_type_counter[SVID];
+
+ /* how many outputs do we have and of what type? */
+ dev->num_outputs = num_outputs[inst];
+ if (dev->num_outputs < 1)
+ dev->num_outputs = 1;
+ if (dev->num_outputs >= MAX_OUTPUTS)
+ dev->num_outputs = MAX_OUTPUTS;
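+ /* output_types packs one bit per output: 0 is S-Video, 1 is HDMI */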
+ for (i = 0; i < dev->num_outputs; i++) {
+ dev->output_type[i] = ((output_types[inst] >> i) & 1) ? HDMI : SVID;
+ dev->output_name_counter[i] = out_type_counter[dev->output_type[i]]++;
+ }
+ dev->has_audio_outputs = out_type_counter[SVID];
+
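+ /*
+ * node_type is a bitmask selecting which device nodes this instance gets:
+ * bit 0 video capture, bits 2-3 raw/sliced VBI capture, bit 4 radio
+ * receiver, bit 5 SDR capture, bit 8 video output, bits 10-11 raw/sliced
+ * VBI output, bit 12 radio transmitter, bit 16 test framebuffer.
+ */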
+ /* do we create a video capture device? */
+ dev->has_vid_cap = node_type & 0x0001;
+
+ /* do we create a vbi capture device? */
+ if (in_type_counter[TV] || in_type_counter[SVID]) {
+ dev->has_raw_vbi_cap = node_type & 0x0004;
+ dev->has_sliced_vbi_cap = node_type & 0x0008;
+ dev->has_vbi_cap = dev->has_raw_vbi_cap | dev->has_sliced_vbi_cap;
+ }
+
+ /* do we create a video output device? */
+ dev->has_vid_out = node_type & 0x0100;
+
+ /* do we create a vbi output device? */
+ if (out_type_counter[SVID]) {
+ dev->has_raw_vbi_out = node_type & 0x0400;
+ dev->has_sliced_vbi_out = node_type & 0x0800;
+ dev->has_vbi_out = dev->has_raw_vbi_out | dev->has_sliced_vbi_out;
+ }
+
+ /* do we create a radio receiver device? */
+ dev->has_radio_rx = node_type & 0x0010;
+
+ /* do we create a radio transmitter device? */
+ dev->has_radio_tx = node_type & 0x1000;
+
+ /* do we create a software defined radio capture device? */
+ dev->has_sdr_cap = node_type & 0x0020;
+
+ /* do we have a tuner? */
+ has_tuner = ((dev->has_vid_cap || dev->has_vbi_cap) && in_type_counter[TV]) ||
+ dev->has_radio_rx || dev->has_sdr_cap;
+
+ /* do we have a modulator? */
+ has_modulator = dev->has_radio_tx;
+
+ if (dev->has_vid_cap)
+ /* do we have a framebuffer for overlay testing? */
+ dev->has_fb = node_type & 0x10000;
+
+ /* can we do crop/compose/scaling while capturing? */
+ if (no_error_inj && ccs_cap == -1)
+ ccs_cap = 7;
+
+ /* if ccs_cap == -1, then the user can select it using controls */
+ if (ccs_cap != -1) {
+ dev->has_crop_cap = ccs_cap & 1;
+ dev->has_compose_cap = ccs_cap & 2;
+ dev->has_scaler_cap = ccs_cap & 4;
+ v4l2_info(&dev->v4l2_dev, "Capture Crop: %c Compose: %c Scaler: %c\n",
+ dev->has_crop_cap ? 'Y' : 'N',
+ dev->has_compose_cap ? 'Y' : 'N',
+ dev->has_scaler_cap ? 'Y' : 'N');
+ }
+
+ /* can we do crop/compose/scaling with video output? */
+ if (no_error_inj && ccs_out == -1)
+ ccs_out = 7;
+
+ /* if ccs_out == -1, then the user can select it using controls */
+ if (ccs_out != -1) {
+ dev->has_crop_out = ccs_out & 1;
+ dev->has_compose_out = ccs_out & 2;
+ dev->has_scaler_out = ccs_out & 4;
+ v4l2_info(&dev->v4l2_dev, "Output Crop: %c Compose: %c Scaler: %c\n",
+ dev->has_crop_out ? 'Y' : 'N',
+ dev->has_compose_out ? 'Y' : 'N',
+ dev->has_scaler_out ? 'Y' : 'N');
+ }
+
+ /* end detecting feature set */
+
+ if (dev->has_vid_cap) {
+ /* set up the capabilities of the video capture device */
+ dev->vid_cap_caps = dev->multiplanar ?
+ V4L2_CAP_VIDEO_CAPTURE_MPLANE :
+ V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OVERLAY;
+ dev->vid_cap_caps |= V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
+ if (dev->has_audio_inputs)
+ dev->vid_cap_caps |= V4L2_CAP_AUDIO;
+ if (in_type_counter[TV])
+ dev->vid_cap_caps |= V4L2_CAP_TUNER;
+ }
+ if (dev->has_vid_out) {
+ /* set up the capabilities of the video output device */
+ dev->vid_out_caps = dev->multiplanar ?
+ V4L2_CAP_VIDEO_OUTPUT_MPLANE :
+ V4L2_CAP_VIDEO_OUTPUT;
+ if (dev->has_fb)
+ dev->vid_out_caps |= V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
+ dev->vid_out_caps |= V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
+ if (dev->has_audio_outputs)
+ dev->vid_out_caps |= V4L2_CAP_AUDIO;
+ }
+ if (dev->has_vbi_cap) {
+ /* set up the capabilities of the vbi capture device */
+ dev->vbi_cap_caps = (dev->has_raw_vbi_cap ? V4L2_CAP_VBI_CAPTURE : 0) |
+ (dev->has_sliced_vbi_cap ? V4L2_CAP_SLICED_VBI_CAPTURE : 0);
+ dev->vbi_cap_caps |= V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
+ if (dev->has_audio_inputs)
+ dev->vbi_cap_caps |= V4L2_CAP_AUDIO;
+ if (in_type_counter[TV])
+ dev->vbi_cap_caps |= V4L2_CAP_TUNER;
+ }
+ if (dev->has_vbi_out) {
+ /* set up the capabilities of the vbi output device */
+ dev->vbi_out_caps = (dev->has_raw_vbi_out ? V4L2_CAP_VBI_OUTPUT : 0) |
+ (dev->has_sliced_vbi_out ? V4L2_CAP_SLICED_VBI_OUTPUT : 0);
+ dev->vbi_out_caps |= V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
+ if (dev->has_audio_outputs)
+ dev->vbi_out_caps |= V4L2_CAP_AUDIO;
+ }
+ if (dev->has_sdr_cap) {
+ /* set up the capabilities of the sdr capture device */
+ dev->sdr_cap_caps = V4L2_CAP_SDR_CAPTURE | V4L2_CAP_TUNER;
+ dev->sdr_cap_caps |= V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
+ }
+ /* set up the capabilities of the radio receiver device */
+ if (dev->has_radio_rx)
+ dev->radio_rx_caps = V4L2_CAP_RADIO | V4L2_CAP_RDS_CAPTURE |
+ V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_TUNER |
+ V4L2_CAP_READWRITE;
+ /* set up the capabilities of the radio transmitter device */
+ if (dev->has_radio_tx)
+ dev->radio_tx_caps = V4L2_CAP_RDS_OUTPUT | V4L2_CAP_MODULATOR |
+ V4L2_CAP_READWRITE;
+
+ /* initialize the test pattern generator */
+ tpg_init(&dev->tpg, 640, 360);
+ if (tpg_alloc(&dev->tpg, MAX_ZOOM * MAX_WIDTH))
+ goto free_dev;
+ dev->scaled_line = vzalloc(MAX_ZOOM * MAX_WIDTH);
+ if (!dev->scaled_line)
+ goto free_dev;
+ dev->blended_line = vzalloc(MAX_ZOOM * MAX_WIDTH);
+ if (!dev->blended_line)
+ goto free_dev;
+
+ /* load the edid */
+ dev->edid = vmalloc(256 * 128);
+ if (!dev->edid)
+ goto free_dev;
+
+ /* create a string array containing the names of all the preset timings */
+ while (v4l2_dv_timings_presets[dev->query_dv_timings_size].bt.width)
+ dev->query_dv_timings_size++;
+ dev->query_dv_timings_qmenu = kmalloc(dev->query_dv_timings_size *
+ (sizeof(void *) + 32), GFP_KERNEL);
+ if (dev->query_dv_timings_qmenu == NULL)
+ goto free_dev;
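+ /*
+ * The allocation above holds the pointer array followed by one 32-byte
+ * string slot per preset; point each entry at its slot and format it as
+ * <width>x<height><i|p><frames per second>.
+ */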
+ for (i = 0; i < dev->query_dv_timings_size; i++) {
+ const struct v4l2_bt_timings *bt = &v4l2_dv_timings_presets[i].bt;
+ char *p = (char *)&dev->query_dv_timings_qmenu[dev->query_dv_timings_size];
+ u32 htot, vtot;
+
+ p += i * 32;
+ dev->query_dv_timings_qmenu[i] = p;
+
+ htot = V4L2_DV_BT_FRAME_WIDTH(bt);
+ vtot = V4L2_DV_BT_FRAME_HEIGHT(bt);
+ snprintf(p, 32, "%ux%u%s%u",
+ bt->width, bt->height, bt->interlaced ? "i" : "p",
+ (u32)bt->pixelclock / (htot * vtot));
+ }
+
+ /* disable invalid ioctls based on the feature set */
+ if (!dev->has_audio_inputs) {
+ v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_AUDIO);
+ v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_G_AUDIO);
+ v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_ENUMAUDIO);
+ v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_S_AUDIO);
+ v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_G_AUDIO);
+ v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_ENUMAUDIO);
+ }
+ if (!dev->has_audio_outputs) {
+ v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_S_AUDOUT);
+ v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_G_AUDOUT);
+ v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_ENUMAUDOUT);
+ v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_S_AUDOUT);
+ v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_G_AUDOUT);
+ v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_ENUMAUDOUT);
+ }
+ if (!in_type_counter[TV] && !in_type_counter[SVID]) {
+ v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_STD);
+ v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_G_STD);
+ v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_ENUMSTD);
+ v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_QUERYSTD);
+ }
+ if (!out_type_counter[SVID]) {
+ v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_S_STD);
+ v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_G_STD);
+ v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_ENUMSTD);
+ }
+ if (!has_tuner && !has_modulator) {
+ v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_FREQUENCY);
+ v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_G_FREQUENCY);
+ v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_S_FREQUENCY);
+ v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_G_FREQUENCY);
+ }
+ if (!has_tuner) {
+ v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_TUNER);
+ v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_G_TUNER);
+ v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_S_TUNER);
+ v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_G_TUNER);
+ }
+ if (in_type_counter[HDMI] == 0) {
+ v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_EDID);
+ v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_G_EDID);
+ v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_DV_TIMINGS_CAP);
+ v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_G_DV_TIMINGS);
+ v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_DV_TIMINGS);
+ v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_ENUM_DV_TIMINGS);
+ v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_QUERY_DV_TIMINGS);
+ }
+ if (out_type_counter[HDMI] == 0) {
+ v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_G_EDID);
+ v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_DV_TIMINGS_CAP);
+ v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_G_DV_TIMINGS);
+ v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_S_DV_TIMINGS);
+ v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_ENUM_DV_TIMINGS);
+ }
+ if (!dev->has_fb) {
+ v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_G_FBUF);
+ v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_S_FBUF);
+ v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_OVERLAY);
+ }
+ v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_HW_FREQ_SEEK);
+ v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_S_HW_FREQ_SEEK);
+ v4l2_disable_ioctl(&dev->sdr_cap_dev, VIDIOC_S_HW_FREQ_SEEK);
+ v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_S_FREQUENCY);
+ v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_G_FREQUENCY);
+ v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_ENUM_FRAMESIZES);
+ v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_ENUM_FRAMEINTERVALS);
+ v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_S_FREQUENCY);
+ v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_G_FREQUENCY);
+
+ /* configure internal data */
+ dev->fmt_cap = &vivid_formats[0];
+ dev->fmt_out = &vivid_formats[0];
+ if (!dev->multiplanar)
+ vivid_formats[0].data_offset[0] = 0;
+ dev->webcam_size_idx = 1;
+ dev->webcam_ival_idx = 3;
+ tpg_s_fourcc(&dev->tpg, dev->fmt_cap->fourcc);
+ dev->std_cap = V4L2_STD_PAL;
+ dev->std_out = V4L2_STD_PAL;
+ if (dev->input_type[0] == TV || dev->input_type[0] == SVID)
+ tvnorms_cap = V4L2_STD_ALL;
+ if (dev->output_type[0] == SVID)
+ tvnorms_out = V4L2_STD_ALL;
+ dev->dv_timings_cap = def_dv_timings;
+ dev->dv_timings_out = def_dv_timings;
+ dev->tv_freq = 2804 /* 175.25 * 16 */;
+ dev->tv_audmode = V4L2_TUNER_MODE_STEREO;
+ dev->tv_field_cap = V4L2_FIELD_INTERLACED;
+ dev->tv_field_out = V4L2_FIELD_INTERLACED;
+ dev->radio_rx_freq = 95000 * 16;
+ dev->radio_rx_audmode = V4L2_TUNER_MODE_STEREO;
+ if (dev->has_radio_tx) {
+ dev->radio_tx_freq = 95500 * 16;
+ dev->radio_rds_loop = false;
+ }
+ dev->radio_tx_subchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_RDS;
+ dev->sdr_adc_freq = 300000;
+ dev->sdr_fm_freq = 50000000;
+ dev->edid_max_blocks = dev->edid_blocks = 2;
+ memcpy(dev->edid, vivid_hdmi_edid, sizeof(vivid_hdmi_edid));
+ ktime_get_ts(&dev->radio_rds_init_ts);
+
+ /* create all controls */
+ ret = vivid_create_controls(dev, ccs_cap == -1, ccs_out == -1, no_error_inj,
+ in_type_counter[TV] || in_type_counter[SVID] ||
+ out_type_counter[SVID],
+ in_type_counter[HDMI] || out_type_counter[HDMI]);
+ if (ret)
+ goto unreg_dev;
+
+ /*
+ * update the capture and output formats to do a proper initial
+ * configuration.
+ */
+ vivid_update_format_cap(dev, false);
+ vivid_update_format_out(dev);
+
+ v4l2_ctrl_handler_setup(&dev->ctrl_hdl_vid_cap);
+ v4l2_ctrl_handler_setup(&dev->ctrl_hdl_vid_out);
+ v4l2_ctrl_handler_setup(&dev->ctrl_hdl_vbi_cap);
+ v4l2_ctrl_handler_setup(&dev->ctrl_hdl_vbi_out);
+ v4l2_ctrl_handler_setup(&dev->ctrl_hdl_radio_rx);
+ v4l2_ctrl_handler_setup(&dev->ctrl_hdl_radio_tx);
+ v4l2_ctrl_handler_setup(&dev->ctrl_hdl_sdr_cap);
+
+ /* initialize overlay */
+ dev->fb_cap.fmt.width = dev->src_rect.width;
+ dev->fb_cap.fmt.height = dev->src_rect.height;
+ dev->fb_cap.fmt.pixelformat = dev->fmt_cap->fourcc;
+ dev->fb_cap.fmt.bytesperline = dev->src_rect.width * tpg_g_twopixelsize(&dev->tpg, 0) / 2;
+ dev->fb_cap.fmt.sizeimage = dev->src_rect.height * dev->fb_cap.fmt.bytesperline;
+
+ /* initialize locks */
+ spin_lock_init(&dev->slock);
+ mutex_init(&dev->mutex);
+
+ /* init dma queues */
+ INIT_LIST_HEAD(&dev->vid_cap_active);
+ INIT_LIST_HEAD(&dev->vid_out_active);
+ INIT_LIST_HEAD(&dev->vbi_cap_active);
+ INIT_LIST_HEAD(&dev->vbi_out_active);
+ INIT_LIST_HEAD(&dev->sdr_cap_active);
+
+ /* start creating the vb2 queues */
+ if (dev->has_vid_cap) {
+ /* initialize vid_cap queue */
+ q = &dev->vb_vid_cap_q;
+ q->type = dev->multiplanar ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
+ V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct vivid_buffer);
+ q->ops = &vivid_vid_cap_qops;
+ q->mem_ops = &vb2_vmalloc_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 2;
+
+ ret = vb2_queue_init(q);
+ if (ret)
+ goto unreg_dev;
+ }
+
+ if (dev->has_vid_out) {
+ /* initialize vid_out queue */
+ q = &dev->vb_vid_out_q;
+ q->type = dev->multiplanar ? V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE :
+ V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_WRITE;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct vivid_buffer);
+ q->ops = &vivid_vid_out_qops;
+ q->mem_ops = &vb2_vmalloc_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 2;
+
+ ret = vb2_queue_init(q);
+ if (ret)
+ goto unreg_dev;
+ }
+
+ if (dev->has_vbi_cap) {
+ /* initialize vbi_cap queue */
+ q = &dev->vb_vbi_cap_q;
+ q->type = dev->has_raw_vbi_cap ? V4L2_BUF_TYPE_VBI_CAPTURE :
+ V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct vivid_buffer);
+ q->ops = &vivid_vbi_cap_qops;
+ q->mem_ops = &vb2_vmalloc_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 2;
+
+ ret = vb2_queue_init(q);
+ if (ret)
+ goto unreg_dev;
+ }
+
+ if (dev->has_vbi_out) {
+ /* initialize vbi_out queue */
+ q = &dev->vb_vbi_out_q;
+ q->type = dev->has_raw_vbi_out ? V4L2_BUF_TYPE_VBI_OUTPUT :
+ V4L2_BUF_TYPE_SLICED_VBI_OUTPUT;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_WRITE;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct vivid_buffer);
+ q->ops = &vivid_vbi_out_qops;
+ q->mem_ops = &vb2_vmalloc_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 2;
+
+ ret = vb2_queue_init(q);
+ if (ret)
+ goto unreg_dev;
+ }
+
+ if (dev->has_sdr_cap) {
+ /* initialize sdr_cap queue */
+ q = &dev->vb_sdr_cap_q;
+ q->type = V4L2_BUF_TYPE_SDR_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct vivid_buffer);
+ q->ops = &vivid_sdr_cap_qops;
+ q->mem_ops = &vb2_vmalloc_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 8;
+
+ ret = vb2_queue_init(q);
+ if (ret)
+ goto unreg_dev;
+ }
+
+ if (dev->has_fb) {
+ /* Create framebuffer for testing capture/output overlay */
+ ret = vivid_fb_init(dev);
+ if (ret)
+ goto unreg_dev;
+ v4l2_info(&dev->v4l2_dev, "Framebuffer device registered as fb%d\n",
+ dev->fb_info.node);
+ }
+
+ /* finally start creating the device nodes */
+ if (dev->has_vid_cap) {
+ vfd = &dev->vid_cap_dev;
+ strlcpy(vfd->name, "vivid-vid-cap", sizeof(vfd->name));
+ vfd->fops = &vivid_fops;
+ vfd->ioctl_ops = &vivid_ioctl_ops;
+ vfd->release = video_device_release_empty;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ vfd->queue = &dev->vb_vid_cap_q;
+ vfd->tvnorms = tvnorms_cap;
+
+ /*
+ * Provide a mutex to v4l2 core. It will be used to protect
+ * all fops and v4l2 ioctls.
+ */
+ vfd->lock = &dev->mutex;
+ video_set_drvdata(vfd, dev);
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, vid_cap_nr[inst]);
+ if (ret < 0)
+ goto unreg_dev;
+ v4l2_info(&dev->v4l2_dev, "V4L2 capture device registered as %s\n",
+ video_device_node_name(vfd));
+ }
+
+ if (dev->has_vid_out) {
+ vfd = &dev->vid_out_dev;
+ strlcpy(vfd->name, "vivid-vid-out", sizeof(vfd->name));
+ vfd->vfl_dir = VFL_DIR_TX;
+ vfd->fops = &vivid_fops;
+ vfd->ioctl_ops = &vivid_ioctl_ops;
+ vfd->release = video_device_release_empty;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ vfd->queue = &dev->vb_vid_out_q;
+ vfd->tvnorms = tvnorms_out;
+
+ /*
+ * Provide a mutex to v4l2 core. It will be used to protect
+ * all fops and v4l2 ioctls.
+ */
+ vfd->lock = &dev->mutex;
+ video_set_drvdata(vfd, dev);
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, vid_out_nr[inst]);
+ if (ret < 0)
+ goto unreg_dev;
+ v4l2_info(&dev->v4l2_dev, "V4L2 output device registered as %s\n",
+ video_device_node_name(vfd));
+ }
+
+ if (dev->has_vbi_cap) {
+ vfd = &dev->vbi_cap_dev;
+ strlcpy(vfd->name, "vivid-vbi-cap", sizeof(vfd->name));
+ vfd->fops = &vivid_fops;
+ vfd->ioctl_ops = &vivid_ioctl_ops;
+ vfd->release = video_device_release_empty;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ vfd->queue = &dev->vb_vbi_cap_q;
+ vfd->lock = &dev->mutex;
+ vfd->tvnorms = tvnorms_cap;
+ video_set_drvdata(vfd, dev);
+
+ ret = video_register_device(vfd, VFL_TYPE_VBI, vbi_cap_nr[inst]);
+ if (ret < 0)
+ goto unreg_dev;
+ v4l2_info(&dev->v4l2_dev, "V4L2 capture device registered as %s, supports %s VBI\n",
+ video_device_node_name(vfd),
+ (dev->has_raw_vbi_cap && dev->has_sliced_vbi_cap) ?
+ "raw and sliced" :
+ (dev->has_raw_vbi_cap ? "raw" : "sliced"));
+ }
+
+ if (dev->has_vbi_out) {
+ vfd = &dev->vbi_out_dev;
+ strlcpy(vfd->name, "vivid-vbi-out", sizeof(vfd->name));
+ vfd->vfl_dir = VFL_DIR_TX;
+ vfd->fops = &vivid_fops;
+ vfd->ioctl_ops = &vivid_ioctl_ops;
+ vfd->release = video_device_release_empty;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ vfd->queue = &dev->vb_vbi_out_q;
+ vfd->lock = &dev->mutex;
+ vfd->tvnorms = tvnorms_out;
+ video_set_drvdata(vfd, dev);
+
+ ret = video_register_device(vfd, VFL_TYPE_VBI, vbi_out_nr[inst]);
+ if (ret < 0)
+ goto unreg_dev;
+ v4l2_info(&dev->v4l2_dev, "V4L2 output device registered as %s, supports %s VBI\n",
+ video_device_node_name(vfd),
+ (dev->has_raw_vbi_out && dev->has_sliced_vbi_out) ?
+ "raw and sliced" :
+ (dev->has_raw_vbi_out ? "raw" : "sliced"));
+ }
+
+ if (dev->has_sdr_cap) {
+ vfd = &dev->sdr_cap_dev;
+ strlcpy(vfd->name, "vivid-sdr-cap", sizeof(vfd->name));
+ vfd->fops = &vivid_fops;
+ vfd->ioctl_ops = &vivid_ioctl_ops;
+ vfd->release = video_device_release_empty;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ vfd->queue = &dev->vb_sdr_cap_q;
+ vfd->lock = &dev->mutex;
+ video_set_drvdata(vfd, dev);
+
+ ret = video_register_device(vfd, VFL_TYPE_SDR, sdr_cap_nr[inst]);
+ if (ret < 0)
+ goto unreg_dev;
+ v4l2_info(&dev->v4l2_dev, "V4L2 capture device registered as %s\n",
+ video_device_node_name(vfd));
+ }
+
+ if (dev->has_radio_rx) {
+ vfd = &dev->radio_rx_dev;
+ strlcpy(vfd->name, "vivid-rad-rx", sizeof(vfd->name));
+ vfd->fops = &vivid_radio_fops;
+ vfd->ioctl_ops = &vivid_ioctl_ops;
+ vfd->release = video_device_release_empty;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ vfd->lock = &dev->mutex;
+ video_set_drvdata(vfd, dev);
+
+ ret = video_register_device(vfd, VFL_TYPE_RADIO, radio_rx_nr[inst]);
+ if (ret < 0)
+ goto unreg_dev;
+ v4l2_info(&dev->v4l2_dev, "V4L2 receiver device registered as %s\n",
+ video_device_node_name(vfd));
+ }
+
+ if (dev->has_radio_tx) {
+ vfd = &dev->radio_tx_dev;
+ strlcpy(vfd->name, "vivid-rad-tx", sizeof(vfd->name));
+ vfd->vfl_dir = VFL_DIR_TX;
+ vfd->fops = &vivid_radio_fops;
+ vfd->ioctl_ops = &vivid_ioctl_ops;
+ vfd->release = video_device_release_empty;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ vfd->lock = &dev->mutex;
+ video_set_drvdata(vfd, dev);
+
+ ret = video_register_device(vfd, VFL_TYPE_RADIO, radio_tx_nr[inst]);
+ if (ret < 0)
+ goto unreg_dev;
+ v4l2_info(&dev->v4l2_dev, "V4L2 transmitter device registered as %s\n",
+ video_device_node_name(vfd));
+ }
+
+ /* Now that everything is fine, let's add it to the device list */
+ vivid_devs[inst] = dev;
+
+ return 0;
+
+unreg_dev:
+ video_unregister_device(&dev->radio_tx_dev);
+ video_unregister_device(&dev->radio_rx_dev);
+ video_unregister_device(&dev->sdr_cap_dev);
+ video_unregister_device(&dev->vbi_out_dev);
+ video_unregister_device(&dev->vbi_cap_dev);
+ video_unregister_device(&dev->vid_out_dev);
+ video_unregister_device(&dev->vid_cap_dev);
+ vivid_free_controls(dev);
+ v4l2_device_unregister(&dev->v4l2_dev);
+free_dev:
+ vfree(dev->scaled_line);
+ vfree(dev->blended_line);
+ vfree(dev->edid);
+ tpg_free(&dev->tpg);
+ kfree(dev->query_dv_timings_qmenu);
+ kfree(dev);
+ return ret;
+}
+
+/* This routine allocates from 1 to n_devs virtual driver instances.
+
+ The actual number of instances created depends on how many of them succeed
+ in initializing. It is also limited by the maximum number of devices that
+ videodev supports, which is equal to VIDEO_NUM_DEVICES.
+ */
+static int __init vivid_init(void)
+{
+ const struct font_desc *font = find_font("VGA8x16");
+ int ret = 0, i;
+
+ if (font == NULL) {
+ pr_err("vivid: could not find font\n");
+ return -ENODEV;
+ }
+
+ tpg_set_font(font->data);
+
+ n_devs = clamp_t(unsigned, n_devs, 1, VIVID_MAX_DEVS);
+
+ for (i = 0; i < n_devs; i++) {
+ ret = vivid_create_instance(i);
+ if (ret) {
+ /* If some instantiations succeeded, keep driver */
+ if (i)
+ ret = 0;
+ break;
+ }
+ }
+
+ if (ret < 0) {
+ pr_err("vivid: error %d while loading driver\n", ret);
+ return ret;
+ }
+
+ /* n_devs will reflect the actual number of allocated devices */
+ n_devs = i;
+
+ return ret;
+}
+
+static void __exit vivid_exit(void)
+{
+ struct vivid_dev *dev;
+ unsigned i;
+
+ for (i = 0; vivid_devs[i]; i++) {
+ dev = vivid_devs[i];
+
+ if (dev->has_vid_cap) {
+ v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
+ video_device_node_name(&dev->vid_cap_dev));
+ video_unregister_device(&dev->vid_cap_dev);
+ }
+ if (dev->has_vid_out) {
+ v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
+ video_device_node_name(&dev->vid_out_dev));
+ video_unregister_device(&dev->vid_out_dev);
+ }
+ if (dev->has_vbi_cap) {
+ v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
+ video_device_node_name(&dev->vbi_cap_dev));
+ video_unregister_device(&dev->vbi_cap_dev);
+ }
+ if (dev->has_vbi_out) {
+ v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
+ video_device_node_name(&dev->vbi_out_dev));
+ video_unregister_device(&dev->vbi_out_dev);
+ }
+ if (dev->has_sdr_cap) {
+ v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
+ video_device_node_name(&dev->sdr_cap_dev));
+ video_unregister_device(&dev->sdr_cap_dev);
+ }
+ if (dev->has_radio_rx) {
+ v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
+ video_device_node_name(&dev->radio_rx_dev));
+ video_unregister_device(&dev->radio_rx_dev);
+ }
+ if (dev->has_radio_tx) {
+ v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
+ video_device_node_name(&dev->radio_tx_dev));
+ video_unregister_device(&dev->radio_tx_dev);
+ }
+ if (dev->has_fb) {
+ v4l2_info(&dev->v4l2_dev, "unregistering fb%d\n",
+ dev->fb_info.node);
+ unregister_framebuffer(&dev->fb_info);
+ vivid_fb_release_buffers(dev);
+ }
+ v4l2_device_unregister(&dev->v4l2_dev);
+ vivid_free_controls(dev);
+ vfree(dev->scaled_line);
+ vfree(dev->blended_line);
+ vfree(dev->edid);
+ vfree(dev->bitmap_cap);
+ vfree(dev->bitmap_out);
+ tpg_free(&dev->tpg);
+ kfree(dev->query_dv_timings_qmenu);
+ kfree(dev);
+ vivid_devs[i] = NULL;
+ }
+}
+
+module_init(vivid_init);
+module_exit(vivid_exit);
diff --git a/drivers/media/platform/vivid/vivid-core.h b/drivers/media/platform/vivid/vivid-core.h
new file mode 100644
index 000000000000..811c286491a5
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-core.h
@@ -0,0 +1,520 @@
+/*
+ * vivid-core.h - core data structures
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _VIVID_CORE_H_
+#define _VIVID_CORE_H_
+
+#include <linux/fb.h>
+#include <media/videobuf2-core.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-ctrls.h>
+#include "vivid-tpg.h"
+#include "vivid-rds-gen.h"
+#include "vivid-vbi-gen.h"
+
+#define dprintk(dev, level, fmt, arg...) \
+ v4l2_dbg(level, vivid_debug, &dev->v4l2_dev, fmt, ## arg)
+
+/* Maximum allowed frame rate
+ *
+ * vivid allows setting timeperframe anywhere in the [1/FPS_MAX - FPS_MAX/1] range.
+ *
+ * Ideally FPS_MAX would be infinity, i.e. practically UINT_MAX, but such
+ * extreme values can trigger errors in applications that manipulate them.
+ *
+ * Besides, for tpf < 10ms the image-generation logic would have to change
+ * to avoid producing frames with identical content.
+ */
+#define FPS_MAX 100
+
+/* The maximum number of clip rectangles */
+#define MAX_CLIPS 16
+/* The maximum number of inputs */
+#define MAX_INPUTS 16
+/* The maximum number of outputs */
+#define MAX_OUTPUTS 16
+/* The maximum up or down scaling factor is 4 */
+#define MAX_ZOOM 4
+/* The maximum image width/height are set to 4K DMT */
+#define MAX_WIDTH 4096
+#define MAX_HEIGHT 2160
+/* The minimum image width/height */
+#define MIN_WIDTH 16
+#define MIN_HEIGHT 16
+/* The data_offset of plane 0 for the multiplanar formats */
+#define PLANE0_DATA_OFFSET 128
+
+/* The supported TV frequency range in MHz */
+#define MIN_TV_FREQ (44U * 16U)
+#define MAX_TV_FREQ (958U * 16U)
+
+/* The number of samples returned in every SDR buffer */
+#define SDR_CAP_SAMPLES_PER_BUF 0x4000
+
+/* used by the threads to know when to resync internal counters */
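+/* JIFFIES_RESYNC is 0xf0000000 rounded down to a whole number of days, in jiffies */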
+#define JIFFIES_PER_DAY (3600U * 24U * HZ)
+#define JIFFIES_RESYNC (JIFFIES_PER_DAY * (0xf0000000U / JIFFIES_PER_DAY))
+
+extern const struct v4l2_rect vivid_min_rect;
+extern const struct v4l2_rect vivid_max_rect;
+extern unsigned vivid_debug;
+
+struct vivid_fmt {
+ const char *name;
+ u32 fourcc; /* v4l2 format id */
+ u8 depth;
+ bool is_yuv;
+ bool can_do_overlay;
+ u32 alpha_mask;
+ u8 planes;
+ u32 data_offset[2];
+};
+
+extern struct vivid_fmt vivid_formats[];
+
+/* buffer for one video frame */
+struct vivid_buffer {
+ /* common v4l buffer stuff -- must be first */
+ struct vb2_buffer vb;
+ struct list_head list;
+};
+
+enum vivid_input {
+ WEBCAM,
+ TV,
+ SVID,
+ HDMI,
+};
+
+enum vivid_signal_mode {
+ CURRENT_DV_TIMINGS,
+ CURRENT_STD = CURRENT_DV_TIMINGS,
+ NO_SIGNAL,
+ NO_LOCK,
+ OUT_OF_RANGE,
+ SELECTED_DV_TIMINGS,
+ SELECTED_STD = SELECTED_DV_TIMINGS,
+ CYCLE_DV_TIMINGS,
+ CYCLE_STD = CYCLE_DV_TIMINGS,
+ CUSTOM_DV_TIMINGS,
+};
+
+#define VIVID_INVALID_SIGNAL(mode) \
+ ((mode) == NO_SIGNAL || (mode) == NO_LOCK || (mode) == OUT_OF_RANGE)
+
+struct vivid_dev {
+ unsigned inst;
+ struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler ctrl_hdl_user_gen;
+ struct v4l2_ctrl_handler ctrl_hdl_user_vid;
+ struct v4l2_ctrl_handler ctrl_hdl_user_aud;
+ struct v4l2_ctrl_handler ctrl_hdl_streaming;
+ struct v4l2_ctrl_handler ctrl_hdl_sdtv_cap;
+ struct v4l2_ctrl_handler ctrl_hdl_loop_out;
+ struct video_device vid_cap_dev;
+ struct v4l2_ctrl_handler ctrl_hdl_vid_cap;
+ struct video_device vid_out_dev;
+ struct v4l2_ctrl_handler ctrl_hdl_vid_out;
+ struct video_device vbi_cap_dev;
+ struct v4l2_ctrl_handler ctrl_hdl_vbi_cap;
+ struct video_device vbi_out_dev;
+ struct v4l2_ctrl_handler ctrl_hdl_vbi_out;
+ struct video_device radio_rx_dev;
+ struct v4l2_ctrl_handler ctrl_hdl_radio_rx;
+ struct video_device radio_tx_dev;
+ struct v4l2_ctrl_handler ctrl_hdl_radio_tx;
+ struct video_device sdr_cap_dev;
+ struct v4l2_ctrl_handler ctrl_hdl_sdr_cap;
+ spinlock_t slock;
+ struct mutex mutex;
+
+ /* capabilities */
+ u32 vid_cap_caps;
+ u32 vid_out_caps;
+ u32 vbi_cap_caps;
+ u32 vbi_out_caps;
+ u32 sdr_cap_caps;
+ u32 radio_rx_caps;
+ u32 radio_tx_caps;
+
+ /* supported features */
+ bool multiplanar;
+ unsigned num_inputs;
+ u8 input_type[MAX_INPUTS];
+ u8 input_name_counter[MAX_INPUTS];
+ unsigned num_outputs;
+ u8 output_type[MAX_OUTPUTS];
+ u8 output_name_counter[MAX_OUTPUTS];
+ bool has_audio_inputs;
+ bool has_audio_outputs;
+ bool has_vid_cap;
+ bool has_vid_out;
+ bool has_vbi_cap;
+ bool has_raw_vbi_cap;
+ bool has_sliced_vbi_cap;
+ bool has_vbi_out;
+ bool has_raw_vbi_out;
+ bool has_sliced_vbi_out;
+ bool has_radio_rx;
+ bool has_radio_tx;
+ bool has_sdr_cap;
+ bool has_fb;
+
+ bool can_loop_video;
+
+ /* controls */
+ struct v4l2_ctrl *brightness;
+ struct v4l2_ctrl *contrast;
+ struct v4l2_ctrl *saturation;
+ struct v4l2_ctrl *hue;
+ struct {
+ /* autogain/gain cluster */
+ struct v4l2_ctrl *autogain;
+ struct v4l2_ctrl *gain;
+ };
+ struct v4l2_ctrl *volume;
+ struct v4l2_ctrl *mute;
+ struct v4l2_ctrl *alpha;
+ struct v4l2_ctrl *button;
+ struct v4l2_ctrl *boolean;
+ struct v4l2_ctrl *int32;
+ struct v4l2_ctrl *int64;
+ struct v4l2_ctrl *menu;
+ struct v4l2_ctrl *string;
+ struct v4l2_ctrl *bitmask;
+ struct v4l2_ctrl *int_menu;
+ struct v4l2_ctrl *test_pattern;
+ struct v4l2_ctrl *colorspace;
+ struct v4l2_ctrl *rgb_range_cap;
+ struct v4l2_ctrl *real_rgb_range_cap;
+ struct {
+ /* std_signal_mode/standard cluster */
+ struct v4l2_ctrl *ctrl_std_signal_mode;
+ struct v4l2_ctrl *ctrl_standard;
+ };
+ struct {
+ /* dv_timings_signal_mode/timings cluster */
+ struct v4l2_ctrl *ctrl_dv_timings_signal_mode;
+ struct v4l2_ctrl *ctrl_dv_timings;
+ };
+ struct v4l2_ctrl *ctrl_has_crop_cap;
+ struct v4l2_ctrl *ctrl_has_compose_cap;
+ struct v4l2_ctrl *ctrl_has_scaler_cap;
+ struct v4l2_ctrl *ctrl_has_crop_out;
+ struct v4l2_ctrl *ctrl_has_compose_out;
+ struct v4l2_ctrl *ctrl_has_scaler_out;
+ struct v4l2_ctrl *ctrl_tx_mode;
+ struct v4l2_ctrl *ctrl_tx_rgb_range;
+
+ struct v4l2_ctrl *radio_tx_rds_pi;
+ struct v4l2_ctrl *radio_tx_rds_pty;
+ struct v4l2_ctrl *radio_tx_rds_mono_stereo;
+ struct v4l2_ctrl *radio_tx_rds_art_head;
+ struct v4l2_ctrl *radio_tx_rds_compressed;
+ struct v4l2_ctrl *radio_tx_rds_dyn_pty;
+ struct v4l2_ctrl *radio_tx_rds_ta;
+ struct v4l2_ctrl *radio_tx_rds_tp;
+ struct v4l2_ctrl *radio_tx_rds_ms;
+ struct v4l2_ctrl *radio_tx_rds_psname;
+ struct v4l2_ctrl *radio_tx_rds_radiotext;
+
+ struct v4l2_ctrl *radio_rx_rds_pty;
+ struct v4l2_ctrl *radio_rx_rds_ta;
+ struct v4l2_ctrl *radio_rx_rds_tp;
+ struct v4l2_ctrl *radio_rx_rds_ms;
+ struct v4l2_ctrl *radio_rx_rds_psname;
+ struct v4l2_ctrl *radio_rx_rds_radiotext;
+
+ unsigned input_brightness[MAX_INPUTS];
+ unsigned osd_mode;
+ unsigned button_pressed;
+ bool sensor_hflip;
+ bool sensor_vflip;
+ bool hflip;
+ bool vflip;
+ bool vbi_cap_interlaced;
+ bool loop_video;
+
+ /* Framebuffer */
+ unsigned long video_pbase;
+ void *video_vbase;
+ u32 video_buffer_size;
+ int display_width;
+ int display_height;
+ int display_byte_stride;
+ int bits_per_pixel;
+ int bytes_per_pixel;
+ struct fb_info fb_info;
+ struct fb_var_screeninfo fb_defined;
+ struct fb_fix_screeninfo fb_fix;
+
+ /* Error injection */
+ bool queue_setup_error;
+ bool buf_prepare_error;
+ bool start_streaming_error;
+ bool dqbuf_error;
+ bool seq_wrap;
+ bool time_wrap;
+ __kernel_time_t time_wrap_offset;
+ unsigned perc_dropped_buffers;
+ enum vivid_signal_mode std_signal_mode;
+ unsigned query_std_last;
+ v4l2_std_id query_std;
+ enum tpg_video_aspect std_aspect_ratio;
+
+ enum vivid_signal_mode dv_timings_signal_mode;
+ char **query_dv_timings_qmenu;
+ unsigned query_dv_timings_size;
+ unsigned query_dv_timings_last;
+ unsigned query_dv_timings;
+ enum tpg_video_aspect dv_timings_aspect_ratio;
+
+ /* Input */
+ unsigned input;
+ v4l2_std_id std_cap;
+ struct v4l2_dv_timings dv_timings_cap;
+ u32 service_set_cap;
+ struct vivid_vbi_gen_data vbi_gen;
+ u8 *edid;
+ unsigned edid_blocks;
+ unsigned edid_max_blocks;
+ unsigned webcam_size_idx;
+ unsigned webcam_ival_idx;
+ unsigned tv_freq;
+ unsigned tv_audmode;
+ unsigned tv_field_cap;
+ unsigned tv_audio_input;
+
+ /* Capture Overlay */
+ struct v4l2_framebuffer fb_cap;
+ struct v4l2_fh *overlay_cap_owner;
+ void *fb_vbase_cap;
+ int overlay_cap_top, overlay_cap_left;
+ enum v4l2_field overlay_cap_field;
+ void *bitmap_cap;
+ struct v4l2_clip clips_cap[MAX_CLIPS];
+ struct v4l2_clip try_clips_cap[MAX_CLIPS];
+ unsigned clipcount_cap;
+
+ /* Output */
+ unsigned output;
+ v4l2_std_id std_out;
+ struct v4l2_dv_timings dv_timings_out;
+ u32 colorspace_out;
+ u32 service_set_out;
+ u32 bytesperline_out[2];
+ unsigned tv_field_out;
+ unsigned tv_audio_output;
+ bool vbi_out_have_wss;
+ u8 vbi_out_wss[2];
+ bool vbi_out_have_cc[2];
+ u8 vbi_out_cc[2][2];
+ bool dvi_d_out;
+ u8 *scaled_line;
+ u8 *blended_line;
+ unsigned cur_scaled_line;
+
+ /* Output Overlay */
+ void *fb_vbase_out;
+ bool overlay_out_enabled;
+ int overlay_out_top, overlay_out_left;
+ void *bitmap_out;
+ struct v4l2_clip clips_out[MAX_CLIPS];
+ struct v4l2_clip try_clips_out[MAX_CLIPS];
+ unsigned clipcount_out;
+ unsigned fbuf_out_flags;
+ u32 chromakey_out;
+ u8 global_alpha_out;
+
+ /* video capture */
+ struct tpg_data tpg;
+ unsigned ms_vid_cap;
+ bool must_blank[VIDEO_MAX_FRAME];
+
+ const struct vivid_fmt *fmt_cap;
+ struct v4l2_fract timeperframe_vid_cap;
+ enum v4l2_field field_cap;
+ struct v4l2_rect src_rect;
+ struct v4l2_rect fmt_cap_rect;
+ struct v4l2_rect crop_cap;
+ struct v4l2_rect compose_cap;
+ struct v4l2_rect crop_bounds_cap;
+ struct vb2_queue vb_vid_cap_q;
+ struct list_head vid_cap_active;
+ struct vb2_queue vb_vbi_cap_q;
+ struct list_head vbi_cap_active;
+
+ /* thread for generating video capture stream */
+ struct task_struct *kthread_vid_cap;
+ unsigned long jiffies_vid_cap;
+ u32 cap_seq_offset;
+ u32 cap_seq_count;
+ bool cap_seq_resync;
+ u32 vid_cap_seq_start;
+ u32 vid_cap_seq_count;
+ bool vid_cap_streaming;
+ u32 vbi_cap_seq_start;
+ u32 vbi_cap_seq_count;
+ bool vbi_cap_streaming;
+ bool stream_sliced_vbi_cap;
+
+ /* video output */
+ const struct vivid_fmt *fmt_out;
+ struct v4l2_fract timeperframe_vid_out;
+ enum v4l2_field field_out;
+ struct v4l2_rect sink_rect;
+ struct v4l2_rect fmt_out_rect;
+ struct v4l2_rect crop_out;
+ struct v4l2_rect compose_out;
+ struct v4l2_rect compose_bounds_out;
+ struct vb2_queue vb_vid_out_q;
+ struct list_head vid_out_active;
+ struct vb2_queue vb_vbi_out_q;
+ struct list_head vbi_out_active;
+
+ /* video loop precalculated rectangles */
+
+ /*
+ * Intersection between what the output side composes and the capture side
+ * crops. I.e., what actually needs to be copied from the output buffer to
+ * the capture buffer.
+ */
+ struct v4l2_rect loop_vid_copy;
+ /* The part of the output buffer that (after scaling) corresponds to loop_vid_copy. */
+ struct v4l2_rect loop_vid_out;
+ /* The part of the capture buffer that (after scaling) corresponds to loop_vid_copy. */
+ struct v4l2_rect loop_vid_cap;
+ /*
+ * The intersection of the framebuffer, the overlay output window and
+ * loop_vid_copy. I.e., the part of the framebuffer that actually should be
+ * blended with the compose_out rectangle. This uses the framebuffer origin.
+ */
+ struct v4l2_rect loop_fb_copy;
+ /* The same as loop_fb_copy but with compose_out origin. */
+ struct v4l2_rect loop_vid_overlay;
+ /*
+ * The part of the capture buffer that (after scaling) corresponds
+ * to loop_vid_overlay.
+ */
+ struct v4l2_rect loop_vid_overlay_cap;
+
+ /* thread for generating video output stream */
+ struct task_struct *kthread_vid_out;
+ unsigned long jiffies_vid_out;
+ u32 out_seq_offset;
+ u32 out_seq_count;
+ bool out_seq_resync;
+ u32 vid_out_seq_start;
+ u32 vid_out_seq_count;
+ bool vid_out_streaming;
+ u32 vbi_out_seq_start;
+ u32 vbi_out_seq_count;
+ bool vbi_out_streaming;
+ bool stream_sliced_vbi_out;
+
+ /* SDR capture */
+ struct vb2_queue vb_sdr_cap_q;
+ struct list_head sdr_cap_active;
+ unsigned sdr_adc_freq;
+ unsigned sdr_fm_freq;
+ int sdr_fixp_src_phase;
+ int sdr_fixp_mod_phase;
+
+ bool tstamp_src_is_soe;
+ bool has_crop_cap;
+ bool has_compose_cap;
+ bool has_scaler_cap;
+ bool has_crop_out;
+ bool has_compose_out;
+ bool has_scaler_out;
+
+ /* thread for generating SDR stream */
+ struct task_struct *kthread_sdr_cap;
+ unsigned long jiffies_sdr_cap;
+ u32 sdr_cap_seq_offset;
+ u32 sdr_cap_seq_count;
+ bool sdr_cap_seq_resync;
+
+ /* RDS generator */
+ struct vivid_rds_gen rds_gen;
+
+ /* Radio receiver */
+ unsigned radio_rx_freq;
+ unsigned radio_rx_audmode;
+ int radio_rx_sig_qual;
+ unsigned radio_rx_hw_seek_mode;
+ bool radio_rx_hw_seek_prog_lim;
+ bool radio_rx_rds_controls;
+ bool radio_rx_rds_enabled;
+ unsigned radio_rx_rds_use_alternates;
+ unsigned radio_rx_rds_last_block;
+ struct v4l2_fh *radio_rx_rds_owner;
+
+ /* Radio transmitter */
+ unsigned radio_tx_freq;
+ unsigned radio_tx_subchans;
+ bool radio_tx_rds_controls;
+ unsigned radio_tx_rds_last_block;
+ struct v4l2_fh *radio_tx_rds_owner;
+
+ /* Shared between radio receiver and transmitter */
+ bool radio_rds_loop;
+ struct timespec radio_rds_init_ts;
+};
+
+static inline bool vivid_is_webcam(const struct vivid_dev *dev)
+{
+ return dev->input_type[dev->input] == WEBCAM;
+}
+
+static inline bool vivid_is_tv_cap(const struct vivid_dev *dev)
+{
+ return dev->input_type[dev->input] == TV;
+}
+
+static inline bool vivid_is_svid_cap(const struct vivid_dev *dev)
+{
+ return dev->input_type[dev->input] == SVID;
+}
+
+static inline bool vivid_is_hdmi_cap(const struct vivid_dev *dev)
+{
+ return dev->input_type[dev->input] == HDMI;
+}
+
+static inline bool vivid_is_sdtv_cap(const struct vivid_dev *dev)
+{
+ return vivid_is_tv_cap(dev) || vivid_is_svid_cap(dev);
+}
+
+static inline bool vivid_is_svid_out(const struct vivid_dev *dev)
+{
+ return dev->output_type[dev->output] == SVID;
+}
+
+static inline bool vivid_is_hdmi_out(const struct vivid_dev *dev)
+{
+ return dev->output_type[dev->output] == HDMI;
+}
+
+void vivid_lock(struct vb2_queue *vq);
+void vivid_unlock(struct vb2_queue *vq);
+
+#endif
diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/platform/vivid/vivid-ctrls.c
new file mode 100644
index 000000000000..d5cbf0038f24
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-ctrls.c
@@ -0,0 +1,1502 @@
+/*
+ * vivid-ctrls.c - control support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-common.h>
+
+#include "vivid-core.h"
+#include "vivid-vid-cap.h"
+#include "vivid-vid-out.h"
+#include "vivid-vid-common.h"
+#include "vivid-radio-common.h"
+#include "vivid-osd.h"
+#include "vivid-ctrls.h"
+
+#define VIVID_CID_CUSTOM_BASE (V4L2_CID_USER_BASE | 0xf000)
+#define VIVID_CID_BUTTON (VIVID_CID_CUSTOM_BASE + 0)
+#define VIVID_CID_BOOLEAN (VIVID_CID_CUSTOM_BASE + 1)
+#define VIVID_CID_INTEGER (VIVID_CID_CUSTOM_BASE + 2)
+#define VIVID_CID_INTEGER64 (VIVID_CID_CUSTOM_BASE + 3)
+#define VIVID_CID_MENU (VIVID_CID_CUSTOM_BASE + 4)
+#define VIVID_CID_STRING (VIVID_CID_CUSTOM_BASE + 5)
+#define VIVID_CID_BITMASK (VIVID_CID_CUSTOM_BASE + 6)
+#define VIVID_CID_INTMENU (VIVID_CID_CUSTOM_BASE + 7)
+
+#define VIVID_CID_VIVID_BASE (0x00f00000 | 0xf000)
+#define VIVID_CID_VIVID_CLASS (0x00f00000 | 1)
+#define VIVID_CID_TEST_PATTERN (VIVID_CID_VIVID_BASE + 0)
+#define VIVID_CID_OSD_TEXT_MODE (VIVID_CID_VIVID_BASE + 1)
+#define VIVID_CID_HOR_MOVEMENT (VIVID_CID_VIVID_BASE + 2)
+#define VIVID_CID_VERT_MOVEMENT (VIVID_CID_VIVID_BASE + 3)
+#define VIVID_CID_SHOW_BORDER (VIVID_CID_VIVID_BASE + 4)
+#define VIVID_CID_SHOW_SQUARE (VIVID_CID_VIVID_BASE + 5)
+#define VIVID_CID_INSERT_SAV (VIVID_CID_VIVID_BASE + 6)
+#define VIVID_CID_INSERT_EAV (VIVID_CID_VIVID_BASE + 7)
+#define VIVID_CID_VBI_CAP_INTERLACED (VIVID_CID_VIVID_BASE + 8)
+
+#define VIVID_CID_HFLIP (VIVID_CID_VIVID_BASE + 20)
+#define VIVID_CID_VFLIP (VIVID_CID_VIVID_BASE + 21)
+#define VIVID_CID_STD_ASPECT_RATIO (VIVID_CID_VIVID_BASE + 22)
+#define VIVID_CID_DV_TIMINGS_ASPECT_RATIO (VIVID_CID_VIVID_BASE + 23)
+#define VIVID_CID_TSTAMP_SRC (VIVID_CID_VIVID_BASE + 24)
+#define VIVID_CID_COLORSPACE (VIVID_CID_VIVID_BASE + 25)
+#define VIVID_CID_LIMITED_RGB_RANGE (VIVID_CID_VIVID_BASE + 26)
+#define VIVID_CID_ALPHA_MODE (VIVID_CID_VIVID_BASE + 27)
+#define VIVID_CID_HAS_CROP_CAP (VIVID_CID_VIVID_BASE + 28)
+#define VIVID_CID_HAS_COMPOSE_CAP (VIVID_CID_VIVID_BASE + 29)
+#define VIVID_CID_HAS_SCALER_CAP (VIVID_CID_VIVID_BASE + 30)
+#define VIVID_CID_HAS_CROP_OUT (VIVID_CID_VIVID_BASE + 31)
+#define VIVID_CID_HAS_COMPOSE_OUT (VIVID_CID_VIVID_BASE + 32)
+#define VIVID_CID_HAS_SCALER_OUT (VIVID_CID_VIVID_BASE + 33)
+#define VIVID_CID_LOOP_VIDEO (VIVID_CID_VIVID_BASE + 34)
+#define VIVID_CID_SEQ_WRAP (VIVID_CID_VIVID_BASE + 35)
+#define VIVID_CID_TIME_WRAP (VIVID_CID_VIVID_BASE + 36)
+#define VIVID_CID_MAX_EDID_BLOCKS (VIVID_CID_VIVID_BASE + 37)
+#define VIVID_CID_PERCENTAGE_FILL (VIVID_CID_VIVID_BASE + 38)
+
+#define VIVID_CID_STD_SIGNAL_MODE (VIVID_CID_VIVID_BASE + 60)
+#define VIVID_CID_STANDARD (VIVID_CID_VIVID_BASE + 61)
+#define VIVID_CID_DV_TIMINGS_SIGNAL_MODE (VIVID_CID_VIVID_BASE + 62)
+#define VIVID_CID_DV_TIMINGS (VIVID_CID_VIVID_BASE + 63)
+#define VIVID_CID_PERC_DROPPED (VIVID_CID_VIVID_BASE + 64)
+#define VIVID_CID_DISCONNECT (VIVID_CID_VIVID_BASE + 65)
+#define VIVID_CID_DQBUF_ERROR (VIVID_CID_VIVID_BASE + 66)
+#define VIVID_CID_QUEUE_SETUP_ERROR (VIVID_CID_VIVID_BASE + 67)
+#define VIVID_CID_BUF_PREPARE_ERROR (VIVID_CID_VIVID_BASE + 68)
+#define VIVID_CID_START_STR_ERROR (VIVID_CID_VIVID_BASE + 69)
+#define VIVID_CID_QUEUE_ERROR (VIVID_CID_VIVID_BASE + 70)
+#define VIVID_CID_CLEAR_FB (VIVID_CID_VIVID_BASE + 71)
+
+#define VIVID_CID_RADIO_SEEK_MODE (VIVID_CID_VIVID_BASE + 90)
+#define VIVID_CID_RADIO_SEEK_PROG_LIM (VIVID_CID_VIVID_BASE + 91)
+#define VIVID_CID_RADIO_RX_RDS_RBDS (VIVID_CID_VIVID_BASE + 92)
+#define VIVID_CID_RADIO_RX_RDS_BLOCKIO (VIVID_CID_VIVID_BASE + 93)
+
+#define VIVID_CID_RADIO_TX_RDS_BLOCKIO (VIVID_CID_VIVID_BASE + 94)
+
+
+/* General User Controls */
+
+static int vivid_user_gen_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_user_gen);
+
+ switch (ctrl->id) {
+ case VIVID_CID_DISCONNECT:
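+ /*
+ * Simulate a forced disconnect by marking all device nodes as
+ * unregistered; vivid_fop_release() re-registers them once the
+ * last user closes its file handle.
+ */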
+ v4l2_info(&dev->v4l2_dev, "disconnect\n");
+ clear_bit(V4L2_FL_REGISTERED, &dev->vid_cap_dev.flags);
+ clear_bit(V4L2_FL_REGISTERED, &dev->vid_out_dev.flags);
+ clear_bit(V4L2_FL_REGISTERED, &dev->vbi_cap_dev.flags);
+ clear_bit(V4L2_FL_REGISTERED, &dev->vbi_out_dev.flags);
+ clear_bit(V4L2_FL_REGISTERED, &dev->sdr_cap_dev.flags);
+ clear_bit(V4L2_FL_REGISTERED, &dev->radio_rx_dev.flags);
+ clear_bit(V4L2_FL_REGISTERED, &dev->radio_tx_dev.flags);
+ break;
+ case VIVID_CID_CLEAR_FB:
+ vivid_clear_fb(dev);
+ break;
+ case VIVID_CID_BUTTON:
+ dev->button_pressed = 30;
+ break;
+ }
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vivid_user_gen_ctrl_ops = {
+ .s_ctrl = vivid_user_gen_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_button = {
+ .ops = &vivid_user_gen_ctrl_ops,
+ .id = VIVID_CID_BUTTON,
+ .name = "Button",
+ .type = V4L2_CTRL_TYPE_BUTTON,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_boolean = {
+ .ops = &vivid_user_gen_ctrl_ops,
+ .id = VIVID_CID_BOOLEAN,
+ .name = "Boolean",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 1,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_int32 = {
+ .ops = &vivid_user_gen_ctrl_ops,
+ .id = VIVID_CID_INTEGER,
+ .name = "Integer 32 Bits",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0xffffffff80000000ULL,
+ .max = 0x7fffffff,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_int64 = {
+ .ops = &vivid_user_gen_ctrl_ops,
+ .id = VIVID_CID_INTEGER64,
+ .name = "Integer 64 Bits",
+ .type = V4L2_CTRL_TYPE_INTEGER64,
+ .min = 0x8000000000000000ULL,
+ .max = 0x7fffffffffffffffLL,
+ .step = 1,
+};
+
+static const char * const vivid_ctrl_menu_strings[] = {
+ "Menu Item 0 (Skipped)",
+ "Menu Item 1",
+ "Menu Item 2 (Skipped)",
+ "Menu Item 3",
+ "Menu Item 4",
+ "Menu Item 5 (Skipped)",
+ NULL,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_menu = {
+ .ops = &vivid_user_gen_ctrl_ops,
+ .id = VIVID_CID_MENU,
+ .name = "Menu",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .min = 1,
+ .max = 4,
+ .def = 3,
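+ /* Bit n of menu_skip_mask skips menu item n: 0x04 disables item 2 */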
+ .menu_skip_mask = 0x04,
+ .qmenu = vivid_ctrl_menu_strings,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_string = {
+ .ops = &vivid_user_gen_ctrl_ops,
+ .id = VIVID_CID_STRING,
+ .name = "String",
+ .type = V4L2_CTRL_TYPE_STRING,
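+ /* For string controls min/max give the allowed string lengths */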
+ .min = 2,
+ .max = 4,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_bitmask = {
+ .ops = &vivid_user_gen_ctrl_ops,
+ .id = VIVID_CID_BITMASK,
+ .name = "Bitmask",
+ .type = V4L2_CTRL_TYPE_BITMASK,
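+ /* For bitmask controls max holds the valid bits; min and step must be 0 */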
+ .def = 0x80002000,
+ .min = 0,
+ .max = 0x80402010,
+ .step = 0,
+};
+
+static const s64 vivid_ctrl_int_menu_values[] = {
+ 1, 1, 2, 3, 5, 8, 13, 21, 42,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_int_menu = {
+ .ops = &vivid_user_gen_ctrl_ops,
+ .id = VIVID_CID_INTMENU,
+ .name = "Integer Menu",
+ .type = V4L2_CTRL_TYPE_INTEGER_MENU,
+ .min = 1,
+ .max = 8,
+ .def = 4,
+ .menu_skip_mask = 0x02,
+ .qmenu_int = vivid_ctrl_int_menu_values,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_disconnect = {
+ .ops = &vivid_user_gen_ctrl_ops,
+ .id = VIVID_CID_DISCONNECT,
+ .name = "Disconnect",
+ .type = V4L2_CTRL_TYPE_BUTTON,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_clear_fb = {
+ .ops = &vivid_user_gen_ctrl_ops,
+ .id = VIVID_CID_CLEAR_FB,
+ .name = "Clear Framebuffer",
+ .type = V4L2_CTRL_TYPE_BUTTON,
+};
+
+
+/* Video User Controls */
+
+static int vivid_user_vid_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_user_vid);
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUTOGAIN:
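+ /*
+ * Gain is volatile while autogain is on: report a dummy value
+ * derived from jiffies_vid_cap.
+ */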
+ dev->gain->val = dev->jiffies_vid_cap & 0xff;
+ break;
+ }
+ return 0;
+}
+
+static int vivid_user_vid_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_user_vid);
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ dev->input_brightness[dev->input] = ctrl->val - dev->input * 128;
+ tpg_s_brightness(&dev->tpg, dev->input_brightness[dev->input]);
+ break;
+ case V4L2_CID_CONTRAST:
+ tpg_s_contrast(&dev->tpg, ctrl->val);
+ break;
+ case V4L2_CID_SATURATION:
+ tpg_s_saturation(&dev->tpg, ctrl->val);
+ break;
+ case V4L2_CID_HUE:
+ tpg_s_hue(&dev->tpg, ctrl->val);
+ break;
+ case V4L2_CID_HFLIP:
+ dev->hflip = ctrl->val;
+ tpg_s_hflip(&dev->tpg, dev->sensor_hflip ^ dev->hflip);
+ break;
+ case V4L2_CID_VFLIP:
+ dev->vflip = ctrl->val;
+ tpg_s_vflip(&dev->tpg, dev->sensor_vflip ^ dev->vflip);
+ break;
+ case V4L2_CID_ALPHA_COMPONENT:
+ tpg_s_alpha_component(&dev->tpg, ctrl->val);
+ break;
+ }
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vivid_user_vid_ctrl_ops = {
+ .g_volatile_ctrl = vivid_user_vid_g_volatile_ctrl,
+ .s_ctrl = vivid_user_vid_s_ctrl,
+};
+
+
+/* Video Capture Controls */
+
+static int vivid_vid_cap_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_vid_cap);
+ unsigned i;
+
+ switch (ctrl->id) {
+ case VIVID_CID_TEST_PATTERN:
+ vivid_update_quality(dev);
+ tpg_s_pattern(&dev->tpg, ctrl->val);
+ break;
+ case VIVID_CID_COLORSPACE:
+ tpg_s_colorspace(&dev->tpg, ctrl->val);
+ vivid_send_source_change(dev, TV);
+ vivid_send_source_change(dev, SVID);
+ vivid_send_source_change(dev, HDMI);
+ vivid_send_source_change(dev, WEBCAM);
+ break;
+ case V4L2_CID_DV_RX_RGB_RANGE:
+ if (!vivid_is_hdmi_cap(dev))
+ break;
+ tpg_s_rgb_range(&dev->tpg, ctrl->val);
+ break;
+ case VIVID_CID_LIMITED_RGB_RANGE:
+ tpg_s_real_rgb_range(&dev->tpg, ctrl->val ?
+ V4L2_DV_RGB_RANGE_LIMITED : V4L2_DV_RGB_RANGE_FULL);
+ break;
+ case VIVID_CID_ALPHA_MODE:
+ tpg_s_alpha_mode(&dev->tpg, ctrl->val);
+ break;
+ case VIVID_CID_HOR_MOVEMENT:
+ tpg_s_mv_hor_mode(&dev->tpg, ctrl->val);
+ break;
+ case VIVID_CID_VERT_MOVEMENT:
+ tpg_s_mv_vert_mode(&dev->tpg, ctrl->val);
+ break;
+ case VIVID_CID_OSD_TEXT_MODE:
+ dev->osd_mode = ctrl->val;
+ break;
+ case VIVID_CID_PERCENTAGE_FILL:
+ tpg_s_perc_fill(&dev->tpg, ctrl->val);
+ for (i = 0; i < VIDEO_MAX_FRAME; i++)
+ dev->must_blank[i] = ctrl->val < 100;
+ break;
+ case VIVID_CID_INSERT_SAV:
+ tpg_s_insert_sav(&dev->tpg, ctrl->val);
+ break;
+ case VIVID_CID_INSERT_EAV:
+ tpg_s_insert_eav(&dev->tpg, ctrl->val);
+ break;
+ case VIVID_CID_HFLIP:
+ dev->sensor_hflip = ctrl->val;
+ tpg_s_hflip(&dev->tpg, dev->sensor_hflip ^ dev->hflip);
+ break;
+ case VIVID_CID_VFLIP:
+ dev->sensor_vflip = ctrl->val;
+ tpg_s_vflip(&dev->tpg, dev->sensor_vflip ^ dev->vflip);
+ break;
+ case VIVID_CID_HAS_CROP_CAP:
+ dev->has_crop_cap = ctrl->val;
+ vivid_update_format_cap(dev, true);
+ break;
+ case VIVID_CID_HAS_COMPOSE_CAP:
+ dev->has_compose_cap = ctrl->val;
+ vivid_update_format_cap(dev, true);
+ break;
+ case VIVID_CID_HAS_SCALER_CAP:
+ dev->has_scaler_cap = ctrl->val;
+ vivid_update_format_cap(dev, true);
+ break;
+ case VIVID_CID_SHOW_BORDER:
+ tpg_s_show_border(&dev->tpg, ctrl->val);
+ break;
+ case VIVID_CID_SHOW_SQUARE:
+ tpg_s_show_square(&dev->tpg, ctrl->val);
+ break;
+ case VIVID_CID_STD_ASPECT_RATIO:
+ dev->std_aspect_ratio = ctrl->val;
+ tpg_s_video_aspect(&dev->tpg, vivid_get_video_aspect(dev));
+ break;
+ case VIVID_CID_DV_TIMINGS_SIGNAL_MODE:
+ dev->dv_timings_signal_mode = dev->ctrl_dv_timings_signal_mode->val;
+ if (dev->dv_timings_signal_mode == SELECTED_DV_TIMINGS)
+ dev->query_dv_timings = dev->ctrl_dv_timings->val;
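+ /* The DV Timings control is only active in "Selected DV Timings" mode */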
+ v4l2_ctrl_activate(dev->ctrl_dv_timings,
+ dev->dv_timings_signal_mode == SELECTED_DV_TIMINGS);
+ vivid_update_quality(dev);
+ vivid_send_source_change(dev, HDMI);
+ break;
+ case VIVID_CID_DV_TIMINGS_ASPECT_RATIO:
+ dev->dv_timings_aspect_ratio = ctrl->val;
+ tpg_s_video_aspect(&dev->tpg, vivid_get_video_aspect(dev));
+ break;
+ case VIVID_CID_TSTAMP_SRC:
+ dev->tstamp_src_is_soe = ctrl->val;
+ dev->vb_vid_cap_q.timestamp_flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ if (dev->tstamp_src_is_soe)
+ dev->vb_vid_cap_q.timestamp_flags |= V4L2_BUF_FLAG_TSTAMP_SRC_SOE;
+ break;
+ case VIVID_CID_MAX_EDID_BLOCKS:
+ dev->edid_max_blocks = ctrl->val;
+ if (dev->edid_blocks > dev->edid_max_blocks)
+ dev->edid_blocks = dev->edid_max_blocks;
+ break;
+ }
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vivid_vid_cap_ctrl_ops = {
+ .s_ctrl = vivid_vid_cap_s_ctrl,
+};
+
+static const char * const vivid_ctrl_hor_movement_strings[] = {
+ "Move Left Fast",
+ "Move Left",
+ "Move Left Slow",
+ "No Movement",
+ "Move Right Slow",
+ "Move Right",
+ "Move Right Fast",
+ NULL,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_hor_movement = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_HOR_MOVEMENT,
+ .name = "Horizontal Movement",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .max = TPG_MOVE_POS_FAST,
+ .def = TPG_MOVE_NONE,
+ .qmenu = vivid_ctrl_hor_movement_strings,
+};
+
+static const char * const vivid_ctrl_vert_movement_strings[] = {
+ "Move Up Fast",
+ "Move Up",
+ "Move Up Slow",
+ "No Movement",
+ "Move Down Slow",
+ "Move Down",
+ "Move Down Fast",
+ NULL,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_vert_movement = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_VERT_MOVEMENT,
+ .name = "Vertical Movement",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .max = TPG_MOVE_POS_FAST,
+ .def = TPG_MOVE_NONE,
+ .qmenu = vivid_ctrl_vert_movement_strings,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_show_border = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_SHOW_BORDER,
+ .name = "Show Border",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_show_square = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_SHOW_SQUARE,
+ .name = "Show Square",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .step = 1,
+};
+
+static const char * const vivid_ctrl_osd_mode_strings[] = {
+ "All",
+ "Counters Only",
+ "None",
+ NULL,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_osd_mode = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_OSD_TEXT_MODE,
+ .name = "OSD Text Mode",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .max = 2,
+ .qmenu = vivid_ctrl_osd_mode_strings,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_perc_fill = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_PERCENTAGE_FILL,
+ .name = "Fill Percentage of Frame",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 100,
+ .def = 100,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_insert_sav = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_INSERT_SAV,
+ .name = "Insert SAV Code in Image",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_insert_eav = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_INSERT_EAV,
+ .name = "Insert EAV Code in Image",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_hflip = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_HFLIP,
+ .name = "Sensor Flipped Horizontally",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_vflip = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_VFLIP,
+ .name = "Sensor Flipped Vertically",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_has_crop_cap = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_HAS_CROP_CAP,
+ .name = "Enable Capture Cropping",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .def = 1,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_has_compose_cap = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_HAS_COMPOSE_CAP,
+ .name = "Enable Capture Composing",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .def = 1,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_has_scaler_cap = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_HAS_SCALER_CAP,
+ .name = "Enable Capture Scaler",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .def = 1,
+ .step = 1,
+};
+
+static const char * const vivid_ctrl_tstamp_src_strings[] = {
+ "End of Frame",
+ "Start of Exposure",
+ NULL,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_tstamp_src = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_TSTAMP_SRC,
+ .name = "Timestamp Source",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .max = 1,
+ .qmenu = vivid_ctrl_tstamp_src_strings,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_std_aspect_ratio = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_STD_ASPECT_RATIO,
+ .name = "Standard Aspect Ratio",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .min = 1,
+ .max = 4,
+ .def = 1,
+ .qmenu = tpg_aspect_strings,
+};
+
+static const char * const vivid_ctrl_dv_timings_signal_mode_strings[] = {
+ "Current DV Timings",
+ "No Signal",
+ "No Lock",
+ "Out of Range",
+ "Selected DV Timings",
+ "Cycle Through All DV Timings",
+ "Custom DV Timings",
+ NULL,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_dv_timings_signal_mode = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_DV_TIMINGS_SIGNAL_MODE,
+ .name = "DV Timings Signal Mode",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .max = 5,
+ .qmenu = vivid_ctrl_dv_timings_signal_mode_strings,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_dv_timings_aspect_ratio = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_DV_TIMINGS_ASPECT_RATIO,
+ .name = "DV Timings Aspect Ratio",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .max = 3,
+ .qmenu = tpg_aspect_strings,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_max_edid_blocks = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_MAX_EDID_BLOCKS,
+ .name = "Maximum EDID Blocks",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 1,
+ .max = 256,
+ .def = 2,
+ .step = 1,
+};
+
+static const char * const vivid_ctrl_colorspace_strings[] = {
+ "",
+ "SMPTE 170M",
+ "SMPTE 240M",
+ "REC 709",
+ "", /* Skip Bt878 entry */
+ "470 System M",
+ "470 System BG",
+ "", /* Skip JPEG entry */
+ "sRGB",
+ NULL,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_colorspace = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_COLORSPACE,
+ .name = "Colorspace",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .min = 1,
+ .max = 8,
+ .menu_skip_mask = (1 << 4) | (1 << 7),
+ .def = 8,
+ .qmenu = vivid_ctrl_colorspace_strings,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_alpha_mode = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_ALPHA_MODE,
+ .name = "Apply Alpha To Red Only",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_limited_rgb_range = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_LIMITED_RGB_RANGE,
+ .name = "Limited RGB Range (16-235)",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .step = 1,
+};
+
+
+/* VBI Capture Control */
+
+static int vivid_vbi_cap_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_vbi_cap);
+
+ switch (ctrl->id) {
+ case VIVID_CID_VBI_CAP_INTERLACED:
+ dev->vbi_cap_interlaced = ctrl->val;
+ break;
+ }
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vivid_vbi_cap_ctrl_ops = {
+ .s_ctrl = vivid_vbi_cap_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_vbi_cap_interlaced = {
+ .ops = &vivid_vbi_cap_ctrl_ops,
+ .id = VIVID_CID_VBI_CAP_INTERLACED,
+ .name = "Interlaced VBI Format",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .step = 1,
+};
+
+
+/* Video Output Controls */
+
+static int vivid_vid_out_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_vid_out);
+ struct v4l2_bt_timings *bt = &dev->dv_timings_out.bt;
+
+ switch (ctrl->id) {
+ case VIVID_CID_HAS_CROP_OUT:
+ dev->has_crop_out = ctrl->val;
+ vivid_update_format_out(dev);
+ break;
+ case VIVID_CID_HAS_COMPOSE_OUT:
+ dev->has_compose_out = ctrl->val;
+ vivid_update_format_out(dev);
+ break;
+ case VIVID_CID_HAS_SCALER_OUT:
+ dev->has_scaler_out = ctrl->val;
+ vivid_update_format_out(dev);
+ break;
+ case V4L2_CID_DV_TX_MODE:
+ dev->dvi_d_out = ctrl->val == V4L2_DV_TX_MODE_DVI_D;
+ if (!vivid_is_hdmi_out(dev))
+ break;
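+ /*
+ * With CEA-861 HDMI timings pick SMPTE 170M for SD resolutions
+ * (width 720, height <= 576) and REC 709 otherwise; DVI-D or
+ * non-CEA-861 timings fall back to sRGB.
+ */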
+ if (!dev->dvi_d_out && (bt->standards & V4L2_DV_BT_STD_CEA861)) {
+ if (bt->width == 720 && bt->height <= 576)
+ dev->colorspace_out = V4L2_COLORSPACE_SMPTE170M;
+ else
+ dev->colorspace_out = V4L2_COLORSPACE_REC709;
+ } else {
+ dev->colorspace_out = V4L2_COLORSPACE_SRGB;
+ }
+ if (dev->loop_video)
+ vivid_send_source_change(dev, HDMI);
+ break;
+ }
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vivid_vid_out_ctrl_ops = {
+ .s_ctrl = vivid_vid_out_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_has_crop_out = {
+ .ops = &vivid_vid_out_ctrl_ops,
+ .id = VIVID_CID_HAS_CROP_OUT,
+ .name = "Enable Output Cropping",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .def = 1,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_has_compose_out = {
+ .ops = &vivid_vid_out_ctrl_ops,
+ .id = VIVID_CID_HAS_COMPOSE_OUT,
+ .name = "Enable Output Composing",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .def = 1,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_has_scaler_out = {
+ .ops = &vivid_vid_out_ctrl_ops,
+ .id = VIVID_CID_HAS_SCALER_OUT,
+ .name = "Enable Output Scaler",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .def = 1,
+ .step = 1,
+};
+
+
+/* Streaming Controls */
+
+static int vivid_streaming_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_streaming);
+ struct timeval tv;
+
+ switch (ctrl->id) {
+ case VIVID_CID_DQBUF_ERROR:
+ dev->dqbuf_error = true;
+ break;
+ case VIVID_CID_PERC_DROPPED:
+ dev->perc_dropped_buffers = ctrl->val;
+ break;
+ case VIVID_CID_QUEUE_SETUP_ERROR:
+ dev->queue_setup_error = true;
+ break;
+ case VIVID_CID_BUF_PREPARE_ERROR:
+ dev->buf_prepare_error = true;
+ break;
+ case VIVID_CID_START_STR_ERROR:
+ dev->start_streaming_error = true;
+ break;
+ case VIVID_CID_QUEUE_ERROR:
+ if (dev->vb_vid_cap_q.start_streaming_called)
+ vb2_queue_error(&dev->vb_vid_cap_q);
+ if (dev->vb_vbi_cap_q.start_streaming_called)
+ vb2_queue_error(&dev->vb_vbi_cap_q);
+ if (dev->vb_vid_out_q.start_streaming_called)
+ vb2_queue_error(&dev->vb_vid_out_q);
+ if (dev->vb_vbi_out_q.start_streaming_called)
+ vb2_queue_error(&dev->vb_vbi_out_q);
+ if (dev->vb_sdr_cap_q.start_streaming_called)
+ vb2_queue_error(&dev->vb_sdr_cap_q);
+ break;
+ case VIVID_CID_SEQ_WRAP:
+ dev->seq_wrap = ctrl->val;
+ break;
+ case VIVID_CID_TIME_WRAP:
+ dev->time_wrap = ctrl->val;
+ if (ctrl->val == 0) {
+ dev->time_wrap_offset = 0;
+ break;
+ }
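+ /*
+ * Pick an offset so that the current time plus time_wrap_offset
+ * equals -16 seconds: the generated timestamps then cross zero
+ * after 16 seconds of streaming.
+ */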
+ v4l2_get_timestamp(&tv);
+ dev->time_wrap_offset = -tv.tv_sec - 16;
+ break;
+ }
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vivid_streaming_ctrl_ops = {
+ .s_ctrl = vivid_streaming_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_dqbuf_error = {
+ .ops = &vivid_streaming_ctrl_ops,
+ .id = VIVID_CID_DQBUF_ERROR,
+ .name = "Inject V4L2_BUF_FLAG_ERROR",
+ .type = V4L2_CTRL_TYPE_BUTTON,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_perc_dropped = {
+ .ops = &vivid_streaming_ctrl_ops,
+ .id = VIVID_CID_PERC_DROPPED,
+ .name = "Percentage of Dropped Buffers",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 100,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_queue_setup_error = {
+ .ops = &vivid_streaming_ctrl_ops,
+ .id = VIVID_CID_QUEUE_SETUP_ERROR,
+ .name = "Inject VIDIOC_REQBUFS Error",
+ .type = V4L2_CTRL_TYPE_BUTTON,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_buf_prepare_error = {
+ .ops = &vivid_streaming_ctrl_ops,
+ .id = VIVID_CID_BUF_PREPARE_ERROR,
+ .name = "Inject VIDIOC_QBUF Error",
+ .type = V4L2_CTRL_TYPE_BUTTON,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_start_streaming_error = {
+ .ops = &vivid_streaming_ctrl_ops,
+ .id = VIVID_CID_START_STR_ERROR,
+ .name = "Inject VIDIOC_STREAMON Error",
+ .type = V4L2_CTRL_TYPE_BUTTON,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_queue_error = {
+ .ops = &vivid_streaming_ctrl_ops,
+ .id = VIVID_CID_QUEUE_ERROR,
+ .name = "Inject Fatal Streaming Error",
+ .type = V4L2_CTRL_TYPE_BUTTON,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_seq_wrap = {
+ .ops = &vivid_streaming_ctrl_ops,
+ .id = VIVID_CID_SEQ_WRAP,
+ .name = "Wrap Sequence Number",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_time_wrap = {
+ .ops = &vivid_streaming_ctrl_ops,
+ .id = VIVID_CID_TIME_WRAP,
+ .name = "Wrap Timestamp",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .step = 1,
+};
+
+
+/* SDTV Capture Controls */
+
+static int vivid_sdtv_cap_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_sdtv_cap);
+
+ switch (ctrl->id) {
+ case VIVID_CID_STD_SIGNAL_MODE:
+ dev->std_signal_mode = dev->ctrl_std_signal_mode->val;
+ if (dev->std_signal_mode == SELECTED_STD)
+ dev->query_std = vivid_standard[dev->ctrl_standard->val];
+ v4l2_ctrl_activate(dev->ctrl_standard, dev->std_signal_mode == SELECTED_STD);
+ vivid_update_quality(dev);
+ vivid_send_source_change(dev, TV);
+ vivid_send_source_change(dev, SVID);
+ break;
+ }
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vivid_sdtv_cap_ctrl_ops = {
+ .s_ctrl = vivid_sdtv_cap_s_ctrl,
+};
+
+static const char * const vivid_ctrl_std_signal_mode_strings[] = {
+ "Current Standard",
+ "No Signal",
+ "No Lock",
+ "",
+ "Selected Standard",
+ "Cycle Through All Standards",
+ NULL,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_std_signal_mode = {
+ .ops = &vivid_sdtv_cap_ctrl_ops,
+ .id = VIVID_CID_STD_SIGNAL_MODE,
+ .name = "Standard Signal Mode",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .max = 5,
+ .menu_skip_mask = 1 << 3,
+ .qmenu = vivid_ctrl_std_signal_mode_strings,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_standard = {
+ .ops = &vivid_sdtv_cap_ctrl_ops,
+ .id = VIVID_CID_STANDARD,
+ .name = "Standard",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .max = 14,
+ .qmenu = vivid_ctrl_standard_strings,
+};
+
+
+
+/* Radio Receiver Controls */
+
+static int vivid_radio_rx_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_radio_rx);
+
+ switch (ctrl->id) {
+ case VIVID_CID_RADIO_SEEK_MODE:
+ dev->radio_rx_hw_seek_mode = ctrl->val;
+ break;
+ case VIVID_CID_RADIO_SEEK_PROG_LIM:
+ dev->radio_rx_hw_seek_prog_lim = ctrl->val;
+ break;
+ case VIVID_CID_RADIO_RX_RDS_RBDS:
+ dev->rds_gen.use_rbds = ctrl->val;
+ break;
+ case VIVID_CID_RADIO_RX_RDS_BLOCKIO:
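+ /*
+ * In "Block I/O" mode the RDS data is transferred via read(), so
+ * V4L2_CAP_READWRITE is restored and the RDS controls are reset
+ * and deactivated; in "Controls" mode the RDS controls are used
+ * instead.
+ */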
+ dev->radio_rx_rds_controls = ctrl->val;
+ dev->radio_rx_caps &= ~V4L2_CAP_READWRITE;
+ dev->radio_rx_rds_use_alternates = false;
+ if (!dev->radio_rx_rds_controls) {
+ dev->radio_rx_caps |= V4L2_CAP_READWRITE;
+ __v4l2_ctrl_s_ctrl(dev->radio_rx_rds_pty, 0);
+ __v4l2_ctrl_s_ctrl(dev->radio_rx_rds_ta, 0);
+ __v4l2_ctrl_s_ctrl(dev->radio_rx_rds_tp, 0);
+ __v4l2_ctrl_s_ctrl(dev->radio_rx_rds_ms, 0);
+ __v4l2_ctrl_s_ctrl_string(dev->radio_rx_rds_psname, "");
+ __v4l2_ctrl_s_ctrl_string(dev->radio_rx_rds_radiotext, "");
+ }
+ v4l2_ctrl_activate(dev->radio_rx_rds_pty, dev->radio_rx_rds_controls);
+ v4l2_ctrl_activate(dev->radio_rx_rds_psname, dev->radio_rx_rds_controls);
+ v4l2_ctrl_activate(dev->radio_rx_rds_radiotext, dev->radio_rx_rds_controls);
+ v4l2_ctrl_activate(dev->radio_rx_rds_ta, dev->radio_rx_rds_controls);
+ v4l2_ctrl_activate(dev->radio_rx_rds_tp, dev->radio_rx_rds_controls);
+ v4l2_ctrl_activate(dev->radio_rx_rds_ms, dev->radio_rx_rds_controls);
+ break;
+ case V4L2_CID_RDS_RECEPTION:
+ dev->radio_rx_rds_enabled = ctrl->val;
+ break;
+ }
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vivid_radio_rx_ctrl_ops = {
+ .s_ctrl = vivid_radio_rx_s_ctrl,
+};
+
+static const char * const vivid_ctrl_radio_rds_mode_strings[] = {
+ "Block I/O",
+ "Controls",
+ NULL,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_radio_rx_rds_blockio = {
+ .ops = &vivid_radio_rx_ctrl_ops,
+ .id = VIVID_CID_RADIO_RX_RDS_BLOCKIO,
+ .name = "RDS Rx I/O Mode",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .qmenu = vivid_ctrl_radio_rds_mode_strings,
+ .max = 1,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_radio_rx_rds_rbds = {
+ .ops = &vivid_radio_rx_ctrl_ops,
+ .id = VIVID_CID_RADIO_RX_RDS_RBDS,
+ .name = "Generate RBDS Instead of RDS",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .step = 1,
+};
+
+static const char * const vivid_ctrl_radio_hw_seek_mode_strings[] = {
+ "Bounded",
+ "Wrap Around",
+ "Both",
+ NULL,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_radio_hw_seek_mode = {
+ .ops = &vivid_radio_rx_ctrl_ops,
+ .id = VIVID_CID_RADIO_SEEK_MODE,
+ .name = "Radio HW Seek Mode",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .max = 2,
+ .qmenu = vivid_ctrl_radio_hw_seek_mode_strings,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_radio_hw_seek_prog_lim = {
+ .ops = &vivid_radio_rx_ctrl_ops,
+ .id = VIVID_CID_RADIO_SEEK_PROG_LIM,
+ .name = "Radio Programmable HW Seek",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .step = 1,
+};
+
+
+/* Radio Transmitter Controls */
+
+static int vivid_radio_tx_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_radio_tx);
+
+ switch (ctrl->id) {
+ case VIVID_CID_RADIO_TX_RDS_BLOCKIO:
+ dev->radio_tx_rds_controls = ctrl->val;
+ dev->radio_tx_caps &= ~V4L2_CAP_READWRITE;
+ if (!dev->radio_tx_rds_controls)
+ dev->radio_tx_caps |= V4L2_CAP_READWRITE;
+ break;
+ case V4L2_CID_RDS_TX_PTY:
+ if (dev->radio_rx_rds_controls)
+ v4l2_ctrl_s_ctrl(dev->radio_rx_rds_pty, ctrl->val);
+ break;
+ case V4L2_CID_RDS_TX_PS_NAME:
+ if (dev->radio_rx_rds_controls)
+ v4l2_ctrl_s_ctrl_string(dev->radio_rx_rds_psname, ctrl->p_new.p_char);
+ break;
+ case V4L2_CID_RDS_TX_RADIO_TEXT:
+ if (dev->radio_rx_rds_controls)
+ v4l2_ctrl_s_ctrl_string(dev->radio_rx_rds_radiotext, ctrl->p_new.p_char);
+ break;
+ case V4L2_CID_RDS_TX_TRAFFIC_ANNOUNCEMENT:
+ if (dev->radio_rx_rds_controls)
+ v4l2_ctrl_s_ctrl(dev->radio_rx_rds_ta, ctrl->val);
+ break;
+ case V4L2_CID_RDS_TX_TRAFFIC_PROGRAM:
+ if (dev->radio_rx_rds_controls)
+ v4l2_ctrl_s_ctrl(dev->radio_rx_rds_tp, ctrl->val);
+ break;
+ case V4L2_CID_RDS_TX_MUSIC_SPEECH:
+ if (dev->radio_rx_rds_controls)
+ v4l2_ctrl_s_ctrl(dev->radio_rx_rds_ms, ctrl->val);
+ break;
+ }
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vivid_radio_tx_ctrl_ops = {
+ .s_ctrl = vivid_radio_tx_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_radio_tx_rds_blockio = {
+ .ops = &vivid_radio_tx_ctrl_ops,
+ .id = VIVID_CID_RADIO_TX_RDS_BLOCKIO,
+ .name = "RDS Tx I/O Mode",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .qmenu = vivid_ctrl_radio_rds_mode_strings,
+ .max = 1,
+ .def = 1,
+};
+
+
+
+/* Video Loop Control */
+
+static int vivid_loop_out_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_loop_out);
+
+ switch (ctrl->id) {
+ case VIVID_CID_LOOP_VIDEO:
+ dev->loop_video = ctrl->val;
+ vivid_update_quality(dev);
+ vivid_send_source_change(dev, SVID);
+ vivid_send_source_change(dev, HDMI);
+ break;
+ }
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vivid_loop_out_ctrl_ops = {
+ .s_ctrl = vivid_loop_out_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_loop_video = {
+ .ops = &vivid_loop_out_ctrl_ops,
+ .id = VIVID_CID_LOOP_VIDEO,
+ .name = "Loop Video",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .step = 1,
+};
+
+
+static const struct v4l2_ctrl_config vivid_ctrl_class = {
+ .ops = &vivid_user_gen_ctrl_ops,
+ .flags = V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_WRITE_ONLY,
+ .id = VIVID_CID_VIVID_CLASS,
+ .name = "Vivid Controls",
+ .type = V4L2_CTRL_TYPE_CTRL_CLASS,
+};
+
+int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
+ bool show_ccs_out, bool no_error_inj,
+ bool has_sdtv, bool has_hdmi)
+{
+ struct v4l2_ctrl_handler *hdl_user_gen = &dev->ctrl_hdl_user_gen;
+ struct v4l2_ctrl_handler *hdl_user_vid = &dev->ctrl_hdl_user_vid;
+ struct v4l2_ctrl_handler *hdl_user_aud = &dev->ctrl_hdl_user_aud;
+ struct v4l2_ctrl_handler *hdl_streaming = &dev->ctrl_hdl_streaming;
+ struct v4l2_ctrl_handler *hdl_sdtv_cap = &dev->ctrl_hdl_sdtv_cap;
+ struct v4l2_ctrl_handler *hdl_loop_out = &dev->ctrl_hdl_loop_out;
+ struct v4l2_ctrl_handler *hdl_vid_cap = &dev->ctrl_hdl_vid_cap;
+ struct v4l2_ctrl_handler *hdl_vid_out = &dev->ctrl_hdl_vid_out;
+ struct v4l2_ctrl_handler *hdl_vbi_cap = &dev->ctrl_hdl_vbi_cap;
+ struct v4l2_ctrl_handler *hdl_vbi_out = &dev->ctrl_hdl_vbi_out;
+ struct v4l2_ctrl_handler *hdl_radio_rx = &dev->ctrl_hdl_radio_rx;
+ struct v4l2_ctrl_handler *hdl_radio_tx = &dev->ctrl_hdl_radio_tx;
+ struct v4l2_ctrl_handler *hdl_sdr_cap = &dev->ctrl_hdl_sdr_cap;
+ struct v4l2_ctrl_config vivid_ctrl_dv_timings = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_DV_TIMINGS,
+ .name = "DV Timings",
+ .type = V4L2_CTRL_TYPE_MENU,
+ };
+ int i;
+
+ v4l2_ctrl_handler_init(hdl_user_gen, 10);
+ v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_class, NULL);
+ v4l2_ctrl_handler_init(hdl_user_vid, 9);
+ v4l2_ctrl_new_custom(hdl_user_vid, &vivid_ctrl_class, NULL);
+ v4l2_ctrl_handler_init(hdl_user_aud, 2);
+ v4l2_ctrl_new_custom(hdl_user_aud, &vivid_ctrl_class, NULL);
+ v4l2_ctrl_handler_init(hdl_streaming, 8);
+ v4l2_ctrl_new_custom(hdl_streaming, &vivid_ctrl_class, NULL);
+ v4l2_ctrl_handler_init(hdl_sdtv_cap, 2);
+ v4l2_ctrl_new_custom(hdl_sdtv_cap, &vivid_ctrl_class, NULL);
+ v4l2_ctrl_handler_init(hdl_loop_out, 1);
+ v4l2_ctrl_new_custom(hdl_loop_out, &vivid_ctrl_class, NULL);
+ v4l2_ctrl_handler_init(hdl_vid_cap, 55);
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_class, NULL);
+ v4l2_ctrl_handler_init(hdl_vid_out, 26);
+ v4l2_ctrl_new_custom(hdl_vid_out, &vivid_ctrl_class, NULL);
+ v4l2_ctrl_handler_init(hdl_vbi_cap, 21);
+ v4l2_ctrl_new_custom(hdl_vbi_cap, &vivid_ctrl_class, NULL);
+ v4l2_ctrl_handler_init(hdl_vbi_out, 19);
+ v4l2_ctrl_new_custom(hdl_vbi_out, &vivid_ctrl_class, NULL);
+ v4l2_ctrl_handler_init(hdl_radio_rx, 17);
+ v4l2_ctrl_new_custom(hdl_radio_rx, &vivid_ctrl_class, NULL);
+ v4l2_ctrl_handler_init(hdl_radio_tx, 17);
+ v4l2_ctrl_new_custom(hdl_radio_tx, &vivid_ctrl_class, NULL);
+ v4l2_ctrl_handler_init(hdl_sdr_cap, 18);
+ v4l2_ctrl_new_custom(hdl_sdr_cap, &vivid_ctrl_class, NULL);
+
+ /* User Controls */
+ dev->volume = v4l2_ctrl_new_std(hdl_user_aud, NULL,
+ V4L2_CID_AUDIO_VOLUME, 0, 255, 1, 200);
+ dev->mute = v4l2_ctrl_new_std(hdl_user_aud, NULL,
+ V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0);
+ if (dev->has_vid_cap) {
+ dev->brightness = v4l2_ctrl_new_std(hdl_user_vid, &vivid_user_vid_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
+ for (i = 0; i < MAX_INPUTS; i++)
+ dev->input_brightness[i] = 128;
+ dev->contrast = v4l2_ctrl_new_std(hdl_user_vid, &vivid_user_vid_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 255, 1, 128);
+ dev->saturation = v4l2_ctrl_new_std(hdl_user_vid, &vivid_user_vid_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 255, 1, 128);
+ dev->hue = v4l2_ctrl_new_std(hdl_user_vid, &vivid_user_vid_ctrl_ops,
+ V4L2_CID_HUE, -128, 128, 1, 0);
+ v4l2_ctrl_new_std(hdl_user_vid, &vivid_user_vid_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(hdl_user_vid, &vivid_user_vid_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ dev->autogain = v4l2_ctrl_new_std(hdl_user_vid, &vivid_user_vid_ctrl_ops,
+ V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+ dev->gain = v4l2_ctrl_new_std(hdl_user_vid, &vivid_user_vid_ctrl_ops,
+ V4L2_CID_GAIN, 0, 255, 1, 100);
+ dev->alpha = v4l2_ctrl_new_std(hdl_user_vid, &vivid_user_vid_ctrl_ops,
+ V4L2_CID_ALPHA_COMPONENT, 0, 255, 1, 0);
+ }
+ dev->button = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_button, NULL);
+ dev->int32 = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_int32, NULL);
+ dev->int64 = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_int64, NULL);
+ dev->boolean = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_boolean, NULL);
+ dev->menu = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_menu, NULL);
+ dev->string = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_string, NULL);
+ dev->bitmask = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_bitmask, NULL);
+ dev->int_menu = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_int_menu, NULL);
+
+ if (dev->has_vid_cap) {
+ /* Image Processing Controls */
+ struct v4l2_ctrl_config vivid_ctrl_test_pattern = {
+ .ops = &vivid_vid_cap_ctrl_ops,
+ .id = VIVID_CID_TEST_PATTERN,
+ .name = "Test Pattern",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .max = TPG_PAT_NOISE,
+ .qmenu = tpg_pattern_strings,
+ };
+
+ dev->test_pattern = v4l2_ctrl_new_custom(hdl_vid_cap,
+ &vivid_ctrl_test_pattern, NULL);
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_perc_fill, NULL);
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_hor_movement, NULL);
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_vert_movement, NULL);
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_osd_mode, NULL);
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_show_border, NULL);
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_show_square, NULL);
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_hflip, NULL);
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_vflip, NULL);
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_insert_sav, NULL);
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_insert_eav, NULL);
+ if (show_ccs_cap) {
+ dev->ctrl_has_crop_cap = v4l2_ctrl_new_custom(hdl_vid_cap,
+ &vivid_ctrl_has_crop_cap, NULL);
+ dev->ctrl_has_compose_cap = v4l2_ctrl_new_custom(hdl_vid_cap,
+ &vivid_ctrl_has_compose_cap, NULL);
+ dev->ctrl_has_scaler_cap = v4l2_ctrl_new_custom(hdl_vid_cap,
+ &vivid_ctrl_has_scaler_cap, NULL);
+ }
+
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_tstamp_src, NULL);
+ dev->colorspace = v4l2_ctrl_new_custom(hdl_vid_cap,
+ &vivid_ctrl_colorspace, NULL);
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_alpha_mode, NULL);
+ }
+
+ if (dev->has_vid_out && show_ccs_out) {
+ dev->ctrl_has_crop_out = v4l2_ctrl_new_custom(hdl_vid_out,
+ &vivid_ctrl_has_crop_out, NULL);
+ dev->ctrl_has_compose_out = v4l2_ctrl_new_custom(hdl_vid_out,
+ &vivid_ctrl_has_compose_out, NULL);
+ dev->ctrl_has_scaler_out = v4l2_ctrl_new_custom(hdl_vid_out,
+ &vivid_ctrl_has_scaler_out, NULL);
+ }
+
+ /*
+ * Testing this driver with v4l2-compliance will trigger the error
+ * injection controls, and after that nothing will work as expected.
+ * So we have a module option to drop these error injection controls,
+ * allowing us to run v4l2-compliance again.
+ */
+ if (!no_error_inj) {
+ v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_disconnect, NULL);
+ v4l2_ctrl_new_custom(hdl_streaming, &vivid_ctrl_dqbuf_error, NULL);
+ v4l2_ctrl_new_custom(hdl_streaming, &vivid_ctrl_perc_dropped, NULL);
+ v4l2_ctrl_new_custom(hdl_streaming, &vivid_ctrl_queue_setup_error, NULL);
+ v4l2_ctrl_new_custom(hdl_streaming, &vivid_ctrl_buf_prepare_error, NULL);
+ v4l2_ctrl_new_custom(hdl_streaming, &vivid_ctrl_start_streaming_error, NULL);
+ v4l2_ctrl_new_custom(hdl_streaming, &vivid_ctrl_queue_error, NULL);
+ v4l2_ctrl_new_custom(hdl_streaming, &vivid_ctrl_seq_wrap, NULL);
+ v4l2_ctrl_new_custom(hdl_streaming, &vivid_ctrl_time_wrap, NULL);
+ }
+
+ if (has_sdtv && (dev->has_vid_cap || dev->has_vbi_cap)) {
+ if (dev->has_vid_cap)
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_std_aspect_ratio, NULL);
+ dev->ctrl_std_signal_mode = v4l2_ctrl_new_custom(hdl_sdtv_cap,
+ &vivid_ctrl_std_signal_mode, NULL);
+ dev->ctrl_standard = v4l2_ctrl_new_custom(hdl_sdtv_cap,
+ &vivid_ctrl_standard, NULL);
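+ /*
+ * v4l2_ctrl_cluster() takes an array of control pointers, so this
+ * relies on ctrl_std_signal_mode and ctrl_standard being adjacent
+ * fields of struct vivid_dev. The same holds for the DV timings
+ * cluster below.
+ */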
+ if (dev->ctrl_std_signal_mode)
+ v4l2_ctrl_cluster(2, &dev->ctrl_std_signal_mode);
+ if (dev->has_raw_vbi_cap)
+ v4l2_ctrl_new_custom(hdl_vbi_cap, &vivid_ctrl_vbi_cap_interlaced, NULL);
+ }
+
+ if (has_hdmi && dev->has_vid_cap) {
+ dev->ctrl_dv_timings_signal_mode = v4l2_ctrl_new_custom(hdl_vid_cap,
+ &vivid_ctrl_dv_timings_signal_mode, NULL);
+
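+ /*
+ * The DV Timings menu entries are only known at runtime, which is
+ * why vivid_ctrl_dv_timings is filled in here instead of being a
+ * static const config.
+ */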
+ vivid_ctrl_dv_timings.max = dev->query_dv_timings_size - 1;
+ vivid_ctrl_dv_timings.qmenu =
+ (const char * const *)dev->query_dv_timings_qmenu;
+ dev->ctrl_dv_timings = v4l2_ctrl_new_custom(hdl_vid_cap,
+ &vivid_ctrl_dv_timings, NULL);
+ if (dev->ctrl_dv_timings_signal_mode)
+ v4l2_ctrl_cluster(2, &dev->ctrl_dv_timings_signal_mode);
+
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_dv_timings_aspect_ratio, NULL);
+ v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_max_edid_blocks, NULL);
+ dev->real_rgb_range_cap = v4l2_ctrl_new_custom(hdl_vid_cap,
+ &vivid_ctrl_limited_rgb_range, NULL);
+ dev->rgb_range_cap = v4l2_ctrl_new_std_menu(hdl_vid_cap,
+ &vivid_vid_cap_ctrl_ops,
+ V4L2_CID_DV_RX_RGB_RANGE, V4L2_DV_RGB_RANGE_FULL,
+ 0, V4L2_DV_RGB_RANGE_AUTO);
+ }
+ if (has_hdmi && dev->has_vid_out) {
+ /*
+ * We aren't doing anything with this at the moment, but
+ * HDMI outputs typically have these controls.
+ */
+ dev->ctrl_tx_rgb_range = v4l2_ctrl_new_std_menu(hdl_vid_out, NULL,
+ V4L2_CID_DV_TX_RGB_RANGE, V4L2_DV_RGB_RANGE_FULL,
+ 0, V4L2_DV_RGB_RANGE_AUTO);
+ dev->ctrl_tx_mode = v4l2_ctrl_new_std_menu(hdl_vid_out, NULL,
+ V4L2_CID_DV_TX_MODE, V4L2_DV_TX_MODE_HDMI,
+ 0, V4L2_DV_TX_MODE_HDMI);
+ }
+ if ((dev->has_vid_cap && dev->has_vid_out) ||
+ (dev->has_vbi_cap && dev->has_vbi_out))
+ v4l2_ctrl_new_custom(hdl_loop_out, &vivid_ctrl_loop_video, NULL);
+
+ if (dev->has_fb)
+ v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_clear_fb, NULL);
+
+ if (dev->has_radio_rx) {
+ v4l2_ctrl_new_custom(hdl_radio_rx, &vivid_ctrl_radio_hw_seek_mode, NULL);
+ v4l2_ctrl_new_custom(hdl_radio_rx, &vivid_ctrl_radio_hw_seek_prog_lim, NULL);
+ v4l2_ctrl_new_custom(hdl_radio_rx, &vivid_ctrl_radio_rx_rds_blockio, NULL);
+ v4l2_ctrl_new_custom(hdl_radio_rx, &vivid_ctrl_radio_rx_rds_rbds, NULL);
+ v4l2_ctrl_new_std(hdl_radio_rx, &vivid_radio_rx_ctrl_ops,
+ V4L2_CID_RDS_RECEPTION, 0, 1, 1, 1);
+ dev->radio_rx_rds_pty = v4l2_ctrl_new_std(hdl_radio_rx,
+ &vivid_radio_rx_ctrl_ops,
+ V4L2_CID_RDS_RX_PTY, 0, 31, 1, 0);
+ dev->radio_rx_rds_psname = v4l2_ctrl_new_std(hdl_radio_rx,
+ &vivid_radio_rx_ctrl_ops,
+ V4L2_CID_RDS_RX_PS_NAME, 0, 8, 8, 0);
+ dev->radio_rx_rds_radiotext = v4l2_ctrl_new_std(hdl_radio_rx,
+ &vivid_radio_rx_ctrl_ops,
+ V4L2_CID_RDS_RX_RADIO_TEXT, 0, 64, 64, 0);
+ dev->radio_rx_rds_ta = v4l2_ctrl_new_std(hdl_radio_rx,
+ &vivid_radio_rx_ctrl_ops,
+ V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT, 0, 1, 1, 0);
+ dev->radio_rx_rds_tp = v4l2_ctrl_new_std(hdl_radio_rx,
+ &vivid_radio_rx_ctrl_ops,
+ V4L2_CID_RDS_RX_TRAFFIC_PROGRAM, 0, 1, 1, 0);
+ dev->radio_rx_rds_ms = v4l2_ctrl_new_std(hdl_radio_rx,
+ &vivid_radio_rx_ctrl_ops,
+ V4L2_CID_RDS_RX_MUSIC_SPEECH, 0, 1, 1, 1);
+ }
+ if (dev->has_radio_tx) {
+ v4l2_ctrl_new_custom(hdl_radio_tx,
+ &vivid_ctrl_radio_tx_rds_blockio, NULL);
+ dev->radio_tx_rds_pi = v4l2_ctrl_new_std(hdl_radio_tx,
+ &vivid_radio_tx_ctrl_ops,
+ V4L2_CID_RDS_TX_PI, 0, 0xffff, 1, 0x8088);
+ dev->radio_tx_rds_pty = v4l2_ctrl_new_std(hdl_radio_tx,
+ &vivid_radio_tx_ctrl_ops,
+ V4L2_CID_RDS_TX_PTY, 0, 31, 1, 3);
+ dev->radio_tx_rds_psname = v4l2_ctrl_new_std(hdl_radio_tx,
+ &vivid_radio_tx_ctrl_ops,
+ V4L2_CID_RDS_TX_PS_NAME, 0, 8, 8, 0);
+ if (dev->radio_tx_rds_psname)
+ v4l2_ctrl_s_ctrl_string(dev->radio_tx_rds_psname, "VIVID-TX");
+ dev->radio_tx_rds_radiotext = v4l2_ctrl_new_std(hdl_radio_tx,
+ &vivid_radio_tx_ctrl_ops,
+ V4L2_CID_RDS_TX_RADIO_TEXT, 0, 64 * 2, 64, 0);
+ if (dev->radio_tx_rds_radiotext)
+ v4l2_ctrl_s_ctrl_string(dev->radio_tx_rds_radiotext,
+ "This is a VIVID default Radio Text template text, change at will");
+ dev->radio_tx_rds_mono_stereo = v4l2_ctrl_new_std(hdl_radio_tx,
+ &vivid_radio_tx_ctrl_ops,
+ V4L2_CID_RDS_TX_MONO_STEREO, 0, 1, 1, 1);
+ dev->radio_tx_rds_art_head = v4l2_ctrl_new_std(hdl_radio_tx,
+ &vivid_radio_tx_ctrl_ops,
+ V4L2_CID_RDS_TX_ARTIFICIAL_HEAD, 0, 1, 1, 0);
+ dev->radio_tx_rds_compressed = v4l2_ctrl_new_std(hdl_radio_tx,
+ &vivid_radio_tx_ctrl_ops,
+ V4L2_CID_RDS_TX_COMPRESSED, 0, 1, 1, 0);
+ dev->radio_tx_rds_dyn_pty = v4l2_ctrl_new_std(hdl_radio_tx,
+ &vivid_radio_tx_ctrl_ops,
+ V4L2_CID_RDS_TX_DYNAMIC_PTY, 0, 1, 1, 0);
+ dev->radio_tx_rds_ta = v4l2_ctrl_new_std(hdl_radio_tx,
+ &vivid_radio_tx_ctrl_ops,
+ V4L2_CID_RDS_TX_TRAFFIC_ANNOUNCEMENT, 0, 1, 1, 0);
+ dev->radio_tx_rds_tp = v4l2_ctrl_new_std(hdl_radio_tx,
+ &vivid_radio_tx_ctrl_ops,
+ V4L2_CID_RDS_TX_TRAFFIC_PROGRAM, 0, 1, 1, 1);
+ dev->radio_tx_rds_ms = v4l2_ctrl_new_std(hdl_radio_tx,
+ &vivid_radio_tx_ctrl_ops,
+ V4L2_CID_RDS_TX_MUSIC_SPEECH, 0, 1, 1, 1);
+ }
+ if (hdl_user_gen->error)
+ return hdl_user_gen->error;
+ if (hdl_user_vid->error)
+ return hdl_user_vid->error;
+ if (hdl_user_aud->error)
+ return hdl_user_aud->error;
+ if (hdl_streaming->error)
+ return hdl_streaming->error;
+ if (hdl_sdr_cap->error)
+ return hdl_sdr_cap->error;
+ if (hdl_loop_out->error)
+ return hdl_loop_out->error;
+
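+ /*
+ * Cluster autogain and gain: the gain control becomes volatile
+ * whenever autogain is enabled (autogain == 0 is manual mode).
+ */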
+ if (dev->autogain)
+ v4l2_ctrl_auto_cluster(2, &dev->autogain, 0, true);
+
+ if (dev->has_vid_cap) {
+ v4l2_ctrl_add_handler(hdl_vid_cap, hdl_user_gen, NULL);
+ v4l2_ctrl_add_handler(hdl_vid_cap, hdl_user_vid, NULL);
+ v4l2_ctrl_add_handler(hdl_vid_cap, hdl_user_aud, NULL);
+ v4l2_ctrl_add_handler(hdl_vid_cap, hdl_streaming, NULL);
+ v4l2_ctrl_add_handler(hdl_vid_cap, hdl_sdtv_cap, NULL);
+ if (hdl_vid_cap->error)
+ return hdl_vid_cap->error;
+ dev->vid_cap_dev.ctrl_handler = hdl_vid_cap;
+ }
+ if (dev->has_vid_out) {
+ v4l2_ctrl_add_handler(hdl_vid_out, hdl_user_gen, NULL);
+ v4l2_ctrl_add_handler(hdl_vid_out, hdl_user_aud, NULL);
+ v4l2_ctrl_add_handler(hdl_vid_out, hdl_streaming, NULL);
+ v4l2_ctrl_add_handler(hdl_vid_out, hdl_loop_out, NULL);
+ if (hdl_vid_out->error)
+ return hdl_vid_out->error;
+ dev->vid_out_dev.ctrl_handler = hdl_vid_out;
+ }
+ if (dev->has_vbi_cap) {
+ v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_user_gen, NULL);
+ v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_streaming, NULL);
+ v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_sdtv_cap, NULL);
+ if (hdl_vbi_cap->error)
+ return hdl_vbi_cap->error;
+ dev->vbi_cap_dev.ctrl_handler = hdl_vbi_cap;
+ }
+ if (dev->has_vbi_out) {
+ v4l2_ctrl_add_handler(hdl_vbi_out, hdl_user_gen, NULL);
+ v4l2_ctrl_add_handler(hdl_vbi_out, hdl_streaming, NULL);
+ v4l2_ctrl_add_handler(hdl_vbi_out, hdl_loop_out, NULL);
+ if (hdl_vbi_out->error)
+ return hdl_vbi_out->error;
+ dev->vbi_out_dev.ctrl_handler = hdl_vbi_out;
+ }
+ if (dev->has_radio_rx) {
+ v4l2_ctrl_add_handler(hdl_radio_rx, hdl_user_gen, NULL);
+ v4l2_ctrl_add_handler(hdl_radio_rx, hdl_user_aud, NULL);
+ if (hdl_radio_rx->error)
+ return hdl_radio_rx->error;
+ dev->radio_rx_dev.ctrl_handler = hdl_radio_rx;
+ }
+ if (dev->has_radio_tx) {
+ v4l2_ctrl_add_handler(hdl_radio_tx, hdl_user_gen, NULL);
+ v4l2_ctrl_add_handler(hdl_radio_tx, hdl_user_aud, NULL);
+ if (hdl_radio_tx->error)
+ return hdl_radio_tx->error;
+ dev->radio_tx_dev.ctrl_handler = hdl_radio_tx;
+ }
+ if (dev->has_sdr_cap) {
+ v4l2_ctrl_add_handler(hdl_sdr_cap, hdl_user_gen, NULL);
+ v4l2_ctrl_add_handler(hdl_sdr_cap, hdl_streaming, NULL);
+ if (hdl_sdr_cap->error)
+ return hdl_sdr_cap->error;
+ dev->sdr_cap_dev.ctrl_handler = hdl_sdr_cap;
+ }
+ return 0;
+}
+
+void vivid_free_controls(struct vivid_dev *dev)
+{
+ v4l2_ctrl_handler_free(&dev->ctrl_hdl_vid_cap);
+ v4l2_ctrl_handler_free(&dev->ctrl_hdl_vid_out);
+ v4l2_ctrl_handler_free(&dev->ctrl_hdl_vbi_cap);
+ v4l2_ctrl_handler_free(&dev->ctrl_hdl_vbi_out);
+ v4l2_ctrl_handler_free(&dev->ctrl_hdl_radio_rx);
+ v4l2_ctrl_handler_free(&dev->ctrl_hdl_radio_tx);
+ v4l2_ctrl_handler_free(&dev->ctrl_hdl_sdr_cap);
+ v4l2_ctrl_handler_free(&dev->ctrl_hdl_user_gen);
+ v4l2_ctrl_handler_free(&dev->ctrl_hdl_user_vid);
+ v4l2_ctrl_handler_free(&dev->ctrl_hdl_user_aud);
+ v4l2_ctrl_handler_free(&dev->ctrl_hdl_streaming);
+ v4l2_ctrl_handler_free(&dev->ctrl_hdl_sdtv_cap);
+ v4l2_ctrl_handler_free(&dev->ctrl_hdl_loop_out);
+}
diff --git a/drivers/media/platform/vivid/vivid-ctrls.h b/drivers/media/platform/vivid/vivid-ctrls.h
new file mode 100644
index 000000000000..9bcca9d56359
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-ctrls.h
@@ -0,0 +1,34 @@
+/*
+ * vivid-ctrls.h - control support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _VIVID_CTRLS_H_
+#define _VIVID_CTRLS_H_
+
+enum vivid_hw_seek_modes {
+ VIVID_HW_SEEK_BOUNDED,
+ VIVID_HW_SEEK_WRAP,
+ VIVID_HW_SEEK_BOTH,
+};
+
+int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
+ bool show_ccs_out, bool no_error_inj,
+ bool has_sdtv, bool has_hdmi);
+void vivid_free_controls(struct vivid_dev *dev);
+
+#endif
diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c
new file mode 100644
index 000000000000..39a67cfae120
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-kthread-cap.c
@@ -0,0 +1,886 @@
+/*
+ * vivid-kthread-cap.c - video/vbi capture thread support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/font.h>
+#include <linux/mutex.h>
+#include <linux/videodev2.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <linux/random.h>
+#include <linux/v4l2-dv-timings.h>
+#include <asm/div64.h>
+#include <media/videobuf2-vmalloc.h>
+#include <media/v4l2-dv-timings.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
+
+#include "vivid-core.h"
+#include "vivid-vid-common.h"
+#include "vivid-vid-cap.h"
+#include "vivid-vid-out.h"
+#include "vivid-radio-common.h"
+#include "vivid-radio-rx.h"
+#include "vivid-radio-tx.h"
+#include "vivid-sdr-cap.h"
+#include "vivid-vbi-cap.h"
+#include "vivid-vbi-out.h"
+#include "vivid-osd.h"
+#include "vivid-ctrls.h"
+#include "vivid-kthread-cap.h"
+
+static inline v4l2_std_id vivid_get_std_cap(const struct vivid_dev *dev)
+{
+ if (vivid_is_sdtv_cap(dev))
+ return dev->std_cap;
+ return 0;
+}
+
+static void copy_pix(struct vivid_dev *dev, int win_y, int win_x,
+ u16 *cap, const u16 *osd)
+{
+ u16 out;
+ int left = dev->overlay_out_left;
+ int top = dev->overlay_out_top;
+ int fb_x = win_x + left;
+ int fb_y = win_y + top;
+ int i;
+
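+ /*
+ * Tentatively replace the captured video pixel with the OSD pixel;
+ * the early returns below keep the OSD pixel, while falling through
+ * to the end restores the original video pixel.
+ */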
+ out = *cap;
+ *cap = *osd;
+ if (dev->bitmap_out) {
+ const u8 *p = dev->bitmap_out;
+ unsigned stride = (dev->compose_out.width + 7) / 8;
+
+ win_x -= dev->compose_out.left;
+ win_y -= dev->compose_out.top;
+ if (!(p[stride * win_y + win_x / 8] & (1 << (win_x & 7))))
+ return;
+ }
+
+ for (i = 0; i < dev->clipcount_out; i++) {
+ struct v4l2_rect *r = &dev->clips_out[i].c;
+
+ if (fb_y >= r->top && fb_y < r->top + r->height &&
+ fb_x >= r->left && fb_x < r->left + r->width)
+ return;
+ }
+ if ((dev->fbuf_out_flags & V4L2_FBUF_FLAG_CHROMAKEY) &&
+ *osd != dev->chromakey_out)
+ return;
+ if ((dev->fbuf_out_flags & V4L2_FBUF_FLAG_SRC_CHROMAKEY) &&
+ out == dev->chromakey_out)
+ return;
+ if (dev->fmt_cap->alpha_mask) {
+ if ((dev->fbuf_out_flags & V4L2_FBUF_FLAG_GLOBAL_ALPHA) &&
+ dev->global_alpha_out)
+ return;
+ if ((dev->fbuf_out_flags & V4L2_FBUF_FLAG_LOCAL_ALPHA) &&
+ *cap & dev->fmt_cap->alpha_mask)
+ return;
+ if ((dev->fbuf_out_flags & V4L2_FBUF_FLAG_LOCAL_INV_ALPHA) &&
+ !(*cap & dev->fmt_cap->alpha_mask))
+ return;
+ }
+ *cap = out;
+}
+
+static void blend_line(struct vivid_dev *dev, unsigned y_offset, unsigned x_offset,
+ u8 *vcapbuf, const u8 *vosdbuf,
+ unsigned width, unsigned pixsize)
+{
+ unsigned x;
+
+ for (x = 0; x < width; x++, vcapbuf += pixsize, vosdbuf += pixsize) {
+ copy_pix(dev, y_offset, x_offset + x,
+ (u16 *)vcapbuf, (const u16 *)vosdbuf);
+ }
+}
+
+static void scale_line(const u8 *src, u8 *dst, unsigned srcw, unsigned dstw, unsigned twopixsize)
+{
+ /* Coarse scaling with Bresenham */
+ unsigned int_part;
+ unsigned fract_part;
+ unsigned src_x = 0;
+ unsigned error = 0;
+ unsigned x;
+
+ /*
+ * We always combine two pixels to prevent color bleed in the packed
+ * yuv case.
+ */
+ srcw /= 2;
+ dstw /= 2;
+ int_part = srcw / dstw;
+ fract_part = srcw % dstw;
+ for (x = 0; x < dstw; x++, dst += twopixsize) {
+ memcpy(dst, src + src_x * twopixsize, twopixsize);
+ src_x += int_part;
+ error += fract_part;
+ if (error >= dstw) {
+ error -= dstw;
+ src_x++;
+ }
+ }
+}
+
+/*
+ * Precalculate the rectangles needed to perform video looping:
+ *
+ * The nominal pipeline is that the video output buffer is cropped by
+ * crop_out, scaled to compose_out, overlaid with the output overlay,
+ * cropped on the capture side by crop_cap and scaled again to the video
+ * capture buffer using compose_cap.
+ *
+ * To keep things efficient we calculate the intersection of compose_out
+ * and crop_cap (since that's the only part of the video that will
+ * actually end up in the capture buffer), determine which part of the
+ * video output buffer that is and which part of the video capture buffer
+ * so we can scale the video straight from the output buffer to the capture
+ * buffer without any intermediate steps.
+ *
+ * If we need to deal with an output overlay, then there is no choice and
+ * that intermediate step still has to be taken. For the output overlay
+ * support we calculate the intersection of the framebuffer and the overlay
+ * window (which may be partially or wholly outside of the framebuffer
+ * itself) and the intersection of that with loop_vid_copy (i.e. the part of
+ * the actual looped video that will be overlaid). The result is calculated
+ * both in framebuffer coordinates (loop_fb_copy) and compose_out coordinates
+ * (loop_vid_overlay). Finally calculate the part of the capture buffer that
+ * will receive that overlaid video.
+ */
+static void vivid_precalc_copy_rects(struct vivid_dev *dev)
+{
+ /* Framebuffer rectangle */
+ struct v4l2_rect r_fb = {
+ 0, 0, dev->display_width, dev->display_height
+ };
+ /* Overlay window rectangle in framebuffer coordinates */
+ struct v4l2_rect r_overlay = {
+ dev->overlay_out_left, dev->overlay_out_top,
+ dev->compose_out.width, dev->compose_out.height
+ };
+
+ dev->loop_vid_copy = rect_intersect(&dev->crop_cap, &dev->compose_out);
+
+ dev->loop_vid_out = dev->loop_vid_copy;
+ rect_scale(&dev->loop_vid_out, &dev->compose_out, &dev->crop_out);
+ dev->loop_vid_out.left += dev->crop_out.left;
+ dev->loop_vid_out.top += dev->crop_out.top;
+
+ dev->loop_vid_cap = dev->loop_vid_copy;
+ rect_scale(&dev->loop_vid_cap, &dev->crop_cap, &dev->compose_cap);
+
+ dprintk(dev, 1,
+ "loop_vid_copy: %dx%d@%dx%d loop_vid_out: %dx%d@%dx%d loop_vid_cap: %dx%d@%dx%d\n",
+ dev->loop_vid_copy.width, dev->loop_vid_copy.height,
+ dev->loop_vid_copy.left, dev->loop_vid_copy.top,
+ dev->loop_vid_out.width, dev->loop_vid_out.height,
+ dev->loop_vid_out.left, dev->loop_vid_out.top,
+ dev->loop_vid_cap.width, dev->loop_vid_cap.height,
+ dev->loop_vid_cap.left, dev->loop_vid_cap.top);
+
+ r_overlay = rect_intersect(&r_fb, &r_overlay);
+
+ /* shift r_overlay to the same origin as compose_out */
+ r_overlay.left += dev->compose_out.left - dev->overlay_out_left;
+ r_overlay.top += dev->compose_out.top - dev->overlay_out_top;
+
+ dev->loop_vid_overlay = rect_intersect(&r_overlay, &dev->loop_vid_copy);
+ dev->loop_fb_copy = dev->loop_vid_overlay;
+
+ /* shift dev->loop_fb_copy back again to the fb origin */
+ dev->loop_fb_copy.left -= dev->compose_out.left - dev->overlay_out_left;
+ dev->loop_fb_copy.top -= dev->compose_out.top - dev->overlay_out_top;
+
+ dev->loop_vid_overlay_cap = dev->loop_vid_overlay;
+ rect_scale(&dev->loop_vid_overlay_cap, &dev->crop_cap, &dev->compose_cap);
+
+ dprintk(dev, 1,
+ "loop_fb_copy: %dx%d@%dx%d loop_vid_overlay: %dx%d@%dx%d loop_vid_overlay_cap: %dx%d@%dx%d\n",
+ dev->loop_fb_copy.width, dev->loop_fb_copy.height,
+ dev->loop_fb_copy.left, dev->loop_fb_copy.top,
+ dev->loop_vid_overlay.width, dev->loop_vid_overlay.height,
+ dev->loop_vid_overlay.left, dev->loop_vid_overlay.top,
+ dev->loop_vid_overlay_cap.width, dev->loop_vid_overlay_cap.height,
+ dev->loop_vid_overlay_cap.left, dev->loop_vid_overlay_cap.top);
+}
+
+static int vivid_copy_buffer(struct vivid_dev *dev, unsigned p, u8 *vcapbuf,
+ struct vivid_buffer *vid_cap_buf)
+{
+ bool blank = dev->must_blank[vid_cap_buf->vb.v4l2_buf.index];
+ struct tpg_data *tpg = &dev->tpg;
+ struct vivid_buffer *vid_out_buf = NULL;
+ unsigned pixsize = tpg_g_twopixelsize(tpg, p) / 2;
+ unsigned img_width = dev->compose_cap.width;
+ unsigned img_height = dev->compose_cap.height;
+ unsigned stride_cap = tpg->bytesperline[p];
+ unsigned stride_out = dev->bytesperline_out[p];
+ unsigned stride_osd = dev->display_byte_stride;
+ unsigned hmax = (img_height * tpg->perc_fill) / 100;
+ u8 *voutbuf;
+ u8 *vosdbuf = NULL;
+ unsigned y;
+ bool blend = dev->bitmap_out || dev->clipcount_out || dev->fbuf_out_flags;
+ /* Coarse scaling with Bresenham */
+ unsigned vid_out_int_part;
+ unsigned vid_out_fract_part;
+ unsigned vid_out_y = 0;
+ unsigned vid_out_error = 0;
+ unsigned vid_overlay_int_part = 0;
+ unsigned vid_overlay_fract_part = 0;
+ unsigned vid_overlay_y = 0;
+ unsigned vid_overlay_error = 0;
+ unsigned vid_cap_right;
+ bool quick;
+
+ vid_out_int_part = dev->loop_vid_out.height / dev->loop_vid_cap.height;
+ vid_out_fract_part = dev->loop_vid_out.height % dev->loop_vid_cap.height;
+
+ if (!list_empty(&dev->vid_out_active))
+ vid_out_buf = list_entry(dev->vid_out_active.next,
+ struct vivid_buffer, list);
+ if (vid_out_buf == NULL)
+ return -ENODATA;
+
+ vid_cap_buf->vb.v4l2_buf.field = vid_out_buf->vb.v4l2_buf.field;
+
+ voutbuf = vb2_plane_vaddr(&vid_out_buf->vb, p) +
+ vid_out_buf->vb.v4l2_planes[p].data_offset;
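+ /* Advance to the top-left corners of loop_vid_out and compose_cap */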
+ voutbuf += dev->loop_vid_out.left * pixsize + dev->loop_vid_out.top * stride_out;
+ vcapbuf += dev->compose_cap.left * pixsize + dev->compose_cap.top * stride_cap;
+
+ if (dev->loop_vid_copy.width == 0 || dev->loop_vid_copy.height == 0) {
+ /*
+ * If there is nothing to copy, then just fill the capture window
+ * with black.
+ */
+ for (y = 0; y < hmax; y++, vcapbuf += stride_cap)
+ memcpy(vcapbuf, tpg->black_line[p], img_width * pixsize);
+ return 0;
+ }
+
+ if (dev->overlay_out_enabled &&
+ dev->loop_vid_overlay.width && dev->loop_vid_overlay.height) {
+ vosdbuf = dev->video_vbase;
+ vosdbuf += dev->loop_fb_copy.left * pixsize +
+ dev->loop_fb_copy.top * stride_osd;
+ vid_overlay_int_part = dev->loop_vid_overlay.height /
+ dev->loop_vid_overlay_cap.height;
+ vid_overlay_fract_part = dev->loop_vid_overlay.height %
+ dev->loop_vid_overlay_cap.height;
+ }
+
+ vid_cap_right = dev->loop_vid_cap.left + dev->loop_vid_cap.width;
+ /* quick is true if no video scaling is needed */
+ quick = dev->loop_vid_out.width == dev->loop_vid_cap.width;
+
+ dev->cur_scaled_line = dev->loop_vid_out.height;
+ for (y = 0; y < hmax; y++, vcapbuf += stride_cap) {
+ /* osdline is true if this line requires overlay blending */
+ bool osdline = vosdbuf && y >= dev->loop_vid_overlay_cap.top &&
+ y < dev->loop_vid_overlay_cap.top + dev->loop_vid_overlay_cap.height;
+
+ /*
+ * If this line of the capture buffer doesn't get any video, then
+ * just fill with black.
+ */
+ if (y < dev->loop_vid_cap.top ||
+ y >= dev->loop_vid_cap.top + dev->loop_vid_cap.height) {
+ memcpy(vcapbuf, tpg->black_line[p], img_width * pixsize);
+ continue;
+ }
+
+ /* fill the left border with black */
+ if (dev->loop_vid_cap.left)
+ memcpy(vcapbuf, tpg->black_line[p], dev->loop_vid_cap.left * pixsize);
+
+ /* fill the right border with black */
+ if (vid_cap_right < img_width)
+ memcpy(vcapbuf + vid_cap_right * pixsize,
+ tpg->black_line[p], (img_width - vid_cap_right) * pixsize);
+
+ if (quick && !osdline) {
+ memcpy(vcapbuf + dev->loop_vid_cap.left * pixsize,
+ voutbuf + vid_out_y * stride_out,
+ dev->loop_vid_cap.width * pixsize);
+ goto update_vid_out_y;
+ }
+ if (dev->cur_scaled_line == vid_out_y) {
+ memcpy(vcapbuf + dev->loop_vid_cap.left * pixsize,
+ dev->scaled_line,
+ dev->loop_vid_cap.width * pixsize);
+ goto update_vid_out_y;
+ }
+ if (!osdline) {
+ scale_line(voutbuf + vid_out_y * stride_out, dev->scaled_line,
+ dev->loop_vid_out.width, dev->loop_vid_cap.width,
+ tpg_g_twopixelsize(tpg, p));
+ } else {
+ /*
+ * Offset in bytes within loop_vid_copy to the start of the
+ * loop_vid_overlay rectangle.
+ */
+ unsigned offset =
+ (dev->loop_vid_overlay.left - dev->loop_vid_copy.left) * pixsize;
+ u8 *osd = vosdbuf + vid_overlay_y * stride_osd;
+
+ scale_line(voutbuf + vid_out_y * stride_out, dev->blended_line,
+ dev->loop_vid_out.width, dev->loop_vid_copy.width,
+ tpg_g_twopixelsize(tpg, p));
+ if (blend)
+ blend_line(dev, vid_overlay_y + dev->loop_vid_overlay.top,
+ dev->loop_vid_overlay.left,
+ dev->blended_line + offset, osd,
+ dev->loop_vid_overlay.width, pixsize);
+ else
+ memcpy(dev->blended_line + offset,
+ osd, dev->loop_vid_overlay.width * pixsize);
+ scale_line(dev->blended_line, dev->scaled_line,
+ dev->loop_vid_copy.width, dev->loop_vid_cap.width,
+ tpg_g_twopixelsize(tpg, p));
+ }
+ dev->cur_scaled_line = vid_out_y;
+ memcpy(vcapbuf + dev->loop_vid_cap.left * pixsize,
+ dev->scaled_line,
+ dev->loop_vid_cap.width * pixsize);
+
+update_vid_out_y:
+ if (osdline) {
+ vid_overlay_y += vid_overlay_int_part;
+ vid_overlay_error += vid_overlay_fract_part;
+ if (vid_overlay_error >= dev->loop_vid_overlay_cap.height) {
+ vid_overlay_error -= dev->loop_vid_overlay_cap.height;
+ vid_overlay_y++;
+ }
+ }
+ vid_out_y += vid_out_int_part;
+ vid_out_error += vid_out_fract_part;
+ if (vid_out_error >= dev->loop_vid_cap.height) {
+ vid_out_error -= dev->loop_vid_cap.height;
+ vid_out_y++;
+ }
+ }
+
+ if (!blank)
+ return 0;
+ for (; y < img_height; y++, vcapbuf += stride_cap)
+ memcpy(vcapbuf, tpg->contrast_line[p], img_width * pixsize);
+ return 0;
+}
+
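+/*
+ * Fill the capture buffer with the test pattern (or with looped output
+ * video), then draw the timestamp/sequence line and, depending on the
+ * OSD mode, the current control values on top of it.
+ */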
+static void vivid_fillbuff(struct vivid_dev *dev, struct vivid_buffer *buf)
+{
+ unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_cap) ? 2 : 1;
+ unsigned line_height = 16 / factor;
+ bool is_tv = vivid_is_sdtv_cap(dev);
+ bool is_60hz = is_tv && (dev->std_cap & V4L2_STD_525_60);
+ unsigned p;
+ int line = 1;
+ u8 *basep[TPG_MAX_PLANES][2];
+ unsigned ms;
+ char str[100];
+ s32 gain;
+ bool is_loop = false;
+
+ if (dev->loop_video && dev->can_loop_video &&
+ ((vivid_is_svid_cap(dev) && !VIVID_INVALID_SIGNAL(dev->std_signal_mode)) ||
+ (vivid_is_hdmi_cap(dev) && !VIVID_INVALID_SIGNAL(dev->dv_timings_signal_mode))))
+ is_loop = true;
+
+ buf->vb.v4l2_buf.sequence = dev->vid_cap_seq_count;
+ /*
+ * Take the timestamp now if the timestamp source is set to
+ * "Start of Exposure".
+ */
+ if (dev->tstamp_src_is_soe)
+ v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
+ if (dev->field_cap == V4L2_FIELD_ALTERNATE) {
+ /*
+ * 60 Hz standards start with the bottom field, 50 Hz standards
+ * with the top field. So if the 0-based seq_count is even,
+ * then the field is TOP for 50 Hz and BOTTOM for 60 Hz
+ * standards.
+ */
+ buf->vb.v4l2_buf.field = ((dev->vid_cap_seq_count & 1) ^ is_60hz) ?
+ V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM;
+ /*
+ * The sequence counter counts frames, not fields. So divide
+ * by two.
+ */
+ buf->vb.v4l2_buf.sequence /= 2;
+ } else {
+ buf->vb.v4l2_buf.field = dev->field_cap;
+ }
+ tpg_s_field(&dev->tpg, buf->vb.v4l2_buf.field);
+ tpg_s_perc_fill_blank(&dev->tpg, dev->must_blank[buf->vb.v4l2_buf.index]);
+
+ vivid_precalc_copy_rects(dev);
+
+ for (p = 0; p < tpg_g_planes(&dev->tpg); p++) {
+ void *vbuf = vb2_plane_vaddr(&buf->vb, p);
+
+ /*
+ * The first plane of a multiplanar format has a non-zero
+ * data_offset. This helps testing whether the application
+ * correctly supports non-zero data offsets.
+ */
+ if (dev->fmt_cap->data_offset[p]) {
+ memset(vbuf, dev->fmt_cap->data_offset[p] & 0xff,
+ dev->fmt_cap->data_offset[p]);
+ vbuf += dev->fmt_cap->data_offset[p];
+ }
+ tpg_calc_text_basep(&dev->tpg, basep, p, vbuf);
+ if (!is_loop || vivid_copy_buffer(dev, p, vbuf, buf))
+ tpg_fillbuffer(&dev->tpg, vivid_get_std_cap(dev), p, vbuf);
+ }
+ dev->must_blank[buf->vb.v4l2_buf.index] = false;
+
+ /* Update the stream time, but only at the start of a new frame. */
+ if (dev->field_cap != V4L2_FIELD_ALTERNATE || (buf->vb.v4l2_buf.sequence & 1) == 0)
+ dev->ms_vid_cap = jiffies_to_msecs(jiffies - dev->jiffies_vid_cap);
+
+ ms = dev->ms_vid_cap;
+ if (dev->osd_mode <= 1) {
+ snprintf(str, sizeof(str), " %02d:%02d:%02d:%03d %u%s",
+ (ms / (60 * 60 * 1000)) % 24,
+ (ms / (60 * 1000)) % 60,
+ (ms / 1000) % 60,
+ ms % 1000,
+ buf->vb.v4l2_buf.sequence,
+ (dev->field_cap == V4L2_FIELD_ALTERNATE) ?
+ (buf->vb.v4l2_buf.field == V4L2_FIELD_TOP ?
+ " top" : " bottom") : "");
+ tpg_gen_text(&dev->tpg, basep, line++ * line_height, 16, str);
+ }
+ if (dev->osd_mode == 0) {
+ snprintf(str, sizeof(str), " %dx%d, input %d ",
+ dev->src_rect.width, dev->src_rect.height, dev->input);
+ tpg_gen_text(&dev->tpg, basep, line++ * line_height, 16, str);
+
+ gain = v4l2_ctrl_g_ctrl(dev->gain);
+ mutex_lock(dev->ctrl_hdl_user_vid.lock);
+ snprintf(str, sizeof(str),
+ " brightness %3d, contrast %3d, saturation %3d, hue %d ",
+ dev->brightness->cur.val,
+ dev->contrast->cur.val,
+ dev->saturation->cur.val,
+ dev->hue->cur.val);
+ tpg_gen_text(&dev->tpg, basep, line++ * line_height, 16, str);
+ snprintf(str, sizeof(str),
+ " autogain %d, gain %3d, alpha 0x%02x ",
+ dev->autogain->cur.val, gain, dev->alpha->cur.val);
+ mutex_unlock(dev->ctrl_hdl_user_vid.lock);
+ tpg_gen_text(&dev->tpg, basep, line++ * line_height, 16, str);
+ mutex_lock(dev->ctrl_hdl_user_aud.lock);
+ snprintf(str, sizeof(str),
+ " volume %3d, mute %d ",
+ dev->volume->cur.val, dev->mute->cur.val);
+ mutex_unlock(dev->ctrl_hdl_user_aud.lock);
+ tpg_gen_text(&dev->tpg, basep, line++ * line_height, 16, str);
+ mutex_lock(dev->ctrl_hdl_user_gen.lock);
+ snprintf(str, sizeof(str), " int32 %d, int64 %lld, bitmask %08x ",
+ dev->int32->cur.val,
+ *dev->int64->p_cur.p_s64,
+ dev->bitmask->cur.val);
+ tpg_gen_text(&dev->tpg, basep, line++ * line_height, 16, str);
+ snprintf(str, sizeof(str), " boolean %d, menu %s, string \"%s\" ",
+ dev->boolean->cur.val,
+ dev->menu->qmenu[dev->menu->cur.val],
+ dev->string->p_cur.p_char);
+ tpg_gen_text(&dev->tpg, basep, line++ * line_height, 16, str);
+ snprintf(str, sizeof(str), " integer_menu %lld, value %d ",
+ dev->int_menu->qmenu_int[dev->int_menu->cur.val],
+ dev->int_menu->cur.val);
+ mutex_unlock(dev->ctrl_hdl_user_gen.lock);
+ tpg_gen_text(&dev->tpg, basep, line++ * line_height, 16, str);
+ if (dev->button_pressed) {
+ dev->button_pressed--;
+ snprintf(str, sizeof(str), " button pressed!");
+ tpg_gen_text(&dev->tpg, basep, line++ * line_height, 16, str);
+ }
+ }
+
+ /*
+ * If "End of Frame" is specified at the timestamp source, then take
+ * the timestamp now.
+ */
+ if (!dev->tstamp_src_is_soe)
+ v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
+ buf->vb.v4l2_buf.timestamp.tv_sec += dev->time_wrap_offset;
+}
+
+/*
+ * Return true if this pixel coordinate is a valid video pixel.
+ */
+static bool valid_pix(struct vivid_dev *dev, int win_y, int win_x, int fb_y, int fb_x)
+{
+ int i;
+
+ if (dev->bitmap_cap) {
+ /*
+ * Only if the corresponding bit in the bitmap is set can
+ * the video pixel be shown. Coordinates are relative to
+ * the overlay window set by VIDIOC_S_FMT.
+ */
+ const u8 *p = dev->bitmap_cap;
+ unsigned stride = (dev->compose_cap.width + 7) / 8;
+
+ if (!(p[stride * win_y + win_x / 8] & (1 << (win_x & 7))))
+ return false;
+ }
+
+ for (i = 0; i < dev->clipcount_cap; i++) {
+ /*
+ * Only if the framebuffer coordinate is not in any of the
+ * clip rectangles will the video pixel be shown.
+ */
+ struct v4l2_rect *r = &dev->clips_cap[i].c;
+
+ if (fb_y >= r->top && fb_y < r->top + r->height &&
+ fb_x >= r->left && fb_x < r->left + r->width)
+ return false;
+ }
+ return true;
+}
+
+/*
+ * Draw the image into the overlay buffer.
+ * Note that the combination of overlay and multiplanar is not supported.
+ */
+static void vivid_overlay(struct vivid_dev *dev, struct vivid_buffer *buf)
+{
+ struct tpg_data *tpg = &dev->tpg;
+ unsigned pixsize = tpg_g_twopixelsize(tpg, 0) / 2;
+ void *vbase = dev->fb_vbase_cap;
+ void *vbuf = vb2_plane_vaddr(&buf->vb, 0);
+ unsigned img_width = dev->compose_cap.width;
+ unsigned img_height = dev->compose_cap.height;
+ unsigned stride = tpg->bytesperline[0];
+ /* if quick is true, then valid_pix() doesn't have to be called */
+ bool quick = dev->bitmap_cap == NULL && dev->clipcount_cap == 0;
+ int x, y, w, out_x = 0;
+
+ if ((dev->overlay_cap_field == V4L2_FIELD_TOP ||
+ dev->overlay_cap_field == V4L2_FIELD_BOTTOM) &&
+ dev->overlay_cap_field != buf->vb.v4l2_buf.field)
+ return;
+
+ vbuf += dev->compose_cap.left * pixsize + dev->compose_cap.top * stride;
+ x = dev->overlay_cap_left;
+ w = img_width;
+ if (x < 0) {
+ out_x = -x;
+ w = w - out_x;
+ x = 0;
+ } else {
+ w = dev->fb_cap.fmt.width - x;
+ if (w > img_width)
+ w = img_width;
+ }
+ if (w <= 0)
+ return;
+ if (dev->overlay_cap_top >= 0)
+ vbase += dev->overlay_cap_top * dev->fb_cap.fmt.bytesperline;
+ for (y = dev->overlay_cap_top;
+ y < dev->overlay_cap_top + (int)img_height;
+ y++, vbuf += stride) {
+ int px;
+
+ if (y < 0 || y > dev->fb_cap.fmt.height)
+ continue;
+ if (quick) {
+ memcpy(vbase + x * pixsize,
+ vbuf + out_x * pixsize, w * pixsize);
+ vbase += dev->fb_cap.fmt.bytesperline;
+ continue;
+ }
+ for (px = 0; px < w; px++) {
+ if (!valid_pix(dev, y - dev->overlay_cap_top,
+ px + out_x, y, px + x))
+ continue;
+ memcpy(vbase + (px + x) * pixsize,
+ vbuf + (px + out_x) * pixsize,
+ pixsize);
+ }
+ vbase += dev->fb_cap.fmt.bytesperline;
+ }
+}
+
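+/*
+ * Process one capture tick: optionally drop this buffer, dequeue the next
+ * active video and VBI capture buffers, fill them and mark them done, and
+ * update the test pattern movement counters.
+ */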
+static void vivid_thread_vid_cap_tick(struct vivid_dev *dev, int dropped_bufs)
+{
+ struct vivid_buffer *vid_cap_buf = NULL;
+ struct vivid_buffer *vbi_cap_buf = NULL;
+
+ dprintk(dev, 1, "Video Capture Thread Tick\n");
+
+ while (dropped_bufs-- > 1)
+ tpg_update_mv_count(&dev->tpg,
+ dev->field_cap == V4L2_FIELD_NONE ||
+ dev->field_cap == V4L2_FIELD_ALTERNATE);
+
+ /* Drop a certain percentage of buffers. */
+ if (dev->perc_dropped_buffers &&
+ prandom_u32_max(100) < dev->perc_dropped_buffers)
+ goto update_mv;
+
+ spin_lock(&dev->slock);
+ if (!list_empty(&dev->vid_cap_active)) {
+ vid_cap_buf = list_entry(dev->vid_cap_active.next, struct vivid_buffer, list);
+ list_del(&vid_cap_buf->list);
+ }
+ if (!list_empty(&dev->vbi_cap_active)) {
+ if (dev->field_cap != V4L2_FIELD_ALTERNATE ||
+ (dev->vbi_cap_seq_count & 1)) {
+ vbi_cap_buf = list_entry(dev->vbi_cap_active.next,
+ struct vivid_buffer, list);
+ list_del(&vbi_cap_buf->list);
+ }
+ }
+ spin_unlock(&dev->slock);
+
+ if (!vid_cap_buf && !vbi_cap_buf)
+ goto update_mv;
+
+ if (vid_cap_buf) {
+ /* Fill buffer */
+ vivid_fillbuff(dev, vid_cap_buf);
+ dprintk(dev, 1, "filled buffer %d\n",
+ vid_cap_buf->vb.v4l2_buf.index);
+
+ /* Handle overlay */
+ if (dev->overlay_cap_owner && dev->fb_cap.base &&
+ dev->fb_cap.fmt.pixelformat == dev->fmt_cap->fourcc)
+ vivid_overlay(dev, vid_cap_buf);
+
+ vb2_buffer_done(&vid_cap_buf->vb, dev->dqbuf_error ?
+ VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+ dprintk(dev, 2, "vid_cap buffer %d done\n",
+ vid_cap_buf->vb.v4l2_buf.index);
+ }
+
+ if (vbi_cap_buf) {
+ if (dev->stream_sliced_vbi_cap)
+ vivid_sliced_vbi_cap_process(dev, vbi_cap_buf);
+ else
+ vivid_raw_vbi_cap_process(dev, vbi_cap_buf);
+ vb2_buffer_done(&vbi_cap_buf->vb, dev->dqbuf_error ?
+ VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+ dprintk(dev, 2, "vbi_cap %d done\n",
+ vbi_cap_buf->vb.v4l2_buf.index);
+ }
+ dev->dqbuf_error = false;
+
+update_mv:
+ /* Update the test pattern movement counters */
+ tpg_update_mv_count(&dev->tpg, dev->field_cap == V4L2_FIELD_NONE ||
+ dev->field_cap == V4L2_FIELD_ALTERNATE);
+}
+
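+/*
+ * The video/VBI capture kthread: derive the frame period from
+ * timeperframe_vid_cap, keep the sequence counters in step with the
+ * elapsed jiffies, process one tick and then sleep until the next
+ * frame is due.
+ */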
+static int vivid_thread_vid_cap(void *data)
+{
+ struct vivid_dev *dev = data;
+ u64 numerators_since_start;
+ u64 buffers_since_start;
+ u64 next_jiffies_since_start;
+ unsigned long jiffies_since_start;
+ unsigned long cur_jiffies;
+ unsigned wait_jiffies;
+ unsigned numerator;
+ unsigned denominator;
+ int dropped_bufs;
+
+ dprintk(dev, 1, "Video Capture Thread Start\n");
+
+ set_freezable();
+
+ /* Resets frame counters */
+ dev->cap_seq_offset = 0;
+ dev->cap_seq_count = 0;
+ dev->cap_seq_resync = false;
+ dev->jiffies_vid_cap = jiffies;
+
+ for (;;) {
+ try_to_freeze();
+ if (kthread_should_stop())
+ break;
+
+ mutex_lock(&dev->mutex);
+ cur_jiffies = jiffies;
+ if (dev->cap_seq_resync) {
+ dev->jiffies_vid_cap = cur_jiffies;
+ dev->cap_seq_offset = dev->cap_seq_count + 1;
+ dev->cap_seq_count = 0;
+ dev->cap_seq_resync = false;
+ }
+ numerator = dev->timeperframe_vid_cap.numerator;
+ denominator = dev->timeperframe_vid_cap.denominator;
+
+ if (dev->field_cap == V4L2_FIELD_ALTERNATE)
+ denominator *= 2;
+
+ /* Calculate the number of jiffies since we started streaming */
+ jiffies_since_start = cur_jiffies - dev->jiffies_vid_cap;
+ /* Get the number of buffers streamed since the start */
+ buffers_since_start = (u64)jiffies_since_start * denominator +
+ (HZ * numerator) / 2;
+ do_div(buffers_since_start, HZ * numerator);
+
+ /*
+ * After more than 0xf0000000 (rounded down to a multiple of
+ * 'jiffies-per-day' to ease jiffies_to_msecs calculation)
+ * jiffies have passed since we started streaming, reset the
+ * counters and keep track of the sequence offset.
+ */
+ if (jiffies_since_start > JIFFIES_RESYNC) {
+ dev->jiffies_vid_cap = cur_jiffies;
+ dev->cap_seq_offset = buffers_since_start;
+ buffers_since_start = 0;
+ }
+ dropped_bufs = buffers_since_start + dev->cap_seq_offset - dev->cap_seq_count;
+ dev->cap_seq_count = buffers_since_start + dev->cap_seq_offset;
+ dev->vid_cap_seq_count = dev->cap_seq_count - dev->vid_cap_seq_start;
+ dev->vbi_cap_seq_count = dev->cap_seq_count - dev->vbi_cap_seq_start;
+
+ vivid_thread_vid_cap_tick(dev, dropped_bufs);
+
+ /*
+ * Calculate the number of 'numerators' streamed since we started,
+ * including the current buffer.
+ */
+ numerators_since_start = ++buffers_since_start * numerator;
+
+ /* And the number of jiffies since we started */
+ jiffies_since_start = jiffies - dev->jiffies_vid_cap;
+
+ mutex_unlock(&dev->mutex);
+
+ /*
+ * Calculate when that next buffer is supposed to start
+ * in jiffies since we started streaming.
+ */
+ next_jiffies_since_start = numerators_since_start * HZ +
+ denominator / 2;
+ do_div(next_jiffies_since_start, denominator);
+ /* If it is in the past, then just schedule asap */
+ if (next_jiffies_since_start < jiffies_since_start)
+ next_jiffies_since_start = jiffies_since_start;
+
+ wait_jiffies = next_jiffies_since_start - jiffies_since_start;
+ schedule_timeout_interruptible(wait_jiffies ? wait_jiffies : 1);
+ }
+ dprintk(dev, 1, "Video Capture Thread End\n");
+ return 0;
+}
+
+static void vivid_grab_controls(struct vivid_dev *dev, bool grab)
+{
+ v4l2_ctrl_grab(dev->ctrl_has_crop_cap, grab);
+ v4l2_ctrl_grab(dev->ctrl_has_compose_cap, grab);
+ v4l2_ctrl_grab(dev->ctrl_has_scaler_cap, grab);
+}
+
+int vivid_start_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
+{
+ dprintk(dev, 1, "%s\n", __func__);
+
+ if (dev->kthread_vid_cap) {
+ u32 seq_count = dev->cap_seq_count + dev->seq_wrap * 128;
+
+ if (pstreaming == &dev->vid_cap_streaming)
+ dev->vid_cap_seq_start = seq_count;
+ else
+ dev->vbi_cap_seq_start = seq_count;
+ *pstreaming = true;
+ return 0;
+ }
+
+ /* Resets frame counters */
+ tpg_init_mv_count(&dev->tpg);
+
+ dev->vid_cap_seq_start = dev->seq_wrap * 128;
+ dev->vbi_cap_seq_start = dev->seq_wrap * 128;
+
+ dev->kthread_vid_cap = kthread_run(vivid_thread_vid_cap, dev,
+ "%s-vid-cap", dev->v4l2_dev.name);
+
+ if (IS_ERR(dev->kthread_vid_cap)) {
+ v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
+ return PTR_ERR(dev->kthread_vid_cap);
+ }
+ *pstreaming = true;
+ vivid_grab_controls(dev, true);
+
+ dprintk(dev, 1, "returning from %s\n", __func__);
+ return 0;
+}
+
+void vivid_stop_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
+{
+ dprintk(dev, 1, "%s\n", __func__);
+
+ if (dev->kthread_vid_cap == NULL)
+ return;
+
+ *pstreaming = false;
+ if (pstreaming == &dev->vid_cap_streaming) {
+ /* Release all active buffers */
+ while (!list_empty(&dev->vid_cap_active)) {
+ struct vivid_buffer *buf;
+
+ buf = list_entry(dev->vid_cap_active.next,
+ struct vivid_buffer, list);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ dprintk(dev, 2, "vid_cap buffer %d done\n",
+ buf->vb.v4l2_buf.index);
+ }
+ }
+
+ if (pstreaming == &dev->vbi_cap_streaming) {
+ while (!list_empty(&dev->vbi_cap_active)) {
+ struct vivid_buffer *buf;
+
+ buf = list_entry(dev->vbi_cap_active.next,
+ struct vivid_buffer, list);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ dprintk(dev, 2, "vbi_cap buffer %d done\n",
+ buf->vb.v4l2_buf.index);
+ }
+ }
+
+ if (dev->vid_cap_streaming || dev->vbi_cap_streaming)
+ return;
+
+ /* shutdown control thread */
+ vivid_grab_controls(dev, false);
+ mutex_unlock(&dev->mutex);
+ kthread_stop(dev->kthread_vid_cap);
+ dev->kthread_vid_cap = NULL;
+ mutex_lock(&dev->mutex);
+}
diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.h b/drivers/media/platform/vivid/vivid-kthread-cap.h
new file mode 100644
index 000000000000..5b92fc9a0d04
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-kthread-cap.h
@@ -0,0 +1,26 @@
+/*
+ * vivid-kthread-cap.h - video/vbi capture thread support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _VIVID_KTHREAD_CAP_H_
+#define _VIVID_KTHREAD_CAP_H_
+
+int vivid_start_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming);
+void vivid_stop_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming);
+
+#endif
diff --git a/drivers/media/platform/vivid/vivid-kthread-out.c b/drivers/media/platform/vivid/vivid-kthread-out.c
new file mode 100644
index 000000000000..d9f36ccd7efb
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-kthread-out.c
@@ -0,0 +1,305 @@
+/*
+ * vivid-kthread-out.c - video/vbi output thread support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/font.h>
+#include <linux/mutex.h>
+#include <linux/videodev2.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <linux/random.h>
+#include <linux/v4l2-dv-timings.h>
+#include <asm/div64.h>
+#include <media/videobuf2-vmalloc.h>
+#include <media/v4l2-dv-timings.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
+
+#include "vivid-core.h"
+#include "vivid-vid-common.h"
+#include "vivid-vid-cap.h"
+#include "vivid-vid-out.h"
+#include "vivid-radio-common.h"
+#include "vivid-radio-rx.h"
+#include "vivid-radio-tx.h"
+#include "vivid-sdr-cap.h"
+#include "vivid-vbi-cap.h"
+#include "vivid-vbi-out.h"
+#include "vivid-osd.h"
+#include "vivid-ctrls.h"
+#include "vivid-kthread-out.h"
+
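+/*
+ * Process one output tick: optionally drop this buffer, dequeue the next
+ * active video and VBI output buffers (always leaving one video buffer
+ * queued so that video loopback keeps working), stamp them and mark
+ * them done.
+ */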
+static void vivid_thread_vid_out_tick(struct vivid_dev *dev)
+{
+ struct vivid_buffer *vid_out_buf = NULL;
+ struct vivid_buffer *vbi_out_buf = NULL;
+
+ dprintk(dev, 1, "Video Output Thread Tick\n");
+
+ /* Drop a certain percentage of buffers. */
+ if (dev->perc_dropped_buffers &&
+ prandom_u32_max(100) < dev->perc_dropped_buffers)
+ return;
+
+ spin_lock(&dev->slock);
+ /*
+ * Only dequeue a buffer if there is at least one more pending.
+ * This makes video loopback possible.
+ */
+ if (!list_empty(&dev->vid_out_active) &&
+ !list_is_singular(&dev->vid_out_active)) {
+ vid_out_buf = list_entry(dev->vid_out_active.next,
+ struct vivid_buffer, list);
+ list_del(&vid_out_buf->list);
+ }
+ if (!list_empty(&dev->vbi_out_active) &&
+ (dev->field_out != V4L2_FIELD_ALTERNATE ||
+ (dev->vbi_out_seq_count & 1))) {
+ vbi_out_buf = list_entry(dev->vbi_out_active.next,
+ struct vivid_buffer, list);
+ list_del(&vbi_out_buf->list);
+ }
+ spin_unlock(&dev->slock);
+
+ if (!vid_out_buf && !vbi_out_buf)
+ return;
+
+ if (vid_out_buf) {
+ vid_out_buf->vb.v4l2_buf.sequence = dev->vid_out_seq_count;
+ if (dev->field_out == V4L2_FIELD_ALTERNATE) {
+ /*
+ * The sequence counter counts frames, not fields. So divide
+ * by two.
+ */
+ vid_out_buf->vb.v4l2_buf.sequence /= 2;
+ }
+ v4l2_get_timestamp(&vid_out_buf->vb.v4l2_buf.timestamp);
+ vid_out_buf->vb.v4l2_buf.timestamp.tv_sec += dev->time_wrap_offset;
+ vb2_buffer_done(&vid_out_buf->vb, dev->dqbuf_error ?
+ VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+ dprintk(dev, 2, "vid_out buffer %d done\n",
+ vid_out_buf->vb.v4l2_buf.index);
+ }
+
+ if (vbi_out_buf) {
+ if (dev->stream_sliced_vbi_out)
+ vivid_sliced_vbi_out_process(dev, vbi_out_buf);
+
+ vbi_out_buf->vb.v4l2_buf.sequence = dev->vbi_out_seq_count;
+ v4l2_get_timestamp(&vbi_out_buf->vb.v4l2_buf.timestamp);
+ vbi_out_buf->vb.v4l2_buf.timestamp.tv_sec += dev->time_wrap_offset;
+ vb2_buffer_done(&vbi_out_buf->vb, dev->dqbuf_error ?
+ VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+ dprintk(dev, 2, "vbi_out buffer %d done\n",
+ vbi_out_buf->vb.v4l2_buf.index);
+ }
+ dev->dqbuf_error = false;
+}
+
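+/*
+ * The video/VBI output kthread: same timing logic as the capture thread,
+ * but driven by timeperframe_vid_out.
+ */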
+static int vivid_thread_vid_out(void *data)
+{
+ struct vivid_dev *dev = data;
+ u64 numerators_since_start;
+ u64 buffers_since_start;
+ u64 next_jiffies_since_start;
+ unsigned long jiffies_since_start;
+ unsigned long cur_jiffies;
+ unsigned wait_jiffies;
+ unsigned numerator;
+ unsigned denominator;
+
+ dprintk(dev, 1, "Video Output Thread Start\n");
+
+ set_freezable();
+
+ /* Resets frame counters */
+ dev->out_seq_offset = 0;
+ if (dev->seq_wrap)
+ dev->out_seq_count = 0xffffff80U;
+ dev->jiffies_vid_out = jiffies;
+ dev->vid_out_seq_start = dev->vbi_out_seq_start = 0;
+ dev->out_seq_resync = false;
+
+ for (;;) {
+ try_to_freeze();
+ if (kthread_should_stop())
+ break;
+
+ mutex_lock(&dev->mutex);
+ cur_jiffies = jiffies;
+ if (dev->out_seq_resync) {
+ dev->jiffies_vid_out = cur_jiffies;
+ dev->out_seq_offset = dev->out_seq_count + 1;
+ dev->out_seq_count = 0;
+ dev->out_seq_resync = false;
+ }
+ numerator = dev->timeperframe_vid_out.numerator;
+ denominator = dev->timeperframe_vid_out.denominator;
+
+ if (dev->field_out == V4L2_FIELD_ALTERNATE)
+ denominator *= 2;
+
+ /* Calculate the number of jiffies since we started streaming */
+ jiffies_since_start = cur_jiffies - dev->jiffies_vid_out;
+ /* Get the number of buffers streamed since the start */
+ buffers_since_start = (u64)jiffies_since_start * denominator +
+ (HZ * numerator) / 2;
+ do_div(buffers_since_start, HZ * numerator);
+
+ /*
+ * After more than 0xf0000000 (rounded down to a multiple of
+ * 'jiffies-per-day' to ease jiffies_to_msecs calculation)
+ * jiffies have passed since we started streaming, reset the
+ * counters and keep track of the sequence offset.
+ */
+ if (jiffies_since_start > JIFFIES_RESYNC) {
+ dev->jiffies_vid_out = cur_jiffies;
+ dev->out_seq_offset = buffers_since_start;
+ buffers_since_start = 0;
+ }
+ dev->out_seq_count = buffers_since_start + dev->out_seq_offset;
+ dev->vid_out_seq_count = dev->out_seq_count - dev->vid_out_seq_start;
+ dev->vbi_out_seq_count = dev->out_seq_count - dev->vbi_out_seq_start;
+
+ vivid_thread_vid_out_tick(dev);
+ mutex_unlock(&dev->mutex);
+
+ /*
+ * Calculate the number of 'numerators' streamed since we started,
+ * not including the current buffer.
+ */
+ numerators_since_start = buffers_since_start * numerator;
+
+ /* And the number of jiffies since we started */
+ jiffies_since_start = jiffies - dev->jiffies_vid_out;
+
+ /* Increase by the 'numerator' of one buffer */
+ numerators_since_start += numerator;
+ /*
+ * Calculate when that next buffer is supposed to start
+ * in jiffies since we started streaming.
+ */
+ next_jiffies_since_start = numerators_since_start * HZ +
+ denominator / 2;
+ do_div(next_jiffies_since_start, denominator);
+ /* If it is in the past, then just schedule asap */
+ if (next_jiffies_since_start < jiffies_since_start)
+ next_jiffies_since_start = jiffies_since_start;
+
+ wait_jiffies = next_jiffies_since_start - jiffies_since_start;
+ schedule_timeout_interruptible(wait_jiffies ? wait_jiffies : 1);
+ }
+ dprintk(dev, 1, "Video Output Thread End\n");
+ return 0;
+}
+
+static void vivid_grab_controls(struct vivid_dev *dev, bool grab)
+{
+ v4l2_ctrl_grab(dev->ctrl_has_crop_out, grab);
+ v4l2_ctrl_grab(dev->ctrl_has_compose_out, grab);
+ v4l2_ctrl_grab(dev->ctrl_has_scaler_out, grab);
+ v4l2_ctrl_grab(dev->ctrl_tx_mode, grab);
+ v4l2_ctrl_grab(dev->ctrl_tx_rgb_range, grab);
+}
+
+int vivid_start_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
+{
+ dprintk(dev, 1, "%s\n", __func__);
+
+ if (dev->kthread_vid_out) {
+ u32 seq_count = dev->out_seq_count + dev->seq_wrap * 128;
+
+ if (pstreaming == &dev->vid_out_streaming)
+ dev->vid_out_seq_start = seq_count;
+ else
+ dev->vbi_out_seq_start = seq_count;
+ *pstreaming = true;
+ return 0;
+ }
+
+ /* Resets frame counters */
+ dev->jiffies_vid_out = jiffies;
+ dev->vid_out_seq_start = dev->seq_wrap * 128;
+ dev->vbi_out_seq_start = dev->seq_wrap * 128;
+
+ dev->kthread_vid_out = kthread_run(vivid_thread_vid_out, dev,
+ "%s-vid-out", dev->v4l2_dev.name);
+
+ if (IS_ERR(dev->kthread_vid_out)) {
+ v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
+ return PTR_ERR(dev->kthread_vid_out);
+ }
+ *pstreaming = true;
+ vivid_grab_controls(dev, true);
+
+ dprintk(dev, 1, "returning from %s\n", __func__);
+ return 0;
+}
+
+void vivid_stop_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
+{
+ dprintk(dev, 1, "%s\n", __func__);
+
+ if (dev->kthread_vid_out == NULL)
+ return;
+
+ *pstreaming = false;
+ if (pstreaming == &dev->vid_out_streaming) {
+ /* Release all active buffers */
+ while (!list_empty(&dev->vid_out_active)) {
+ struct vivid_buffer *buf;
+
+ buf = list_entry(dev->vid_out_active.next,
+ struct vivid_buffer, list);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ dprintk(dev, 2, "vid_out buffer %d done\n",
+ buf->vb.v4l2_buf.index);
+ }
+ }
+
+ if (pstreaming == &dev->vbi_out_streaming) {
+ while (!list_empty(&dev->vbi_out_active)) {
+ struct vivid_buffer *buf;
+
+ buf = list_entry(dev->vbi_out_active.next,
+ struct vivid_buffer, list);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ dprintk(dev, 2, "vbi_out buffer %d done\n",
+ buf->vb.v4l2_buf.index);
+ }
+ }
+
+ if (dev->vid_out_streaming || dev->vbi_out_streaming)
+ return;
+
+ /* shutdown control thread */
+ vivid_grab_controls(dev, false);
+ mutex_unlock(&dev->mutex);
+ kthread_stop(dev->kthread_vid_out);
+ dev->kthread_vid_out = NULL;
+ mutex_lock(&dev->mutex);
+}
diff --git a/drivers/media/platform/vivid/vivid-kthread-out.h b/drivers/media/platform/vivid/vivid-kthread-out.h
new file mode 100644
index 000000000000..2bf04a17b05d
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-kthread-out.h
@@ -0,0 +1,26 @@
+/*
+ * vivid-kthread-out.h - video/vbi output thread support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _VIVID_KTHREAD_OUT_H_
+#define _VIVID_KTHREAD_OUT_H_
+
+int vivid_start_generating_vid_out(struct vivid_dev *dev, bool *pstreaming);
+void vivid_stop_generating_vid_out(struct vivid_dev *dev, bool *pstreaming);
+
+#endif
diff --git a/drivers/media/platform/vivid/vivid-osd.c b/drivers/media/platform/vivid/vivid-osd.c
new file mode 100644
index 000000000000..084d346fb4c4
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-osd.c
@@ -0,0 +1,400 @@
+/*
+ * vivid-osd.c - osd support for testing overlays.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/font.h>
+#include <linux/mutex.h>
+#include <linux/videodev2.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <linux/fb.h>
+#include <media/videobuf2-vmalloc.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-common.h>
+
+#include "vivid-core.h"
+#include "vivid-osd.h"
+
+#define MAX_OSD_WIDTH 720
+#define MAX_OSD_HEIGHT 576
+
+/*
+ * Order: white, yellow, cyan, green, magenta, red, blue, black,
+ * and same again with the alpha bit set (if any)
+ */
+static const u16 rgb555[16] = {
+ 0x7fff, 0x7fe0, 0x03ff, 0x03e0, 0x7c1f, 0x7c00, 0x001f, 0x0000,
+ 0xffff, 0xffe0, 0x83ff, 0x83e0, 0xfc1f, 0xfc00, 0x801f, 0x8000
+};
+
+static const u16 rgb565[16] = {
+ 0xffff, 0xffe0, 0x07ff, 0x07e0, 0xf81f, 0xf800, 0x001f, 0x0000,
+ 0xffff, 0xffe0, 0x07ff, 0x07e0, 0xf81f, 0xf800, 0x001f, 0x0000
+};
+
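+/*
+ * Fill the framebuffer with a checkerboard of 16x16 pixel blocks that
+ * cycles through the 16 colors above (RGB555 or RGB565, depending on the
+ * configured green component length).
+ */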
+void vivid_clear_fb(struct vivid_dev *dev)
+{
+ void *p = dev->video_vbase;
+ const u16 *rgb = rgb555;
+ unsigned x, y;
+
+ if (dev->fb_defined.green.length == 6)
+ rgb = rgb565;
+
+ for (y = 0; y < dev->display_height; y++) {
+ u16 *d = p;
+
+ for (x = 0; x < dev->display_width; x++)
+ d[x] = rgb[(y / 16 + x / 16) % 16];
+ p += dev->display_byte_stride;
+ }
+}
+
+/* --------------------------------------------------------------------- */
+
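+/* Only FBIOGET_VBLANK is implemented; the counts it reports are always 0. */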
+static int vivid_fb_ioctl(struct fb_info *info, unsigned cmd, unsigned long arg)
+{
+ struct vivid_dev *dev = (struct vivid_dev *)info->par;
+
+ switch (cmd) {
+ case FBIOGET_VBLANK: {
+ struct fb_vblank vblank;
+
+ vblank.flags = FB_VBLANK_HAVE_COUNT | FB_VBLANK_HAVE_VCOUNT |
+ FB_VBLANK_HAVE_VSYNC;
+ vblank.count = 0;
+ vblank.vcount = 0;
+ vblank.hcount = 0;
+ if (copy_to_user((void __user *)arg, &vblank, sizeof(vblank)))
+ return -EFAULT;
+ return 0;
+ }
+
+ default:
+ dprintk(dev, 1, "Unknown ioctl %08x\n", cmd);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* Framebuffer device handling */
+
+static int vivid_fb_set_var(struct vivid_dev *dev, struct fb_var_screeninfo *var)
+{
+ dprintk(dev, 1, "vivid_fb_set_var\n");
+
+ if (var->bits_per_pixel != 16) {
+ dprintk(dev, 1, "vivid_fb_set_var - Invalid bpp\n");
+ return -EINVAL;
+ }
+ dev->display_byte_stride = var->xres * dev->bytes_per_pixel;
+
+ return 0;
+}
+
+static int vivid_fb_get_fix(struct vivid_dev *dev, struct fb_fix_screeninfo *fix)
+{
+ dprintk(dev, 1, "vivid_fb_get_fix\n");
+ memset(fix, 0, sizeof(struct fb_fix_screeninfo));
+ strlcpy(fix->id, "vioverlay fb", sizeof(fix->id));
+ fix->smem_start = dev->video_pbase;
+ fix->smem_len = dev->video_buffer_size;
+ fix->type = FB_TYPE_PACKED_PIXELS;
+ fix->visual = FB_VISUAL_TRUECOLOR;
+ fix->xpanstep = 1;
+ fix->ypanstep = 1;
+ fix->ywrapstep = 0;
+ fix->line_length = dev->display_byte_stride;
+ fix->accel = FB_ACCEL_NONE;
+ return 0;
+}
+
+/* Check the requested display mode, returning -EINVAL if we can't
+ handle it. */
+
+static int _vivid_fb_check_var(struct fb_var_screeninfo *var, struct vivid_dev *dev)
+{
+ dprintk(dev, 1, "vivid_fb_check_var\n");
+
+ var->bits_per_pixel = 16;
+ if (var->green.length == 5) {
+ var->red.offset = 10;
+ var->red.length = 5;
+ var->green.offset = 5;
+ var->green.length = 5;
+ var->blue.offset = 0;
+ var->blue.length = 5;
+ var->transp.offset = 15;
+ var->transp.length = 1;
+ } else {
+ var->red.offset = 11;
+ var->red.length = 5;
+ var->green.offset = 5;
+ var->green.length = 6;
+ var->blue.offset = 0;
+ var->blue.length = 5;
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ }
+ var->xoffset = var->yoffset = 0;
+ var->left_margin = var->upper_margin = 0;
+ var->nonstd = 0;
+
+ var->vmode &= ~FB_VMODE_MASK;
+ var->vmode = FB_VMODE_NONINTERLACED;
+
+ /* Dummy values */
+ var->hsync_len = 24;
+ var->vsync_len = 2;
+ var->pixclock = 84316;
+ var->right_margin = 776;
+ var->lower_margin = 591;
+ return 0;
+}
+
+static int vivid_fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+ struct vivid_dev *dev = (struct vivid_dev *) info->par;
+
+ dprintk(dev, 1, "vivid_fb_check_var\n");
+ return _vivid_fb_check_var(var, dev);
+}
+
+static int vivid_fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+ return 0;
+}
+
+static int vivid_fb_set_par(struct fb_info *info)
+{
+ int rc = 0;
+ struct vivid_dev *dev = (struct vivid_dev *) info->par;
+
+ dprintk(dev, 1, "vivid_fb_set_par\n");
+
+ rc = vivid_fb_set_var(dev, &info->var);
+ vivid_fb_get_fix(dev, &info->fix);
+ return rc;
+}
+
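+/*
+ * Store a palette entry in the pseudo palette, packed as RGB565 or
+ * ARGB1555 depending on the current green component length.
+ */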
+static int vivid_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
+ unsigned blue, unsigned transp,
+ struct fb_info *info)
+{
+ u32 color, *palette;
+
+ if (regno >= info->cmap.len)
+ return -EINVAL;
+
+ color = ((transp & 0xFF00) << 16) | ((red & 0xFF00) << 8) |
+ (green & 0xFF00) | ((blue & 0xFF00) >> 8);
+ if (regno >= 16)
+ return -EINVAL;
+
+ palette = info->pseudo_palette;
+ if (info->var.bits_per_pixel == 16) {
+ switch (info->var.green.length) {
+ case 6:
+ color = (red & 0xf800) |
+ ((green & 0xfc00) >> 5) |
+ ((blue & 0xf800) >> 11);
+ break;
+ case 5:
+ color = ((red & 0xf800) >> 1) |
+ ((green & 0xf800) >> 6) |
+ ((blue & 0xf800) >> 11) |
+ (transp ? 0x8000 : 0);
+ break;
+ }
+ }
+ palette[regno] = color;
+ return 0;
+}
+
+/* We don't really support blanking; the requested blanking mode is
+ only logged. */
+static int vivid_fb_blank(int blank_mode, struct fb_info *info)
+{
+ struct vivid_dev *dev = (struct vivid_dev *)info->par;
+
+ dprintk(dev, 1, "Set blanking mode : %d\n", blank_mode);
+ switch (blank_mode) {
+ case FB_BLANK_UNBLANK:
+ break;
+ case FB_BLANK_NORMAL:
+ case FB_BLANK_HSYNC_SUSPEND:
+ case FB_BLANK_VSYNC_SUSPEND:
+ case FB_BLANK_POWERDOWN:
+ break;
+ }
+ return 0;
+}
+
+static struct fb_ops vivid_fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = vivid_fb_check_var,
+ .fb_set_par = vivid_fb_set_par,
+ .fb_setcolreg = vivid_fb_setcolreg,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_cursor = NULL,
+ .fb_ioctl = vivid_fb_ioctl,
+ .fb_pan_display = vivid_fb_pan_display,
+ .fb_blank = vivid_fb_blank,
+};
+
+/* Initialization */
+
+
+/* Setup our initial video mode */
+static int vivid_fb_init_vidmode(struct vivid_dev *dev)
+{
+ struct v4l2_rect start_window;
+
+ /* Color mode */
+
+ dev->bits_per_pixel = 16;
+ dev->bytes_per_pixel = dev->bits_per_pixel / 8;
+
+ start_window.width = MAX_OSD_WIDTH;
+ start_window.left = 0;
+
+ dev->display_byte_stride = start_window.width * dev->bytes_per_pixel;
+
+ /* Vertical size & position */
+
+ start_window.height = MAX_OSD_HEIGHT;
+ start_window.top = 0;
+
+ dev->display_width = start_window.width;
+ dev->display_height = start_window.height;
+
+ /* Generate a valid fb_var_screeninfo */
+
+ dev->fb_defined.xres = dev->display_width;
+ dev->fb_defined.yres = dev->display_height;
+ dev->fb_defined.xres_virtual = dev->display_width;
+ dev->fb_defined.yres_virtual = dev->display_height;
+ dev->fb_defined.bits_per_pixel = dev->bits_per_pixel;
+ dev->fb_defined.vmode = FB_VMODE_NONINTERLACED;
+ dev->fb_defined.left_margin = start_window.left + 1;
+ dev->fb_defined.upper_margin = start_window.top + 1;
+ dev->fb_defined.accel_flags = FB_ACCEL_NONE;
+ dev->fb_defined.nonstd = 0;
+ /* set default to 1:5:5:5 */
+ dev->fb_defined.green.length = 5;
+
+ /* We've filled in most of the data; let the usual mode check
+ routine fill in the rest. */
+ _vivid_fb_check_var(&dev->fb_defined, dev);
+
+ /* Generate valid fb_fix_screeninfo */
+
+ vivid_fb_get_fix(dev, &dev->fb_fix);
+
+ /* Generate valid fb_info */
+
+ dev->fb_info.node = -1;
+ dev->fb_info.flags = FBINFO_FLAG_DEFAULT;
+ dev->fb_info.fbops = &vivid_fb_ops;
+ dev->fb_info.par = dev;
+ dev->fb_info.var = dev->fb_defined;
+ dev->fb_info.fix = dev->fb_fix;
+ dev->fb_info.screen_base = (u8 __iomem *)dev->video_vbase;
+ dev->fb_info.fbops = &vivid_fb_ops;
+
+ /* Supply some monitor specs. Bogus values will do for now */
+ dev->fb_info.monspecs.hfmin = 8000;
+ dev->fb_info.monspecs.hfmax = 70000;
+ dev->fb_info.monspecs.vfmin = 10;
+ dev->fb_info.monspecs.vfmax = 100;
+
+ /* Allocate color map */
+ if (fb_alloc_cmap(&dev->fb_info.cmap, 256, 1)) {
+ pr_err("abort, unable to alloc cmap\n");
+ return -ENOMEM;
+ }
+
+ /* Allocate the pseudo palette */
+ dev->fb_info.pseudo_palette = kmalloc_array(16, sizeof(u32), GFP_KERNEL);
+
+ return dev->fb_info.pseudo_palette ? 0 : -ENOMEM;
+}
+
+/* Release any memory we've grabbed */
+void vivid_fb_release_buffers(struct vivid_dev *dev)
+{
+ if (dev->video_vbase == NULL)
+ return;
+
+ /* Release cmap */
+ if (dev->fb_info.cmap.len)
+ fb_dealloc_cmap(&dev->fb_info.cmap);
+
+ /* Release pseudo palette */
+ kfree(dev->fb_info.pseudo_palette);
+ kfree((void *)dev->video_vbase);
+}
+
+/* Initialize the specified card */
+
+int vivid_fb_init(struct vivid_dev *dev)
+{
+ int ret;
+
+ dev->video_buffer_size = MAX_OSD_HEIGHT * MAX_OSD_WIDTH * 2;
+ dev->video_vbase = kzalloc(dev->video_buffer_size, GFP_KERNEL | GFP_DMA32);
+ if (dev->video_vbase == NULL)
+ return -ENOMEM;
+ dev->video_pbase = virt_to_phys(dev->video_vbase);
+
+ pr_info("Framebuffer at 0x%lx, mapped to 0x%p, size %dk\n",
+ dev->video_pbase, dev->video_vbase,
+ dev->video_buffer_size / 1024);
+
+ /* Set the startup video mode information */
+ ret = vivid_fb_init_vidmode(dev);
+ if (ret) {
+ vivid_fb_release_buffers(dev);
+ return ret;
+ }
+
+ vivid_clear_fb(dev);
+
+ /* Register the framebuffer */
+ if (register_framebuffer(&dev->fb_info) < 0) {
+ vivid_fb_release_buffers(dev);
+ return -EINVAL;
+ }
+
+ /* Set the card to the requested mode */
+ vivid_fb_set_par(&dev->fb_info);
+ return 0;
+}
diff --git a/drivers/media/platform/vivid/vivid-osd.h b/drivers/media/platform/vivid/vivid-osd.h
new file mode 100644
index 000000000000..57c9daa5940a
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-osd.h
@@ -0,0 +1,27 @@
+/*
+ * vivid-osd.h - output overlay support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _VIVID_OSD_H_
+#define _VIVID_OSD_H_
+
+int vivid_fb_init(struct vivid_dev *dev);
+void vivid_fb_release_buffers(struct vivid_dev *dev);
+void vivid_clear_fb(struct vivid_dev *dev);
+
+#endif
diff --git a/drivers/media/platform/vivid/vivid-radio-common.c b/drivers/media/platform/vivid/vivid-radio-common.c
new file mode 100644
index 000000000000..78c1e920670a
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-radio-common.c
@@ -0,0 +1,189 @@
+/*
+ * vivid-radio-common.c - common radio rx/tx support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/videodev2.h>
+
+#include "vivid-core.h"
+#include "vivid-ctrls.h"
+#include "vivid-radio-common.h"
+#include "vivid-rds-gen.h"
+
+/*
+ * These functions are shared between the vivid receiver and transmitter
+ * since both use the same frequency bands.
+ */
+
+const struct v4l2_frequency_band vivid_radio_bands[TOT_BANDS] = {
+ /* Band FM */
+ {
+ .type = V4L2_TUNER_RADIO,
+ .index = 0,
+ .capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
+ V4L2_TUNER_CAP_FREQ_BANDS,
+ .rangelow = FM_FREQ_RANGE_LOW,
+ .rangehigh = FM_FREQ_RANGE_HIGH,
+ .modulation = V4L2_BAND_MODULATION_FM,
+ },
+ /* Band AM */
+ {
+ .type = V4L2_TUNER_RADIO,
+ .index = 1,
+ .capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_FREQ_BANDS,
+ .rangelow = AM_FREQ_RANGE_LOW,
+ .rangehigh = AM_FREQ_RANGE_HIGH,
+ .modulation = V4L2_BAND_MODULATION_AM,
+ },
+ /* Band SW */
+ {
+ .type = V4L2_TUNER_RADIO,
+ .index = 2,
+ .capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_FREQ_BANDS,
+ .rangelow = SW_FREQ_RANGE_LOW,
+ .rangehigh = SW_FREQ_RANGE_HIGH,
+ .modulation = V4L2_BAND_MODULATION_AM,
+ },
+};
+
+/*
+ * Initialize the RDS generator. If we can loop, then the RDS generator
+ * is set up with the values from the RDS TX controls, otherwise it
+ * will fill in standard values using one of two alternates.
+ */
+void vivid_radio_rds_init(struct vivid_dev *dev)
+{
+ struct vivid_rds_gen *rds = &dev->rds_gen;
+ bool alt = dev->radio_rx_rds_use_alternates;
+
+ /* Do nothing, blocks will be filled by the transmitter */
+ if (dev->radio_rds_loop && !dev->radio_tx_rds_controls)
+ return;
+
+ if (dev->radio_rds_loop) {
+ v4l2_ctrl_lock(dev->radio_tx_rds_pi);
+ rds->picode = dev->radio_tx_rds_pi->cur.val;
+ rds->pty = dev->radio_tx_rds_pty->cur.val;
+ rds->mono_stereo = dev->radio_tx_rds_mono_stereo->cur.val;
+ rds->art_head = dev->radio_tx_rds_art_head->cur.val;
+ rds->compressed = dev->radio_tx_rds_compressed->cur.val;
+ rds->dyn_pty = dev->radio_tx_rds_dyn_pty->cur.val;
+ rds->ta = dev->radio_tx_rds_ta->cur.val;
+ rds->tp = dev->radio_tx_rds_tp->cur.val;
+ rds->ms = dev->radio_tx_rds_ms->cur.val;
+ strlcpy(rds->psname,
+ dev->radio_tx_rds_psname->p_cur.p_char,
+ sizeof(rds->psname));
+ strlcpy(rds->radiotext,
+ dev->radio_tx_rds_radiotext->p_cur.p_char + alt * 64,
+ sizeof(rds->radiotext));
+ v4l2_ctrl_unlock(dev->radio_tx_rds_pi);
+ } else {
+ vivid_rds_gen_fill(rds, dev->radio_rx_freq, alt);
+ }
+ if (dev->radio_rx_rds_controls) {
+ v4l2_ctrl_s_ctrl(dev->radio_rx_rds_pty, rds->pty);
+ v4l2_ctrl_s_ctrl(dev->radio_rx_rds_ta, rds->ta);
+ v4l2_ctrl_s_ctrl(dev->radio_rx_rds_tp, rds->tp);
+ v4l2_ctrl_s_ctrl(dev->radio_rx_rds_ms, rds->ms);
+ v4l2_ctrl_s_ctrl_string(dev->radio_rx_rds_psname, rds->psname);
+ v4l2_ctrl_s_ctrl_string(dev->radio_rx_rds_radiotext, rds->radiotext);
+ if (!dev->radio_rds_loop)
+ dev->radio_rx_rds_use_alternates = !dev->radio_rx_rds_use_alternates;
+ }
+ vivid_rds_generate(rds);
+}
+
+/*
+ * Calculate the emulated signal quality taking into account the frequency
+ * the transmitter is using.
+ */
+static void vivid_radio_calc_sig_qual(struct vivid_dev *dev)
+{
+ int mod = 16000;
+ int delta = 800;
+ int sig_qual, sig_qual_tx = mod;
+
+ /*
+ * For SW and FM there is a channel every 1000 kHz, for AM there is one
+ * every 100 kHz.
+ */
+ if (dev->radio_rx_freq <= AM_FREQ_RANGE_HIGH) {
+ mod /= 10;
+ delta /= 10;
+ }
+ sig_qual = (dev->radio_rx_freq + delta) % mod - delta;
+ if (dev->has_radio_tx)
+ sig_qual_tx = dev->radio_rx_freq - dev->radio_tx_freq;
+ if (abs(sig_qual_tx) <= abs(sig_qual)) {
+ sig_qual = sig_qual_tx;
+ /*
+ * Zero the internal rds buffer if we are going to loop
+ * rds blocks.
+ */
+ if (!dev->radio_rds_loop && !dev->radio_tx_rds_controls)
+ memset(dev->rds_gen.data, 0,
+ sizeof(dev->rds_gen.data));
+ dev->radio_rds_loop = dev->radio_rx_freq >= FM_FREQ_RANGE_LOW;
+ } else {
+ dev->radio_rds_loop = false;
+ }
+ if (dev->radio_rx_freq <= AM_FREQ_RANGE_HIGH)
+ sig_qual *= 10;
+ dev->radio_rx_sig_qual = sig_qual;
+}
+
+int vivid_radio_g_frequency(struct file *file, const unsigned *pfreq, struct v4l2_frequency *vf)
+{
+ if (vf->tuner != 0)
+ return -EINVAL;
+ vf->frequency = *pfreq;
+ return 0;
+}
+
+int vivid_radio_s_frequency(struct file *file, unsigned *pfreq, const struct v4l2_frequency *vf)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ unsigned freq;
+ unsigned band;
+
+ if (vf->tuner != 0)
+ return -EINVAL;
+
+ if (vf->frequency >= (FM_FREQ_RANGE_LOW + SW_FREQ_RANGE_HIGH) / 2)
+ band = BAND_FM;
+ else if (vf->frequency <= (AM_FREQ_RANGE_HIGH + SW_FREQ_RANGE_LOW) / 2)
+ band = BAND_AM;
+ else
+ band = BAND_SW;
+
+ freq = clamp_t(u32, vf->frequency, vivid_radio_bands[band].rangelow,
+ vivid_radio_bands[band].rangehigh);
+ *pfreq = freq;
+
+ /*
+ * For both receiver and transmitter recalculate the signal quality
+ * (since that depends on both frequencies) and re-init the rds
+ * generator.
+ */
+ vivid_radio_calc_sig_qual(dev);
+ vivid_radio_rds_init(dev);
+ return 0;
+}
diff --git a/drivers/media/platform/vivid/vivid-radio-common.h b/drivers/media/platform/vivid/vivid-radio-common.h
new file mode 100644
index 000000000000..92fe589141b7
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-radio-common.h
@@ -0,0 +1,40 @@
+/*
+ * vivid-radio-common.h - common radio rx/tx support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _VIVID_RADIO_COMMON_H_
+#define _VIVID_RADIO_COMMON_H_
+
+/* The supported radio frequency ranges in kHz */
+#define FM_FREQ_RANGE_LOW (64000U * 16U)
+#define FM_FREQ_RANGE_HIGH (108000U * 16U)
+#define AM_FREQ_RANGE_LOW (520U * 16U)
+#define AM_FREQ_RANGE_HIGH (1710U * 16U)
+#define SW_FREQ_RANGE_LOW (2300U * 16U)
+#define SW_FREQ_RANGE_HIGH (26100U * 16U)
+
+enum { BAND_FM, BAND_AM, BAND_SW, TOT_BANDS };
+
+extern const struct v4l2_frequency_band vivid_radio_bands[TOT_BANDS];
+
+int vivid_radio_g_frequency(struct file *file, const unsigned *freq, struct v4l2_frequency *vf);
+int vivid_radio_s_frequency(struct file *file, unsigned *freq, const struct v4l2_frequency *vf);
+
+void vivid_radio_rds_init(struct vivid_dev *dev);
+
+#endif
diff --git a/drivers/media/platform/vivid/vivid-radio-rx.c b/drivers/media/platform/vivid/vivid-radio-rx.c
new file mode 100644
index 000000000000..c7651a506668
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-radio-rx.c
@@ -0,0 +1,287 @@
+/*
+ * vivid-radio-rx.c - radio receiver support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/videodev2.h>
+#include <linux/v4l2-dv-timings.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-dv-timings.h>
+
+#include "vivid-core.h"
+#include "vivid-ctrls.h"
+#include "vivid-radio-common.h"
+#include "vivid-rds-gen.h"
+#include "vivid-radio-rx.h"
+
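+/*
+ * Emulate RDS block I/O: blocks become available at a fixed rate based on
+ * the time since the RDS generator was (re)initialized, and a percentage
+ * of them is corrupted or dropped depending on the emulated signal
+ * quality.
+ */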
+ssize_t vivid_radio_rx_read(struct file *file, char __user *buf,
+ size_t size, loff_t *offset)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct timespec ts;
+ struct v4l2_rds_data *data = dev->rds_gen.data;
+ bool use_alternates;
+ unsigned blk;
+ int perc;
+ int i;
+
+ if (dev->radio_rx_rds_controls)
+ return -EINVAL;
+ if (size < sizeof(*data))
+ return 0;
+ size = sizeof(*data) * (size / sizeof(*data));
+
+ if (mutex_lock_interruptible(&dev->mutex))
+ return -ERESTARTSYS;
+ if (dev->radio_rx_rds_owner &&
+ file->private_data != dev->radio_rx_rds_owner) {
+ mutex_unlock(&dev->mutex);
+ return -EBUSY;
+ }
+ if (dev->radio_rx_rds_owner == NULL) {
+ vivid_radio_rds_init(dev);
+ dev->radio_rx_rds_owner = file->private_data;
+ }
+
+retry:
+ ktime_get_ts(&ts);
+ use_alternates = ts.tv_sec % 10 >= 5;
+ if (dev->radio_rx_rds_last_block == 0 ||
+ dev->radio_rx_rds_use_alternates != use_alternates) {
+ dev->radio_rx_rds_use_alternates = use_alternates;
+ /* Re-init the RDS generator */
+ vivid_radio_rds_init(dev);
+ }
+ ts = timespec_sub(ts, dev->radio_rds_init_ts);
+ blk = ts.tv_sec * 100 + ts.tv_nsec / 10000000;
+ blk = (blk * VIVID_RDS_GEN_BLOCKS) / 500;
+ if (blk >= dev->radio_rx_rds_last_block + VIVID_RDS_GEN_BLOCKS)
+ dev->radio_rx_rds_last_block = blk - VIVID_RDS_GEN_BLOCKS + 1;
+
+ /*
+ * No data is available if there hasn't been time to get new data,
+ * or if the RDS receiver has been disabled, or if we use the data
+ * from the RDS transmitter and that RDS transmitter has been disabled,
+ * or if the signal quality is too weak.
+ */
+ if (blk == dev->radio_rx_rds_last_block || !dev->radio_rx_rds_enabled ||
+ (dev->radio_rds_loop && !(dev->radio_tx_subchans & V4L2_TUNER_SUB_RDS)) ||
+ abs(dev->radio_rx_sig_qual) > 200) {
+ mutex_unlock(&dev->mutex);
+ if (file->f_flags & O_NONBLOCK)
+ return -EWOULDBLOCK;
+ if (msleep_interruptible(20) && signal_pending(current))
+ return -EINTR;
+ if (mutex_lock_interruptible(&dev->mutex))
+ return -ERESTARTSYS;
+ goto retry;
+ }
+
+ /* abs(dev->radio_rx_sig_qual) <= 200, map that to a 0-50% range */
+ perc = abs(dev->radio_rx_sig_qual) / 4;
+
+ for (i = 0; i < size && blk > dev->radio_rx_rds_last_block;
+ dev->radio_rx_rds_last_block++) {
+ unsigned data_blk = dev->radio_rx_rds_last_block % VIVID_RDS_GEN_BLOCKS;
+ struct v4l2_rds_data rds = data[data_blk];
+
+ if (data_blk == 0 && dev->radio_rds_loop)
+ vivid_radio_rds_init(dev);
+ if (perc && prandom_u32_max(100) < perc) {
+ switch (prandom_u32_max(4)) {
+ case 0:
+ rds.block |= V4L2_RDS_BLOCK_CORRECTED;
+ break;
+ case 1:
+ rds.block |= V4L2_RDS_BLOCK_INVALID;
+ break;
+ case 2:
+ rds.block |= V4L2_RDS_BLOCK_ERROR;
+ rds.lsb = prandom_u32_max(256);
+ rds.msb = prandom_u32_max(256);
+ break;
+ case 3: /* Skip block altogether */
+ if (i)
+ continue;
+ /*
+ * Must make sure at least one block is
+ * returned, otherwise the application
+ * might think that end-of-file occurred.
+ */
+ break;
+ }
+ }
+ if (copy_to_user(buf + i, &rds, sizeof(rds))) {
+ i = -EFAULT;
+ break;
+ }
+ i += sizeof(rds);
+ }
+ mutex_unlock(&dev->mutex);
+ return i;
+}
+
+unsigned int vivid_radio_rx_poll(struct file *file, struct poll_table_struct *wait)
+{
+ return POLLIN | POLLRDNORM | v4l2_ctrl_poll(file, wait);
+}
+
+int vivid_radio_rx_enum_freq_bands(struct file *file, void *fh, struct v4l2_frequency_band *band)
+{
+ if (band->tuner != 0)
+ return -EINVAL;
+
+ if (band->index >= TOT_BANDS)
+ return -EINVAL;
+
+ *band = vivid_radio_bands[band->index];
+ return 0;
+}
+
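+/*
+ * Emulate a hardware frequency seek: validate the (optional) programmable
+ * range against the supported bands, then step up or down in
+ * channel-spacing increments, wrapping around if allowed.
+ */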
+int vivid_radio_rx_s_hw_freq_seek(struct file *file, void *fh, const struct v4l2_hw_freq_seek *a)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ unsigned low, high;
+ unsigned freq;
+ unsigned spacing;
+ unsigned band;
+
+ if (a->tuner)
+ return -EINVAL;
+ if (a->wrap_around && dev->radio_rx_hw_seek_mode == VIVID_HW_SEEK_BOUNDED)
+ return -EINVAL;
+
+ if (!a->wrap_around && dev->radio_rx_hw_seek_mode == VIVID_HW_SEEK_WRAP)
+ return -EINVAL;
+ if (!a->rangelow ^ !a->rangehigh)
+ return -EINVAL;
+
+ if (file->f_flags & O_NONBLOCK)
+ return -EWOULDBLOCK;
+
+ if (a->rangelow) {
+ for (band = 0; band < TOT_BANDS; band++)
+ if (a->rangelow >= vivid_radio_bands[band].rangelow &&
+ a->rangehigh <= vivid_radio_bands[band].rangehigh)
+ break;
+ if (band == TOT_BANDS)
+ return -EINVAL;
+ if (!dev->radio_rx_hw_seek_prog_lim &&
+ (a->rangelow != vivid_radio_bands[band].rangelow ||
+ a->rangehigh != vivid_radio_bands[band].rangehigh))
+ return -EINVAL;
+ low = a->rangelow;
+ high = a->rangehigh;
+ } else {
+ for (band = 0; band < TOT_BANDS; band++)
+ if (dev->radio_rx_freq >= vivid_radio_bands[band].rangelow &&
+ dev->radio_rx_freq <= vivid_radio_bands[band].rangehigh)
+ break;
+ low = vivid_radio_bands[band].rangelow;
+ high = vivid_radio_bands[band].rangehigh;
+ }
+ spacing = band == BAND_AM ? 1600 : 16000;
+ freq = clamp(dev->radio_rx_freq, low, high);
+
+ if (a->seek_upward) {
+ freq = spacing * (freq / spacing) + spacing;
+ if (freq > high) {
+ if (!a->wrap_around)
+ return -ENODATA;
+ freq = spacing * (low / spacing) + spacing;
+ if (freq >= dev->radio_rx_freq)
+ return -ENODATA;
+ }
+ } else {
+ freq = spacing * ((freq + spacing - 1) / spacing) - spacing;
+ if (freq < low) {
+ if (!a->wrap_around)
+ return -ENODATA;
+ freq = spacing * ((high + spacing - 1) / spacing) - spacing;
+ if (freq <= dev->radio_rx_freq)
+ return -ENODATA;
+ }
+ }
+ return 0;
+}
+
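+/*
+ * Report the tuner capabilities together with an emulated signal strength
+ * and AFC value based on how close the current frequency is to a
+ * 'channel', plus the resulting stereo/RDS subchannels.
+ */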
+int vivid_radio_rx_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ int delta = 800;
+ int sig_qual;
+
+ if (vt->index > 0)
+ return -EINVAL;
+
+ strlcpy(vt->name, "AM/FM/SW Receiver", sizeof(vt->name));
+ vt->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
+ V4L2_TUNER_CAP_FREQ_BANDS | V4L2_TUNER_CAP_RDS |
+ (dev->radio_rx_rds_controls ?
+ V4L2_TUNER_CAP_RDS_CONTROLS :
+ V4L2_TUNER_CAP_RDS_BLOCK_IO) |
+ (dev->radio_rx_hw_seek_prog_lim ?
+ V4L2_TUNER_CAP_HWSEEK_PROG_LIM : 0);
+ switch (dev->radio_rx_hw_seek_mode) {
+ case VIVID_HW_SEEK_BOUNDED:
+ vt->capability |= V4L2_TUNER_CAP_HWSEEK_BOUNDED;
+ break;
+ case VIVID_HW_SEEK_WRAP:
+ vt->capability |= V4L2_TUNER_CAP_HWSEEK_WRAP;
+ break;
+ case VIVID_HW_SEEK_BOTH:
+ vt->capability |= V4L2_TUNER_CAP_HWSEEK_WRAP |
+ V4L2_TUNER_CAP_HWSEEK_BOUNDED;
+ break;
+ }
+ vt->rangelow = AM_FREQ_RANGE_LOW;
+ vt->rangehigh = FM_FREQ_RANGE_HIGH;
+ sig_qual = dev->radio_rx_sig_qual;
+ vt->signal = abs(sig_qual) > delta ? 0 :
+ 0xffff - (abs(sig_qual) * 0xffff) / delta;
+ vt->afc = sig_qual > delta ? 0 : sig_qual;
+ if (abs(sig_qual) > delta)
+ vt->rxsubchans = 0;
+ else if (dev->radio_rx_freq < FM_FREQ_RANGE_LOW || vt->signal < 0x8000)
+ vt->rxsubchans = V4L2_TUNER_SUB_MONO;
+ else if (dev->radio_rds_loop && !(dev->radio_tx_subchans & V4L2_TUNER_SUB_STEREO))
+ vt->rxsubchans = V4L2_TUNER_SUB_MONO;
+ else
+ vt->rxsubchans = V4L2_TUNER_SUB_STEREO;
+ if (dev->radio_rx_rds_enabled &&
+ (!dev->radio_rds_loop || (dev->radio_tx_subchans & V4L2_TUNER_SUB_RDS)) &&
+ dev->radio_rx_freq >= FM_FREQ_RANGE_LOW && vt->signal >= 0xc000)
+ vt->rxsubchans |= V4L2_TUNER_SUB_RDS;
+ if (dev->radio_rx_rds_controls)
+ vivid_radio_rds_init(dev);
+ vt->audmode = dev->radio_rx_audmode;
+ return 0;
+}
+
+int vivid_radio_rx_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (vt->index)
+ return -EINVAL;
+ dev->radio_rx_audmode = vt->audmode >= V4L2_TUNER_MODE_STEREO;
+ return 0;
+}
diff --git a/drivers/media/platform/vivid/vivid-radio-rx.h b/drivers/media/platform/vivid/vivid-radio-rx.h
new file mode 100644
index 000000000000..1077d8f061eb
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-radio-rx.h
@@ -0,0 +1,31 @@
+/*
+ * vivid-radio-rx.h - radio receiver support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _VIVID_RADIO_RX_H_
+#define _VIVID_RADIO_RX_H_
+
+ssize_t vivid_radio_rx_read(struct file *, char __user *, size_t, loff_t *);
+unsigned int vivid_radio_rx_poll(struct file *file, struct poll_table_struct *wait);
+
+int vivid_radio_rx_enum_freq_bands(struct file *file, void *fh, struct v4l2_frequency_band *band);
+int vivid_radio_rx_s_hw_freq_seek(struct file *file, void *fh, const struct v4l2_hw_freq_seek *a);
+int vivid_radio_rx_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt);
+int vivid_radio_rx_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt);
+
+#endif
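
For context, a minimal user-space sketch of exercising the hardware-seek path above via VIDIOC_S_HW_FREQ_SEEK. The /dev/radio0 node name is an assumption; the handle is opened blocking because the driver rejects seeks on non-blocking handles, and wrap_around must match the configured hardware seek mode.

    /* Bounded upward hardware seek on the (assumed) vivid receiver node. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/videodev2.h>

    int main(void)
    {
            struct v4l2_hw_freq_seek seek;
            int fd = open("/dev/radio0", O_RDWR);   /* blocking open */

            if (fd < 0)
                    return 1;
            memset(&seek, 0, sizeof(seek));
            seek.tuner = 0;
            seek.type = V4L2_TUNER_RADIO;
            seek.seek_upward = 1;
            seek.wrap_around = 0;                   /* bounded seek */
            if (ioctl(fd, VIDIOC_S_HW_FREQ_SEEK, &seek) < 0)
                    perror("VIDIOC_S_HW_FREQ_SEEK");
            close(fd);
            return 0;
    }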
diff --git a/drivers/media/platform/vivid/vivid-radio-tx.c b/drivers/media/platform/vivid/vivid-radio-tx.c
new file mode 100644
index 000000000000..8c59d4f53200
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-radio-tx.c
@@ -0,0 +1,141 @@
+/*
+ * vivid-radio-tx.c - radio transmitter support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/videodev2.h>
+#include <linux/v4l2-dv-timings.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-dv-timings.h>
+
+#include "vivid-core.h"
+#include "vivid-ctrls.h"
+#include "vivid-radio-common.h"
+#include "vivid-radio-tx.h"
+
+ssize_t vivid_radio_tx_write(struct file *file, const char __user *buf,
+ size_t size, loff_t *offset)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_rds_data *data = dev->rds_gen.data;
+ struct timespec ts;
+ unsigned blk;
+ int i;
+
+ if (dev->radio_tx_rds_controls)
+ return -EINVAL;
+
+ if (size < sizeof(*data))
+ return -EINVAL;
+ size = sizeof(*data) * (size / sizeof(*data));
+
+ if (mutex_lock_interruptible(&dev->mutex))
+ return -ERESTARTSYS;
+ if (dev->radio_tx_rds_owner &&
+ file->private_data != dev->radio_tx_rds_owner) {
+ mutex_unlock(&dev->mutex);
+ return -EBUSY;
+ }
+ dev->radio_tx_rds_owner = file->private_data;
+
+retry:
+ ktime_get_ts(&ts);
+ ts = timespec_sub(ts, dev->radio_rds_init_ts);
+ blk = ts.tv_sec * 100 + ts.tv_nsec / 10000000;
+ blk = (blk * VIVID_RDS_GEN_BLOCKS) / 500;
+ if (blk - VIVID_RDS_GEN_BLOCKS >= dev->radio_tx_rds_last_block)
+ dev->radio_tx_rds_last_block = blk - VIVID_RDS_GEN_BLOCKS + 1;
+
+ /*
+ * No more blocks can be written if there hasn't been time to transmit
+ * the previously written blocks, or if the RDS subchannel has been
+ * disabled on this transmitter.
+ */
+ if (blk == dev->radio_tx_rds_last_block ||
+ !(dev->radio_tx_subchans & V4L2_TUNER_SUB_RDS)) {
+ mutex_unlock(&dev->mutex);
+ if (file->f_flags & O_NONBLOCK)
+ return -EWOULDBLOCK;
+ if (msleep_interruptible(20) && signal_pending(current))
+ return -EINTR;
+ if (mutex_lock_interruptible(&dev->mutex))
+ return -ERESTARTSYS;
+ goto retry;
+ }
+
+ for (i = 0; i < size && blk > dev->radio_tx_rds_last_block;
+ dev->radio_tx_rds_last_block++) {
+ unsigned data_blk = dev->radio_tx_rds_last_block % VIVID_RDS_GEN_BLOCKS;
+ struct v4l2_rds_data rds;
+
+ if (copy_from_user(&rds, buf + i, sizeof(rds))) {
+ i = -EFAULT;
+ break;
+ }
+ i += sizeof(rds);
+ if (!dev->radio_rds_loop)
+ continue;
+ if ((rds.block & V4L2_RDS_BLOCK_MSK) == V4L2_RDS_BLOCK_INVALID ||
+ (rds.block & V4L2_RDS_BLOCK_ERROR))
+ continue;
+ rds.block &= V4L2_RDS_BLOCK_MSK;
+ data[data_blk] = rds;
+ }
+ mutex_unlock(&dev->mutex);
+ return i;
+}
+
+unsigned int vivid_radio_tx_poll(struct file *file, struct poll_table_struct *wait)
+{
+ return POLLOUT | POLLWRNORM | v4l2_ctrl_poll(file, wait);
+}
+
+int vidioc_g_modulator(struct file *file, void *fh, struct v4l2_modulator *a)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (a->index > 0)
+ return -EINVAL;
+
+ strlcpy(a->name, "AM/FM/SW Transmitter", sizeof(a->name));
+ a->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
+ V4L2_TUNER_CAP_FREQ_BANDS | V4L2_TUNER_CAP_RDS |
+ (dev->radio_tx_rds_controls ?
+ V4L2_TUNER_CAP_RDS_CONTROLS :
+ V4L2_TUNER_CAP_RDS_BLOCK_IO);
+ a->rangelow = AM_FREQ_RANGE_LOW;
+ a->rangehigh = FM_FREQ_RANGE_HIGH;
+ a->txsubchans = dev->radio_tx_subchans;
+ return 0;
+}
+
+int vidioc_s_modulator(struct file *file, void *fh, const struct v4l2_modulator *a)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (a->index)
+ return -EINVAL;
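+ /* only the MONO (0x01), STEREO (0x02) and RDS (0x10) subchannels may be set */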
+ if (a->txsubchans & ~0x13)
+ return -EINVAL;
+ dev->radio_tx_subchans = a->txsubchans;
+ return 0;
+}
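
A similarly hedged sketch of the write() path above, pushing one RDS group in block I/O mode. The /dev/radio1 node name and the PI code are assumptions, and the transmitter must not be in RDS-controls mode (write() returns -EINVAL in that case).

    /* Write one RDS group (four v4l2_rds_data blocks) to the (assumed) FM transmitter node. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <linux/videodev2.h>

    int main(void)
    {
            struct v4l2_rds_data grp[4];
            unsigned i;
            int fd = open("/dev/radio1", O_WRONLY);

            if (fd < 0)
                    return 1;
            memset(grp, 0, sizeof(grp));
            for (i = 0; i < 4; i++)
                    grp[i].block = i;       /* V4L2_RDS_BLOCK_A .. V4L2_RDS_BLOCK_D */
            grp[0].msb = 0x20;              /* arbitrary PI code 0x2037 in block A */
            grp[0].lsb = 0x37;
            if (write(fd, grp, sizeof(grp)) < 0)
                    perror("rds write");
            close(fd);
            return 0;
    }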
diff --git a/drivers/media/platform/vivid/vivid-radio-tx.h b/drivers/media/platform/vivid/vivid-radio-tx.h
new file mode 100644
index 000000000000..7f8ff7547119
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-radio-tx.h
@@ -0,0 +1,29 @@
+/*
+ * vivid-radio-tx.h - radio transmitter support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _VIVID_RADIO_TX_H_
+#define _VIVID_RADIO_TX_H_
+
+ssize_t vivid_radio_tx_write(struct file *, const char __user *, size_t, loff_t *);
+unsigned int vivid_radio_tx_poll(struct file *file, struct poll_table_struct *wait);
+
+int vidioc_g_modulator(struct file *file, void *fh, struct v4l2_modulator *a);
+int vidioc_s_modulator(struct file *file, void *fh, const struct v4l2_modulator *a);
+
+#endif
diff --git a/drivers/media/platform/vivid/vivid-rds-gen.c b/drivers/media/platform/vivid/vivid-rds-gen.c
new file mode 100644
index 000000000000..c382343fdb66
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-rds-gen.c
@@ -0,0 +1,166 @@
+/*
+ * vivid-rds-gen.c - rds (radio data system) generator support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/ktime.h>
+#include <linux/string.h>
+#include <linux/videodev2.h>
+
+#include "vivid-rds-gen.h"
+
+static u8 vivid_get_di(const struct vivid_rds_gen *rds, unsigned grp)
+{
+ switch (grp) {
+ case 0:
+ return (rds->dyn_pty << 2) | (grp & 3);
+ case 1:
+ return (rds->compressed << 2) | (grp & 3);
+ case 2:
+ return (rds->art_head << 2) | (grp & 3);
+ case 3:
+ return (rds->mono_stereo << 2) | (grp & 3);
+ }
+ return 0;
+}
+
+/*
+ * This RDS generator creates 57 RDS groups (one group == four RDS blocks).
+ * Groups 0-3, 22-25 and 44-47 (spaced 22 groups apart) are filled with a
+ * standard 0B group containing the PI code and PS name.
+ *
+ * Groups 4-19 and 26-41 use group 2A for the radio text.
+ *
+ * Group 56 contains the time (group 4A).
+ *
+ * All remaining groups use a filler group 15B block that just repeats
+ * the PI and PTY codes.
+ */
+void vivid_rds_generate(struct vivid_rds_gen *rds)
+{
+ struct v4l2_rds_data *data = rds->data;
+ unsigned grp;
+ struct tm tm;
+ unsigned date;
+ unsigned time;
+ int l;
+
+ for (grp = 0; grp < VIVID_RDS_GEN_GROUPS; grp++, data += VIVID_RDS_GEN_BLKS_PER_GRP) {
+ data[0].lsb = rds->picode & 0xff;
+ data[0].msb = rds->picode >> 8;
+ data[0].block = V4L2_RDS_BLOCK_A | (V4L2_RDS_BLOCK_A << 3);
+ data[1].lsb = rds->pty << 5;
+ data[1].msb = (rds->pty >> 3) | (rds->tp << 2);
+ data[1].block = V4L2_RDS_BLOCK_B | (V4L2_RDS_BLOCK_B << 3);
+ data[3].block = V4L2_RDS_BLOCK_D | (V4L2_RDS_BLOCK_D << 3);
+
+ switch (grp) {
+ case 0 ... 3:
+ case 22 ... 25:
+ case 44 ... 47: /* Group 0B */
+ data[1].lsb |= (rds->ta << 4) | (rds->ms << 3);
+ data[1].lsb |= vivid_get_di(rds, grp % 22);
+ data[1].msb |= 1 << 3;
+ data[2].lsb = rds->picode & 0xff;
+ data[2].msb = rds->picode >> 8;
+ data[2].block = V4L2_RDS_BLOCK_C_ALT | (V4L2_RDS_BLOCK_C_ALT << 3);
+ data[3].lsb = rds->psname[2 * (grp % 22) + 1];
+ data[3].msb = rds->psname[2 * (grp % 22)];
+ break;
+ case 4 ... 19:
+ case 26 ... 41: /* Group 2A */
+ data[1].lsb |= (grp - 4) % 22;
+ data[1].msb |= 4 << 3;
+ data[2].msb = rds->radiotext[4 * ((grp - 4) % 22)];
+ data[2].lsb = rds->radiotext[4 * ((grp - 4) % 22) + 1];
+ data[2].block = V4L2_RDS_BLOCK_C | (V4L2_RDS_BLOCK_C << 3);
+ data[3].msb = rds->radiotext[4 * ((grp - 4) % 22) + 2];
+ data[3].lsb = rds->radiotext[4 * ((grp - 4) % 22) + 3];
+ break;
+ case 56:
+ /*
+ * Group 4A
+ *
+ * Uses the algorithm from Annex G of the RDS standard
+ * EN 50067:1998 to convert a UTC date to an RDS Modified
+ * Julian Day.
+ */
+ time_to_tm(get_seconds(), 0, &tm);
+ l = tm.tm_mon <= 1;
+ date = 14956 + tm.tm_mday + ((tm.tm_year - l) * 1461) / 4 +
+ ((tm.tm_mon + 2 + l * 12) * 306001) / 10000;
+ time = (tm.tm_hour << 12) |
+ (tm.tm_min << 6) |
+ (sys_tz.tz_minuteswest >= 0 ? 0x20 : 0) |
+ (abs(sys_tz.tz_minuteswest) / 30);
+ data[1].lsb &= ~3;
+ data[1].lsb |= date >> 15;
+ data[1].msb |= 8 << 3;
+ data[2].lsb = (date << 1) & 0xfe;
+ data[2].lsb |= (time >> 16) & 1;
+ data[2].msb = (date >> 7) & 0xff;
+ data[2].block = V4L2_RDS_BLOCK_C | (V4L2_RDS_BLOCK_C << 3);
+ data[3].lsb = time & 0xff;
+ data[3].msb = (time >> 8) & 0xff;
+ break;
+ default: /* Group 15B */
+ data[1].lsb |= (rds->ta << 4) | (rds->ms << 3);
+ data[1].lsb |= vivid_get_di(rds, grp % 22);
+ data[1].msb |= 0x1f << 3;
+ data[2].lsb = rds->picode & 0xff;
+ data[2].msb = rds->picode >> 8;
+ data[2].block = V4L2_RDS_BLOCK_C_ALT | (V4L2_RDS_BLOCK_C_ALT << 3);
+ data[3].lsb = rds->pty << 5;
+ data[3].lsb |= (rds->ta << 4) | (rds->ms << 3);
+ data[3].lsb |= vivid_get_di(rds, grp % 22);
+ data[3].msb |= rds->pty >> 3;
+ data[3].msb |= 0x1f << 3;
+ break;
+ }
+ }
+}
+
+void vivid_rds_gen_fill(struct vivid_rds_gen *rds, unsigned freq,
+ bool alt)
+{
+ /* Alternate PTY between Info and Weather */
+ if (rds->use_rbds) {
+ rds->picode = 0x2e75; /* 'KLNX' call sign */
+ rds->pty = alt ? 29 : 2;
+ } else {
+ rds->picode = 0x8088;
+ rds->pty = alt ? 16 : 3;
+ }
+ rds->mono_stereo = true;
+ rds->art_head = false;
+ rds->compressed = false;
+ rds->dyn_pty = false;
+ rds->tp = true;
+ rds->ta = alt;
+ rds->ms = true;
+ snprintf(rds->psname, sizeof(rds->psname), "%6d.%1d",
+ freq / 16, ((freq & 0xf) * 10) / 16);
+ if (alt)
+ strlcpy(rds->radiotext,
+ " The Radio Data System can switch between different Radio Texts ",
+ sizeof(rds->radiotext));
+ else
+ strlcpy(rds->radiotext,
+ "An example of Radio Text as transmitted by the Radio Data System",
+ sizeof(rds->radiotext));
+}
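
The Annex G date conversion used in the group 4A case can be cross-checked outside the kernel. The snippet below mirrors the integer arithmetic above (year counted from 1900, month 0-based); the test date is arbitrary.

    #include <stdio.h>

    /* Modified Julian Day per EN 50067:1998 Annex G, as coded above. */
    static unsigned rds_mjd(unsigned year, unsigned mon, unsigned mday)
    {
            unsigned l = mon <= 1;

            return 14956 + mday + ((year - l) * 1461) / 4 +
                   ((mon + 2 + l * 12) * 306001) / 10000;
    }

    int main(void)
    {
            /* 5 September 2014: prints 56905 */
            printf("%u\n", rds_mjd(114, 8, 5));
            return 0;
    }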
diff --git a/drivers/media/platform/vivid/vivid-rds-gen.h b/drivers/media/platform/vivid/vivid-rds-gen.h
new file mode 100644
index 000000000000..eff4bf552ed3
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-rds-gen.h
@@ -0,0 +1,53 @@
+/*
+ * vivid-rds-gen.h - rds (radio data system) generator support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _VIVID_RDS_GEN_H_
+#define _VIVID_RDS_GEN_H_
+
+/*
+ * It takes almost exactly 5 seconds to transmit 57 RDS groups.
+ * Each group has 4 blocks and each block has a payload of 16 bits + a
+ * block identification. The driver generates the contents of these 57
+ * groups only when necessary; the resulting stream is played back continuously.
+ */
+#define VIVID_RDS_GEN_GROUPS 57
+#define VIVID_RDS_GEN_BLKS_PER_GRP 4
+#define VIVID_RDS_GEN_BLOCKS (VIVID_RDS_GEN_BLKS_PER_GRP * VIVID_RDS_GEN_GROUPS)
+
+struct vivid_rds_gen {
+ struct v4l2_rds_data data[VIVID_RDS_GEN_BLOCKS];
+ bool use_rbds;
+ u16 picode;
+ u8 pty;
+ bool mono_stereo;
+ bool art_head;
+ bool compressed;
+ bool dyn_pty;
+ bool ta;
+ bool tp;
+ bool ms;
+ char psname[8 + 1];
+ char radiotext[64 + 1];
+};
+
+void vivid_rds_gen_fill(struct vivid_rds_gen *rds, unsigned freq,
+ bool use_alternate);
+void vivid_rds_generate(struct vivid_rds_gen *rds);
+
+#endif
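
For reference, the "almost exactly 5 seconds" above follows from the standard RDS bit rate: 57 groups x 4 blocks x 26 bits = 5928 bits, and 5928 / 1187.5 bit/s is roughly 4.99 s.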
diff --git a/drivers/media/platform/vivid/vivid-sdr-cap.c b/drivers/media/platform/vivid/vivid-sdr-cap.c
new file mode 100644
index 000000000000..8c5d661cfc49
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-sdr-cap.c
@@ -0,0 +1,499 @@
+/*
+ * vivid-sdr-cap.c - software defined radio support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <linux/videodev2.h>
+#include <linux/v4l2-dv-timings.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-dv-timings.h>
+
+#include "vivid-core.h"
+#include "vivid-ctrls.h"
+#include "vivid-sdr-cap.h"
+
+static const struct v4l2_frequency_band bands_adc[] = {
+ {
+ .tuner = 0,
+ .type = V4L2_TUNER_ADC,
+ .index = 0,
+ .capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS,
+ .rangelow = 300000,
+ .rangehigh = 300000,
+ },
+ {
+ .tuner = 0,
+ .type = V4L2_TUNER_ADC,
+ .index = 1,
+ .capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS,
+ .rangelow = 900001,
+ .rangehigh = 2800000,
+ },
+ {
+ .tuner = 0,
+ .type = V4L2_TUNER_ADC,
+ .index = 2,
+ .capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS,
+ .rangelow = 3200000,
+ .rangehigh = 3200000,
+ },
+};
+
+/* ADC band midpoints */
+#define BAND_ADC_0 ((bands_adc[0].rangehigh + bands_adc[1].rangelow) / 2)
+#define BAND_ADC_1 ((bands_adc[1].rangehigh + bands_adc[2].rangelow) / 2)
+
+static const struct v4l2_frequency_band bands_fm[] = {
+ {
+ .tuner = 1,
+ .type = V4L2_TUNER_RF,
+ .index = 0,
+ .capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS,
+ .rangelow = 50000000,
+ .rangehigh = 2000000000,
+ },
+};
+
+static void vivid_thread_sdr_cap_tick(struct vivid_dev *dev)
+{
+ struct vivid_buffer *sdr_cap_buf = NULL;
+
+ dprintk(dev, 1, "SDR Capture Thread Tick\n");
+
+ /* Drop a certain percentage of buffers. */
+ if (dev->perc_dropped_buffers &&
+ prandom_u32_max(100) < dev->perc_dropped_buffers)
+ return;
+
+ spin_lock(&dev->slock);
+ if (!list_empty(&dev->sdr_cap_active)) {
+ sdr_cap_buf = list_entry(dev->sdr_cap_active.next,
+ struct vivid_buffer, list);
+ list_del(&sdr_cap_buf->list);
+ }
+ spin_unlock(&dev->slock);
+
+ if (sdr_cap_buf) {
+ sdr_cap_buf->vb.v4l2_buf.sequence = dev->sdr_cap_seq_count;
+ vivid_sdr_cap_process(dev, sdr_cap_buf);
+ v4l2_get_timestamp(&sdr_cap_buf->vb.v4l2_buf.timestamp);
+ sdr_cap_buf->vb.v4l2_buf.timestamp.tv_sec += dev->time_wrap_offset;
+ vb2_buffer_done(&sdr_cap_buf->vb, dev->dqbuf_error ?
+ VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+ dev->dqbuf_error = false;
+ }
+}
+
+static int vivid_thread_sdr_cap(void *data)
+{
+ struct vivid_dev *dev = data;
+ u64 samples_since_start;
+ u64 buffers_since_start;
+ u64 next_jiffies_since_start;
+ unsigned long jiffies_since_start;
+ unsigned long cur_jiffies;
+ unsigned wait_jiffies;
+
+ dprintk(dev, 1, "SDR Capture Thread Start\n");
+
+ set_freezable();
+
+ /* Resets frame counters */
+ dev->sdr_cap_seq_offset = 0;
+ if (dev->seq_wrap)
+ dev->sdr_cap_seq_offset = 0xffffff80U;
+ dev->jiffies_sdr_cap = jiffies;
+ dev->sdr_cap_seq_resync = false;
+
+ for (;;) {
+ try_to_freeze();
+ if (kthread_should_stop())
+ break;
+
+ mutex_lock(&dev->mutex);
+ cur_jiffies = jiffies;
+ if (dev->sdr_cap_seq_resync) {
+ dev->jiffies_sdr_cap = cur_jiffies;
+ dev->sdr_cap_seq_offset = dev->sdr_cap_seq_count + 1;
+ dev->sdr_cap_seq_count = 0;
+ dev->sdr_cap_seq_resync = false;
+ }
+ /* Calculate the number of jiffies since we started streaming */
+ jiffies_since_start = cur_jiffies - dev->jiffies_sdr_cap;
+ /* Get the number of buffers streamed since the start */
+ buffers_since_start = (u64)jiffies_since_start * dev->sdr_adc_freq +
+ (HZ * SDR_CAP_SAMPLES_PER_BUF) / 2;
+ do_div(buffers_since_start, HZ * SDR_CAP_SAMPLES_PER_BUF);
+
+ /*
+ * After more than 0xf0000000 jiffies (rounded down to a multiple
+ * of 'jiffies per day' to ease the jiffies_to_msecs calculation)
+ * have passed since we started streaming, reset the counters and
+ * keep track of the sequence offset.
+ */
+ if (jiffies_since_start > JIFFIES_RESYNC) {
+ dev->jiffies_sdr_cap = cur_jiffies;
+ dev->sdr_cap_seq_offset = buffers_since_start;
+ buffers_since_start = 0;
+ }
+ dev->sdr_cap_seq_count = buffers_since_start + dev->sdr_cap_seq_offset;
+
+ vivid_thread_sdr_cap_tick(dev);
+ mutex_unlock(&dev->mutex);
+
+ /*
+ * Calculate the number of samples streamed since we started,
+ * not including the current buffer.
+ */
+ samples_since_start = buffers_since_start * SDR_CAP_SAMPLES_PER_BUF;
+
+ /* And the number of jiffies since we started */
+ jiffies_since_start = jiffies - dev->jiffies_sdr_cap;
+
+ /* Increase by the number of samples in one buffer */
+ samples_since_start += SDR_CAP_SAMPLES_PER_BUF;
+ /*
+ * Calculate when that next buffer is supposed to start
+ * in jiffies since we started streaming.
+ */
+ next_jiffies_since_start = samples_since_start * HZ +
+ dev->sdr_adc_freq / 2;
+ do_div(next_jiffies_since_start, dev->sdr_adc_freq);
+ /* If it is in the past, then just schedule asap */
+ if (next_jiffies_since_start < jiffies_since_start)
+ next_jiffies_since_start = jiffies_since_start;
+
+ wait_jiffies = next_jiffies_since_start - jiffies_since_start;
+ schedule_timeout_interruptible(wait_jiffies ? wait_jiffies : 1);
+ }
+ dprintk(dev, 1, "SDR Capture Thread End\n");
+ return 0;
+}
+
+static int sdr_cap_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+ unsigned *nbuffers, unsigned *nplanes,
+ unsigned sizes[], void *alloc_ctxs[])
+{
+ /* 2 bytes per sample: one 8-bit I and one 8-bit Q value (V4L2_SDR_FMT_CU8) */
+ sizes[0] = SDR_CAP_SAMPLES_PER_BUF * 2;
+ *nplanes = 1;
+ return 0;
+}
+
+static int sdr_cap_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned size = SDR_CAP_SAMPLES_PER_BUF * 2;
+
+ dprintk(dev, 1, "%s\n", __func__);
+
+ if (dev->buf_prepare_error) {
+ /*
+ * Error injection: test what happens if buf_prepare() returns
+ * an error.
+ */
+ dev->buf_prepare_error = false;
+ return -EINVAL;
+ }
+ if (vb2_plane_size(vb, 0) < size) {
+ dprintk(dev, 1, "%s data will not fit into plane (%lu < %u)\n",
+ __func__, vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+ vb2_set_plane_payload(vb, 0, size);
+
+ return 0;
+}
+
+static void sdr_cap_buf_queue(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct vivid_buffer *buf = container_of(vb, struct vivid_buffer, vb);
+
+ dprintk(dev, 1, "%s\n", __func__);
+
+ spin_lock(&dev->slock);
+ list_add_tail(&buf->list, &dev->sdr_cap_active);
+ spin_unlock(&dev->slock);
+}
+
+static int sdr_cap_start_streaming(struct vb2_queue *vq, unsigned count)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+ int err = 0;
+
+ dprintk(dev, 1, "%s\n", __func__);
+ dev->sdr_cap_seq_count = 0;
+ if (dev->start_streaming_error) {
+ dev->start_streaming_error = false;
+ err = -EINVAL;
+ } else if (dev->kthread_sdr_cap == NULL) {
+ dev->kthread_sdr_cap = kthread_run(vivid_thread_sdr_cap, dev,
+ "%s-sdr-cap", dev->v4l2_dev.name);
+
+ if (IS_ERR(dev->kthread_sdr_cap)) {
+ v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
+ err = PTR_ERR(dev->kthread_sdr_cap);
+ dev->kthread_sdr_cap = NULL;
+ }
+ }
+ if (err) {
+ struct vivid_buffer *buf, *tmp;
+
+ list_for_each_entry_safe(buf, tmp, &dev->sdr_cap_active, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+ }
+ }
+ return err;
+}
+
+/* abort streaming and wait for last buffer */
+static void sdr_cap_stop_streaming(struct vb2_queue *vq)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+
+ if (dev->kthread_sdr_cap == NULL)
+ return;
+
+ while (!list_empty(&dev->sdr_cap_active)) {
+ struct vivid_buffer *buf;
+
+ buf = list_entry(dev->sdr_cap_active.next, struct vivid_buffer, list);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+
+ /* shutdown control thread */
+ mutex_unlock(&dev->mutex);
+ kthread_stop(dev->kthread_sdr_cap);
+ dev->kthread_sdr_cap = NULL;
+ mutex_lock(&dev->mutex);
+}
+
+const struct vb2_ops vivid_sdr_cap_qops = {
+ .queue_setup = sdr_cap_queue_setup,
+ .buf_prepare = sdr_cap_buf_prepare,
+ .buf_queue = sdr_cap_buf_queue,
+ .start_streaming = sdr_cap_start_streaming,
+ .stop_streaming = sdr_cap_stop_streaming,
+ .wait_prepare = vivid_unlock,
+ .wait_finish = vivid_lock,
+};
+
+int vivid_sdr_enum_freq_bands(struct file *file, void *fh, struct v4l2_frequency_band *band)
+{
+ switch (band->tuner) {
+ case 0:
+ if (band->index >= ARRAY_SIZE(bands_adc))
+ return -EINVAL;
+ *band = bands_adc[band->index];
+ return 0;
+ case 1:
+ if (band->index >= ARRAY_SIZE(bands_fm))
+ return -EINVAL;
+ *band = bands_fm[band->index];
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+int vivid_sdr_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ switch (vf->tuner) {
+ case 0:
+ vf->frequency = dev->sdr_adc_freq;
+ vf->type = V4L2_TUNER_ADC;
+ return 0;
+ case 1:
+ vf->frequency = dev->sdr_fm_freq;
+ vf->type = V4L2_TUNER_RF;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+int vivid_sdr_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ unsigned freq = vf->frequency;
+ unsigned band;
+
+ switch (vf->tuner) {
+ case 0:
+ if (vf->type != V4L2_TUNER_ADC)
+ return -EINVAL;
+ if (freq < BAND_ADC_0)
+ band = 0;
+ else if (freq < BAND_ADC_1)
+ band = 1;
+ else
+ band = 2;
+
+ freq = clamp_t(unsigned, freq,
+ bands_adc[band].rangelow,
+ bands_adc[band].rangehigh);
+
+ if (vb2_is_streaming(&dev->vb_sdr_cap_q) &&
+ freq != dev->sdr_adc_freq) {
+ /* resync the thread's timings */
+ dev->sdr_cap_seq_resync = true;
+ }
+ dev->sdr_adc_freq = freq;
+ return 0;
+ case 1:
+ if (vf->type != V4L2_TUNER_RF)
+ return -EINVAL;
+ dev->sdr_fm_freq = clamp_t(unsigned, freq,
+ bands_fm[0].rangelow,
+ bands_fm[0].rangehigh);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+int vivid_sdr_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
+{
+ switch (vt->index) {
+ case 0:
+ strlcpy(vt->name, "ADC", sizeof(vt->name));
+ vt->type = V4L2_TUNER_ADC;
+ vt->capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS;
+ vt->rangelow = bands_adc[0].rangelow;
+ vt->rangehigh = bands_adc[2].rangehigh;
+ return 0;
+ case 1:
+ strlcpy(vt->name, "RF", sizeof(vt->name));
+ vt->type = V4L2_TUNER_RF;
+ vt->capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS;
+ vt->rangelow = bands_fm[0].rangelow;
+ vt->rangehigh = bands_fm[0].rangehigh;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+int vivid_sdr_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt)
+{
+ if (vt->index > 1)
+ return -EINVAL;
+ return 0;
+}
+
+int vidioc_enum_fmt_sdr_cap(struct file *file, void *fh, struct v4l2_fmtdesc *f)
+{
+ if (f->index)
+ return -EINVAL;
+ f->pixelformat = V4L2_SDR_FMT_CU8;
+ strlcpy(f->description, "IQ U8", sizeof(f->description));
+ return 0;
+}
+
+int vidioc_g_fmt_sdr_cap(struct file *file, void *fh, struct v4l2_format *f)
+{
+ f->fmt.sdr.pixelformat = V4L2_SDR_FMT_CU8;
+ f->fmt.sdr.buffersize = SDR_CAP_SAMPLES_PER_BUF * 2;
+ memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
+ return 0;
+}
+
+#define FIXP_FRAC (1 << 15)
+#define FIXP_PI ((int)(FIXP_FRAC * 3.141592653589))
+
+/* cos() from cx88 driver: cx88-dsp.c */
+static s32 fixp_cos(unsigned int x)
+{
+ u32 t2, t4, t6, t8;
+ u16 period = x / FIXP_PI;
+
+ if (period % 2)
+ return -fixp_cos(x - FIXP_PI);
+ x = x % FIXP_PI;
+ if (x > FIXP_PI/2)
+ return -fixp_cos(FIXP_PI/2 - (x % (FIXP_PI/2)));
+ /* Now x is between 0 and FIXP_PI/2.
+ * To calculate cos(x) we use its Taylor polynomial. */
+ t2 = x*x/FIXP_FRAC/2;
+ t4 = t2*x/FIXP_FRAC*x/FIXP_FRAC/3/4;
+ t6 = t4*x/FIXP_FRAC*x/FIXP_FRAC/5/6;
+ t8 = t6*x/FIXP_FRAC*x/FIXP_FRAC/7/8;
+ return FIXP_FRAC-t2+t4-t6+t8;
+}
+
+static inline s32 fixp_sin(unsigned int x)
+{
+ return -fixp_cos(x + (FIXP_PI / 2));
+}
+
+void vivid_sdr_cap_process(struct vivid_dev *dev, struct vivid_buffer *buf)
+{
+ u8 *vbuf = vb2_plane_vaddr(&buf->vb, 0);
+ unsigned long i;
+ unsigned long plane_size = vb2_plane_size(&buf->vb, 0);
+ int fixp_src_phase_step, fixp_i, fixp_q;
+
+ /*
+ * TODO: the generated beep tone becomes very crackly when the sample
+ * rate is increased to ~1 Msps or more. That is caused by the large
+ * rounding error in the phase angle due to the cosine implementation used.
+ */
+
+ /* calculate phase step */
+ #define BEEP_FREQ 1000 /* 1kHz beep */
+ fixp_src_phase_step = DIV_ROUND_CLOSEST(2 * FIXP_PI * BEEP_FREQ,
+ dev->sdr_adc_freq);
+
+ for (i = 0; i < plane_size; i += 2) {
+ dev->sdr_fixp_mod_phase += fixp_cos(dev->sdr_fixp_src_phase);
+ dev->sdr_fixp_src_phase += fixp_src_phase_step;
+
+ /*
+ * Keep the phases within [0, 2*PI] to avoid variable overflow
+ * and to suit the cosine implementation used, which does not
+ * support negative angles.
+ */
+ while (dev->sdr_fixp_mod_phase < (0 * FIXP_PI))
+ dev->sdr_fixp_mod_phase += (2 * FIXP_PI);
+ while (dev->sdr_fixp_mod_phase > (2 * FIXP_PI))
+ dev->sdr_fixp_mod_phase -= (2 * FIXP_PI);
+
+ while (dev->sdr_fixp_src_phase > (2 * FIXP_PI))
+ dev->sdr_fixp_src_phase -= (2 * FIXP_PI);
+
+ fixp_i = fixp_cos(dev->sdr_fixp_mod_phase);
+ fixp_q = fixp_sin(dev->sdr_fixp_mod_phase);
+
+ /* convert 'fixp float' to u8 */
+ /* u8 = X * 127.5f + 127.5f; where X is float [-1.0 / +1.0] */
+ fixp_i = fixp_i * 1275 + FIXP_FRAC * 1275;
+ fixp_q = fixp_q * 1275 + FIXP_FRAC * 1275;
+ *vbuf++ = DIV_ROUND_CLOSEST(fixp_i, FIXP_FRAC * 10);
+ *vbuf++ = DIV_ROUND_CLOSEST(fixp_q, FIXP_FRAC * 10);
+ }
+}
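
A short user-space sketch of querying the SDR capture format handled above; the /dev/swradio0 node name is an assumption.

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/videodev2.h>

    int main(void)
    {
            struct v4l2_format fmt;
            int fd = open("/dev/swradio0", O_RDWR);

            if (fd < 0)
                    return 1;
            memset(&fmt, 0, sizeof(fmt));
            fmt.type = V4L2_BUF_TYPE_SDR_CAPTURE;
            if (ioctl(fd, VIDIOC_G_FMT, &fmt) == 0)
                    printf("pixelformat 0x%08x buffersize %u\n",
                           fmt.fmt.sdr.pixelformat, fmt.fmt.sdr.buffersize);
            close(fd);
            return 0;
    }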
diff --git a/drivers/media/platform/vivid/vivid-sdr-cap.h b/drivers/media/platform/vivid/vivid-sdr-cap.h
new file mode 100644
index 000000000000..79c1890de972
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-sdr-cap.h
@@ -0,0 +1,34 @@
+/*
+ * vivid-sdr-cap.h - software defined radio support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _VIVID_SDR_CAP_H_
+#define _VIVID_SDR_CAP_H_
+
+int vivid_sdr_enum_freq_bands(struct file *file, void *fh, struct v4l2_frequency_band *band);
+int vivid_sdr_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf);
+int vivid_sdr_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf);
+int vivid_sdr_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt);
+int vivid_sdr_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt);
+int vidioc_enum_fmt_sdr_cap(struct file *file, void *fh, struct v4l2_fmtdesc *f);
+int vidioc_g_fmt_sdr_cap(struct file *file, void *fh, struct v4l2_format *f);
+void vivid_sdr_cap_process(struct vivid_dev *dev, struct vivid_buffer *buf);
+
+extern const struct vb2_ops vivid_sdr_cap_qops;
+
+#endif
diff --git a/drivers/media/platform/vivid/vivid-tpg-colors.c b/drivers/media/platform/vivid/vivid-tpg-colors.c
new file mode 100644
index 000000000000..2adddc0ca662
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-tpg-colors.c
@@ -0,0 +1,310 @@
+/*
+ * vivid-tpg-colors.c - A table that converts colors to various colorspaces
+ *
+ * The test pattern generator uses the tpg_colors for its test patterns.
+ * For testing colorspaces the first 8 colors of that table need to be
+ * converted to their equivalent in the target colorspace.
+ *
+ * The tpg_csc_colors[] table is the result of that conversion and, since
+ * it is precalculated, the colorspace conversion becomes a simple table
+ * lookup.
+ *
+ * This source also contains the code used to generate the tpg_csc_colors
+ * table. Run the following command to compile it:
+ *
+ * gcc vivid-tpg-colors.c -DCOMPILE_APP -o gen-colors -lm
+ *
+ * and run the utility.
+ *
+ * Note that the converted colors are in the range 0x000-0xff0 (so times 16)
+ * in order to preserve precision.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/videodev2.h>
+
+#include "vivid-tpg-colors.h"
+
+/* sRGB colors with range [0-255] */
+const struct color tpg_colors[TPG_COLOR_MAX] = {
+ /*
+ * Colors to test colorspace conversion: converting these colors
+ * to other colorspaces will never lead to out-of-gamut colors.
+ */
+ { 191, 191, 191 }, /* TPG_COLOR_CSC_WHITE */
+ { 191, 191, 50 }, /* TPG_COLOR_CSC_YELLOW */
+ { 50, 191, 191 }, /* TPG_COLOR_CSC_CYAN */
+ { 50, 191, 50 }, /* TPG_COLOR_CSC_GREEN */
+ { 191, 50, 191 }, /* TPG_COLOR_CSC_MAGENTA */
+ { 191, 50, 50 }, /* TPG_COLOR_CSC_RED */
+ { 50, 50, 191 }, /* TPG_COLOR_CSC_BLUE */
+ { 50, 50, 50 }, /* TPG_COLOR_CSC_BLACK */
+
+ /* 75% colors */
+ { 191, 191, 0 }, /* TPG_COLOR_75_YELLOW */
+ { 0, 191, 191 }, /* TPG_COLOR_75_CYAN */
+ { 0, 191, 0 }, /* TPG_COLOR_75_GREEN */
+ { 191, 0, 191 }, /* TPG_COLOR_75_MAGENTA */
+ { 191, 0, 0 }, /* TPG_COLOR_75_RED */
+ { 0, 0, 191 }, /* TPG_COLOR_75_BLUE */
+
+ /* 100% colors */
+ { 255, 255, 255 }, /* TPG_COLOR_100_WHITE */
+ { 255, 255, 0 }, /* TPG_COLOR_100_YELLOW */
+ { 0, 255, 255 }, /* TPG_COLOR_100_CYAN */
+ { 0, 255, 0 }, /* TPG_COLOR_100_GREEN */
+ { 255, 0, 255 }, /* TPG_COLOR_100_MAGENTA */
+ { 255, 0, 0 }, /* TPG_COLOR_100_RED */
+ { 0, 0, 255 }, /* TPG_COLOR_100_BLUE */
+ { 0, 0, 0 }, /* TPG_COLOR_100_BLACK */
+
+ { 0, 0, 0 }, /* TPG_COLOR_RANDOM placeholder */
+};
+
+#ifndef COMPILE_APP
+
+/* Generated table */
+const struct color16 tpg_csc_colors[V4L2_COLORSPACE_SRGB + 1][TPG_COLOR_CSC_BLACK + 1] = {
+ [V4L2_COLORSPACE_SMPTE170M][0] = { 2953, 2939, 2939 },
+ [V4L2_COLORSPACE_SMPTE170M][1] = { 2954, 2963, 585 },
+ [V4L2_COLORSPACE_SMPTE170M][2] = { 84, 2967, 2937 },
+ [V4L2_COLORSPACE_SMPTE170M][3] = { 93, 2990, 575 },
+ [V4L2_COLORSPACE_SMPTE170M][4] = { 3030, 259, 2933 },
+ [V4L2_COLORSPACE_SMPTE170M][5] = { 3031, 406, 557 },
+ [V4L2_COLORSPACE_SMPTE170M][6] = { 544, 428, 2931 },
+ [V4L2_COLORSPACE_SMPTE170M][7] = { 551, 547, 547 },
+ [V4L2_COLORSPACE_SMPTE240M][0] = { 2926, 2926, 2926 },
+ [V4L2_COLORSPACE_SMPTE240M][1] = { 2926, 2926, 857 },
+ [V4L2_COLORSPACE_SMPTE240M][2] = { 1594, 2901, 2901 },
+ [V4L2_COLORSPACE_SMPTE240M][3] = { 1594, 2901, 774 },
+ [V4L2_COLORSPACE_SMPTE240M][4] = { 2484, 618, 2858 },
+ [V4L2_COLORSPACE_SMPTE240M][5] = { 2484, 618, 617 },
+ [V4L2_COLORSPACE_SMPTE240M][6] = { 507, 507, 2832 },
+ [V4L2_COLORSPACE_SMPTE240M][7] = { 507, 507, 507 },
+ [V4L2_COLORSPACE_REC709][0] = { 2939, 2939, 2939 },
+ [V4L2_COLORSPACE_REC709][1] = { 2939, 2939, 547 },
+ [V4L2_COLORSPACE_REC709][2] = { 547, 2939, 2939 },
+ [V4L2_COLORSPACE_REC709][3] = { 547, 2939, 547 },
+ [V4L2_COLORSPACE_REC709][4] = { 2939, 547, 2939 },
+ [V4L2_COLORSPACE_REC709][5] = { 2939, 547, 547 },
+ [V4L2_COLORSPACE_REC709][6] = { 547, 547, 2939 },
+ [V4L2_COLORSPACE_REC709][7] = { 547, 547, 547 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][0] = { 2894, 2988, 2808 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][1] = { 2847, 3070, 843 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][2] = { 1656, 2962, 2783 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][3] = { 1572, 3045, 763 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][4] = { 2477, 229, 2743 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][5] = { 2422, 672, 614 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][6] = { 725, 63, 2718 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][7] = { 534, 561, 509 },
+ [V4L2_COLORSPACE_470_SYSTEM_BG][0] = { 2939, 2939, 2939 },
+ [V4L2_COLORSPACE_470_SYSTEM_BG][1] = { 2939, 2939, 621 },
+ [V4L2_COLORSPACE_470_SYSTEM_BG][2] = { 786, 2939, 2939 },
+ [V4L2_COLORSPACE_470_SYSTEM_BG][3] = { 786, 2939, 621 },
+ [V4L2_COLORSPACE_470_SYSTEM_BG][4] = { 2879, 547, 2923 },
+ [V4L2_COLORSPACE_470_SYSTEM_BG][5] = { 2879, 547, 547 },
+ [V4L2_COLORSPACE_470_SYSTEM_BG][6] = { 547, 547, 2923 },
+ [V4L2_COLORSPACE_470_SYSTEM_BG][7] = { 547, 547, 547 },
+ [V4L2_COLORSPACE_SRGB][0] = { 3056, 3056, 3056 },
+ [V4L2_COLORSPACE_SRGB][1] = { 3056, 3056, 800 },
+ [V4L2_COLORSPACE_SRGB][2] = { 800, 3056, 3056 },
+ [V4L2_COLORSPACE_SRGB][3] = { 800, 3056, 800 },
+ [V4L2_COLORSPACE_SRGB][4] = { 3056, 800, 3056 },
+ [V4L2_COLORSPACE_SRGB][5] = { 3056, 800, 800 },
+ [V4L2_COLORSPACE_SRGB][6] = { 800, 800, 3056 },
+ [V4L2_COLORSPACE_SRGB][7] = { 800, 800, 800 },
+};
+
+#else
+
+/* This code generates the table above */
+
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+static const double rec709_to_ntsc1953[3][3] = {
+ { 0.6698, 0.2678, 0.0323 },
+ { 0.0185, 1.0742, -0.0603 },
+ { 0.0162, 0.0432, 0.8551 }
+};
+
+static const double rec709_to_ebu[3][3] = {
+ { 0.9578, 0.0422, 0 },
+ { 0 , 1 , 0 },
+ { 0 , 0.0118, 0.9882 }
+};
+
+static const double rec709_to_170m[3][3] = {
+ { 1.0654, -0.0554, -0.0010 },
+ { -0.0196, 1.0364, -0.0167 },
+ { 0.0016, 0.0044, 0.9940 }
+};
+
+static const double rec709_to_240m[3][3] = {
+ { 0.7151, 0.2849, 0 },
+ { 0.0179, 0.9821, 0 },
+ { 0.0177, 0.0472, 0.9350 }
+};
+
+
+static void mult_matrix(double *r, double *g, double *b, const double m[3][3])
+{
+ double ir, ig, ib;
+
+ ir = m[0][0] * (*r) + m[0][1] * (*g) + m[0][2] * (*b);
+ ig = m[1][0] * (*r) + m[1][1] * (*g) + m[1][2] * (*b);
+ ib = m[2][0] * (*r) + m[2][1] * (*g) + m[2][2] * (*b);
+ *r = ir;
+ *g = ig;
+ *b = ib;
+}
+
+static double transfer_srgb_to_rgb(double v)
+{
+ return (v <= 0.03928) ? v / 12.92 : pow((v + 0.055) / 1.055, 2.4);
+}
+
+static double transfer_rgb_to_smpte240m(double v)
+{
+ return (v <= 0.0228) ? v * 4.0 : 1.1115 * pow(v, 0.45) - 0.1115;
+}
+
+static double transfer_rgb_to_rec709(double v)
+{
+ return (v < 0.018) ? v * 4.5 : 1.099 * pow(v, 0.45) - 0.099;
+}
+
+static double transfer_srgb_to_rec709(double v)
+{
+ return transfer_rgb_to_rec709(transfer_srgb_to_rgb(v));
+}
+
+static void csc(enum v4l2_colorspace colorspace, double *r, double *g, double *b)
+{
+ /* Convert from the Rec. 709/sRGB primaries to the target colorspace primaries (linear RGB) */
+ switch (colorspace) {
+ case V4L2_COLORSPACE_SMPTE240M:
+ *r = transfer_srgb_to_rgb(*r);
+ *g = transfer_srgb_to_rgb(*g);
+ *b = transfer_srgb_to_rgb(*b);
+ mult_matrix(r, g, b, rec709_to_240m);
+ break;
+ case V4L2_COLORSPACE_SMPTE170M:
+ *r = transfer_srgb_to_rgb(*r);
+ *g = transfer_srgb_to_rgb(*g);
+ *b = transfer_srgb_to_rgb(*b);
+ mult_matrix(r, g, b, rec709_to_170m);
+ break;
+ case V4L2_COLORSPACE_470_SYSTEM_BG:
+ *r = transfer_srgb_to_rgb(*r);
+ *g = transfer_srgb_to_rgb(*g);
+ *b = transfer_srgb_to_rgb(*b);
+ mult_matrix(r, g, b, rec709_to_ebu);
+ break;
+ case V4L2_COLORSPACE_470_SYSTEM_M:
+ *r = transfer_srgb_to_rgb(*r);
+ *g = transfer_srgb_to_rgb(*g);
+ *b = transfer_srgb_to_rgb(*b);
+ mult_matrix(r, g, b, rec709_to_ntsc1953);
+ break;
+ case V4L2_COLORSPACE_SRGB:
+ case V4L2_COLORSPACE_REC709:
+ default:
+ break;
+ }
+
+ *r = ((*r) < 0) ? 0 : (((*r) > 1) ? 1 : (*r));
+ *g = ((*g) < 0) ? 0 : (((*g) > 1) ? 1 : (*g));
+ *b = ((*b) < 0) ? 0 : (((*b) > 1) ? 1 : (*b));
+
+ /* Encode to gamma corrected colorspace */
+ switch (colorspace) {
+ case V4L2_COLORSPACE_SMPTE240M:
+ *r = transfer_rgb_to_smpte240m(*r);
+ *g = transfer_rgb_to_smpte240m(*g);
+ *b = transfer_rgb_to_smpte240m(*b);
+ break;
+ case V4L2_COLORSPACE_SMPTE170M:
+ case V4L2_COLORSPACE_470_SYSTEM_M:
+ case V4L2_COLORSPACE_470_SYSTEM_BG:
+ *r = transfer_rgb_to_rec709(*r);
+ *g = transfer_rgb_to_rec709(*g);
+ *b = transfer_rgb_to_rec709(*b);
+ break;
+ case V4L2_COLORSPACE_SRGB:
+ break;
+ case V4L2_COLORSPACE_REC709:
+ default:
+ *r = transfer_srgb_to_rec709(*r);
+ *g = transfer_srgb_to_rec709(*g);
+ *b = transfer_srgb_to_rec709(*b);
+ break;
+ }
+}
+
+int main(int argc, char **argv)
+{
+ static const unsigned colorspaces[] = {
+ 0,
+ V4L2_COLORSPACE_SMPTE170M,
+ V4L2_COLORSPACE_SMPTE240M,
+ V4L2_COLORSPACE_REC709,
+ 0,
+ V4L2_COLORSPACE_470_SYSTEM_M,
+ V4L2_COLORSPACE_470_SYSTEM_BG,
+ 0,
+ V4L2_COLORSPACE_SRGB,
+ };
+ static const char * const colorspace_names[] = {
+ "",
+ "V4L2_COLORSPACE_SMPTE170M",
+ "V4L2_COLORSPACE_SMPTE240M",
+ "V4L2_COLORSPACE_REC709",
+ "",
+ "V4L2_COLORSPACE_470_SYSTEM_M",
+ "V4L2_COLORSPACE_470_SYSTEM_BG",
+ "",
+ "V4L2_COLORSPACE_SRGB",
+ };
+ int i;
+ int c;
+
+ printf("/* Generated table */\n");
+ printf("const struct color16 tpg_csc_colors[V4L2_COLORSPACE_SRGB + 1][TPG_COLOR_CSC_BLACK + 1] = {\n");
+ for (c = 0; c <= V4L2_COLORSPACE_SRGB; c++) {
+ for (i = 0; i <= TPG_COLOR_CSC_BLACK; i++) {
+ double r, g, b;
+
+ if (colorspaces[c] == 0)
+ continue;
+
+ r = tpg_colors[i].r / 255.0;
+ g = tpg_colors[i].g / 255.0;
+ b = tpg_colors[i].b / 255.0;
+
+ csc(c, &r, &g, &b);
+
+ printf("\t[%s][%d] = { %d, %d, %d },\n", colorspace_names[c], i,
+ (int)(r * 4080), (int)(g * 4080), (int)(b * 4080));
+ }
+ }
+ printf("};\n\n");
+ return 0;
+}
+
+#endif
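
One entry of the generated table can be recomputed with a standalone snippet that mirrors the transfer functions above (sRGB decode, Rec. 709 encode, scale by 4080); the 191/255 input is a TPG_COLOR_CSC_WHITE component. Compile with -lm.

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
            double v = 191.0 / 255.0;               /* TPG_COLOR_CSC_WHITE component */

            v = pow((v + 0.055) / 1.055, 2.4);      /* transfer_srgb_to_rgb() */
            v = 1.099 * pow(v, 0.45) - 0.099;       /* transfer_rgb_to_rec709() */
            printf("%d\n", (int)(v * 4080));        /* prints 2939, cf. [V4L2_COLORSPACE_REC709][0] */
            return 0;
    }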
diff --git a/drivers/media/platform/vivid/vivid-tpg-colors.h b/drivers/media/platform/vivid/vivid-tpg-colors.h
new file mode 100644
index 000000000000..a2678fbec256
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-tpg-colors.h
@@ -0,0 +1,64 @@
+/*
+ * vivid-tpg-colors.h - Color definitions for the test pattern generator
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _VIVID_COLORS_H_
+#define _VIVID_COLORS_H_
+
+struct color {
+ unsigned char r, g, b;
+};
+
+struct color16 {
+ int r, g, b;
+};
+
+enum tpg_color {
+ TPG_COLOR_CSC_WHITE,
+ TPG_COLOR_CSC_YELLOW,
+ TPG_COLOR_CSC_CYAN,
+ TPG_COLOR_CSC_GREEN,
+ TPG_COLOR_CSC_MAGENTA,
+ TPG_COLOR_CSC_RED,
+ TPG_COLOR_CSC_BLUE,
+ TPG_COLOR_CSC_BLACK,
+ TPG_COLOR_75_YELLOW,
+ TPG_COLOR_75_CYAN,
+ TPG_COLOR_75_GREEN,
+ TPG_COLOR_75_MAGENTA,
+ TPG_COLOR_75_RED,
+ TPG_COLOR_75_BLUE,
+ TPG_COLOR_100_WHITE,
+ TPG_COLOR_100_YELLOW,
+ TPG_COLOR_100_CYAN,
+ TPG_COLOR_100_GREEN,
+ TPG_COLOR_100_MAGENTA,
+ TPG_COLOR_100_RED,
+ TPG_COLOR_100_BLUE,
+ TPG_COLOR_100_BLACK,
+ TPG_COLOR_TEXTFG,
+ TPG_COLOR_TEXTBG,
+ TPG_COLOR_RANDOM,
+ TPG_COLOR_RAMP,
+ TPG_COLOR_MAX = TPG_COLOR_RAMP + 256
+};
+
+extern const struct color tpg_colors[TPG_COLOR_MAX];
+extern const struct color16 tpg_csc_colors[V4L2_COLORSPACE_SRGB + 1][TPG_COLOR_CSC_BLACK + 1];
+
+#endif
diff --git a/drivers/media/platform/vivid/vivid-tpg.c b/drivers/media/platform/vivid/vivid-tpg.c
new file mode 100644
index 000000000000..0c6fa53fa646
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-tpg.c
@@ -0,0 +1,1439 @@
+/*
+ * vivid-tpg.c - Test Pattern Generator
+ *
+ * Note: gen_twopix and tpg_gen_text are based on code from vivi.c. See the
+ * vivi.c source for the copyright information of those functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "vivid-tpg.h"
+
+/* Must remain in sync with enum tpg_pattern */
+const char * const tpg_pattern_strings[] = {
+ "75% Colorbar",
+ "100% Colorbar",
+ "CSC Colorbar",
+ "Horizontal 100% Colorbar",
+ "100% Color Squares",
+ "100% Black",
+ "100% White",
+ "100% Red",
+ "100% Green",
+ "100% Blue",
+ "16x16 Checkers",
+ "1x1 Checkers",
+ "Alternating Hor Lines",
+ "Alternating Vert Lines",
+ "One Pixel Wide Cross",
+ "Two Pixels Wide Cross",
+ "Ten Pixels Wide Cross",
+ "Gray Ramp",
+ "Noise",
+ NULL
+};
+
+/* Must remain in sync with enum tpg_aspect */
+const char * const tpg_aspect_strings[] = {
+ "Source Width x Height",
+ "4x3",
+ "14x9",
+ "16x9",
+ "16x9 Anamorphic",
+ NULL
+};
+
+/*
+ * Sine table: sin[0] = 127 * sin(-180 degrees)
+ * sin[128] = 127 * sin(0 degrees)
+ * sin[256] = 127 * sin(180 degrees)
+ */
+static const s8 sin[257] = {
+ 0, -4, -7, -11, -13, -18, -20, -22, -26, -29, -33, -35, -37, -41, -43, -48,
+ -50, -52, -56, -58, -62, -63, -65, -69, -71, -75, -76, -78, -82, -83, -87, -88,
+ -90, -93, -94, -97, -99, -101, -103, -104, -107, -108, -110, -111, -112, -114, -115, -117,
+ -118, -119, -120, -121, -122, -123, -123, -124, -125, -125, -126, -126, -127, -127, -127, -127,
+ -127, -127, -127, -127, -126, -126, -125, -125, -124, -124, -123, -122, -121, -120, -119, -118,
+ -117, -116, -114, -113, -111, -110, -109, -107, -105, -103, -101, -100, -97, -96, -93, -91,
+ -90, -87, -85, -82, -80, -76, -75, -73, -69, -67, -63, -62, -60, -56, -54, -50,
+ -48, -46, -41, -39, -35, -33, -31, -26, -24, -20, -18, -15, -11, -9, -4, -2,
+ 0, 2, 4, 9, 11, 15, 18, 20, 24, 26, 31, 33, 35, 39, 41, 46,
+ 48, 50, 54, 56, 60, 62, 64, 67, 69, 73, 75, 76, 80, 82, 85, 87,
+ 90, 91, 93, 96, 97, 100, 101, 103, 105, 107, 109, 110, 111, 113, 114, 116,
+ 117, 118, 119, 120, 121, 122, 123, 124, 124, 125, 125, 126, 126, 127, 127, 127,
+ 127, 127, 127, 127, 127, 126, 126, 125, 125, 124, 123, 123, 122, 121, 120, 119,
+ 118, 117, 115, 114, 112, 111, 110, 108, 107, 104, 103, 101, 99, 97, 94, 93,
+ 90, 88, 87, 83, 82, 78, 76, 75, 71, 69, 65, 64, 62, 58, 56, 52,
+ 50, 48, 43, 41, 37, 35, 33, 29, 26, 22, 20, 18, 13, 11, 7, 4,
+ 0,
+};
+
+#define cos(idx) sin[((idx) + 64) % sizeof(sin)]
+
+/* Global font descriptor */
+static const u8 *font8x16;
+
+void tpg_set_font(const u8 *f)
+{
+ font8x16 = f;
+}
+
+void tpg_init(struct tpg_data *tpg, unsigned w, unsigned h)
+{
+ memset(tpg, 0, sizeof(*tpg));
+ tpg->scaled_width = tpg->src_width = w;
+ tpg->src_height = tpg->buf_height = h;
+ tpg->crop.width = tpg->compose.width = w;
+ tpg->crop.height = tpg->compose.height = h;
+ tpg->recalc_colors = true;
+ tpg->recalc_square_border = true;
+ tpg->brightness = 128;
+ tpg->contrast = 128;
+ tpg->saturation = 128;
+ tpg->hue = 0;
+ tpg->mv_hor_mode = TPG_MOVE_NONE;
+ tpg->mv_vert_mode = TPG_MOVE_NONE;
+ tpg->field = V4L2_FIELD_NONE;
+ tpg_s_fourcc(tpg, V4L2_PIX_FMT_RGB24);
+ tpg->colorspace = V4L2_COLORSPACE_SRGB;
+ tpg->perc_fill = 100;
+}
+
+int tpg_alloc(struct tpg_data *tpg, unsigned max_w)
+{
+ unsigned pat;
+ unsigned plane;
+
+ tpg->max_line_width = max_w;
+ for (pat = 0; pat < TPG_MAX_PAT_LINES; pat++) {
+ for (plane = 0; plane < TPG_MAX_PLANES; plane++) {
+ unsigned pixelsz = plane ? 1 : 4;
+
+ tpg->lines[pat][plane] = vzalloc(max_w * 2 * pixelsz);
+ if (!tpg->lines[pat][plane])
+ return -ENOMEM;
+ }
+ }
+ for (plane = 0; plane < TPG_MAX_PLANES; plane++) {
+ unsigned pixelsz = plane ? 1 : 4;
+
+ tpg->contrast_line[plane] = vzalloc(max_w * pixelsz);
+ if (!tpg->contrast_line[plane])
+ return -ENOMEM;
+ tpg->black_line[plane] = vzalloc(max_w * pixelsz);
+ if (!tpg->black_line[plane])
+ return -ENOMEM;
+ tpg->random_line[plane] = vzalloc(max_w * pixelsz);
+ if (!tpg->random_line[plane])
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void tpg_free(struct tpg_data *tpg)
+{
+ unsigned pat;
+ unsigned plane;
+
+ for (pat = 0; pat < TPG_MAX_PAT_LINES; pat++)
+ for (plane = 0; plane < TPG_MAX_PLANES; plane++) {
+ vfree(tpg->lines[pat][plane]);
+ tpg->lines[pat][plane] = NULL;
+ }
+ for (plane = 0; plane < TPG_MAX_PLANES; plane++) {
+ vfree(tpg->contrast_line[plane]);
+ vfree(tpg->black_line[plane]);
+ vfree(tpg->random_line[plane]);
+ tpg->contrast_line[plane] = NULL;
+ tpg->black_line[plane] = NULL;
+ tpg->random_line[plane] = NULL;
+ }
+}
+
+bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
+{
+ tpg->fourcc = fourcc;
+ tpg->planes = 1;
+ tpg->recalc_colors = true;
+ switch (fourcc) {
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_RGB565X:
+ case V4L2_PIX_FMT_RGB555:
+ case V4L2_PIX_FMT_XRGB555:
+ case V4L2_PIX_FMT_ARGB555:
+ case V4L2_PIX_FMT_RGB555X:
+ case V4L2_PIX_FMT_RGB24:
+ case V4L2_PIX_FMT_BGR24:
+ case V4L2_PIX_FMT_RGB32:
+ case V4L2_PIX_FMT_BGR32:
+ case V4L2_PIX_FMT_XRGB32:
+ case V4L2_PIX_FMT_XBGR32:
+ case V4L2_PIX_FMT_ARGB32:
+ case V4L2_PIX_FMT_ABGR32:
+ tpg->is_yuv = false;
+ break;
+ case V4L2_PIX_FMT_NV16M:
+ case V4L2_PIX_FMT_NV61M:
+ tpg->planes = 2;
+ /* fall-through */
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_VYUY:
+ tpg->is_yuv = true;
+ break;
+ default:
+ return false;
+ }
+
+ switch (fourcc) {
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_RGB565X:
+ case V4L2_PIX_FMT_RGB555:
+ case V4L2_PIX_FMT_XRGB555:
+ case V4L2_PIX_FMT_ARGB555:
+ case V4L2_PIX_FMT_RGB555X:
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_VYUY:
+ tpg->twopixelsize[0] = 2 * 2;
+ break;
+ case V4L2_PIX_FMT_RGB24:
+ case V4L2_PIX_FMT_BGR24:
+ tpg->twopixelsize[0] = 2 * 3;
+ break;
+ case V4L2_PIX_FMT_RGB32:
+ case V4L2_PIX_FMT_BGR32:
+ case V4L2_PIX_FMT_XRGB32:
+ case V4L2_PIX_FMT_XBGR32:
+ case V4L2_PIX_FMT_ARGB32:
+ case V4L2_PIX_FMT_ABGR32:
+ tpg->twopixelsize[0] = 2 * 4;
+ break;
+ case V4L2_PIX_FMT_NV16M:
+ case V4L2_PIX_FMT_NV61M:
+ tpg->twopixelsize[0] = 2;
+ tpg->twopixelsize[1] = 2;
+ break;
+ }
+ return true;
+}
+
+void tpg_s_crop_compose(struct tpg_data *tpg, const struct v4l2_rect *crop,
+ const struct v4l2_rect *compose)
+{
+ tpg->crop = *crop;
+ tpg->compose = *compose;
+ tpg->scaled_width = (tpg->src_width * tpg->compose.width +
+ tpg->crop.width - 1) / tpg->crop.width;
+ tpg->scaled_width &= ~1;
+ if (tpg->scaled_width > tpg->max_line_width)
+ tpg->scaled_width = tpg->max_line_width;
+ if (tpg->scaled_width < 2)
+ tpg->scaled_width = 2;
+ tpg->recalc_lines = true;
+}
+
+void tpg_reset_source(struct tpg_data *tpg, unsigned width, unsigned height,
+ u32 field)
+{
+ unsigned p;
+
+ tpg->src_width = width;
+ tpg->src_height = height;
+ tpg->field = field;
+ tpg->buf_height = height;
+ if (V4L2_FIELD_HAS_T_OR_B(field))
+ tpg->buf_height /= 2;
+ tpg->scaled_width = width;
+ tpg->crop.top = tpg->crop.left = 0;
+ tpg->crop.width = width;
+ tpg->crop.height = height;
+ tpg->compose.top = tpg->compose.left = 0;
+ tpg->compose.width = width;
+ tpg->compose.height = tpg->buf_height;
+ for (p = 0; p < tpg->planes; p++)
+ tpg->bytesperline[p] = width * tpg->twopixelsize[p] / 2;
+ tpg->recalc_square_border = true;
+}
+
+static enum tpg_color tpg_get_textbg_color(struct tpg_data *tpg)
+{
+ switch (tpg->pattern) {
+ case TPG_PAT_BLACK:
+ return TPG_COLOR_100_WHITE;
+ case TPG_PAT_CSC_COLORBAR:
+ return TPG_COLOR_CSC_BLACK;
+ default:
+ return TPG_COLOR_100_BLACK;
+ }
+}
+
+static enum tpg_color tpg_get_textfg_color(struct tpg_data *tpg)
+{
+ switch (tpg->pattern) {
+ case TPG_PAT_75_COLORBAR:
+ case TPG_PAT_CSC_COLORBAR:
+ return TPG_COLOR_CSC_WHITE;
+ case TPG_PAT_BLACK:
+ return TPG_COLOR_100_BLACK;
+ default:
+ return TPG_COLOR_100_WHITE;
+ }
+}
+
+static u16 color_to_y(struct tpg_data *tpg, int r, int g, int b)
+{
+ switch (tpg->colorspace) {
+ case V4L2_COLORSPACE_SMPTE170M:
+ case V4L2_COLORSPACE_470_SYSTEM_M:
+ case V4L2_COLORSPACE_470_SYSTEM_BG:
+ return ((16829 * r + 33039 * g + 6416 * b + 16 * 32768) >> 16) + (16 << 4);
+ case V4L2_COLORSPACE_SMPTE240M:
+ return ((11932 * r + 39455 * g + 4897 * b + 16 * 32768) >> 16) + (16 << 4);
+ case V4L2_COLORSPACE_REC709:
+ case V4L2_COLORSPACE_SRGB:
+ default:
+ return ((11966 * r + 40254 * g + 4064 * b + 16 * 32768) >> 16) + (16 << 4);
+ }
+}
+
+static u16 color_to_cb(struct tpg_data *tpg, int r, int g, int b)
+{
+ switch (tpg->colorspace) {
+ case V4L2_COLORSPACE_SMPTE170M:
+ case V4L2_COLORSPACE_470_SYSTEM_M:
+ case V4L2_COLORSPACE_470_SYSTEM_BG:
+ return ((-9714 * r - 19070 * g + 28784 * b + 16 * 32768) >> 16) + (128 << 4);
+ case V4L2_COLORSPACE_SMPTE240M:
+ return ((-6684 * r - 22100 * g + 28784 * b + 16 * 32768) >> 16) + (128 << 4);
+ case V4L2_COLORSPACE_REC709:
+ case V4L2_COLORSPACE_SRGB:
+ default:
+ return ((-6596 * r - 22189 * g + 28784 * b + 16 * 32768) >> 16) + (128 << 4);
+ }
+}
+
+static u16 color_to_cr(struct tpg_data *tpg, int r, int g, int b)
+{
+ switch (tpg->colorspace) {
+ case V4L2_COLORSPACE_SMPTE170M:
+ case V4L2_COLORSPACE_470_SYSTEM_M:
+ case V4L2_COLORSPACE_470_SYSTEM_BG:
+ return ((28784 * r - 24103 * g - 4681 * b + 16 * 32768) >> 16) + (128 << 4);
+ case V4L2_COLORSPACE_SMPTE240M:
+ return ((28784 * r - 25606 * g - 3178 * b + 16 * 32768) >> 16) + (128 << 4);
+ case V4L2_COLORSPACE_REC709:
+ case V4L2_COLORSPACE_SRGB:
+ default:
+ return ((28784 * r - 26145 * g - 2639 * b + 16 * 32768) >> 16) + (128 << 4);
+ }
+}
+
+static u16 ycbcr_to_r(struct tpg_data *tpg, int y, int cb, int cr)
+{
+ int r;
+
+ y -= 16 << 4;
+ cb -= 128 << 4;
+ cr -= 128 << 4;
+ switch (tpg->colorspace) {
+ case V4L2_COLORSPACE_SMPTE170M:
+ case V4L2_COLORSPACE_470_SYSTEM_M:
+ case V4L2_COLORSPACE_470_SYSTEM_BG:
+ r = 4769 * y + 6537 * cr;
+ break;
+ case V4L2_COLORSPACE_SMPTE240M:
+ r = 4769 * y + 7376 * cr;
+ break;
+ case V4L2_COLORSPACE_REC709:
+ case V4L2_COLORSPACE_SRGB:
+ default:
+ r = 4769 * y + 7343 * cr;
+ break;
+ }
+ return clamp(r >> 12, 0, 0xff0);
+}
+
+static u16 ycbcr_to_g(struct tpg_data *tpg, int y, int cb, int cr)
+{
+ int g;
+
+ y -= 16 << 4;
+ cb -= 128 << 4;
+ cr -= 128 << 4;
+ switch (tpg->colorspace) {
+ case V4L2_COLORSPACE_SMPTE170M:
+ case V4L2_COLORSPACE_470_SYSTEM_M:
+ case V4L2_COLORSPACE_470_SYSTEM_BG:
+ g = 4769 * y - 1605 * cb - 3330 * cr;
+ break;
+ case V4L2_COLORSPACE_SMPTE240M:
+ g = 4769 * y - 1055 * cb - 2341 * cr;
+ break;
+ case V4L2_COLORSPACE_REC709:
+ case V4L2_COLORSPACE_SRGB:
+ default:
+ g = 4769 * y - 873 * cb - 2183 * cr;
+ break;
+ }
+ return clamp(g >> 12, 0, 0xff0);
+}
+
+static u16 ycbcr_to_b(struct tpg_data *tpg, int y, int cb, int cr)
+{
+ int b;
+
+ y -= 16 << 4;
+ cb -= 128 << 4;
+ cr -= 128 << 4;
+ switch (tpg->colorspace) {
+ case V4L2_COLORSPACE_SMPTE170M:
+ case V4L2_COLORSPACE_470_SYSTEM_M:
+ case V4L2_COLORSPACE_470_SYSTEM_BG:
+ b = 4769 * y + 7343 * cb;
+ break;
+ case V4L2_COLORSPACE_SMPTE240M:
+ b = 4769 * y + 8552 * cb;
+ break;
+ case V4L2_COLORSPACE_REC709:
+ case V4L2_COLORSPACE_SRGB:
+ default:
+ b = 4769 * y + 8652 * cb;
+ break;
+ }
+ return clamp(b >> 12, 0, 0xff0);
+}
+
+/* precalculate color bar values to speed up rendering */
+static void precalculate_color(struct tpg_data *tpg, int k)
+{
+ int col = k;
+ int r = tpg_colors[col].r;
+ int g = tpg_colors[col].g;
+ int b = tpg_colors[col].b;
+
+ if (k == TPG_COLOR_TEXTBG) {
+ col = tpg_get_textbg_color(tpg);
+
+ r = tpg_colors[col].r;
+ g = tpg_colors[col].g;
+ b = tpg_colors[col].b;
+ } else if (k == TPG_COLOR_TEXTFG) {
+ col = tpg_get_textfg_color(tpg);
+
+ r = tpg_colors[col].r;
+ g = tpg_colors[col].g;
+ b = tpg_colors[col].b;
+ } else if (tpg->pattern == TPG_PAT_NOISE) {
+ r = g = b = prandom_u32_max(256);
+ } else if (k == TPG_COLOR_RANDOM) {
+ r = g = b = tpg->qual_offset + prandom_u32_max(196);
+ } else if (k >= TPG_COLOR_RAMP) {
+ r = g = b = k - TPG_COLOR_RAMP;
+ }
+
+ if (tpg->pattern == TPG_PAT_CSC_COLORBAR && col <= TPG_COLOR_CSC_BLACK) {
+ r = tpg_csc_colors[tpg->colorspace][col].r;
+ g = tpg_csc_colors[tpg->colorspace][col].g;
+ b = tpg_csc_colors[tpg->colorspace][col].b;
+ } else {
+ r <<= 4;
+ g <<= 4;
+ b <<= 4;
+ }
+ if (tpg->qual == TPG_QUAL_GRAY)
+ r = g = b = color_to_y(tpg, r, g, b);
+
+ /*
+ * The assumption is that the RGB output is always full range,
+ * so only if the rgb_range overrides the 'real' rgb range do
+ * we need to convert the RGB values.
+ *
+ * Currently there is no way of signalling to userspace if you
+ * are actually giving it limited range RGB (or full range
+ * YUV for that matter).
+ *
+ * Remember that r, g and b are still in the 0 - 0xff0 range.
+ */
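+ /*
+ * For example, full-range white (0xff0) becomes
+ * (0xff0 * 219) / 255 + (16 << 4) = 235 << 4 in the branch below,
+ * i.e. limited-range white.
+ */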
+ if (tpg->real_rgb_range == V4L2_DV_RGB_RANGE_LIMITED &&
+ tpg->rgb_range == V4L2_DV_RGB_RANGE_FULL) {
+ /*
+ * Convert from full range (which is what r, g and b are)
+ * to limited range (which is the 'real' RGB range), which
+ * is then interpreted as full range.
+ */
+ r = (r * 219) / 255 + (16 << 4);
+ g = (g * 219) / 255 + (16 << 4);
+ b = (b * 219) / 255 + (16 << 4);
+ } else if (tpg->real_rgb_range != V4L2_DV_RGB_RANGE_LIMITED &&
+ tpg->rgb_range == V4L2_DV_RGB_RANGE_LIMITED) {
+ /*
+ * Clamp r, g and b to the limited range and convert to full
+ * range since that's what we deliver.
+ */
+ r = clamp(r, 16 << 4, 235 << 4);
+ g = clamp(g, 16 << 4, 235 << 4);
+ b = clamp(b, 16 << 4, 235 << 4);
+ r = (r - (16 << 4)) * 255 / 219;
+ g = (g - (16 << 4)) * 255 / 219;
+ b = (b - (16 << 4)) * 255 / 219;
+ }
+
+ if (tpg->brightness != 128 || tpg->contrast != 128 ||
+ tpg->saturation != 128 || tpg->hue) {
+ /* Apply the brightness, contrast, saturation and hue controls */
+
+ /* First convert to YCbCr */
+ int y = color_to_y(tpg, r, g, b); /* Luma */
+ int cb = color_to_cb(tpg, r, g, b); /* Cb */
+ int cr = color_to_cr(tpg, r, g, b); /* Cr */
+ int tmp_cb, tmp_cr;
+
+ y = (16 << 4) + ((y - (16 << 4)) * tpg->contrast) / 128;
+ y += (tpg->brightness << 4) - (128 << 4);
+
+ cb -= 128 << 4;
+ cr -= 128 << 4;
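+ /*
+ * Rotate (Cb, Cr) over the hue angle: the sin[] and cos[] lookup
+ * tables are scaled by 127, with index 128 corresponding to a hue
+ * of 0.
+ */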
+ tmp_cb = (cb * cos[128 + tpg->hue]) / 127 + (cr * sin[128 + tpg->hue]) / 127;
+ tmp_cr = (cr * cos[128 + tpg->hue]) / 127 - (cb * sin[128 + tpg->hue]) / 127;
+
+ cb = (128 << 4) + (tmp_cb * tpg->contrast * tpg->saturation) / (128 * 128);
+ cr = (128 << 4) + (tmp_cr * tpg->contrast * tpg->saturation) / (128 * 128);
+ if (tpg->is_yuv) {
+ tpg->colors[k][0] = clamp(y >> 4, 1, 254);
+ tpg->colors[k][1] = clamp(cb >> 4, 1, 254);
+ tpg->colors[k][2] = clamp(cr >> 4, 1, 254);
+ return;
+ }
+ r = ycbcr_to_r(tpg, y, cb, cr);
+ g = ycbcr_to_g(tpg, y, cb, cr);
+ b = ycbcr_to_b(tpg, y, cb, cr);
+ }
+
+ if (tpg->is_yuv) {
+ /* Convert to YCbCr */
+ u16 y = color_to_y(tpg, r, g, b); /* Luma */
+ u16 cb = color_to_cb(tpg, r, g, b); /* Cb */
+ u16 cr = color_to_cr(tpg, r, g, b); /* Cr */
+
+ tpg->colors[k][0] = clamp(y >> 4, 1, 254);
+ tpg->colors[k][1] = clamp(cb >> 4, 1, 254);
+ tpg->colors[k][2] = clamp(cr >> 4, 1, 254);
+ } else {
+ switch (tpg->fourcc) {
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_RGB565X:
+ r >>= 7;
+ g >>= 6;
+ b >>= 7;
+ break;
+ case V4L2_PIX_FMT_RGB555:
+ case V4L2_PIX_FMT_XRGB555:
+ case V4L2_PIX_FMT_ARGB555:
+ case V4L2_PIX_FMT_RGB555X:
+ r >>= 7;
+ g >>= 7;
+ b >>= 7;
+ break;
+ default:
+ r >>= 4;
+ g >>= 4;
+ b >>= 4;
+ break;
+ }
+
+ tpg->colors[k][0] = r;
+ tpg->colors[k][1] = g;
+ tpg->colors[k][2] = b;
+ }
+}
+
+static void tpg_precalculate_colors(struct tpg_data *tpg)
+{
+ int k;
+
+ for (k = 0; k < TPG_COLOR_MAX; k++)
+ precalculate_color(tpg, k);
+}
+
+/* 'odd' is true for pixels 1, 3, 5, etc. and false for pixels 0, 2, 4, etc. */
+static void gen_twopix(struct tpg_data *tpg,
+ u8 buf[TPG_MAX_PLANES][8], int color, bool odd)
+{
+ unsigned offset = odd * tpg->twopixelsize[0] / 2;
+ u8 alpha = tpg->alpha_component;
+ u8 r_y, g_u, b_v;
+
+ if (tpg->alpha_red_only && color != TPG_COLOR_CSC_RED &&
+ color != TPG_COLOR_100_RED &&
+ color != TPG_COLOR_75_RED)
+ alpha = 0;
+ if (color == TPG_COLOR_RANDOM)
+ precalculate_color(tpg, color);
+ r_y = tpg->colors[color][0]; /* R or precalculated Y */
+ g_u = tpg->colors[color][1]; /* G or precalculated U */
+ b_v = tpg->colors[color][2]; /* B or precalculated V */
+
+ switch (tpg->fourcc) {
+ case V4L2_PIX_FMT_NV16M:
+ buf[0][offset] = r_y;
+ buf[1][offset] = odd ? b_v : g_u;
+ break;
+ case V4L2_PIX_FMT_NV61M:
+ buf[0][offset] = r_y;
+ buf[1][offset] = odd ? g_u : b_v;
+ break;
+
+ case V4L2_PIX_FMT_YUYV:
+ buf[0][offset] = r_y;
+ buf[0][offset + 1] = odd ? b_v : g_u;
+ break;
+ case V4L2_PIX_FMT_UYVY:
+ buf[0][offset] = odd ? b_v : g_u;
+ buf[0][offset + 1] = r_y;
+ break;
+ case V4L2_PIX_FMT_YVYU:
+ buf[0][offset] = r_y;
+ buf[0][offset + 1] = odd ? g_u : b_v;
+ break;
+ case V4L2_PIX_FMT_VYUY:
+ buf[0][offset] = odd ? g_u : b_v;
+ buf[0][offset + 1] = r_y;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ buf[0][offset] = (g_u << 5) | b_v;
+ buf[0][offset + 1] = (r_y << 3) | (g_u >> 3);
+ break;
+ case V4L2_PIX_FMT_RGB565X:
+ buf[0][offset] = (r_y << 3) | (g_u >> 3);
+ buf[0][offset + 1] = (g_u << 5) | b_v;
+ break;
+ case V4L2_PIX_FMT_RGB555:
+ case V4L2_PIX_FMT_XRGB555:
+ alpha = 0;
+ /* fall through */
+ case V4L2_PIX_FMT_ARGB555:
+ buf[0][offset] = (g_u << 5) | b_v;
+ buf[0][offset + 1] = (alpha & 0x80) | (r_y << 2) | (g_u >> 3);
+ break;
+ case V4L2_PIX_FMT_RGB555X:
+ buf[0][offset] = (alpha & 0x80) | (r_y << 2) | (g_u >> 3);
+ buf[0][offset + 1] = (g_u << 5) | b_v;
+ break;
+ case V4L2_PIX_FMT_RGB24:
+ buf[0][offset] = r_y;
+ buf[0][offset + 1] = g_u;
+ buf[0][offset + 2] = b_v;
+ break;
+ case V4L2_PIX_FMT_BGR24:
+ buf[0][offset] = b_v;
+ buf[0][offset + 1] = g_u;
+ buf[0][offset + 2] = r_y;
+ break;
+ case V4L2_PIX_FMT_RGB32:
+ case V4L2_PIX_FMT_XRGB32:
+ alpha = 0;
+ /* fall through */
+ case V4L2_PIX_FMT_ARGB32:
+ buf[0][offset] = alpha;
+ buf[0][offset + 1] = r_y;
+ buf[0][offset + 2] = g_u;
+ buf[0][offset + 3] = b_v;
+ break;
+ case V4L2_PIX_FMT_BGR32:
+ case V4L2_PIX_FMT_XBGR32:
+ alpha = 0;
+ /* fall through */
+ case V4L2_PIX_FMT_ABGR32:
+ buf[0][offset] = b_v;
+ buf[0][offset + 1] = g_u;
+ buf[0][offset + 2] = r_y;
+ buf[0][offset + 3] = alpha;
+ break;
+ }
+}
+
+/* Return how many pattern lines are used by the current pattern. */
+static unsigned tpg_get_pat_lines(struct tpg_data *tpg)
+{
+ switch (tpg->pattern) {
+ case TPG_PAT_CHECKERS_16X16:
+ case TPG_PAT_CHECKERS_1X1:
+ case TPG_PAT_ALTERNATING_HLINES:
+ case TPG_PAT_CROSS_1_PIXEL:
+ case TPG_PAT_CROSS_2_PIXELS:
+ case TPG_PAT_CROSS_10_PIXELS:
+ return 2;
+ case TPG_PAT_100_COLORSQUARES:
+ case TPG_PAT_100_HCOLORBAR:
+ return 8;
+ default:
+ return 1;
+ }
+}
+
+/* Which pattern line should be used for the given frame line. */
+static unsigned tpg_get_pat_line(struct tpg_data *tpg, unsigned line)
+{
+ switch (tpg->pattern) {
+ case TPG_PAT_CHECKERS_16X16:
+ return (line >> 4) & 1;
+ case TPG_PAT_CHECKERS_1X1:
+ case TPG_PAT_ALTERNATING_HLINES:
+ return line & 1;
+ case TPG_PAT_100_COLORSQUARES:
+ case TPG_PAT_100_HCOLORBAR:
+ return (line * 8) / tpg->src_height;
+ case TPG_PAT_CROSS_1_PIXEL:
+ return line == tpg->src_height / 2;
+ case TPG_PAT_CROSS_2_PIXELS:
+ return (line + 1) / 2 == tpg->src_height / 4;
+ case TPG_PAT_CROSS_10_PIXELS:
+ return (line + 10) / 20 == tpg->src_height / 40;
+ default:
+ return 0;
+ }
+}
+
+/*
+ * Which color should be used for the given pattern line and X coordinate.
+ * Note: x is in the range 0 to 2 * tpg->src_width.
+ */
+static enum tpg_color tpg_get_color(struct tpg_data *tpg, unsigned pat_line, unsigned x)
+{
+ /* The maximum number of bars is TPG_COLOR_MAX - otherwise, the code that
+ selects the bar colors should be modified */
+ static const enum tpg_color bars[3][8] = {
+ /* Standard ITU-R 75% color bar sequence */
+ { TPG_COLOR_CSC_WHITE, TPG_COLOR_75_YELLOW,
+ TPG_COLOR_75_CYAN, TPG_COLOR_75_GREEN,
+ TPG_COLOR_75_MAGENTA, TPG_COLOR_75_RED,
+ TPG_COLOR_75_BLUE, TPG_COLOR_100_BLACK, },
+ /* Standard ITU-R 100% color bar sequence */
+ { TPG_COLOR_100_WHITE, TPG_COLOR_100_YELLOW,
+ TPG_COLOR_100_CYAN, TPG_COLOR_100_GREEN,
+ TPG_COLOR_100_MAGENTA, TPG_COLOR_100_RED,
+ TPG_COLOR_100_BLUE, TPG_COLOR_100_BLACK, },
+ /* Color bar sequence suitable to test CSC */
+ { TPG_COLOR_CSC_WHITE, TPG_COLOR_CSC_YELLOW,
+ TPG_COLOR_CSC_CYAN, TPG_COLOR_CSC_GREEN,
+ TPG_COLOR_CSC_MAGENTA, TPG_COLOR_CSC_RED,
+ TPG_COLOR_CSC_BLUE, TPG_COLOR_CSC_BLACK, },
+ };
+
+ switch (tpg->pattern) {
+ case TPG_PAT_75_COLORBAR:
+ case TPG_PAT_100_COLORBAR:
+ case TPG_PAT_CSC_COLORBAR:
+ return bars[tpg->pattern][((x * 8) / tpg->src_width) % 8];
+ case TPG_PAT_100_COLORSQUARES:
+ return bars[1][(pat_line + (x * 8) / tpg->src_width) % 8];
+ case TPG_PAT_100_HCOLORBAR:
+ return bars[1][pat_line];
+ case TPG_PAT_BLACK:
+ return TPG_COLOR_100_BLACK;
+ case TPG_PAT_WHITE:
+ return TPG_COLOR_100_WHITE;
+ case TPG_PAT_RED:
+ return TPG_COLOR_100_RED;
+ case TPG_PAT_GREEN:
+ return TPG_COLOR_100_GREEN;
+ case TPG_PAT_BLUE:
+ return TPG_COLOR_100_BLUE;
+ case TPG_PAT_CHECKERS_16X16:
+ return (((x >> 4) & 1) ^ (pat_line & 1)) ?
+ TPG_COLOR_100_BLACK : TPG_COLOR_100_WHITE;
+ case TPG_PAT_CHECKERS_1X1:
+ return ((x & 1) ^ (pat_line & 1)) ?
+ TPG_COLOR_100_WHITE : TPG_COLOR_100_BLACK;
+ case TPG_PAT_ALTERNATING_HLINES:
+ return pat_line ? TPG_COLOR_100_WHITE : TPG_COLOR_100_BLACK;
+ case TPG_PAT_ALTERNATING_VLINES:
+ return (x & 1) ? TPG_COLOR_100_WHITE : TPG_COLOR_100_BLACK;
+ case TPG_PAT_CROSS_1_PIXEL:
+ if (pat_line || (x % tpg->src_width) == tpg->src_width / 2)
+ return TPG_COLOR_100_BLACK;
+ return TPG_COLOR_100_WHITE;
+ case TPG_PAT_CROSS_2_PIXELS:
+ if (pat_line || ((x % tpg->src_width) + 1) / 2 == tpg->src_width / 4)
+ return TPG_COLOR_100_BLACK;
+ return TPG_COLOR_100_WHITE;
+ case TPG_PAT_CROSS_10_PIXELS:
+ if (pat_line || ((x % tpg->src_width) + 10) / 20 == tpg->src_width / 40)
+ return TPG_COLOR_100_BLACK;
+ return TPG_COLOR_100_WHITE;
+ case TPG_PAT_GRAY_RAMP:
+ return TPG_COLOR_RAMP + ((x % tpg->src_width) * 256) / tpg->src_width;
+ default:
+ return TPG_COLOR_100_RED;
+ }
+}
+
+/*
+ * Given the pixel aspect ratio and video aspect ratio calculate the
+ * coordinates of a centered square and the coordinates of the border of
+ * the active video area. The coordinates are relative to the source
+ * frame rectangle.
+ */
+static void tpg_calculate_square_border(struct tpg_data *tpg)
+{
+ unsigned w = tpg->src_width;
+ unsigned h = tpg->src_height;
+ unsigned sq_w, sq_h;
+
+ sq_w = (w * 2 / 5) & ~1;
+ if (((w - sq_w) / 2) & 1)
+ sq_w += 2;
+ sq_h = sq_w;
+ tpg->square.width = sq_w;
+ if (tpg->vid_aspect == TPG_VIDEO_ASPECT_16X9_ANAMORPHIC) {
+ unsigned ana_sq_w = (sq_w / 4) * 3;
+
+ if (((w - ana_sq_w) / 2) & 1)
+ ana_sq_w += 2;
+ tpg->square.width = ana_sq_w;
+ }
+ tpg->square.left = (w - tpg->square.width) / 2;
+ if (tpg->pix_aspect == TPG_PIXEL_ASPECT_NTSC)
+ sq_h = sq_w * 10 / 11;
+ else if (tpg->pix_aspect == TPG_PIXEL_ASPECT_PAL)
+ sq_h = sq_w * 59 / 54;
+ tpg->square.height = sq_h;
+ tpg->square.top = (h - sq_h) / 2;
+ tpg->border.left = 0;
+ tpg->border.width = w;
+ tpg->border.top = 0;
+ tpg->border.height = h;
+ switch (tpg->vid_aspect) {
+ case TPG_VIDEO_ASPECT_4X3:
+ if (tpg->pix_aspect)
+ return;
+ if (3 * w >= 4 * h) {
+ tpg->border.width = ((4 * h) / 3) & ~1;
+ if (((w - tpg->border.width) / 2) & ~1)
+ tpg->border.width -= 2;
+ tpg->border.left = (w - tpg->border.width) / 2;
+ break;
+ }
+ tpg->border.height = ((3 * w) / 4) & ~1;
+ tpg->border.top = (h - tpg->border.height) / 2;
+ break;
+ case TPG_VIDEO_ASPECT_14X9_CENTRE:
+ if (tpg->pix_aspect) {
+ tpg->border.height = tpg->pix_aspect == TPG_PIXEL_ASPECT_NTSC ? 420 : 506;
+ tpg->border.top = (h - tpg->border.height) / 2;
+ break;
+ }
+ if (9 * w >= 14 * h) {
+ tpg->border.width = ((14 * h) / 9) & ~1;
+ if (((w - tpg->border.width) / 2) & ~1)
+ tpg->border.width -= 2;
+ tpg->border.left = (w - tpg->border.width) / 2;
+ break;
+ }
+ tpg->border.height = ((9 * w) / 14) & ~1;
+ tpg->border.top = (h - tpg->border.height) / 2;
+ break;
+ case TPG_VIDEO_ASPECT_16X9_CENTRE:
+ if (tpg->pix_aspect) {
+ tpg->border.height = tpg->pix_aspect == TPG_PIXEL_ASPECT_NTSC ? 368 : 442;
+ tpg->border.top = (h - tpg->border.height) / 2;
+ break;
+ }
+ if (9 * w >= 16 * h) {
+ tpg->border.width = ((16 * h) / 9) & ~1;
+ if (((w - tpg->border.width) / 2) & ~1)
+ tpg->border.width -= 2;
+ tpg->border.left = (w - tpg->border.width) / 2;
+ break;
+ }
+ tpg->border.height = ((9 * w) / 16) & ~1;
+ tpg->border.top = (h - tpg->border.height) / 2;
+ break;
+ default:
+ break;
+ }
+}
+
+static void tpg_precalculate_line(struct tpg_data *tpg)
+{
+ enum tpg_color contrast;
+ unsigned pat;
+ unsigned p;
+ unsigned x;
+
+ switch (tpg->pattern) {
+ case TPG_PAT_GREEN:
+ contrast = TPG_COLOR_100_RED;
+ break;
+ case TPG_PAT_CSC_COLORBAR:
+ contrast = TPG_COLOR_CSC_GREEN;
+ break;
+ default:
+ contrast = TPG_COLOR_100_GREEN;
+ break;
+ }
+
+ for (pat = 0; pat < tpg_get_pat_lines(tpg); pat++) {
+ /* Coarse scaling with Bresenham */
+ unsigned int_part = tpg->src_width / tpg->scaled_width;
+ unsigned fract_part = tpg->src_width % tpg->scaled_width;
+ unsigned src_x = 0;
+ unsigned error = 0;
+
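+ /*
+ * Every generated pixel advances src_x by int_part and adds
+ * fract_part to error; whenever error reaches scaled_width one
+ * extra source pixel is skipped, spreading the source pixels
+ * evenly over the scaled line.
+ */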
+ for (x = 0; x < tpg->scaled_width * 2; x += 2) {
+ unsigned real_x = src_x;
+ enum tpg_color color1, color2;
+ u8 pix[TPG_MAX_PLANES][8];
+
+ real_x = tpg->hflip ? tpg->src_width * 2 - real_x - 2 : real_x;
+ color1 = tpg_get_color(tpg, pat, real_x);
+
+ src_x += int_part;
+ error += fract_part;
+ if (error >= tpg->scaled_width) {
+ error -= tpg->scaled_width;
+ src_x++;
+ }
+
+ real_x = src_x;
+ real_x = tpg->hflip ? tpg->src_width * 2 - real_x - 2 : real_x;
+ color2 = tpg_get_color(tpg, pat, real_x);
+
+ src_x += int_part;
+ error += fract_part;
+ if (error >= tpg->scaled_width) {
+ error -= tpg->scaled_width;
+ src_x++;
+ }
+
+ gen_twopix(tpg, pix, tpg->hflip ? color2 : color1, 0);
+ gen_twopix(tpg, pix, tpg->hflip ? color1 : color2, 1);
+ for (p = 0; p < tpg->planes; p++) {
+ unsigned twopixsize = tpg->twopixelsize[p];
+ u8 *pos = tpg->lines[pat][p] + x * twopixsize / 2;
+
+ memcpy(pos, pix[p], twopixsize);
+ }
+ }
+ }
+ for (x = 0; x < tpg->scaled_width; x += 2) {
+ u8 pix[TPG_MAX_PLANES][8];
+
+ gen_twopix(tpg, pix, contrast, 0);
+ gen_twopix(tpg, pix, contrast, 1);
+ for (p = 0; p < tpg->planes; p++) {
+ unsigned twopixsize = tpg->twopixelsize[p];
+ u8 *pos = tpg->contrast_line[p] + x * twopixsize / 2;
+
+ memcpy(pos, pix[p], twopixsize);
+ }
+ }
+ for (x = 0; x < tpg->scaled_width; x += 2) {
+ u8 pix[TPG_MAX_PLANES][8];
+
+ gen_twopix(tpg, pix, TPG_COLOR_100_BLACK, 0);
+ gen_twopix(tpg, pix, TPG_COLOR_100_BLACK, 1);
+ for (p = 0; p < tpg->planes; p++) {
+ unsigned twopixsize = tpg->twopixelsize[p];
+ u8 *pos = tpg->black_line[p] + x * twopixsize / 2;
+
+ memcpy(pos, pix[p], twopixsize);
+ }
+ }
+ for (x = 0; x < tpg->scaled_width * 2; x += 2) {
+ u8 pix[TPG_MAX_PLANES][8];
+
+ gen_twopix(tpg, pix, TPG_COLOR_RANDOM, 0);
+ gen_twopix(tpg, pix, TPG_COLOR_RANDOM, 1);
+ for (p = 0; p < tpg->planes; p++) {
+ unsigned twopixsize = tpg->twopixelsize[p];
+ u8 *pos = tpg->random_line[p] + x * twopixsize / 2;
+
+ memcpy(pos, pix[p], twopixsize);
+ }
+ }
+ gen_twopix(tpg, tpg->textbg, TPG_COLOR_TEXTBG, 0);
+ gen_twopix(tpg, tpg->textbg, TPG_COLOR_TEXTBG, 1);
+ gen_twopix(tpg, tpg->textfg, TPG_COLOR_TEXTFG, 0);
+ gen_twopix(tpg, tpg->textfg, TPG_COLOR_TEXTFG, 1);
+}
+
+/* need this to do rgb24 rendering */
+typedef struct { u16 __; u8 _; } __packed x24;
+
+void tpg_gen_text(struct tpg_data *tpg, u8 *basep[TPG_MAX_PLANES][2],
+ int y, int x, char *text)
+{
+ int line;
+ unsigned step = V4L2_FIELD_HAS_T_OR_B(tpg->field) ? 2 : 1;
+ unsigned div = step;
+ unsigned first = 0;
+ unsigned len = strlen(text);
+ unsigned p;
+
+ if (font8x16 == NULL || basep == NULL)
+ return;
+
+ /* Check if the text fits inside the compose rectangle */
+ if (y + 16 >= tpg->compose.height || x + 8 >= tpg->compose.width)
+ return;
+
+ if (len > (tpg->compose.width - x) / 8)
+ len = (tpg->compose.width - x) / 8;
+ if (tpg->vflip)
+ y = tpg->compose.height - y - 16;
+ if (tpg->hflip)
+ x = tpg->compose.width - x - 8;
+ y += tpg->compose.top;
+ x += tpg->compose.left;
+ if (tpg->field == V4L2_FIELD_BOTTOM)
+ first = 1;
+ else if (tpg->field == V4L2_FIELD_SEQ_TB || tpg->field == V4L2_FIELD_SEQ_BT)
+ div = 2;
+
+ for (p = 0; p < tpg->planes; p++) {
+ /* Render the text with the 8x16 font */
+#define PRINTSTR(PIXTYPE) do { \
+ PIXTYPE fg; \
+ PIXTYPE bg; \
+ memcpy(&fg, tpg->textfg[p], sizeof(PIXTYPE)); \
+ memcpy(&bg, tpg->textbg[p], sizeof(PIXTYPE)); \
+ \
+ for (line = first; line < 16; line += step) { \
+ int l = tpg->vflip ? 15 - line : line; \
+ PIXTYPE *pos = (PIXTYPE *)(basep[p][line & 1] + \
+ ((y * step + l) / div) * tpg->bytesperline[p] + \
+ x * sizeof(PIXTYPE)); \
+ unsigned s; \
+ \
+ for (s = 0; s < len; s++) { \
+ u8 chr = font8x16[text[s] * 16 + line]; \
+ \
+ if (tpg->hflip) { \
+ pos[7] = (chr & (0x01 << 7) ? fg : bg); \
+ pos[6] = (chr & (0x01 << 6) ? fg : bg); \
+ pos[5] = (chr & (0x01 << 5) ? fg : bg); \
+ pos[4] = (chr & (0x01 << 4) ? fg : bg); \
+ pos[3] = (chr & (0x01 << 3) ? fg : bg); \
+ pos[2] = (chr & (0x01 << 2) ? fg : bg); \
+ pos[1] = (chr & (0x01 << 1) ? fg : bg); \
+ pos[0] = (chr & (0x01 << 0) ? fg : bg); \
+ } else { \
+ pos[0] = (chr & (0x01 << 7) ? fg : bg); \
+ pos[1] = (chr & (0x01 << 6) ? fg : bg); \
+ pos[2] = (chr & (0x01 << 5) ? fg : bg); \
+ pos[3] = (chr & (0x01 << 4) ? fg : bg); \
+ pos[4] = (chr & (0x01 << 3) ? fg : bg); \
+ pos[5] = (chr & (0x01 << 2) ? fg : bg); \
+ pos[6] = (chr & (0x01 << 1) ? fg : bg); \
+ pos[7] = (chr & (0x01 << 0) ? fg : bg); \
+ } \
+ \
+ pos += tpg->hflip ? -8 : 8; \
+ } \
+ } \
+} while (0)
+
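+ /*
+ * Use a type whose size equals one pixel (half the two-pixel
+ * size) so that whole pixels can be assigned at once; x24 covers
+ * the 3-byte RGB24/BGR24 formats.
+ */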
+ switch (tpg->twopixelsize[p]) {
+ case 2:
+ PRINTSTR(u8); break;
+ case 4:
+ PRINTSTR(u16); break;
+ case 6:
+ PRINTSTR(x24); break;
+ case 8:
+ PRINTSTR(u32); break;
+ }
+ }
+}
+
+void tpg_update_mv_step(struct tpg_data *tpg)
+{
+ int factor = tpg->mv_hor_mode > TPG_MOVE_NONE ? -1 : 1;
+
+ if (tpg->hflip)
+ factor = -factor;
+ switch (tpg->mv_hor_mode) {
+ case TPG_MOVE_NEG_FAST:
+ case TPG_MOVE_POS_FAST:
+ tpg->mv_hor_step = ((tpg->src_width + 319) / 320) * 4;
+ break;
+ case TPG_MOVE_NEG:
+ case TPG_MOVE_POS:
+ tpg->mv_hor_step = ((tpg->src_width + 639) / 640) * 4;
+ break;
+ case TPG_MOVE_NEG_SLOW:
+ case TPG_MOVE_POS_SLOW:
+ tpg->mv_hor_step = 2;
+ break;
+ case TPG_MOVE_NONE:
+ tpg->mv_hor_step = 0;
+ break;
+ }
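+ /*
+ * Moving in the negative direction is implemented as a positive
+ * step of src_width - step, since the movement counter wraps
+ * modulo the source width.
+ */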
+ if (factor < 0)
+ tpg->mv_hor_step = tpg->src_width - tpg->mv_hor_step;
+
+ factor = tpg->mv_vert_mode > TPG_MOVE_NONE ? -1 : 1;
+ switch (tpg->mv_vert_mode) {
+ case TPG_MOVE_NEG_FAST:
+ case TPG_MOVE_POS_FAST:
+ tpg->mv_vert_step = ((tpg->src_width + 319) / 320) * 4;
+ break;
+ case TPG_MOVE_NEG:
+ case TPG_MOVE_POS:
+ tpg->mv_vert_step = ((tpg->src_width + 639) / 640) * 4;
+ break;
+ case TPG_MOVE_NEG_SLOW:
+ case TPG_MOVE_POS_SLOW:
+ tpg->mv_vert_step = 1;
+ break;
+ case TPG_MOVE_NONE:
+ tpg->mv_vert_step = 0;
+ break;
+ }
+ if (factor < 0)
+ tpg->mv_vert_step = tpg->src_height - tpg->mv_vert_step;
+}
+
+/* Map the line number relative to the crop rectangle to a frame line number */
+static unsigned tpg_calc_frameline(struct tpg_data *tpg, unsigned src_y,
+ unsigned field)
+{
+ switch (field) {
+ case V4L2_FIELD_TOP:
+ return tpg->crop.top + src_y * 2;
+ case V4L2_FIELD_BOTTOM:
+ return tpg->crop.top + src_y * 2 + 1;
+ default:
+ return src_y + tpg->crop.top;
+ }
+}
+
+/*
+ * Map the line number relative to the compose rectangle to a destination
+ * buffer line number.
+ */
+static unsigned tpg_calc_buffer_line(struct tpg_data *tpg, unsigned y,
+ unsigned field)
+{
+ y += tpg->compose.top;
+ switch (field) {
+ case V4L2_FIELD_SEQ_TB:
+ if (y & 1)
+ return tpg->buf_height / 2 + y / 2;
+ return y / 2;
+ case V4L2_FIELD_SEQ_BT:
+ if (y & 1)
+ return y / 2;
+ return tpg->buf_height / 2 + y / 2;
+ default:
+ return y;
+ }
+}
+
+static void tpg_recalc(struct tpg_data *tpg)
+{
+ if (tpg->recalc_colors) {
+ tpg->recalc_colors = false;
+ tpg->recalc_lines = true;
+ tpg_precalculate_colors(tpg);
+ }
+ if (tpg->recalc_square_border) {
+ tpg->recalc_square_border = false;
+ tpg_calculate_square_border(tpg);
+ }
+ if (tpg->recalc_lines) {
+ tpg->recalc_lines = false;
+ tpg_precalculate_line(tpg);
+ }
+}
+
+void tpg_calc_text_basep(struct tpg_data *tpg,
+ u8 *basep[TPG_MAX_PLANES][2], unsigned p, u8 *vbuf)
+{
+ unsigned stride = tpg->bytesperline[p];
+
+ tpg_recalc(tpg);
+
+ basep[p][0] = vbuf;
+ basep[p][1] = vbuf;
+ if (tpg->field == V4L2_FIELD_SEQ_TB)
+ basep[p][1] += tpg->buf_height * stride / 2;
+ else if (tpg->field == V4L2_FIELD_SEQ_BT)
+ basep[p][0] += tpg->buf_height * stride / 2;
+}
+
+void tpg_fillbuffer(struct tpg_data *tpg, v4l2_std_id std, unsigned p, u8 *vbuf)
+{
+ bool is_tv = std;
+ bool is_60hz = is_tv && (std & V4L2_STD_525_60);
+ unsigned mv_hor_old = tpg->mv_hor_count % tpg->src_width;
+ unsigned mv_hor_new = (tpg->mv_hor_count + tpg->mv_hor_step) % tpg->src_width;
+ unsigned mv_vert_old = tpg->mv_vert_count % tpg->src_height;
+ unsigned mv_vert_new = (tpg->mv_vert_count + tpg->mv_vert_step) % tpg->src_height;
+ unsigned wss_width;
+ unsigned f;
+ int hmax = (tpg->compose.height * tpg->perc_fill) / 100;
+ int h;
+ unsigned twopixsize = tpg->twopixelsize[p];
+ unsigned img_width = tpg->compose.width * twopixsize / 2;
+ unsigned line_offset;
+ unsigned left_pillar_width = 0;
+ unsigned right_pillar_start = img_width;
+ unsigned stride = tpg->bytesperline[p];
+ unsigned factor = V4L2_FIELD_HAS_T_OR_B(tpg->field) ? 2 : 1;
+ u8 *orig_vbuf = vbuf;
+
+ /* Coarse scaling with Bresenham */
+ unsigned int_part = (tpg->crop.height / factor) / tpg->compose.height;
+ unsigned fract_part = (tpg->crop.height / factor) % tpg->compose.height;
+ unsigned src_y = 0;
+ unsigned error = 0;
+
+ tpg_recalc(tpg);
+
+ mv_hor_old = (mv_hor_old * tpg->scaled_width / tpg->src_width) & ~1;
+ mv_hor_new = (mv_hor_new * tpg->scaled_width / tpg->src_width) & ~1;
+ wss_width = tpg->crop.left < tpg->src_width / 2 ?
+ tpg->src_width / 2 - tpg->crop.left : 0;
+ if (wss_width > tpg->crop.width)
+ wss_width = tpg->crop.width;
+ wss_width = wss_width * tpg->scaled_width / tpg->src_width;
+
+ vbuf += tpg->compose.left * twopixsize / 2;
+ line_offset = tpg->crop.left * tpg->scaled_width / tpg->src_width;
+ line_offset = (line_offset & ~1) * twopixsize / 2;
+ if (tpg->crop.left < tpg->border.left) {
+ left_pillar_width = tpg->border.left - tpg->crop.left;
+ if (left_pillar_width > tpg->crop.width)
+ left_pillar_width = tpg->crop.width;
+ left_pillar_width = (left_pillar_width * tpg->scaled_width) / tpg->src_width;
+ left_pillar_width = (left_pillar_width & ~1) * twopixsize / 2;
+ }
+ if (tpg->crop.left + tpg->crop.width > tpg->border.left + tpg->border.width) {
+ right_pillar_start = tpg->border.left + tpg->border.width - tpg->crop.left;
+ right_pillar_start = (right_pillar_start * tpg->scaled_width) / tpg->src_width;
+ right_pillar_start = (right_pillar_start & ~1) * twopixsize / 2;
+ if (right_pillar_start > img_width)
+ right_pillar_start = img_width;
+ }
+
+ f = tpg->field == (is_60hz ? V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM);
+
+ for (h = 0; h < tpg->compose.height; h++) {
+ bool even;
+ bool fill_blank = false;
+ unsigned frame_line;
+ unsigned buf_line;
+ unsigned pat_line_old;
+ unsigned pat_line_new;
+ u8 *linestart_older;
+ u8 *linestart_newer;
+ u8 *linestart_top;
+ u8 *linestart_bottom;
+
+ frame_line = tpg_calc_frameline(tpg, src_y, tpg->field);
+ even = !(frame_line & 1);
+ buf_line = tpg_calc_buffer_line(tpg, h, tpg->field);
+ src_y += int_part;
+ error += fract_part;
+ if (error >= tpg->compose.height) {
+ error -= tpg->compose.height;
+ src_y++;
+ }
+
+ if (h >= hmax) {
+ if (hmax == tpg->compose.height)
+ continue;
+ if (!tpg->perc_fill_blank)
+ continue;
+ fill_blank = true;
+ }
+
+ if (tpg->vflip)
+ frame_line = tpg->src_height - frame_line - 1;
+
+ if (fill_blank) {
+ linestart_older = tpg->contrast_line[p];
+ linestart_newer = tpg->contrast_line[p];
+ } else if (tpg->qual != TPG_QUAL_NOISE &&
+ (frame_line < tpg->border.top ||
+ frame_line >= tpg->border.top + tpg->border.height)) {
+ linestart_older = tpg->black_line[p];
+ linestart_newer = tpg->black_line[p];
+ } else if (tpg->pattern == TPG_PAT_NOISE || tpg->qual == TPG_QUAL_NOISE) {
+ linestart_older = tpg->random_line[p] +
+ twopixsize * prandom_u32_max(tpg->src_width / 2);
+ linestart_newer = tpg->random_line[p] +
+ twopixsize * prandom_u32_max(tpg->src_width / 2);
+ } else {
+ pat_line_old = tpg_get_pat_line(tpg,
+ (frame_line + mv_vert_old) % tpg->src_height);
+ pat_line_new = tpg_get_pat_line(tpg,
+ (frame_line + mv_vert_new) % tpg->src_height);
+ linestart_older = tpg->lines[pat_line_old][p] +
+ mv_hor_old * twopixsize / 2;
+ linestart_newer = tpg->lines[pat_line_new][p] +
+ mv_hor_new * twopixsize / 2;
+ linestart_older += line_offset;
+ linestart_newer += line_offset;
+ }
+ if (is_60hz) {
+ linestart_top = linestart_newer;
+ linestart_bottom = linestart_older;
+ } else {
+ linestart_top = linestart_older;
+ linestart_bottom = linestart_newer;
+ }
+
+ switch (tpg->field) {
+ case V4L2_FIELD_INTERLACED:
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_SEQ_TB:
+ case V4L2_FIELD_SEQ_BT:
+ if (even)
+ memcpy(vbuf + buf_line * stride, linestart_top, img_width);
+ else
+ memcpy(vbuf + buf_line * stride, linestart_bottom, img_width);
+ break;
+ case V4L2_FIELD_INTERLACED_BT:
+ if (even)
+ memcpy(vbuf + buf_line * stride, linestart_bottom, img_width);
+ else
+ memcpy(vbuf + buf_line * stride, linestart_top, img_width);
+ break;
+ case V4L2_FIELD_TOP:
+ memcpy(vbuf + buf_line * stride, linestart_top, img_width);
+ break;
+ case V4L2_FIELD_BOTTOM:
+ memcpy(vbuf + buf_line * stride, linestart_bottom, img_width);
+ break;
+ case V4L2_FIELD_NONE:
+ default:
+ memcpy(vbuf + buf_line * stride, linestart_older, img_width);
+ break;
+ }
+
+ if (is_tv && !is_60hz && frame_line == 0 && wss_width) {
+ /*
+ * Replace the first half of the top line of a 50 Hz frame
+ * with random data to simulate a WSS signal.
+ */
+ u8 *wss = tpg->random_line[p] +
+ twopixsize * prandom_u32_max(tpg->src_width / 2);
+
+ memcpy(vbuf + buf_line * stride, wss, wss_width * twopixsize / 2);
+ }
+ }
+
+ vbuf = orig_vbuf;
+ vbuf += tpg->compose.left * twopixsize / 2;
+ src_y = 0;
+ error = 0;
+ for (h = 0; h < tpg->compose.height; h++) {
+ unsigned frame_line = tpg_calc_frameline(tpg, src_y, tpg->field);
+ unsigned buf_line = tpg_calc_buffer_line(tpg, h, tpg->field);
+ const struct v4l2_rect *sq = &tpg->square;
+ const struct v4l2_rect *b = &tpg->border;
+ const struct v4l2_rect *c = &tpg->crop;
+
+ src_y += int_part;
+ error += fract_part;
+ if (error >= tpg->compose.height) {
+ error -= tpg->compose.height;
+ src_y++;
+ }
+
+ if (tpg->show_border && frame_line >= b->top &&
+ frame_line < b->top + b->height) {
+ unsigned bottom = b->top + b->height - 1;
+ unsigned left = left_pillar_width;
+ unsigned right = right_pillar_start;
+
+ if (frame_line == b->top || frame_line == b->top + 1 ||
+ frame_line == bottom || frame_line == bottom - 1) {
+ memcpy(vbuf + buf_line * stride + left, tpg->contrast_line[p],
+ right - left);
+ } else {
+ if (b->left >= c->left &&
+ b->left < c->left + c->width)
+ memcpy(vbuf + buf_line * stride + left,
+ tpg->contrast_line[p], twopixsize);
+ if (b->left + b->width > c->left &&
+ b->left + b->width <= c->left + c->width)
+ memcpy(vbuf + buf_line * stride + right - twopixsize,
+ tpg->contrast_line[p], twopixsize);
+ }
+ }
+ if (tpg->qual != TPG_QUAL_NOISE && frame_line >= b->top &&
+ frame_line < b->top + b->height) {
+ memcpy(vbuf + buf_line * stride, tpg->black_line[p], left_pillar_width);
+ memcpy(vbuf + buf_line * stride + right_pillar_start, tpg->black_line[p],
+ img_width - right_pillar_start);
+ }
+ if (tpg->show_square && frame_line >= sq->top &&
+ frame_line < sq->top + sq->height &&
+ sq->left < c->left + c->width &&
+ sq->left + sq->width >= c->left) {
+ unsigned left = sq->left;
+ unsigned width = sq->width;
+
+ if (c->left > left) {
+ width -= c->left - left;
+ left = c->left;
+ }
+ if (c->left + c->width < left + width)
+ width -= left + width - c->left - c->width;
+ left -= c->left;
+ left = (left * tpg->scaled_width) / tpg->src_width;
+ left = (left & ~1) * twopixsize / 2;
+ width = (width * tpg->scaled_width) / tpg->src_width;
+ width = (width & ~1) * twopixsize / 2;
+ memcpy(vbuf + buf_line * stride + left, tpg->contrast_line[p], width);
+ }
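+ /*
+ * Optionally insert SAV/EAV codes (BT.656-style embedded syncs):
+ * 0xff 0x00 0x00 followed by the XY byte containing the F, V and H
+ * flags and their protection bits.
+ */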
+ if (tpg->insert_sav) {
+ unsigned offset = (tpg->compose.width / 6) * twopixsize;
+ u8 *p = vbuf + buf_line * stride + offset;
+ unsigned vact = 0, hact = 0;
+
+ p[0] = 0xff;
+ p[1] = 0;
+ p[2] = 0;
+ p[3] = 0x80 | (f << 6) | (vact << 5) | (hact << 4) |
+ ((hact ^ vact) << 3) |
+ ((hact ^ f) << 2) |
+ ((f ^ vact) << 1) |
+ (hact ^ vact ^ f);
+ }
+ if (tpg->insert_eav) {
+ unsigned offset = (tpg->compose.width / 6) * 2 * twopixsize;
+ u8 *p = vbuf + buf_line * stride + offset;
+ unsigned vact = 0, hact = 1;
+
+ p[0] = 0xff;
+ p[1] = 0;
+ p[2] = 0;
+ p[3] = 0x80 | (f << 6) | (vact << 5) | (hact << 4) |
+ ((hact ^ vact) << 3) |
+ ((hact ^ f) << 2) |
+ ((f ^ vact) << 1) |
+ (hact ^ vact ^ f);
+ }
+ }
+}
diff --git a/drivers/media/platform/vivid/vivid-tpg.h b/drivers/media/platform/vivid/vivid-tpg.h
new file mode 100644
index 000000000000..8ef3e52ba3be
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-tpg.h
@@ -0,0 +1,439 @@
+/*
+ * vivid-tpg.h - Test Pattern Generator
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _VIVID_TPG_H_
+#define _VIVID_TPG_H_
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/videodev2.h>
+
+#include "vivid-tpg-colors.h"
+
+enum tpg_pattern {
+ TPG_PAT_75_COLORBAR,
+ TPG_PAT_100_COLORBAR,
+ TPG_PAT_CSC_COLORBAR,
+ TPG_PAT_100_HCOLORBAR,
+ TPG_PAT_100_COLORSQUARES,
+ TPG_PAT_BLACK,
+ TPG_PAT_WHITE,
+ TPG_PAT_RED,
+ TPG_PAT_GREEN,
+ TPG_PAT_BLUE,
+ TPG_PAT_CHECKERS_16X16,
+ TPG_PAT_CHECKERS_1X1,
+ TPG_PAT_ALTERNATING_HLINES,
+ TPG_PAT_ALTERNATING_VLINES,
+ TPG_PAT_CROSS_1_PIXEL,
+ TPG_PAT_CROSS_2_PIXELS,
+ TPG_PAT_CROSS_10_PIXELS,
+ TPG_PAT_GRAY_RAMP,
+
+ /* Must be the last pattern */
+ TPG_PAT_NOISE,
+};
+
+extern const char * const tpg_pattern_strings[];
+
+enum tpg_quality {
+ TPG_QUAL_COLOR,
+ TPG_QUAL_GRAY,
+ TPG_QUAL_NOISE
+};
+
+enum tpg_video_aspect {
+ TPG_VIDEO_ASPECT_IMAGE,
+ TPG_VIDEO_ASPECT_4X3,
+ TPG_VIDEO_ASPECT_14X9_CENTRE,
+ TPG_VIDEO_ASPECT_16X9_CENTRE,
+ TPG_VIDEO_ASPECT_16X9_ANAMORPHIC,
+};
+
+enum tpg_pixel_aspect {
+ TPG_PIXEL_ASPECT_SQUARE,
+ TPG_PIXEL_ASPECT_NTSC,
+ TPG_PIXEL_ASPECT_PAL,
+};
+
+enum tpg_move_mode {
+ TPG_MOVE_NEG_FAST,
+ TPG_MOVE_NEG,
+ TPG_MOVE_NEG_SLOW,
+ TPG_MOVE_NONE,
+ TPG_MOVE_POS_SLOW,
+ TPG_MOVE_POS,
+ TPG_MOVE_POS_FAST,
+};
+
+extern const char * const tpg_aspect_strings[];
+
+#define TPG_MAX_PLANES 2
+#define TPG_MAX_PAT_LINES 8
+
+struct tpg_data {
+ /* Source frame size */
+ unsigned src_width, src_height;
+ /* Buffer height */
+ unsigned buf_height;
+ /* Scaled output frame size */
+ unsigned scaled_width;
+ u32 field;
+ /* crop coordinates are frame-based */
+ struct v4l2_rect crop;
+ /* compose coordinates are format-based */
+ struct v4l2_rect compose;
+ /* border and square coordinates are frame-based */
+ struct v4l2_rect border;
+ struct v4l2_rect square;
+
+ /* Color-related fields */
+ enum tpg_quality qual;
+ unsigned qual_offset;
+ u8 alpha_component;
+ bool alpha_red_only;
+ u8 brightness;
+ u8 contrast;
+ u8 saturation;
+ s16 hue;
+ u32 fourcc;
+ bool is_yuv;
+ u32 colorspace;
+ enum tpg_video_aspect vid_aspect;
+ enum tpg_pixel_aspect pix_aspect;
+ unsigned rgb_range;
+ unsigned real_rgb_range;
+ unsigned planes;
+ /* Used to store the colors in native format, either RGB or YUV */
+ u8 colors[TPG_COLOR_MAX][3];
+ u8 textfg[TPG_MAX_PLANES][8], textbg[TPG_MAX_PLANES][8];
+ /* size in bytes for two pixels in each plane */
+ unsigned twopixelsize[TPG_MAX_PLANES];
+ unsigned bytesperline[TPG_MAX_PLANES];
+
+ /* Configuration */
+ enum tpg_pattern pattern;
+ bool hflip;
+ bool vflip;
+ unsigned perc_fill;
+ bool perc_fill_blank;
+ bool show_border;
+ bool show_square;
+ bool insert_sav;
+ bool insert_eav;
+
+ /* Test pattern movement */
+ enum tpg_move_mode mv_hor_mode;
+ int mv_hor_count;
+ int mv_hor_step;
+ enum tpg_move_mode mv_vert_mode;
+ int mv_vert_count;
+ int mv_vert_step;
+
+ bool recalc_colors;
+ bool recalc_lines;
+ bool recalc_square_border;
+
+ /* Used to store TPG_MAX_PAT_LINES lines, each with up to two planes */
+ unsigned max_line_width;
+ u8 *lines[TPG_MAX_PAT_LINES][TPG_MAX_PLANES];
+ u8 *random_line[TPG_MAX_PLANES];
+ u8 *contrast_line[TPG_MAX_PLANES];
+ u8 *black_line[TPG_MAX_PLANES];
+};
+
+void tpg_init(struct tpg_data *tpg, unsigned w, unsigned h);
+int tpg_alloc(struct tpg_data *tpg, unsigned max_w);
+void tpg_free(struct tpg_data *tpg);
+void tpg_reset_source(struct tpg_data *tpg, unsigned width, unsigned height,
+ u32 field);
+
+void tpg_set_font(const u8 *f);
+void tpg_gen_text(struct tpg_data *tpg,
+ u8 *basep[TPG_MAX_PLANES][2], int y, int x, char *text);
+void tpg_calc_text_basep(struct tpg_data *tpg,
+ u8 *basep[TPG_MAX_PLANES][2], unsigned p, u8 *vbuf);
+void tpg_fillbuffer(struct tpg_data *tpg, v4l2_std_id std, unsigned p, u8 *vbuf);
+bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc);
+void tpg_s_crop_compose(struct tpg_data *tpg, const struct v4l2_rect *crop,
+ const struct v4l2_rect *compose);
+
+static inline void tpg_s_pattern(struct tpg_data *tpg, enum tpg_pattern pattern)
+{
+ if (tpg->pattern == pattern)
+ return;
+ tpg->pattern = pattern;
+ tpg->recalc_colors = true;
+}
+
+static inline void tpg_s_quality(struct tpg_data *tpg,
+ enum tpg_quality qual, unsigned qual_offset)
+{
+ if (tpg->qual == qual && tpg->qual_offset == qual_offset)
+ return;
+ tpg->qual = qual;
+ tpg->qual_offset = qual_offset;
+ tpg->recalc_colors = true;
+}
+
+static inline enum tpg_quality tpg_g_quality(const struct tpg_data *tpg)
+{
+ return tpg->qual;
+}
+
+static inline void tpg_s_alpha_component(struct tpg_data *tpg,
+ u8 alpha_component)
+{
+ if (tpg->alpha_component == alpha_component)
+ return;
+ tpg->alpha_component = alpha_component;
+ tpg->recalc_colors = true;
+}
+
+static inline void tpg_s_alpha_mode(struct tpg_data *tpg,
+ bool red_only)
+{
+ if (tpg->alpha_red_only == red_only)
+ return;
+ tpg->alpha_red_only = red_only;
+ tpg->recalc_colors = true;
+}
+
+static inline void tpg_s_brightness(struct tpg_data *tpg,
+ u8 brightness)
+{
+ if (tpg->brightness == brightness)
+ return;
+ tpg->brightness = brightness;
+ tpg->recalc_colors = true;
+}
+
+static inline void tpg_s_contrast(struct tpg_data *tpg,
+ u8 contrast)
+{
+ if (tpg->contrast == contrast)
+ return;
+ tpg->contrast = contrast;
+ tpg->recalc_colors = true;
+}
+
+static inline void tpg_s_saturation(struct tpg_data *tpg,
+ u8 saturation)
+{
+ if (tpg->saturation == saturation)
+ return;
+ tpg->saturation = saturation;
+ tpg->recalc_colors = true;
+}
+
+static inline void tpg_s_hue(struct tpg_data *tpg,
+ s16 hue)
+{
+ if (tpg->hue == hue)
+ return;
+ tpg->hue = hue;
+ tpg->recalc_colors = true;
+}
+
+static inline void tpg_s_rgb_range(struct tpg_data *tpg,
+ unsigned rgb_range)
+{
+ if (tpg->rgb_range == rgb_range)
+ return;
+ tpg->rgb_range = rgb_range;
+ tpg->recalc_colors = true;
+}
+
+static inline void tpg_s_real_rgb_range(struct tpg_data *tpg,
+ unsigned rgb_range)
+{
+ if (tpg->real_rgb_range == rgb_range)
+ return;
+ tpg->real_rgb_range = rgb_range;
+ tpg->recalc_colors = true;
+}
+
+static inline void tpg_s_colorspace(struct tpg_data *tpg, u32 colorspace)
+{
+ if (tpg->colorspace == colorspace)
+ return;
+ tpg->colorspace = colorspace;
+ tpg->recalc_colors = true;
+}
+
+static inline u32 tpg_g_colorspace(const struct tpg_data *tpg)
+{
+ return tpg->colorspace;
+}
+
+static inline unsigned tpg_g_planes(const struct tpg_data *tpg)
+{
+ return tpg->planes;
+}
+
+static inline unsigned tpg_g_twopixelsize(const struct tpg_data *tpg, unsigned plane)
+{
+ return tpg->twopixelsize[plane];
+}
+
+static inline unsigned tpg_g_bytesperline(const struct tpg_data *tpg, unsigned plane)
+{
+ return tpg->bytesperline[plane];
+}
+
+static inline void tpg_s_bytesperline(struct tpg_data *tpg, unsigned plane, unsigned bpl)
+{
+ tpg->bytesperline[plane] = bpl;
+}
+
+static inline void tpg_s_buf_height(struct tpg_data *tpg, unsigned h)
+{
+ tpg->buf_height = h;
+}
+
+static inline void tpg_s_field(struct tpg_data *tpg, unsigned field)
+{
+ tpg->field = field;
+}
+
+static inline void tpg_s_perc_fill(struct tpg_data *tpg,
+ unsigned perc_fill)
+{
+ tpg->perc_fill = perc_fill;
+}
+
+static inline unsigned tpg_g_perc_fill(const struct tpg_data *tpg)
+{
+ return tpg->perc_fill;
+}
+
+static inline void tpg_s_perc_fill_blank(struct tpg_data *tpg,
+ bool perc_fill_blank)
+{
+ tpg->perc_fill_blank = perc_fill_blank;
+}
+
+static inline void tpg_s_video_aspect(struct tpg_data *tpg,
+ enum tpg_video_aspect vid_aspect)
+{
+ if (tpg->vid_aspect == vid_aspect)
+ return;
+ tpg->vid_aspect = vid_aspect;
+ tpg->recalc_square_border = true;
+}
+
+static inline enum tpg_video_aspect tpg_g_video_aspect(const struct tpg_data *tpg)
+{
+ return tpg->vid_aspect;
+}
+
+static inline void tpg_s_pixel_aspect(struct tpg_data *tpg,
+ enum tpg_pixel_aspect pix_aspect)
+{
+ if (tpg->pix_aspect == pix_aspect)
+ return;
+ tpg->pix_aspect = pix_aspect;
+ tpg->recalc_square_border = true;
+}
+
+static inline void tpg_s_show_border(struct tpg_data *tpg,
+ bool show_border)
+{
+ tpg->show_border = show_border;
+}
+
+static inline void tpg_s_show_square(struct tpg_data *tpg,
+ bool show_square)
+{
+ tpg->show_square = show_square;
+}
+
+static inline void tpg_s_insert_sav(struct tpg_data *tpg, bool insert_sav)
+{
+ tpg->insert_sav = insert_sav;
+}
+
+static inline void tpg_s_insert_eav(struct tpg_data *tpg, bool insert_eav)
+{
+ tpg->insert_eav = insert_eav;
+}
+
+void tpg_update_mv_step(struct tpg_data *tpg);
+
+static inline void tpg_s_mv_hor_mode(struct tpg_data *tpg,
+ enum tpg_move_mode mv_hor_mode)
+{
+ tpg->mv_hor_mode = mv_hor_mode;
+ tpg_update_mv_step(tpg);
+}
+
+static inline void tpg_s_mv_vert_mode(struct tpg_data *tpg,
+ enum tpg_move_mode mv_vert_mode)
+{
+ tpg->mv_vert_mode = mv_vert_mode;
+ tpg_update_mv_step(tpg);
+}
+
+static inline void tpg_init_mv_count(struct tpg_data *tpg)
+{
+ tpg->mv_hor_count = tpg->mv_vert_count = 0;
+}
+
+static inline void tpg_update_mv_count(struct tpg_data *tpg, bool frame_is_field)
+{
+ tpg->mv_hor_count += tpg->mv_hor_step * (frame_is_field ? 1 : 2);
+ tpg->mv_vert_count += tpg->mv_vert_step * (frame_is_field ? 1 : 2);
+}
+
+static inline void tpg_s_hflip(struct tpg_data *tpg, bool hflip)
+{
+ if (tpg->hflip == hflip)
+ return;
+ tpg->hflip = hflip;
+ tpg_update_mv_step(tpg);
+ tpg->recalc_lines = true;
+}
+
+static inline bool tpg_g_hflip(const struct tpg_data *tpg)
+{
+ return tpg->hflip;
+}
+
+static inline void tpg_s_vflip(struct tpg_data *tpg, bool vflip)
+{
+ tpg->vflip = vflip;
+}
+
+static inline bool tpg_g_vflip(const struct tpg_data *tpg)
+{
+ return tpg->vflip;
+}
+
+static inline bool tpg_pattern_is_static(const struct tpg_data *tpg)
+{
+ return tpg->pattern != TPG_PAT_NOISE &&
+ tpg->mv_hor_mode == TPG_MOVE_NONE &&
+ tpg->mv_vert_mode == TPG_MOVE_NONE;
+}
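+
+/*
+ * Minimal usage sketch, assuming a 1280x720 YUYV destination buffer in
+ * vbuf (error handling and the crop/compose, colorspace and aspect
+ * configuration done by the vivid driver are omitted; std is 0 in
+ * tpg_fillbuffer() since no SDTV standard is involved):
+ *
+ *   struct tpg_data tpg;
+ *
+ *   tpg_init(&tpg, 1280, 720);
+ *   if (tpg_alloc(&tpg, 1920))
+ *           return -ENOMEM;
+ *   tpg_s_fourcc(&tpg, V4L2_PIX_FMT_YUYV);
+ *   tpg_reset_source(&tpg, 1280, 720, V4L2_FIELD_NONE);
+ *   tpg_s_bytesperline(&tpg, 0, 1280 * 2);
+ *   tpg_s_buf_height(&tpg, 720);
+ *   tpg_s_pattern(&tpg, TPG_PAT_75_COLORBAR);
+ *   tpg_fillbuffer(&tpg, 0, 0, vbuf);
+ *   tpg_free(&tpg);
+ */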
+
+#endif
diff --git a/drivers/media/platform/vivid/vivid-vbi-cap.c b/drivers/media/platform/vivid/vivid-vbi-cap.c
new file mode 100644
index 000000000000..2166d0bf6fe2
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-vbi-cap.c
@@ -0,0 +1,371 @@
+/*
+ * vivid-vbi-cap.c - vbi capture support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+
+#include "vivid-core.h"
+#include "vivid-kthread-cap.h"
+#include "vivid-vbi-cap.h"
+#include "vivid-vbi-gen.h"
+
+static void vivid_sliced_vbi_cap_fill(struct vivid_dev *dev, unsigned seqnr)
+{
+ struct vivid_vbi_gen_data *vbi_gen = &dev->vbi_gen;
+ bool is_60hz = dev->std_cap & V4L2_STD_525_60;
+
+ vivid_vbi_gen_sliced(vbi_gen, is_60hz, seqnr);
+
+ if (!is_60hz) {
+ if (dev->loop_video) {
+ if (dev->vbi_out_have_wss) {
+ vbi_gen->data[12].data[0] = dev->vbi_out_wss[0];
+ vbi_gen->data[12].data[1] = dev->vbi_out_wss[1];
+ } else {
+ vbi_gen->data[12].id = 0;
+ }
+ } else {
+ switch (tpg_g_video_aspect(&dev->tpg)) {
+ case TPG_VIDEO_ASPECT_14X9_CENTRE:
+ vbi_gen->data[12].data[0] = 0x01;
+ break;
+ case TPG_VIDEO_ASPECT_16X9_CENTRE:
+ vbi_gen->data[12].data[0] = 0x0b;
+ break;
+ case TPG_VIDEO_ASPECT_16X9_ANAMORPHIC:
+ vbi_gen->data[12].data[0] = 0x07;
+ break;
+ case TPG_VIDEO_ASPECT_4X3:
+ default:
+ vbi_gen->data[12].data[0] = 0x08;
+ break;
+ }
+ }
+ } else if (dev->loop_video && is_60hz) {
+ if (dev->vbi_out_have_cc[0]) {
+ vbi_gen->data[0].data[0] = dev->vbi_out_cc[0][0];
+ vbi_gen->data[0].data[1] = dev->vbi_out_cc[0][1];
+ } else {
+ vbi_gen->data[0].id = 0;
+ }
+ if (dev->vbi_out_have_cc[1]) {
+ vbi_gen->data[1].data[0] = dev->vbi_out_cc[1][0];
+ vbi_gen->data[1].data[1] = dev->vbi_out_cc[1][1];
+ } else {
+ vbi_gen->data[1].id = 0;
+ }
+ }
+}
+
+static void vivid_g_fmt_vbi_cap(struct vivid_dev *dev, struct v4l2_vbi_format *vbi)
+{
+ bool is_60hz = dev->std_cap & V4L2_STD_525_60;
+
+ vbi->sampling_rate = 27000000;
+ vbi->offset = 24;
+ vbi->samples_per_line = 1440;
+ vbi->sample_format = V4L2_PIX_FMT_GREY;
+ vbi->start[0] = is_60hz ? V4L2_VBI_ITU_525_F1_START + 9 : V4L2_VBI_ITU_625_F1_START + 5;
+ vbi->start[1] = is_60hz ? V4L2_VBI_ITU_525_F2_START + 9 : V4L2_VBI_ITU_625_F2_START + 5;
+ vbi->count[0] = vbi->count[1] = is_60hz ? 12 : 18;
+ vbi->flags = dev->vbi_cap_interlaced ? V4L2_VBI_INTERLACED : 0;
+ vbi->reserved[0] = 0;
+ vbi->reserved[1] = 0;
+}
+
+void vivid_raw_vbi_cap_process(struct vivid_dev *dev, struct vivid_buffer *buf)
+{
+ struct v4l2_vbi_format vbi;
+ u8 *vbuf = vb2_plane_vaddr(&buf->vb, 0);
+
+ vivid_g_fmt_vbi_cap(dev, &vbi);
+ buf->vb.v4l2_buf.sequence = dev->vbi_cap_seq_count;
+ if (dev->field_cap == V4L2_FIELD_ALTERNATE)
+ buf->vb.v4l2_buf.sequence /= 2;
+
+ vivid_sliced_vbi_cap_fill(dev, buf->vb.v4l2_buf.sequence);
+
+ memset(vbuf, 0x10, vb2_plane_size(&buf->vb, 0));
+
+ if (!VIVID_INVALID_SIGNAL(dev->std_signal_mode))
+ vivid_vbi_gen_raw(&dev->vbi_gen, &vbi, vbuf);
+
+ v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
+ buf->vb.v4l2_buf.timestamp.tv_sec += dev->time_wrap_offset;
+}
+
+
+void vivid_sliced_vbi_cap_process(struct vivid_dev *dev, struct vivid_buffer *buf)
+{
+ struct v4l2_sliced_vbi_data *vbuf = vb2_plane_vaddr(&buf->vb, 0);
+
+ buf->vb.v4l2_buf.sequence = dev->vbi_cap_seq_count;
+ if (dev->field_cap == V4L2_FIELD_ALTERNATE)
+ buf->vb.v4l2_buf.sequence /= 2;
+
+ vivid_sliced_vbi_cap_fill(dev, buf->vb.v4l2_buf.sequence);
+
+ memset(vbuf, 0, vb2_plane_size(&buf->vb, 0));
+ if (!VIVID_INVALID_SIGNAL(dev->std_signal_mode)) {
+ unsigned i;
+
+ for (i = 0; i < 25; i++)
+ vbuf[i] = dev->vbi_gen.data[i];
+ }
+
+ v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
+ buf->vb.v4l2_buf.timestamp.tv_sec += dev->time_wrap_offset;
+}
+
+static int vbi_cap_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+ unsigned *nbuffers, unsigned *nplanes,
+ unsigned sizes[], void *alloc_ctxs[])
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+ bool is_60hz = dev->std_cap & V4L2_STD_525_60;
+ unsigned size = vq->type == V4L2_BUF_TYPE_SLICED_VBI_CAPTURE ?
+ 36 * sizeof(struct v4l2_sliced_vbi_data) :
+ 1440 * 2 * (is_60hz ? 12 : 18);
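+ /*
+ * That is: 36 lines of sliced VBI data, or two fields of 12 (60 Hz)
+ * or 18 (50 Hz) raw VBI lines of 1440 bytes each.
+ */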
+
+ if (!vivid_is_sdtv_cap(dev))
+ return -EINVAL;
+
+ sizes[0] = size;
+
+ if (vq->num_buffers + *nbuffers < 2)
+ *nbuffers = 2 - vq->num_buffers;
+
+ *nplanes = 1;
+ return 0;
+}
+
+static int vbi_cap_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ bool is_60hz = dev->std_cap & V4L2_STD_525_60;
+ unsigned size = vb->vb2_queue->type == V4L2_BUF_TYPE_SLICED_VBI_CAPTURE ?
+ 36 * sizeof(struct v4l2_sliced_vbi_data) :
+ 1440 * 2 * (is_60hz ? 12 : 18);
+
+ dprintk(dev, 1, "%s\n", __func__);
+
+ if (dev->buf_prepare_error) {
+ /*
+ * Error injection: test what happens if buf_prepare() returns
+ * an error.
+ */
+ dev->buf_prepare_error = false;
+ return -EINVAL;
+ }
+ if (vb2_plane_size(vb, 0) < size) {
+ dprintk(dev, 1, "%s data will not fit into plane (%lu < %u)\n",
+ __func__, vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+ vb2_set_plane_payload(vb, 0, size);
+
+ return 0;
+}
+
+static void vbi_cap_buf_queue(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct vivid_buffer *buf = container_of(vb, struct vivid_buffer, vb);
+
+ dprintk(dev, 1, "%s\n", __func__);
+
+ spin_lock(&dev->slock);
+ list_add_tail(&buf->list, &dev->vbi_cap_active);
+ spin_unlock(&dev->slock);
+}
+
+static int vbi_cap_start_streaming(struct vb2_queue *vq, unsigned count)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+ int err;
+
+ dprintk(dev, 1, "%s\n", __func__);
+ dev->vbi_cap_seq_count = 0;
+ if (dev->start_streaming_error) {
+ dev->start_streaming_error = false;
+ err = -EINVAL;
+ } else {
+ err = vivid_start_generating_vid_cap(dev, &dev->vbi_cap_streaming);
+ }
+ if (err) {
+ struct vivid_buffer *buf, *tmp;
+
+ list_for_each_entry_safe(buf, tmp, &dev->vbi_cap_active, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+ }
+ }
+ return err;
+}
+
+/* abort streaming and wait for last buffer */
+static void vbi_cap_stop_streaming(struct vb2_queue *vq)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+
+ dprintk(dev, 1, "%s\n", __func__);
+ vivid_stop_generating_vid_cap(dev, &dev->vbi_cap_streaming);
+}
+
+const struct vb2_ops vivid_vbi_cap_qops = {
+ .queue_setup = vbi_cap_queue_setup,
+ .buf_prepare = vbi_cap_buf_prepare,
+ .buf_queue = vbi_cap_buf_queue,
+ .start_streaming = vbi_cap_start_streaming,
+ .stop_streaming = vbi_cap_stop_streaming,
+ .wait_prepare = vivid_unlock,
+ .wait_finish = vivid_lock,
+};
+
+int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_vbi_format *vbi = &f->fmt.vbi;
+
+ if (!vivid_is_sdtv_cap(dev) || !dev->has_raw_vbi_cap)
+ return -EINVAL;
+
+ vivid_g_fmt_vbi_cap(dev, vbi);
+ return 0;
+}
+
+int vidioc_s_fmt_vbi_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ int ret = vidioc_g_fmt_vbi_cap(file, priv, f);
+
+ if (ret)
+ return ret;
+ if (dev->stream_sliced_vbi_cap && vb2_is_busy(&dev->vb_vbi_cap_q))
+ return -EBUSY;
+ dev->stream_sliced_vbi_cap = false;
+ dev->vbi_cap_dev.queue->type = V4L2_BUF_TYPE_VBI_CAPTURE;
+ return 0;
+}
+
+void vivid_fill_service_lines(struct v4l2_sliced_vbi_format *vbi, u32 service_set)
+{
+ vbi->io_size = sizeof(struct v4l2_sliced_vbi_data) * 36;
+ vbi->service_set = service_set;
+ memset(vbi->service_lines, 0, sizeof(vbi->service_lines));
+ memset(vbi->reserved, 0, sizeof(vbi->reserved));
+
+ if (vbi->service_set == 0)
+ return;
+
+ if (vbi->service_set & V4L2_SLICED_CAPTION_525) {
+ vbi->service_lines[0][21] = V4L2_SLICED_CAPTION_525;
+ vbi->service_lines[1][21] = V4L2_SLICED_CAPTION_525;
+ }
+ if (vbi->service_set & V4L2_SLICED_WSS_625) {
+ unsigned i;
+
+ for (i = 7; i <= 18; i++)
+ vbi->service_lines[0][i] =
+ vbi->service_lines[1][i] = V4L2_SLICED_TELETEXT_B;
+ vbi->service_lines[0][23] = V4L2_SLICED_WSS_625;
+ }
+}
+
+int vidioc_g_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_sliced_vbi_format *vbi = &fmt->fmt.sliced;
+
+ if (!vivid_is_sdtv_cap(dev) || !dev->has_sliced_vbi_cap)
+ return -EINVAL;
+
+ vivid_fill_service_lines(vbi, dev->service_set_cap);
+ return 0;
+}
+
+int vidioc_try_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_sliced_vbi_format *vbi = &fmt->fmt.sliced;
+ bool is_60hz = dev->std_cap & V4L2_STD_525_60;
+ u32 service_set = vbi->service_set;
+
+ if (!vivid_is_sdtv_cap(dev) || !dev->has_sliced_vbi_cap)
+ return -EINVAL;
+
+ service_set &= is_60hz ? V4L2_SLICED_CAPTION_525 :
+ V4L2_SLICED_WSS_625 | V4L2_SLICED_TELETEXT_B;
+ vivid_fill_service_lines(vbi, service_set);
+ return 0;
+}
+
+int vidioc_s_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_sliced_vbi_format *vbi = &fmt->fmt.sliced;
+ int ret = vidioc_try_fmt_sliced_vbi_cap(file, fh, fmt);
+
+ if (ret)
+ return ret;
+ if (!dev->stream_sliced_vbi_cap && vb2_is_busy(&dev->vb_vbi_cap_q))
+ return -EBUSY;
+ dev->service_set_cap = vbi->service_set;
+ dev->stream_sliced_vbi_cap = true;
+ dev->vbi_cap_dev.queue->type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
+ return 0;
+}
+
+int vidioc_g_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_sliced_vbi_cap *cap)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct video_device *vdev = video_devdata(file);
+ bool is_60hz;
+
+ if (vdev->vfl_dir == VFL_DIR_RX) {
+ is_60hz = dev->std_cap & V4L2_STD_525_60;
+ if (!vivid_is_sdtv_cap(dev) || !dev->has_sliced_vbi_cap ||
+ cap->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
+ return -EINVAL;
+ } else {
+ is_60hz = dev->std_out & V4L2_STD_525_60;
+ if (!vivid_is_svid_out(dev) || !dev->has_sliced_vbi_out ||
+ cap->type != V4L2_BUF_TYPE_SLICED_VBI_OUTPUT)
+ return -EINVAL;
+ }
+
+ cap->service_set = is_60hz ? V4L2_SLICED_CAPTION_525 :
+ V4L2_SLICED_WSS_625 | V4L2_SLICED_TELETEXT_B;
+ if (is_60hz) {
+ cap->service_lines[0][21] = V4L2_SLICED_CAPTION_525;
+ cap->service_lines[1][21] = V4L2_SLICED_CAPTION_525;
+ } else {
+ unsigned i;
+
+ for (i = 7; i <= 18; i++)
+ cap->service_lines[0][i] =
+ cap->service_lines[1][i] = V4L2_SLICED_TELETEXT_B;
+ cap->service_lines[0][23] = V4L2_SLICED_WSS_625;
+ }
+ return 0;
+}
diff --git a/drivers/media/platform/vivid/vivid-vbi-cap.h b/drivers/media/platform/vivid/vivid-vbi-cap.h
new file mode 100644
index 000000000000..2d8ea0bac743
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-vbi-cap.h
@@ -0,0 +1,40 @@
+/*
+ * vivid-vbi-cap.h - vbi capture support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _VIVID_VBI_CAP_H_
+#define _VIVID_VBI_CAP_H_
+
+void vivid_fill_time_of_day_packet(u8 *packet);
+void vivid_raw_vbi_cap_process(struct vivid_dev *dev, struct vivid_buffer *buf);
+void vivid_sliced_vbi_cap_process(struct vivid_dev *dev, struct vivid_buffer *buf);
+void vivid_sliced_vbi_out_process(struct vivid_dev *dev, struct vivid_buffer *buf);
+int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
+ struct v4l2_format *f);
+int vidioc_s_fmt_vbi_cap(struct file *file, void *priv,
+ struct v4l2_format *f);
+int vidioc_g_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt);
+int vidioc_try_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt);
+int vidioc_s_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt);
+int vidioc_g_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_sliced_vbi_cap *cap);
+
+void vivid_fill_service_lines(struct v4l2_sliced_vbi_format *vbi, u32 service_set);
+
+extern const struct vb2_ops vivid_vbi_cap_qops;
+
+#endif
diff --git a/drivers/media/platform/vivid/vivid-vbi-gen.c b/drivers/media/platform/vivid/vivid-vbi-gen.c
new file mode 100644
index 000000000000..a2159de83d0b
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-vbi-gen.c
@@ -0,0 +1,323 @@
+/*
+ * vivid-vbi-gen.c - vbi generator support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/ktime.h>
+#include <linux/string.h>
+#include <linux/videodev2.h>
+
+#include "vivid-vbi-gen.h"
+
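+/*
+ * Write 'size' bits of 'val', most significant bit first, as high (0xc0)
+ * or low (0x10) sample values.
+ */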
+static void wss_insert(u8 *wss, u32 val, unsigned size)
+{
+ while (size--)
+ *wss++ = (val & (1 << size)) ? 0xc0 : 0x10;
+}
+
+static void vivid_vbi_gen_wss_raw(const struct v4l2_sliced_vbi_data *data,
+ u8 *buf, unsigned sampling_rate)
+{
+ const unsigned rate = 5000000; /* WSS has a 5 MHz transmission rate */
+ u8 wss[29 + 24 + 24 + 24 + 18 + 18] = { 0 };
+ const unsigned zero = 0x07;
+ const unsigned one = 0x38;
+ unsigned bit = 0;
+ u16 wss_data;
+ int i;
+
+ wss_insert(wss + bit, 0x1f1c71c7, 29); bit += 29;
+ wss_insert(wss + bit, 0x1e3c1f, 24); bit += 24;
+
+ wss_data = (data->data[1] << 8) | data->data[0];
+ for (i = 0; i <= 13; i++, bit += 6)
+ wss_insert(wss + bit, (wss_data & (1 << i)) ? one : zero, 6);
+
+ for (i = 0, bit = 0; bit < sizeof(wss); bit++) {
+ unsigned n = ((bit + 1) * sampling_rate) / rate;
+
+ while (i < n)
+ buf[i++] = wss[bit];
+ }
+}
+
+static void vivid_vbi_gen_teletext_raw(const struct v4l2_sliced_vbi_data *data,
+ u8 *buf, unsigned sampling_rate)
+{
+ const unsigned rate = 6937500 / 10; /* Teletext has a 6.9375 MHz transmission rate */
+ u8 teletext[45] = { 0x55, 0x55, 0x27 };
+ unsigned bit = 0;
+ int i;
+
+ memcpy(teletext + 3, data->data, sizeof(teletext) - 3);
+ /* prevents 32 bit overflow */
+ sampling_rate /= 10;
+
+ for (i = 0, bit = 0; bit < sizeof(teletext) * 8; bit++) {
+ unsigned n = ((bit + 1) * sampling_rate) / rate;
+ u8 val = (teletext[bit / 8] & (1 << (bit & 7))) ? 0xc0 : 0x10;
+
+ while (i < n)
+ buf[i++] = val;
+ }
+}
+
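+/*
+ * Encode one closed caption byte: seven data bits, least significant bit
+ * first, each bit taking two cycles, followed by an odd parity bit (also
+ * two cycles).
+ */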
+static void cc_insert(u8 *cc, u8 ch)
+{
+ unsigned tot = 0;
+ unsigned i;
+
+ for (i = 0; i < 7; i++) {
+ cc[2 * i] = cc[2 * i + 1] = (ch & (1 << i)) ? 1 : 0;
+ tot += cc[2 * i];
+ }
+ cc[14] = cc[15] = !(tot & 1);
+}
+
+#define CC_PREAMBLE_BITS (14 + 4 + 2)
+
+static void vivid_vbi_gen_cc_raw(const struct v4l2_sliced_vbi_data *data,
+ u8 *buf, unsigned sampling_rate)
+{
+ const unsigned rate = 1000000; /* CC has a 1 MHz transmission rate */
+
+ u8 cc[CC_PREAMBLE_BITS + 2 * 16] = {
+ /* Clock run-in: 7 cycles */
+ 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,
+ /* 2 cycles of 0 */
+ 0, 0, 0, 0,
+ /* Start bit of 1 (each bit is two cycles) */
+ 1, 1
+ };
+ unsigned bit, i;
+
+ cc_insert(cc + CC_PREAMBLE_BITS, data->data[0]);
+ cc_insert(cc + CC_PREAMBLE_BITS + 16, data->data[1]);
+
+ for (i = 0, bit = 0; bit < sizeof(cc); bit++) {
+ unsigned n = ((bit + 1) * sampling_rate) / rate;
+
+ while (i < n)
+ buf[i++] = cc[bit] ? 0xc0 : 0x10;
+ }
+}
+
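+/*
+ * Render the sliced data in vbi->data[] into a raw VBI buffer. The
+ * destination line is derived from the sliced line/field and the raw
+ * format's start[], count[] and V4L2_VBI_INTERLACED flag; the CC, WSS
+ * or teletext waveform is then written as V4L2_PIX_FMT_GREY samples at
+ * vbi_fmt->sampling_rate.
+ */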
+void vivid_vbi_gen_raw(const struct vivid_vbi_gen_data *vbi,
+ const struct v4l2_vbi_format *vbi_fmt, u8 *buf)
+{
+ unsigned idx;
+
+ for (idx = 0; idx < 25; idx++) {
+ const struct v4l2_sliced_vbi_data *data = vbi->data + idx;
+ unsigned start_2nd_field;
+ unsigned line = data->line;
+ u8 *linebuf = buf;
+
+ start_2nd_field = (data->id & V4L2_SLICED_VBI_525) ? 263 : 313;
+ if (data->field)
+ line += start_2nd_field;
+ line -= vbi_fmt->start[data->field];
+
+ if (vbi_fmt->flags & V4L2_VBI_INTERLACED)
+ linebuf += (line * 2 + data->field) *
+ vbi_fmt->samples_per_line;
+ else
+ linebuf += (line + data->field * vbi_fmt->count[0]) *
+ vbi_fmt->samples_per_line;
+ if (data->id == V4L2_SLICED_CAPTION_525)
+ vivid_vbi_gen_cc_raw(data, linebuf, vbi_fmt->sampling_rate);
+ else if (data->id == V4L2_SLICED_WSS_625)
+ vivid_vbi_gen_wss_raw(data, linebuf, vbi_fmt->sampling_rate);
+ else if (data->id == V4L2_SLICED_TELETEXT_B)
+ vivid_vbi_gen_teletext_raw(data, linebuf, vbi_fmt->sampling_rate);
+ }
+}
+
+static const u8 vivid_cc_sequence1[30] = {
+ 0x14, 0x20, /* Resume Caption Loading */
+ 'H', 'e',
+ 'l', 'l',
+ 'o', ' ',
+ 'w', 'o',
+ 'r', 'l',
+ 'd', '!',
+ 0x14, 0x2f, /* End of Caption */
+};
+
+static const u8 vivid_cc_sequence2[30] = {
+ 0x14, 0x20, /* Resume Caption Loading */
+ 'C', 'l',
+ 'o', 's',
+ 'e', 'd',
+ ' ', 'c',
+ 'a', 'p',
+ 't', 'i',
+ 'o', 'n',
+ 's', ' ',
+ 't', 'e',
+ 's', 't',
+ 0x14, 0x2f, /* End of Caption */
+};
+
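+/* Return val with its parity bit (bit 7) set so that the byte has odd parity. */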
+static u8 calc_parity(u8 val)
+{
+ unsigned i;
+ unsigned tot = 0;
+
+ for (i = 0; i < 7; i++)
+ tot += (val & (1 << i)) ? 1 : 0;
+ return val | ((tot & 1) ? 0 : 0x80);
+}
+
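+/*
+ * Fill the 16 byte time_of_day_packet with two XDS-style packets: Time
+ * of Day (class 0x07, type 0x01) followed by Local Time Zone (0x07,
+ * 0x04). Each packet ends with 0x0f and a checksum byte chosen so that
+ * its 7-bit sum is zero, and every byte is given odd parity via
+ * calc_parity().
+ */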
+static void vivid_vbi_gen_set_time_of_day(u8 *packet)
+{
+ struct tm tm;
+ u8 checksum, i;
+
+ time_to_tm(get_seconds(), 0, &tm);
+ packet[0] = calc_parity(0x07);
+ packet[1] = calc_parity(0x01);
+ packet[2] = calc_parity(0x40 | tm.tm_min);
+ packet[3] = calc_parity(0x40 | tm.tm_hour);
+ packet[4] = calc_parity(0x40 | tm.tm_mday);
+ if (tm.tm_mday == 1 && tm.tm_mon == 2 &&
+ sys_tz.tz_minuteswest > tm.tm_min + tm.tm_hour * 60)
+ packet[4] = calc_parity(0x60 | tm.tm_mday);
+ packet[5] = calc_parity(0x40 | (1 + tm.tm_mon));
+ packet[6] = calc_parity(0x40 | (1 + tm.tm_wday));
+ packet[7] = calc_parity(0x40 | ((tm.tm_year - 90) & 0x3f));
+ packet[8] = calc_parity(0x0f);
+ for (checksum = i = 0; i <= 8; i++)
+ checksum += packet[i] & 0x7f;
+ packet[9] = calc_parity(0x100 - checksum);
+ checksum = 0;
+ packet[10] = calc_parity(0x07);
+ packet[11] = calc_parity(0x04);
+ if (sys_tz.tz_minuteswest >= 0)
+ packet[12] = calc_parity(0x40 | ((sys_tz.tz_minuteswest / 60) & 0x1f));
+ else
+ packet[12] = calc_parity(0x40 | ((24 + sys_tz.tz_minuteswest / 60) & 0x1f));
+ packet[13] = calc_parity(0);
+ packet[14] = calc_parity(0x0f);
+ for (checksum = 0, i = 10; i <= 14; i++)
+ checksum += packet[i] & 0x7f;
+ packet[15] = calc_parity(0x100 - checksum);
+}
+
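+/*
+ * Teletext Hamming 8/4 code table: the index is the 4-bit data nibble,
+ * the value is the protected byte that is transmitted.
+ */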
+static const u8 hamming[16] = {
+ 0x15, 0x02, 0x49, 0x5e, 0x64, 0x73, 0x38, 0x2f,
+ 0xd0, 0xc7, 0x8c, 0x9b, 0xa1, 0xb6, 0xfd, 0xea
+};
+
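+/*
+ * Build one 42 byte teletext B packet: two Hamming 8/4 coded address
+ * bytes (magazine and packet number) followed by 40 payload bytes.
+ * Packet 0 (the page header) carries eight extra Hamming coded
+ * page/subcode digits before the visible text. The payload text is
+ * odd-parity protected via calc_parity().
+ */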
+static void vivid_vbi_gen_teletext(u8 *packet, unsigned line, unsigned frame)
+{
+ unsigned offset = 2;
+ unsigned i;
+
+ packet[0] = hamming[1 + ((line & 1) << 3)];
+ packet[1] = hamming[line >> 1];
+ memset(packet + 2, 0x20, 40);
+ if (line == 0) {
+ /* subcode */
+ packet[2] = hamming[frame % 10];
+ packet[3] = hamming[frame / 10];
+ packet[4] = hamming[0];
+ packet[5] = hamming[0];
+ packet[6] = hamming[0];
+ packet[7] = hamming[0];
+ packet[8] = hamming[0];
+ packet[9] = hamming[1];
+ offset = 10;
+ }
+ packet += offset;
+ memcpy(packet, "Page: 100 Row: 10", 17);
+ packet[7] = '0' + frame / 10;
+ packet[8] = '0' + frame % 10;
+ packet[15] = '0' + line / 10;
+ packet[16] = '0' + line % 10;
+ for (i = 0; i < 42 - offset; i++)
+ packet[i] = calc_parity(packet[i]);
+}
+
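+/*
+ * Fill vbi->data[] for one frame. For 50 Hz standards this is teletext
+ * on lines 7-18 of both fields plus a WSS packet (4:3 aspect) on line
+ * 23 of the first field. For 60 Hz standards it is closed captions on
+ * line 21 of both fields: the first field cycles through the two demo
+ * caption sequences above, while the second field carries the
+ * time-of-day packet during the first 8 frames of every 30 * 60 frame
+ * period and parity-padded zero bytes otherwise.
+ */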
+void vivid_vbi_gen_sliced(struct vivid_vbi_gen_data *vbi,
+ bool is_60hz, unsigned seqnr)
+{
+ struct v4l2_sliced_vbi_data *data0 = vbi->data;
+ struct v4l2_sliced_vbi_data *data1 = vbi->data + 1;
+ unsigned frame = seqnr % 60;
+
+ memset(vbi->data, 0, sizeof(vbi->data));
+
+ if (!is_60hz) {
+ unsigned i;
+
+ for (i = 0; i <= 11; i++) {
+ data0->id = V4L2_SLICED_TELETEXT_B;
+ data0->line = 7 + i;
+ vivid_vbi_gen_teletext(data0->data, i, frame);
+ data0++;
+ }
+ data0->id = V4L2_SLICED_WSS_625;
+ data0->line = 23;
+ /* 4x3 video aspect ratio */
+ data0->data[0] = 0x08;
+ data0++;
+ for (i = 0; i <= 11; i++) {
+ data0->id = V4L2_SLICED_TELETEXT_B;
+ data0->field = 1;
+ data0->line = 7 + i;
+ vivid_vbi_gen_teletext(data0->data, 12 + i, frame);
+ data0++;
+ }
+ return;
+ }
+
+ data0->id = V4L2_SLICED_CAPTION_525;
+ data0->line = 21;
+ data1->id = V4L2_SLICED_CAPTION_525;
+ data1->field = 1;
+ data1->line = 21;
+
+ if (frame < 15) {
+ data0->data[0] = calc_parity(vivid_cc_sequence1[2 * frame]);
+ data0->data[1] = calc_parity(vivid_cc_sequence1[2 * frame + 1]);
+ } else if (frame >= 30 && frame < 45) {
+ frame -= 30;
+ data0->data[0] = calc_parity(vivid_cc_sequence2[2 * frame]);
+ data0->data[1] = calc_parity(vivid_cc_sequence2[2 * frame + 1]);
+ } else {
+ data0->data[0] = calc_parity(0);
+ data0->data[1] = calc_parity(0);
+ }
+
+ frame = seqnr % (30 * 60);
+ switch (frame) {
+ case 0:
+ vivid_vbi_gen_set_time_of_day(vbi->time_of_day_packet);
+ /* fall through */
+ case 1 ... 7:
+ data1->data[0] = vbi->time_of_day_packet[frame * 2];
+ data1->data[1] = vbi->time_of_day_packet[frame * 2 + 1];
+ break;
+ default:
+ data1->data[0] = calc_parity(0);
+ data1->data[1] = calc_parity(0);
+ break;
+ }
+}
diff --git a/drivers/media/platform/vivid/vivid-vbi-gen.h b/drivers/media/platform/vivid/vivid-vbi-gen.h
new file mode 100644
index 000000000000..8444abe905ea
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-vbi-gen.h
@@ -0,0 +1,33 @@
+/*
+ * vivid-vbi-gen.h - vbi generator support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _VIVID_VBI_GEN_H_
+#define _VIVID_VBI_GEN_H_
+
+struct vivid_vbi_gen_data {
+ struct v4l2_sliced_vbi_data data[25];
+ u8 time_of_day_packet[16];
+};
+
+void vivid_vbi_gen_sliced(struct vivid_vbi_gen_data *vbi,
+ bool is_60hz, unsigned seqnr);
+void vivid_vbi_gen_raw(const struct vivid_vbi_gen_data *vbi,
+ const struct v4l2_vbi_format *vbi_fmt, u8 *buf);
+
+#endif
diff --git a/drivers/media/platform/vivid/vivid-vbi-out.c b/drivers/media/platform/vivid/vivid-vbi-out.c
new file mode 100644
index 000000000000..9d00a07ecdcd
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-vbi-out.c
@@ -0,0 +1,248 @@
+/*
+ * vivid-vbi-out.c - vbi output support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+
+#include "vivid-core.h"
+#include "vivid-kthread-out.h"
+#include "vivid-vbi-out.h"
+#include "vivid-vbi-cap.h"
+
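+/*
+ * A sliced VBI output buffer holds up to 36 struct v4l2_sliced_vbi_data
+ * entries (two fields of 18 lines), a raw VBI output buffer holds 1440
+ * greyscale samples per line for 2 x 12 (60 Hz) or 2 x 18 (50 Hz)
+ * lines, matching what vidioc_g_fmt_vbi_out() reports.
+ */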
+static int vbi_out_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+ unsigned *nbuffers, unsigned *nplanes,
+ unsigned sizes[], void *alloc_ctxs[])
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+ bool is_60hz = dev->std_out & V4L2_STD_525_60;
+ unsigned size = vq->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT ?
+ 36 * sizeof(struct v4l2_sliced_vbi_data) :
+ 1440 * 2 * (is_60hz ? 12 : 18);
+
+ if (!vivid_is_svid_out(dev))
+ return -EINVAL;
+
+ sizes[0] = size;
+
+ if (vq->num_buffers + *nbuffers < 2)
+ *nbuffers = 2 - vq->num_buffers;
+
+ *nplanes = 1;
+ return 0;
+}
+
+static int vbi_out_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ bool is_60hz = dev->std_out & V4L2_STD_525_60;
+ unsigned size = vb->vb2_queue->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT ?
+ 36 * sizeof(struct v4l2_sliced_vbi_data) :
+ 1440 * 2 * (is_60hz ? 12 : 18);
+
+ dprintk(dev, 1, "%s\n", __func__);
+
+ if (dev->buf_prepare_error) {
+ /*
+ * Error injection: test what happens if buf_prepare() returns
+ * an error.
+ */
+ dev->buf_prepare_error = false;
+ return -EINVAL;
+ }
+ if (vb2_plane_size(vb, 0) < size) {
+ dprintk(dev, 1, "%s data will not fit into plane (%lu < %u)\n",
+ __func__, vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+ vb2_set_plane_payload(vb, 0, size);
+
+ return 0;
+}
+
+static void vbi_out_buf_queue(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct vivid_buffer *buf = container_of(vb, struct vivid_buffer, vb);
+
+ dprintk(dev, 1, "%s\n", __func__);
+
+ spin_lock(&dev->slock);
+ list_add_tail(&buf->list, &dev->vbi_out_active);
+ spin_unlock(&dev->slock);
+}
+
+static int vbi_out_start_streaming(struct vb2_queue *vq, unsigned count)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+ int err;
+
+ dprintk(dev, 1, "%s\n", __func__);
+ dev->vbi_out_seq_count = 0;
+ if (dev->start_streaming_error) {
+ dev->start_streaming_error = false;
+ err = -EINVAL;
+ } else {
+ err = vivid_start_generating_vid_out(dev, &dev->vbi_out_streaming);
+ }
+ if (err) {
+ struct vivid_buffer *buf, *tmp;
+
+ list_for_each_entry_safe(buf, tmp, &dev->vbi_out_active, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+ }
+ }
+ return err;
+}
+
+/* abort streaming and wait for last buffer */
+static void vbi_out_stop_streaming(struct vb2_queue *vq)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+
+ dprintk(dev, 1, "%s\n", __func__);
+ vivid_stop_generating_vid_out(dev, &dev->vbi_out_streaming);
+ dev->vbi_out_have_wss = false;
+ dev->vbi_out_have_cc[0] = false;
+ dev->vbi_out_have_cc[1] = false;
+}
+
+const struct vb2_ops vivid_vbi_out_qops = {
+ .queue_setup = vbi_out_queue_setup,
+ .buf_prepare = vbi_out_buf_prepare,
+ .buf_queue = vbi_out_buf_queue,
+ .start_streaming = vbi_out_start_streaming,
+ .stop_streaming = vbi_out_stop_streaming,
+ .wait_prepare = vivid_unlock,
+ .wait_finish = vivid_lock,
+};
+
+int vidioc_g_fmt_vbi_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_vbi_format *vbi = &f->fmt.vbi;
+ bool is_60hz = dev->std_out & V4L2_STD_525_60;
+
+ if (!vivid_is_svid_out(dev) || !dev->has_raw_vbi_out)
+ return -EINVAL;
+
+ vbi->sampling_rate = 25000000;
+ vbi->offset = 24;
+ vbi->samples_per_line = 1440;
+ vbi->sample_format = V4L2_PIX_FMT_GREY;
+ vbi->start[0] = is_60hz ? V4L2_VBI_ITU_525_F1_START + 9 : V4L2_VBI_ITU_625_F1_START + 5;
+ vbi->start[1] = is_60hz ? V4L2_VBI_ITU_525_F2_START + 9 : V4L2_VBI_ITU_625_F2_START + 5;
+ vbi->count[0] = vbi->count[1] = is_60hz ? 12 : 18;
+ vbi->flags = dev->vbi_cap_interlaced ? V4L2_VBI_INTERLACED : 0;
+ vbi->reserved[0] = 0;
+ vbi->reserved[1] = 0;
+ return 0;
+}
+
+int vidioc_s_fmt_vbi_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ int ret = vidioc_g_fmt_vbi_out(file, priv, f);
+
+ if (ret)
+ return ret;
+ if (vb2_is_busy(&dev->vb_vbi_out_q))
+ return -EBUSY;
+ dev->stream_sliced_vbi_out = false;
+ dev->vbi_out_dev.queue->type = V4L2_BUF_TYPE_VBI_OUTPUT;
+ return 0;
+}
+
+int vidioc_g_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_sliced_vbi_format *vbi = &fmt->fmt.sliced;
+
+ if (!vivid_is_svid_out(dev) || !dev->has_sliced_vbi_out)
+ return -EINVAL;
+
+ vivid_fill_service_lines(vbi, dev->service_set_out);
+ return 0;
+}
+
+int vidioc_try_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_sliced_vbi_format *vbi = &fmt->fmt.sliced;
+ bool is_60hz = dev->std_out & V4L2_STD_525_60;
+ u32 service_set = vbi->service_set;
+
+ if (!vivid_is_svid_out(dev) || !dev->has_sliced_vbi_out)
+ return -EINVAL;
+
+ service_set &= is_60hz ? V4L2_SLICED_CAPTION_525 :
+ V4L2_SLICED_WSS_625 | V4L2_SLICED_TELETEXT_B;
+ vivid_fill_service_lines(vbi, service_set);
+ return 0;
+}
+
+int vidioc_s_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_sliced_vbi_format *vbi = &fmt->fmt.sliced;
+ int ret = vidioc_try_fmt_sliced_vbi_out(file, fh, fmt);
+
+ if (ret)
+ return ret;
+ if (vb2_is_busy(&dev->vb_vbi_out_q))
+ return -EBUSY;
+ dev->service_set_out = vbi->service_set;
+ dev->stream_sliced_vbi_out = true;
+ dev->vbi_out_dev.queue->type = V4L2_BUF_TYPE_SLICED_VBI_OUTPUT;
+ return 0;
+}
+
+void vivid_sliced_vbi_out_process(struct vivid_dev *dev, struct vivid_buffer *buf)
+{
+ struct v4l2_sliced_vbi_data *vbi = vb2_plane_vaddr(&buf->vb, 0);
+ unsigned elems = vb2_get_plane_payload(&buf->vb, 0) / sizeof(*vbi);
+
+ dev->vbi_out_have_cc[0] = false;
+ dev->vbi_out_have_cc[1] = false;
+ dev->vbi_out_have_wss = false;
+ while (elems--) {
+ switch (vbi->id) {
+ case V4L2_SLICED_CAPTION_525:
+ if ((dev->std_out & V4L2_STD_525_60) && vbi->line == 21) {
+ dev->vbi_out_have_cc[!!vbi->field] = true;
+ dev->vbi_out_cc[!!vbi->field][0] = vbi->data[0];
+ dev->vbi_out_cc[!!vbi->field][1] = vbi->data[1];
+ }
+ break;
+ case V4L2_SLICED_WSS_625:
+ if ((dev->std_out & V4L2_STD_625_50) &&
+ vbi->field == 0 && vbi->line == 23) {
+ dev->vbi_out_have_wss = true;
+ dev->vbi_out_wss[0] = vbi->data[0];
+ dev->vbi_out_wss[1] = vbi->data[1];
+ }
+ break;
+ }
+ vbi++;
+ }
+}
diff --git a/drivers/media/platform/vivid/vivid-vbi-out.h b/drivers/media/platform/vivid/vivid-vbi-out.h
new file mode 100644
index 000000000000..6555ba9d2860
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-vbi-out.h
@@ -0,0 +1,34 @@
+/*
+ * vivid-vbi-out.h - vbi output support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _VIVID_VBI_OUT_H_
+#define _VIVID_VBI_OUT_H_
+
+void vivid_sliced_vbi_out_process(struct vivid_dev *dev, struct vivid_buffer *buf);
+int vidioc_g_fmt_vbi_out(struct file *file, void *priv,
+ struct v4l2_format *f);
+int vidioc_s_fmt_vbi_out(struct file *file, void *priv,
+ struct v4l2_format *f);
+int vidioc_g_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt);
+int vidioc_try_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt);
+int vidioc_s_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt);
+
+extern const struct vb2_ops vivid_vbi_out_qops;
+
+#endif
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
new file mode 100644
index 000000000000..331c54429b40
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -0,0 +1,1730 @@
+/*
+ * vivid-vid-cap.c - video capture support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+#include <linux/videodev2.h>
+#include <linux/v4l2-dv-timings.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-dv-timings.h>
+
+#include "vivid-core.h"
+#include "vivid-vid-common.h"
+#include "vivid-kthread-cap.h"
+#include "vivid-vid-cap.h"
+
+/* timeperframe: min/max and default */
+static const struct v4l2_fract
+ tpf_min = {.numerator = 1, .denominator = FPS_MAX},
+ tpf_max = {.numerator = FPS_MAX, .denominator = 1},
+ tpf_default = {.numerator = 1, .denominator = 30};
+
+static const struct vivid_fmt formats_ovl[] = {
+ {
+ .name = "RGB565 (LE)",
+ .fourcc = V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */
+ .depth = 16,
+ .planes = 1,
+ },
+ {
+ .name = "XRGB555 (LE)",
+ .fourcc = V4L2_PIX_FMT_XRGB555, /* gggbbbbb arrrrrgg */
+ .depth = 16,
+ .planes = 1,
+ },
+ {
+ .name = "ARGB555 (LE)",
+ .fourcc = V4L2_PIX_FMT_ARGB555, /* gggbbbbb arrrrrgg */
+ .depth = 16,
+ .planes = 1,
+ },
+};
+
+/* The number of discrete webcam framesizes */
+#define VIVID_WEBCAM_SIZES 3
+/* The number of discrete webcam frameintervals */
+#define VIVID_WEBCAM_IVALS (VIVID_WEBCAM_SIZES * 2)
+
+/* Sizes must be in increasing order */
+static const struct v4l2_frmsize_discrete webcam_sizes[VIVID_WEBCAM_SIZES] = {
+ { 320, 180 },
+ { 640, 360 },
+ { 1280, 720 },
+};
+
+/*
+ * Intervals must be in increasing order and there must be twice as many
+ * elements in this array as there are in webcam_sizes.
+ */
+static const struct v4l2_fract webcam_intervals[VIVID_WEBCAM_IVALS] = {
+ { 1, 10 },
+ { 1, 15 },
+ { 1, 25 },
+ { 1, 30 },
+ { 1, 50 },
+ { 1, 60 },
+};
+
+static const struct v4l2_discrete_probe webcam_probe = {
+ webcam_sizes,
+ VIVID_WEBCAM_SIZES
+};
+
+static int vid_cap_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+ unsigned *nbuffers, unsigned *nplanes,
+ unsigned sizes[], void *alloc_ctxs[])
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+ unsigned planes = tpg_g_planes(&dev->tpg);
+ unsigned h = dev->fmt_cap_rect.height;
+ unsigned p;
+
+ if (dev->field_cap == V4L2_FIELD_ALTERNATE) {
+ /*
+ * You cannot use read() with FIELD_ALTERNATE since the field
+ * information (TOP/BOTTOM) cannot be passed back to the user.
+ */
+ if (vb2_fileio_is_active(vq))
+ return -EINVAL;
+ }
+
+ if (dev->queue_setup_error) {
+ /*
+ * Error injection: test what happens if queue_setup() returns
+ * an error.
+ */
+ dev->queue_setup_error = false;
+ return -EINVAL;
+ }
+ if (fmt) {
+ const struct v4l2_pix_format_mplane *mp;
+ struct v4l2_format mp_fmt;
+ const struct vivid_fmt *vfmt;
+
+ if (!V4L2_TYPE_IS_MULTIPLANAR(fmt->type)) {
+ fmt_sp2mp(fmt, &mp_fmt);
+ fmt = &mp_fmt;
+ }
+ mp = &fmt->fmt.pix_mp;
+ /*
+ * Check if the number of planes in the specified format matches
+ * the number of planes in the current format. You can't mix them.
+ */
+ if (mp->num_planes != planes)
+ return -EINVAL;
+ vfmt = vivid_get_format(dev, mp->pixelformat);
+ for (p = 0; p < planes; p++) {
+ sizes[p] = mp->plane_fmt[p].sizeimage;
+ if (sizes[0] < tpg_g_bytesperline(&dev->tpg, 0) * h +
+ vfmt->data_offset[p])
+ return -EINVAL;
+ }
+ } else {
+ for (p = 0; p < planes; p++)
+ sizes[p] = tpg_g_bytesperline(&dev->tpg, p) * h +
+ dev->fmt_cap->data_offset[p];
+ }
+
+ if (vq->num_buffers + *nbuffers < 2)
+ *nbuffers = 2 - vq->num_buffers;
+
+ *nplanes = planes;
+
+ /*
+ * The videobuf2-vmalloc allocator is context-less, so there is no
+ * need to set the alloc_ctxs array.
+ */
+
+ if (planes == 2)
+ dprintk(dev, 1, "%s, count=%d, sizes=%u, %u\n", __func__,
+ *nbuffers, sizes[0], sizes[1]);
+ else
+ dprintk(dev, 1, "%s, count=%d, size=%u\n", __func__,
+ *nbuffers, sizes[0]);
+
+ return 0;
+}
+
+static int vid_cap_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long size;
+ unsigned planes = tpg_g_planes(&dev->tpg);
+ unsigned p;
+
+ dprintk(dev, 1, "%s\n", __func__);
+
+ if (WARN_ON(NULL == dev->fmt_cap))
+ return -EINVAL;
+
+ if (dev->buf_prepare_error) {
+ /*
+ * Error injection: test what happens if buf_prepare() returns
+ * an error.
+ */
+ dev->buf_prepare_error = false;
+ return -EINVAL;
+ }
+ for (p = 0; p < planes; p++) {
+ size = tpg_g_bytesperline(&dev->tpg, p) * dev->fmt_cap_rect.height +
+ dev->fmt_cap->data_offset[p];
+
+ if (vb2_plane_size(vb, p) < size) {
+ dprintk(dev, 1, "%s data will not fit into plane %u (%lu < %lu)\n",
+ __func__, p, vb2_plane_size(vb, p), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, p, size);
+ vb->v4l2_planes[p].data_offset = dev->fmt_cap->data_offset[p];
+ }
+
+ return 0;
+}
+
+static void vid_cap_buf_finish(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct v4l2_timecode *tc = &vb->v4l2_buf.timecode;
+ unsigned fps = 25;
+ unsigned seq = vb->v4l2_buf.sequence;
+
+ if (!vivid_is_sdtv_cap(dev))
+ return;
+
+ /*
+ * Set the timecode. Rarely used, so it is interesting to
+ * test this.
+ */
+ vb->v4l2_buf.flags |= V4L2_BUF_FLAG_TIMECODE;
+ if (dev->std_cap & V4L2_STD_525_60)
+ fps = 30;
+ tc->type = (fps == 30) ? V4L2_TC_TYPE_30FPS : V4L2_TC_TYPE_25FPS;
+ tc->flags = 0;
+ tc->frames = seq % fps;
+ tc->seconds = (seq / fps) % 60;
+ tc->minutes = (seq / (60 * fps)) % 60;
+ tc->hours = (seq / (60 * 60 * fps)) % 24;
+}
+
+static void vid_cap_buf_queue(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct vivid_buffer *buf = container_of(vb, struct vivid_buffer, vb);
+
+ dprintk(dev, 1, "%s\n", __func__);
+
+ spin_lock(&dev->slock);
+ list_add_tail(&buf->list, &dev->vid_cap_active);
+ spin_unlock(&dev->slock);
+}
+
+static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+ unsigned i;
+ int err;
+
+ if (vb2_is_streaming(&dev->vb_vid_out_q))
+ dev->can_loop_video = vivid_vid_can_loop(dev);
+
+ if (dev->kthread_vid_cap)
+ return 0;
+
+ dev->vid_cap_seq_count = 0;
+ dprintk(dev, 1, "%s\n", __func__);
+ for (i = 0; i < VIDEO_MAX_FRAME; i++)
+ dev->must_blank[i] = tpg_g_perc_fill(&dev->tpg) < 100;
+ if (dev->start_streaming_error) {
+ dev->start_streaming_error = false;
+ err = -EINVAL;
+ } else {
+ err = vivid_start_generating_vid_cap(dev, &dev->vid_cap_streaming);
+ }
+ if (err) {
+ struct vivid_buffer *buf, *tmp;
+
+ list_for_each_entry_safe(buf, tmp, &dev->vid_cap_active, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+ }
+ }
+ return err;
+}
+
+/* abort streaming and wait for last buffer */
+static void vid_cap_stop_streaming(struct vb2_queue *vq)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+
+ dprintk(dev, 1, "%s\n", __func__);
+ vivid_stop_generating_vid_cap(dev, &dev->vid_cap_streaming);
+ dev->can_loop_video = false;
+}
+
+const struct vb2_ops vivid_vid_cap_qops = {
+ .queue_setup = vid_cap_queue_setup,
+ .buf_prepare = vid_cap_buf_prepare,
+ .buf_finish = vid_cap_buf_finish,
+ .buf_queue = vid_cap_buf_queue,
+ .start_streaming = vid_cap_start_streaming,
+ .stop_streaming = vid_cap_stop_streaming,
+ .wait_prepare = vivid_unlock,
+ .wait_finish = vivid_lock,
+};
+
+/*
+ * Determine the 'picture' quality based on the current TV frequency: either
+ * COLOR for a good signal, GRAY (grayscale picture) for a slightly off
+ * signal, or NOISE for no signal.
+ */
+void vivid_update_quality(struct vivid_dev *dev)
+{
+ unsigned freq_modulus;
+
+ if (dev->loop_video && (vivid_is_svid_cap(dev) || vivid_is_hdmi_cap(dev))) {
+ /*
+ * The 'noise' will only be replaced by the actual video
+ * if the output video matches the input video settings.
+ */
+ tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
+ return;
+ }
+ if (vivid_is_hdmi_cap(dev) && VIVID_INVALID_SIGNAL(dev->dv_timings_signal_mode)) {
+ tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
+ return;
+ }
+ if (vivid_is_sdtv_cap(dev) && VIVID_INVALID_SIGNAL(dev->std_signal_mode)) {
+ tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
+ return;
+ }
+ if (!vivid_is_tv_cap(dev)) {
+ tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0);
+ return;
+ }
+
+ /*
+ * There is a fake channel every 6 MHz at 49.25, 55.25, etc.
+ * Within +/- 0.25 MHz of the channel there is color, and up to
+ * +/- 1 MHz there is grayscale (chroma is lost).
+ * Everywhere else it is just noise.
+ */
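+ /*
+ * Example: at 49.25 MHz tv_freq is 49.25 * 16 = 788, so freq_modulus
+ * is (788 - 676) % 96 = 16, the centre of the 12..20 color window
+ * checked below.
+ */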
+ freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16);
+ if (freq_modulus > 2 * 16) {
+ tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE,
+ next_pseudo_random32(dev->tv_freq ^ 0x55) & 0x3f);
+ return;
+ }
+ if (freq_modulus < 12 /*0.75 * 16*/ || freq_modulus > 20 /*1.25 * 16*/)
+ tpg_s_quality(&dev->tpg, TPG_QUAL_GRAY, 0);
+ else
+ tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0);
+}
+
+/*
+ * Get the current picture quality and the associated afc value.
+ */
+static enum tpg_quality vivid_get_quality(struct vivid_dev *dev, s32 *afc)
+{
+ unsigned freq_modulus;
+
+ if (afc)
+ *afc = 0;
+ if (tpg_g_quality(&dev->tpg) == TPG_QUAL_COLOR ||
+ tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE)
+ return tpg_g_quality(&dev->tpg);
+
+ /*
+ * There is a fake channel every 6 MHz at 49.25, 55.25, etc.
+ * Within +/- 0.25 MHz of the channel there is color, and up to
+ * +/- 1 MHz there is grayscale (chroma is lost).
+ * Everywhere else it is just gray.
+ */
+ freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16);
+ if (afc)
+ *afc = freq_modulus - 1 * 16;
+ return TPG_QUAL_GRAY;
+}
+
+enum tpg_video_aspect vivid_get_video_aspect(const struct vivid_dev *dev)
+{
+ if (vivid_is_sdtv_cap(dev))
+ return dev->std_aspect_ratio;
+
+ if (vivid_is_hdmi_cap(dev))
+ return dev->dv_timings_aspect_ratio;
+
+ return TPG_VIDEO_ASPECT_IMAGE;
+}
+
+static enum tpg_pixel_aspect vivid_get_pixel_aspect(const struct vivid_dev *dev)
+{
+ if (vivid_is_sdtv_cap(dev))
+ return (dev->std_cap & V4L2_STD_525_60) ?
+ TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL;
+
+ if (vivid_is_hdmi_cap(dev) &&
+ dev->src_rect.width == 720 && dev->src_rect.height <= 576)
+ return dev->src_rect.height == 480 ?
+ TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL;
+
+ return TPG_PIXEL_ASPECT_SQUARE;
+}
+
+/*
+ * Called whenever the format has to be reset which can occur when
+ * changing inputs, standard, timings, etc.
+ */
+void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls)
+{
+ struct v4l2_bt_timings *bt = &dev->dv_timings_cap.bt;
+ unsigned size;
+
+ switch (dev->input_type[dev->input]) {
+ case WEBCAM:
+ default:
+ dev->src_rect.width = webcam_sizes[dev->webcam_size_idx].width;
+ dev->src_rect.height = webcam_sizes[dev->webcam_size_idx].height;
+ dev->timeperframe_vid_cap = webcam_intervals[dev->webcam_ival_idx];
+ dev->field_cap = V4L2_FIELD_NONE;
+ tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO);
+ break;
+ case TV:
+ case SVID:
+ dev->field_cap = dev->tv_field_cap;
+ dev->src_rect.width = 720;
+ if (dev->std_cap & V4L2_STD_525_60) {
+ dev->src_rect.height = 480;
+ dev->timeperframe_vid_cap = (struct v4l2_fract) { 1001, 30000 };
+ dev->service_set_cap = V4L2_SLICED_CAPTION_525;
+ } else {
+ dev->src_rect.height = 576;
+ dev->timeperframe_vid_cap = (struct v4l2_fract) { 1000, 25000 };
+ dev->service_set_cap = V4L2_SLICED_WSS_625 | V4L2_SLICED_TELETEXT_B;
+ }
+ tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO);
+ break;
+ case HDMI:
+ dev->src_rect.width = bt->width;
+ dev->src_rect.height = bt->height;
+ size = V4L2_DV_BT_FRAME_WIDTH(bt) * V4L2_DV_BT_FRAME_HEIGHT(bt);
+ dev->timeperframe_vid_cap = (struct v4l2_fract) {
+ size / 100, (u32)bt->pixelclock / 100
+ };
+ if (bt->interlaced)
+ dev->field_cap = V4L2_FIELD_ALTERNATE;
+ else
+ dev->field_cap = V4L2_FIELD_NONE;
+
+ /*
+ * We can be called from within s_ctrl; in that case we can't
+ * set/get controls. Luckily we don't need to then.
+ */
+ if (keep_controls || !dev->colorspace)
+ break;
+ if (bt->standards & V4L2_DV_BT_STD_CEA861) {
+ if (bt->width == 720 && bt->height <= 576)
+ v4l2_ctrl_s_ctrl(dev->colorspace, V4L2_COLORSPACE_SMPTE170M);
+ else
+ v4l2_ctrl_s_ctrl(dev->colorspace, V4L2_COLORSPACE_REC709);
+ v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 1);
+ } else {
+ v4l2_ctrl_s_ctrl(dev->colorspace, V4L2_COLORSPACE_SRGB);
+ v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 0);
+ }
+ tpg_s_rgb_range(&dev->tpg, v4l2_ctrl_g_ctrl(dev->rgb_range_cap));
+ break;
+ }
+ vivid_update_quality(dev);
+ tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap);
+ dev->crop_cap = dev->src_rect;
+ dev->crop_bounds_cap = dev->src_rect;
+ dev->compose_cap = dev->crop_cap;
+ if (V4L2_FIELD_HAS_T_OR_B(dev->field_cap))
+ dev->compose_cap.height /= 2;
+ dev->fmt_cap_rect = dev->compose_cap;
+ tpg_s_video_aspect(&dev->tpg, vivid_get_video_aspect(dev));
+ tpg_s_pixel_aspect(&dev->tpg, vivid_get_pixel_aspect(dev));
+ tpg_update_mv_step(&dev->tpg);
+}
+
+/* Map the field to something that is valid for the current input */
+static enum v4l2_field vivid_field_cap(struct vivid_dev *dev, enum v4l2_field field)
+{
+ if (vivid_is_sdtv_cap(dev)) {
+ switch (field) {
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ case V4L2_FIELD_SEQ_TB:
+ case V4L2_FIELD_SEQ_BT:
+ case V4L2_FIELD_TOP:
+ case V4L2_FIELD_BOTTOM:
+ case V4L2_FIELD_ALTERNATE:
+ return field;
+ case V4L2_FIELD_INTERLACED:
+ default:
+ return V4L2_FIELD_INTERLACED;
+ }
+ }
+ if (vivid_is_hdmi_cap(dev))
+ return dev->dv_timings_cap.bt.interlaced ? V4L2_FIELD_ALTERNATE :
+ V4L2_FIELD_NONE;
+ return V4L2_FIELD_NONE;
+}
+
+static unsigned vivid_colorspace_cap(struct vivid_dev *dev)
+{
+ if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
+ return tpg_g_colorspace(&dev->tpg);
+ return dev->colorspace_out;
+}
+
+int vivid_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
+ unsigned p;
+
+ mp->width = dev->fmt_cap_rect.width;
+ mp->height = dev->fmt_cap_rect.height;
+ mp->field = dev->field_cap;
+ mp->pixelformat = dev->fmt_cap->fourcc;
+ mp->colorspace = vivid_colorspace_cap(dev);
+ mp->num_planes = dev->fmt_cap->planes;
+ for (p = 0; p < mp->num_planes; p++) {
+ mp->plane_fmt[p].bytesperline = tpg_g_bytesperline(&dev->tpg, p);
+ mp->plane_fmt[p].sizeimage =
+ mp->plane_fmt[p].bytesperline * mp->height +
+ dev->fmt_cap->data_offset[p];
+ }
+ return 0;
+}
+
+int vivid_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
+ struct v4l2_plane_pix_format *pfmt = mp->plane_fmt;
+ struct vivid_dev *dev = video_drvdata(file);
+ const struct vivid_fmt *fmt;
+ unsigned bytesperline, max_bpl;
+ unsigned factor = 1;
+ unsigned w, h;
+ unsigned p;
+
+ fmt = vivid_get_format(dev, mp->pixelformat);
+ if (!fmt) {
+ dprintk(dev, 1, "Fourcc format (0x%08x) unknown.\n",
+ mp->pixelformat);
+ mp->pixelformat = V4L2_PIX_FMT_YUYV;
+ fmt = vivid_get_format(dev, mp->pixelformat);
+ }
+
+ mp->field = vivid_field_cap(dev, mp->field);
+ if (vivid_is_webcam(dev)) {
+ const struct v4l2_frmsize_discrete *sz =
+ v4l2_find_nearest_format(&webcam_probe, mp->width, mp->height);
+
+ w = sz->width;
+ h = sz->height;
+ } else if (vivid_is_sdtv_cap(dev)) {
+ w = 720;
+ h = (dev->std_cap & V4L2_STD_525_60) ? 480 : 576;
+ } else {
+ w = dev->src_rect.width;
+ h = dev->src_rect.height;
+ }
+ if (V4L2_FIELD_HAS_T_OR_B(mp->field))
+ factor = 2;
+ if (vivid_is_webcam(dev) ||
+ (!dev->has_scaler_cap && !dev->has_crop_cap && !dev->has_compose_cap)) {
+ mp->width = w;
+ mp->height = h / factor;
+ } else {
+ struct v4l2_rect r = { 0, 0, mp->width, mp->height * factor };
+
+ rect_set_min_size(&r, &vivid_min_rect);
+ rect_set_max_size(&r, &vivid_max_rect);
+ if (dev->has_scaler_cap && !dev->has_compose_cap) {
+ struct v4l2_rect max_r = { 0, 0, MAX_ZOOM * w, MAX_ZOOM * h };
+
+ rect_set_max_size(&r, &max_r);
+ } else if (!dev->has_scaler_cap && dev->has_crop_cap && !dev->has_compose_cap) {
+ rect_set_max_size(&r, &dev->src_rect);
+ } else if (!dev->has_scaler_cap && !dev->has_crop_cap) {
+ rect_set_min_size(&r, &dev->src_rect);
+ }
+ mp->width = r.width;
+ mp->height = r.height / factor;
+ }
+
+ /* This driver supports custom bytesperline values */
+
+ /* Calculate the minimum supported bytesperline value */
+ bytesperline = (mp->width * fmt->depth) >> 3;
+ /* Calculate the maximum supported bytesperline value */
+ max_bpl = (MAX_ZOOM * MAX_WIDTH * fmt->depth) >> 3;
+ mp->num_planes = fmt->planes;
+ for (p = 0; p < mp->num_planes; p++) {
+ if (pfmt[p].bytesperline > max_bpl)
+ pfmt[p].bytesperline = max_bpl;
+ if (pfmt[p].bytesperline < bytesperline)
+ pfmt[p].bytesperline = bytesperline;
+ pfmt[p].sizeimage = pfmt[p].bytesperline * mp->height +
+ fmt->data_offset[p];
+ memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved));
+ }
+ mp->colorspace = vivid_colorspace_cap(dev);
+ memset(mp->reserved, 0, sizeof(mp->reserved));
+ return 0;
+}
+
+int vivid_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_rect *crop = &dev->crop_cap;
+ struct v4l2_rect *compose = &dev->compose_cap;
+ struct vb2_queue *q = &dev->vb_vid_cap_q;
+ int ret = vivid_try_fmt_vid_cap(file, priv, f);
+ unsigned factor = 1;
+ unsigned i;
+
+ if (ret < 0)
+ return ret;
+
+ if (vb2_is_busy(q)) {
+ dprintk(dev, 1, "%s device busy\n", __func__);
+ return -EBUSY;
+ }
+
+ if (dev->overlay_cap_owner && dev->fb_cap.fmt.pixelformat != mp->pixelformat) {
+ dprintk(dev, 1, "overlay is active, can't change pixelformat\n");
+ return -EBUSY;
+ }
+
+ dev->fmt_cap = vivid_get_format(dev, mp->pixelformat);
+ if (V4L2_FIELD_HAS_T_OR_B(mp->field))
+ factor = 2;
+
+ /* Note: the webcam input doesn't support scaling, cropping or composing */
+
+ if (!vivid_is_webcam(dev) &&
+ (dev->has_scaler_cap || dev->has_crop_cap || dev->has_compose_cap)) {
+ struct v4l2_rect r = { 0, 0, mp->width, mp->height };
+
+ if (dev->has_scaler_cap) {
+ if (dev->has_compose_cap)
+ rect_map_inside(compose, &r);
+ else
+ *compose = r;
+ if (dev->has_crop_cap && !dev->has_compose_cap) {
+ struct v4l2_rect min_r = {
+ 0, 0,
+ r.width / MAX_ZOOM,
+ factor * r.height / MAX_ZOOM
+ };
+ struct v4l2_rect max_r = {
+ 0, 0,
+ r.width * MAX_ZOOM,
+ factor * r.height * MAX_ZOOM
+ };
+
+ rect_set_min_size(crop, &min_r);
+ rect_set_max_size(crop, &max_r);
+ rect_map_inside(crop, &dev->crop_bounds_cap);
+ } else if (dev->has_crop_cap) {
+ struct v4l2_rect min_r = {
+ 0, 0,
+ compose->width / MAX_ZOOM,
+ factor * compose->height / MAX_ZOOM
+ };
+ struct v4l2_rect max_r = {
+ 0, 0,
+ compose->width * MAX_ZOOM,
+ factor * compose->height * MAX_ZOOM
+ };
+
+ rect_set_min_size(crop, &min_r);
+ rect_set_max_size(crop, &max_r);
+ rect_map_inside(crop, &dev->crop_bounds_cap);
+ }
+ } else if (dev->has_crop_cap && !dev->has_compose_cap) {
+ r.height *= factor;
+ rect_set_size_to(crop, &r);
+ rect_map_inside(crop, &dev->crop_bounds_cap);
+ r = *crop;
+ r.height /= factor;
+ rect_set_size_to(compose, &r);
+ } else if (!dev->has_crop_cap) {
+ rect_map_inside(compose, &r);
+ } else {
+ r.height *= factor;
+ rect_set_max_size(crop, &r);
+ rect_map_inside(crop, &dev->crop_bounds_cap);
+ compose->top *= factor;
+ compose->height *= factor;
+ rect_set_size_to(compose, crop);
+ rect_map_inside(compose, &r);
+ compose->top /= factor;
+ compose->height /= factor;
+ }
+ } else if (vivid_is_webcam(dev)) {
+ /* Guaranteed to be a match */
+ for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++)
+ if (webcam_sizes[i].width == mp->width &&
+ webcam_sizes[i].height == mp->height)
+ break;
+ dev->webcam_size_idx = i;
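+ /*
+ * Each size supports two frame intervals and larger sizes only allow
+ * the lower frame rates, so clamp the interval index accordingly.
+ */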
+ if (dev->webcam_ival_idx >= 2 * (3 - i))
+ dev->webcam_ival_idx = 2 * (3 - i) - 1;
+ vivid_update_format_cap(dev, false);
+ } else {
+ struct v4l2_rect r = { 0, 0, mp->width, mp->height };
+
+ rect_set_size_to(compose, &r);
+ r.height *= factor;
+ rect_set_size_to(crop, &r);
+ }
+
+ dev->fmt_cap_rect.width = mp->width;
+ dev->fmt_cap_rect.height = mp->height;
+ tpg_s_buf_height(&dev->tpg, mp->height);
+ tpg_s_bytesperline(&dev->tpg, 0, mp->plane_fmt[0].bytesperline);
+ if (tpg_g_planes(&dev->tpg) > 1)
+ tpg_s_bytesperline(&dev->tpg, 1, mp->plane_fmt[1].bytesperline);
+ dev->field_cap = mp->field;
+ tpg_s_field(&dev->tpg, dev->field_cap);
+ tpg_s_crop_compose(&dev->tpg, &dev->crop_cap, &dev->compose_cap);
+ tpg_s_fourcc(&dev->tpg, dev->fmt_cap->fourcc);
+ if (vivid_is_sdtv_cap(dev))
+ dev->tv_field_cap = mp->field;
+ tpg_update_mv_step(&dev->tpg);
+ return 0;
+}
+
+int vidioc_g_fmt_vid_cap_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!dev->multiplanar)
+ return -ENOTTY;
+ return vivid_g_fmt_vid_cap(file, priv, f);
+}
+
+int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!dev->multiplanar)
+ return -ENOTTY;
+ return vivid_try_fmt_vid_cap(file, priv, f);
+}
+
+int vidioc_s_fmt_vid_cap_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!dev->multiplanar)
+ return -ENOTTY;
+ return vivid_s_fmt_vid_cap(file, priv, f);
+}
+
+int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (dev->multiplanar)
+ return -ENOTTY;
+ return fmt_sp2mp_func(file, priv, f, vivid_g_fmt_vid_cap);
+}
+
+int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (dev->multiplanar)
+ return -ENOTTY;
+ return fmt_sp2mp_func(file, priv, f, vivid_try_fmt_vid_cap);
+}
+
+int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (dev->multiplanar)
+ return -ENOTTY;
+ return fmt_sp2mp_func(file, priv, f, vivid_s_fmt_vid_cap);
+}
+
+int vivid_vid_cap_g_selection(struct file *file, void *priv,
+ struct v4l2_selection *sel)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!dev->has_crop_cap && !dev->has_compose_cap)
+ return -ENOTTY;
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ if (vivid_is_webcam(dev))
+ return -EINVAL;
+
+ sel->r.left = sel->r.top = 0;
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ if (!dev->has_crop_cap)
+ return -EINVAL;
+ sel->r = dev->crop_cap;
+ break;
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ if (!dev->has_crop_cap)
+ return -EINVAL;
+ sel->r = dev->src_rect;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ if (!dev->has_compose_cap)
+ return -EINVAL;
+ sel->r = vivid_max_rect;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ if (!dev->has_compose_cap)
+ return -EINVAL;
+ sel->r = dev->compose_cap;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ if (!dev->has_compose_cap)
+ return -EINVAL;
+ sel->r = dev->fmt_cap_rect;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_rect *crop = &dev->crop_cap;
+ struct v4l2_rect *compose = &dev->compose_cap;
+ unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_cap) ? 2 : 1;
+ int ret;
+
+ if (!dev->has_crop_cap && !dev->has_compose_cap)
+ return -ENOTTY;
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ if (vivid_is_webcam(dev))
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP:
+ if (!dev->has_crop_cap)
+ return -EINVAL;
+ ret = vivid_vid_adjust_sel(s->flags, &s->r);
+ if (ret)
+ return ret;
+ rect_set_min_size(&s->r, &vivid_min_rect);
+ rect_set_max_size(&s->r, &dev->src_rect);
+ rect_map_inside(&s->r, &dev->crop_bounds_cap);
+ s->r.top /= factor;
+ s->r.height /= factor;
+ if (dev->has_scaler_cap) {
+ struct v4l2_rect fmt = dev->fmt_cap_rect;
+ struct v4l2_rect max_rect = {
+ 0, 0,
+ s->r.width * MAX_ZOOM,
+ s->r.height * MAX_ZOOM
+ };
+ struct v4l2_rect min_rect = {
+ 0, 0,
+ s->r.width / MAX_ZOOM,
+ s->r.height / MAX_ZOOM
+ };
+
+ rect_set_min_size(&fmt, &min_rect);
+ if (!dev->has_compose_cap)
+ rect_set_max_size(&fmt, &max_rect);
+ if (!rect_same_size(&dev->fmt_cap_rect, &fmt) &&
+ vb2_is_busy(&dev->vb_vid_cap_q))
+ return -EBUSY;
+ if (dev->has_compose_cap) {
+ rect_set_min_size(compose, &min_rect);
+ rect_set_max_size(compose, &max_rect);
+ }
+ dev->fmt_cap_rect = fmt;
+ tpg_s_buf_height(&dev->tpg, fmt.height);
+ } else if (dev->has_compose_cap) {
+ struct v4l2_rect fmt = dev->fmt_cap_rect;
+
+ rect_set_min_size(&fmt, &s->r);
+ if (!rect_same_size(&dev->fmt_cap_rect, &fmt) &&
+ vb2_is_busy(&dev->vb_vid_cap_q))
+ return -EBUSY;
+ dev->fmt_cap_rect = fmt;
+ tpg_s_buf_height(&dev->tpg, fmt.height);
+ rect_set_size_to(compose, &s->r);
+ rect_map_inside(compose, &dev->fmt_cap_rect);
+ } else {
+ if (!rect_same_size(&s->r, &dev->fmt_cap_rect) &&
+ vb2_is_busy(&dev->vb_vid_cap_q))
+ return -EBUSY;
+ rect_set_size_to(&dev->fmt_cap_rect, &s->r);
+ rect_set_size_to(compose, &s->r);
+ rect_map_inside(compose, &dev->fmt_cap_rect);
+ tpg_s_buf_height(&dev->tpg, dev->fmt_cap_rect.height);
+ }
+ s->r.top *= factor;
+ s->r.height *= factor;
+ *crop = s->r;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ if (!dev->has_compose_cap)
+ return -EINVAL;
+ ret = vivid_vid_adjust_sel(s->flags, &s->r);
+ if (ret)
+ return ret;
+ rect_set_min_size(&s->r, &vivid_min_rect);
+ rect_set_max_size(&s->r, &dev->fmt_cap_rect);
+ if (dev->has_scaler_cap) {
+ struct v4l2_rect max_rect = {
+ 0, 0,
+ dev->src_rect.width * MAX_ZOOM,
+ (dev->src_rect.height / factor) * MAX_ZOOM
+ };
+
+ rect_set_max_size(&s->r, &max_rect);
+ if (dev->has_crop_cap) {
+ struct v4l2_rect min_rect = {
+ 0, 0,
+ s->r.width / MAX_ZOOM,
+ (s->r.height * factor) / MAX_ZOOM
+ };
+ struct v4l2_rect max_rect = {
+ 0, 0,
+ s->r.width * MAX_ZOOM,
+ (s->r.height * factor) * MAX_ZOOM
+ };
+
+ rect_set_min_size(crop, &min_rect);
+ rect_set_max_size(crop, &max_rect);
+ rect_map_inside(crop, &dev->crop_bounds_cap);
+ }
+ } else if (dev->has_crop_cap) {
+ s->r.top *= factor;
+ s->r.height *= factor;
+ rect_set_max_size(&s->r, &dev->src_rect);
+ rect_set_size_to(crop, &s->r);
+ rect_map_inside(crop, &dev->crop_bounds_cap);
+ s->r.top /= factor;
+ s->r.height /= factor;
+ } else {
+ rect_set_size_to(&s->r, &dev->src_rect);
+ s->r.height /= factor;
+ }
+ rect_map_inside(&s->r, &dev->fmt_cap_rect);
+ if (dev->bitmap_cap && (compose->width != s->r.width ||
+ compose->height != s->r.height)) {
+ kfree(dev->bitmap_cap);
+ dev->bitmap_cap = NULL;
+ }
+ *compose = s->r;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ tpg_s_crop_compose(&dev->tpg, crop, compose);
+ return 0;
+}
+
+int vivid_vid_cap_cropcap(struct file *file, void *priv,
+ struct v4l2_cropcap *cap)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (cap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ switch (vivid_get_pixel_aspect(dev)) {
+ case TPG_PIXEL_ASPECT_NTSC:
+ cap->pixelaspect.numerator = 11;
+ cap->pixelaspect.denominator = 10;
+ break;
+ case TPG_PIXEL_ASPECT_PAL:
+ cap->pixelaspect.numerator = 54;
+ cap->pixelaspect.denominator = 59;
+ break;
+ case TPG_PIXEL_ASPECT_SQUARE:
+ cap->pixelaspect.numerator = 1;
+ cap->pixelaspect.denominator = 1;
+ break;
+ }
+ return 0;
+}
+
+int vidioc_enum_fmt_vid_overlay(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ const struct vivid_fmt *fmt;
+
+ if (f->index >= ARRAY_SIZE(formats_ovl))
+ return -EINVAL;
+
+ fmt = &formats_ovl[f->index];
+
+ strlcpy(f->description, fmt->name, sizeof(f->description));
+ f->pixelformat = fmt->fourcc;
+ return 0;
+}
+
+int vidioc_g_fmt_vid_overlay(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ const struct v4l2_rect *compose = &dev->compose_cap;
+ struct v4l2_window *win = &f->fmt.win;
+ unsigned clipcount = win->clipcount;
+
+ win->w.top = dev->overlay_cap_top;
+ win->w.left = dev->overlay_cap_left;
+ win->w.width = compose->width;
+ win->w.height = compose->height;
+ win->field = dev->overlay_cap_field;
+ win->clipcount = dev->clipcount_cap;
+ if (clipcount > dev->clipcount_cap)
+ clipcount = dev->clipcount_cap;
+ if (dev->bitmap_cap == NULL)
+ win->bitmap = NULL;
+ else if (win->bitmap) {
+ if (copy_to_user(win->bitmap, dev->bitmap_cap,
+ ((compose->width + 7) / 8) * compose->height))
+ return -EFAULT;
+ }
+ if (clipcount && win->clips) {
+ if (copy_to_user(win->clips, dev->clips_cap,
+ clipcount * sizeof(dev->clips_cap[0])))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+int vidioc_try_fmt_vid_overlay(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ const struct v4l2_rect *compose = &dev->compose_cap;
+ struct v4l2_window *win = &f->fmt.win;
+ int i, j;
+
+ win->w.left = clamp_t(int, win->w.left,
+ -dev->fb_cap.fmt.width, dev->fb_cap.fmt.width);
+ win->w.top = clamp_t(int, win->w.top,
+ -dev->fb_cap.fmt.height, dev->fb_cap.fmt.height);
+ win->w.width = compose->width;
+ win->w.height = compose->height;
+ if (win->field != V4L2_FIELD_BOTTOM && win->field != V4L2_FIELD_TOP)
+ win->field = V4L2_FIELD_ANY;
+ win->chromakey = 0;
+ win->global_alpha = 0;
+ if (win->clipcount && !win->clips)
+ win->clipcount = 0;
+ if (win->clipcount > MAX_CLIPS)
+ win->clipcount = MAX_CLIPS;
+ if (win->clipcount) {
+ if (copy_from_user(dev->try_clips_cap, win->clips,
+ win->clipcount * sizeof(dev->clips_cap[0])))
+ return -EFAULT;
+ for (i = 0; i < win->clipcount; i++) {
+ struct v4l2_rect *r = &dev->try_clips_cap[i].c;
+
+ r->top = clamp_t(s32, r->top, 0, dev->fb_cap.fmt.height - 1);
+ r->height = clamp_t(s32, r->height, 1, dev->fb_cap.fmt.height - r->top);
+ r->left = clamp_t(u32, r->left, 0, dev->fb_cap.fmt.width - 1);
+ r->width = clamp_t(u32, r->width, 1, dev->fb_cap.fmt.width - r->left);
+ }
+ /*
+ * Yeah, so sue me, it's an O(n^2) algorithm. But n is a small
+ * number and it's typically a one-time deal.
+ */
+ for (i = 0; i < win->clipcount - 1; i++) {
+ struct v4l2_rect *r1 = &dev->try_clips_cap[i].c;
+
+ for (j = i + 1; j < win->clipcount; j++) {
+ struct v4l2_rect *r2 = &dev->try_clips_cap[j].c;
+
+ if (rect_overlap(r1, r2))
+ return -EINVAL;
+ }
+ }
+ if (copy_to_user(win->clips, dev->try_clips_cap,
+ win->clipcount * sizeof(dev->clips_cap[0])))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+int vidioc_s_fmt_vid_overlay(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ const struct v4l2_rect *compose = &dev->compose_cap;
+ struct v4l2_window *win = &f->fmt.win;
+ int ret = vidioc_try_fmt_vid_overlay(file, priv, f);
+ unsigned bitmap_size = ((compose->width + 7) / 8) * compose->height;
+ unsigned clips_size = win->clipcount * sizeof(dev->clips_cap[0]);
+ void *new_bitmap = NULL;
+
+ if (ret)
+ return ret;
+
+ if (win->bitmap) {
+ new_bitmap = vzalloc(bitmap_size);
+
+ if (new_bitmap == NULL)
+ return -ENOMEM;
+ if (copy_from_user(new_bitmap, win->bitmap, bitmap_size)) {
+ vfree(new_bitmap);
+ return -EFAULT;
+ }
+ }
+
+ dev->overlay_cap_top = win->w.top;
+ dev->overlay_cap_left = win->w.left;
+ dev->overlay_cap_field = win->field;
+ vfree(dev->bitmap_cap);
+ dev->bitmap_cap = new_bitmap;
+ dev->clipcount_cap = win->clipcount;
+ if (dev->clipcount_cap)
+ memcpy(dev->clips_cap, dev->try_clips_cap, clips_size);
+ return 0;
+}
+
+int vivid_vid_cap_overlay(struct file *file, void *fh, unsigned i)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (i && dev->fb_vbase_cap == NULL)
+ return -EINVAL;
+
+ if (i && dev->fb_cap.fmt.pixelformat != dev->fmt_cap->fourcc) {
+ dprintk(dev, 1, "mismatch between overlay and video capture pixelformats\n");
+ return -EINVAL;
+ }
+
+ if (dev->overlay_cap_owner && dev->overlay_cap_owner != fh)
+ return -EBUSY;
+ dev->overlay_cap_owner = i ? fh : NULL;
+ return 0;
+}
+
+int vivid_vid_cap_g_fbuf(struct file *file, void *fh,
+ struct v4l2_framebuffer *a)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ *a = dev->fb_cap;
+ a->capability = V4L2_FBUF_CAP_BITMAP_CLIPPING |
+ V4L2_FBUF_CAP_LIST_CLIPPING;
+ a->flags = V4L2_FBUF_FLAG_PRIMARY;
+ a->fmt.field = V4L2_FIELD_NONE;
+ a->fmt.colorspace = V4L2_COLORSPACE_SRGB;
+ a->fmt.priv = 0;
+ return 0;
+}
+
+int vivid_vid_cap_s_fbuf(struct file *file, void *fh,
+ const struct v4l2_framebuffer *a)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ const struct vivid_fmt *fmt;
+
+ if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ if (dev->overlay_cap_owner)
+ return -EBUSY;
+
+ if (a->base == NULL) {
+ dev->fb_cap.base = NULL;
+ dev->fb_vbase_cap = NULL;
+ return 0;
+ }
+
+ if (a->fmt.width < 48 || a->fmt.height < 32)
+ return -EINVAL;
+ fmt = vivid_get_format(dev, a->fmt.pixelformat);
+ if (!fmt || !fmt->can_do_overlay)
+ return -EINVAL;
+ if (a->fmt.bytesperline < (a->fmt.width * fmt->depth) / 8)
+ return -EINVAL;
+ if (a->fmt.height * a->fmt.bytesperline < a->fmt.sizeimage)
+ return -EINVAL;
+
+ dev->fb_vbase_cap = phys_to_virt((unsigned long)a->base);
+ dev->fb_cap = *a;
+ dev->overlay_cap_left = clamp_t(int, dev->overlay_cap_left,
+ -dev->fb_cap.fmt.width, dev->fb_cap.fmt.width);
+ dev->overlay_cap_top = clamp_t(int, dev->overlay_cap_top,
+ -dev->fb_cap.fmt.height, dev->fb_cap.fmt.height);
+ return 0;
+}
+
+static const struct v4l2_audio vivid_audio_inputs[] = {
+ { 0, "TV", V4L2_AUDCAP_STEREO },
+ { 1, "Line-In", V4L2_AUDCAP_STEREO },
+};
+
+int vidioc_enum_input(struct file *file, void *priv,
+ struct v4l2_input *inp)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (inp->index >= dev->num_inputs)
+ return -EINVAL;
+
+ inp->type = V4L2_INPUT_TYPE_CAMERA;
+ switch (dev->input_type[inp->index]) {
+ case WEBCAM:
+ snprintf(inp->name, sizeof(inp->name), "Webcam %u",
+ dev->input_name_counter[inp->index]);
+ inp->capabilities = 0;
+ break;
+ case TV:
+ snprintf(inp->name, sizeof(inp->name), "TV %u",
+ dev->input_name_counter[inp->index]);
+ inp->type = V4L2_INPUT_TYPE_TUNER;
+ inp->std = V4L2_STD_ALL;
+ if (dev->has_audio_inputs)
+ inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1;
+ inp->capabilities = V4L2_IN_CAP_STD;
+ break;
+ case SVID:
+ snprintf(inp->name, sizeof(inp->name), "S-Video %u",
+ dev->input_name_counter[inp->index]);
+ inp->std = V4L2_STD_ALL;
+ if (dev->has_audio_inputs)
+ inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1;
+ inp->capabilities = V4L2_IN_CAP_STD;
+ break;
+ case HDMI:
+ snprintf(inp->name, sizeof(inp->name), "HDMI %u",
+ dev->input_name_counter[inp->index]);
+ inp->capabilities = V4L2_IN_CAP_DV_TIMINGS;
+ if (dev->edid_blocks == 0 ||
+ dev->dv_timings_signal_mode == NO_SIGNAL)
+ inp->status |= V4L2_IN_ST_NO_SIGNAL;
+ else if (dev->dv_timings_signal_mode == NO_LOCK ||
+ dev->dv_timings_signal_mode == OUT_OF_RANGE)
+ inp->status |= V4L2_IN_ST_NO_H_LOCK;
+ break;
+ }
+ if (dev->sensor_hflip)
+ inp->status |= V4L2_IN_ST_HFLIP;
+ if (dev->sensor_vflip)
+ inp->status |= V4L2_IN_ST_VFLIP;
+ if (dev->input == inp->index && vivid_is_sdtv_cap(dev)) {
+ if (dev->std_signal_mode == NO_SIGNAL) {
+ inp->status |= V4L2_IN_ST_NO_SIGNAL;
+ } else if (dev->std_signal_mode == NO_LOCK) {
+ inp->status |= V4L2_IN_ST_NO_H_LOCK;
+ } else if (vivid_is_tv_cap(dev)) {
+ switch (tpg_g_quality(&dev->tpg)) {
+ case TPG_QUAL_GRAY:
+ inp->status |= V4L2_IN_ST_COLOR_KILL;
+ break;
+ case TPG_QUAL_NOISE:
+ inp->status |= V4L2_IN_ST_NO_H_LOCK;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ return 0;
+}
+
+int vidioc_g_input(struct file *file, void *priv, unsigned *i)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ *i = dev->input;
+ return 0;
+}
+
+int vidioc_s_input(struct file *file, void *priv, unsigned i)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_bt_timings *bt = &dev->dv_timings_cap.bt;
+ unsigned brightness;
+
+ if (i >= dev->num_inputs)
+ return -EINVAL;
+
+ if (i == dev->input)
+ return 0;
+
+ if (vb2_is_busy(&dev->vb_vid_cap_q) || vb2_is_busy(&dev->vb_vbi_cap_q))
+ return -EBUSY;
+
+ dev->input = i;
+ dev->vid_cap_dev.tvnorms = 0;
+ if (dev->input_type[i] == TV || dev->input_type[i] == SVID) {
+ dev->tv_audio_input = (dev->input_type[i] == TV) ? 0 : 1;
+ dev->vid_cap_dev.tvnorms = V4L2_STD_ALL;
+ }
+ dev->vbi_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms;
+ vivid_update_format_cap(dev, false);
+
+ if (dev->colorspace) {
+ switch (dev->input_type[i]) {
+ case WEBCAM:
+ v4l2_ctrl_s_ctrl(dev->colorspace, V4L2_COLORSPACE_SRGB);
+ break;
+ case TV:
+ case SVID:
+ v4l2_ctrl_s_ctrl(dev->colorspace, V4L2_COLORSPACE_SMPTE170M);
+ break;
+ case HDMI:
+ if (bt->standards & V4L2_DV_BT_STD_CEA861) {
+ if (dev->src_rect.width == 720 && dev->src_rect.height <= 576)
+ v4l2_ctrl_s_ctrl(dev->colorspace, V4L2_COLORSPACE_SMPTE170M);
+ else
+ v4l2_ctrl_s_ctrl(dev->colorspace, V4L2_COLORSPACE_REC709);
+ } else {
+ v4l2_ctrl_s_ctrl(dev->colorspace, V4L2_COLORSPACE_SRGB);
+ }
+ break;
+ }
+ }
+
+ /*
+ * Modify the brightness range depending on the input.
+ * This makes it easy to use vivid to test if applications can
+ * handle control range modifications and is also how this is
+ * typically used in practice as different inputs may be hooked
+ * up to different receivers with different control ranges.
+ */
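+ /* E.g. for input 1 the range becomes [128, 383] with a default of 256. */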
+ brightness = 128 * i + dev->input_brightness[i];
+ v4l2_ctrl_modify_range(dev->brightness,
+ 128 * i, 255 + 128 * i, 1, 128 + 128 * i);
+ v4l2_ctrl_s_ctrl(dev->brightness, brightness);
+ return 0;
+}
+
+int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *vin)
+{
+ if (vin->index >= ARRAY_SIZE(vivid_audio_inputs))
+ return -EINVAL;
+ *vin = vivid_audio_inputs[vin->index];
+ return 0;
+}
+
+int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *vin)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!vivid_is_sdtv_cap(dev))
+ return -EINVAL;
+ *vin = vivid_audio_inputs[dev->tv_audio_input];
+ return 0;
+}
+
+int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio *vin)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!vivid_is_sdtv_cap(dev))
+ return -EINVAL;
+ if (vin->index >= ARRAY_SIZE(vivid_audio_inputs))
+ return -EINVAL;
+ dev->tv_audio_input = vin->index;
+ return 0;
+}
+
+int vivid_video_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (vf->tuner != 0)
+ return -EINVAL;
+ vf->frequency = dev->tv_freq;
+ return 0;
+}
+
+int vivid_video_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (vf->tuner != 0)
+ return -EINVAL;
+ dev->tv_freq = clamp_t(unsigned, vf->frequency, MIN_TV_FREQ, MAX_TV_FREQ);
+ if (vivid_is_tv_cap(dev))
+ vivid_update_quality(dev);
+ return 0;
+}
+
+int vivid_video_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (vt->index != 0)
+ return -EINVAL;
+ if (vt->audmode > V4L2_TUNER_MODE_LANG1_LANG2)
+ return -EINVAL;
+ dev->tv_audmode = vt->audmode;
+ return 0;
+}
+
+int vivid_video_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ enum tpg_quality qual;
+
+ if (vt->index != 0)
+ return -EINVAL;
+
+ vt->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO |
+ V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2;
+ vt->audmode = dev->tv_audmode;
+ vt->rangelow = MIN_TV_FREQ;
+ vt->rangehigh = MAX_TV_FREQ;
+ qual = vivid_get_quality(dev, &vt->afc);
+ if (qual == TPG_QUAL_COLOR)
+ vt->signal = 0xffff;
+ else if (qual == TPG_QUAL_GRAY)
+ vt->signal = 0x8000;
+ else
+ vt->signal = 0;
+ if (qual == TPG_QUAL_NOISE) {
+ vt->rxsubchans = 0;
+ } else if (qual == TPG_QUAL_GRAY) {
+ vt->rxsubchans = V4L2_TUNER_SUB_MONO;
+ } else {
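+		/*
+		 * Derive a pseudo channel number from the frequency (tuner
+		 * units of 62.5 kHz, so 6 * 16 units per 6 MHz channel) and
+		 * vary the reported audio subchannels per channel.
+		 */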
+ unsigned channel_nr = dev->tv_freq / (6 * 16);
+ unsigned options = (dev->std_cap & V4L2_STD_NTSC_M) ? 4 : 3;
+
+ switch (channel_nr % options) {
+ case 0:
+ vt->rxsubchans = V4L2_TUNER_SUB_MONO;
+ break;
+ case 1:
+ vt->rxsubchans = V4L2_TUNER_SUB_STEREO;
+ break;
+ case 2:
+ if (dev->std_cap & V4L2_STD_NTSC_M)
+ vt->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_SAP;
+ else
+ vt->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
+ break;
+ case 3:
+ vt->rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_SAP;
+ break;
+ }
+ }
+ strlcpy(vt->name, "TV Tuner", sizeof(vt->name));
+ return 0;
+}
+
+/* Must remain in sync with the vivid_ctrl_standard_strings array */
+const v4l2_std_id vivid_standard[] = {
+ V4L2_STD_NTSC_M,
+ V4L2_STD_NTSC_M_JP,
+ V4L2_STD_NTSC_M_KR,
+ V4L2_STD_NTSC_443,
+ V4L2_STD_PAL_BG | V4L2_STD_PAL_H,
+ V4L2_STD_PAL_I,
+ V4L2_STD_PAL_DK,
+ V4L2_STD_PAL_M,
+ V4L2_STD_PAL_N,
+ V4L2_STD_PAL_Nc,
+ V4L2_STD_PAL_60,
+ V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H,
+ V4L2_STD_SECAM_DK,
+ V4L2_STD_SECAM_L,
+ V4L2_STD_SECAM_LC,
+ V4L2_STD_UNKNOWN
+};
+
+/* Must remain in sync with the vivid_standard array */
+const char * const vivid_ctrl_standard_strings[] = {
+ "NTSC-M",
+ "NTSC-M-JP",
+ "NTSC-M-KR",
+ "NTSC-443",
+ "PAL-BGH",
+ "PAL-I",
+ "PAL-DK",
+ "PAL-M",
+ "PAL-N",
+ "PAL-Nc",
+ "PAL-60",
+ "SECAM-BGH",
+ "SECAM-DK",
+ "SECAM-L",
+ "SECAM-Lc",
+ NULL,
+};
+
+int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *id)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!vivid_is_sdtv_cap(dev))
+ return -ENODATA;
+ if (dev->std_signal_mode == NO_SIGNAL ||
+ dev->std_signal_mode == NO_LOCK) {
+ *id = V4L2_STD_UNKNOWN;
+ return 0;
+ }
+ if (vivid_is_tv_cap(dev) && tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE) {
+ *id = V4L2_STD_UNKNOWN;
+ } else if (dev->std_signal_mode == CURRENT_STD) {
+ *id = dev->std_cap;
+ } else if (dev->std_signal_mode == SELECTED_STD) {
+ *id = dev->query_std;
+ } else {
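+		/*
+		 * Cycle through all the standards, returning a different one
+		 * on each call.
+		 */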
+ *id = vivid_standard[dev->query_std_last];
+ dev->query_std_last = (dev->query_std_last + 1) % ARRAY_SIZE(vivid_standard);
+ }
+
+ return 0;
+}
+
+int vivid_vid_cap_s_std(struct file *file, void *priv, v4l2_std_id id)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!vivid_is_sdtv_cap(dev))
+ return -ENODATA;
+ if (dev->std_cap == id)
+ return 0;
+ if (vb2_is_busy(&dev->vb_vid_cap_q) || vb2_is_busy(&dev->vb_vbi_cap_q))
+ return -EBUSY;
+ dev->std_cap = id;
+ vivid_update_format_cap(dev, false);
+ return 0;
+}
+
+int vivid_vid_cap_s_dv_timings(struct file *file, void *_fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!vivid_is_hdmi_cap(dev))
+ return -ENODATA;
+ if (vb2_is_busy(&dev->vb_vid_cap_q))
+ return -EBUSY;
+ if (!v4l2_find_dv_timings_cap(timings, &vivid_dv_timings_cap,
+ 0, NULL, NULL))
+ return -EINVAL;
+ if (v4l2_match_dv_timings(timings, &dev->dv_timings_cap, 0))
+ return 0;
+ dev->dv_timings_cap = *timings;
+ vivid_update_format_cap(dev, false);
+ return 0;
+}
+
+int vidioc_query_dv_timings(struct file *file, void *_fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!vivid_is_hdmi_cap(dev))
+ return -ENODATA;
+ if (dev->dv_timings_signal_mode == NO_SIGNAL ||
+ dev->edid_blocks == 0)
+ return -ENOLINK;
+ if (dev->dv_timings_signal_mode == NO_LOCK)
+ return -ENOLCK;
+ if (dev->dv_timings_signal_mode == OUT_OF_RANGE) {
+ timings->bt.pixelclock = vivid_dv_timings_cap.bt.max_pixelclock * 2;
+ return -ERANGE;
+ }
+ if (dev->dv_timings_signal_mode == CURRENT_DV_TIMINGS) {
+ *timings = dev->dv_timings_cap;
+ } else if (dev->dv_timings_signal_mode == SELECTED_DV_TIMINGS) {
+ *timings = v4l2_dv_timings_presets[dev->query_dv_timings];
+ } else {
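+		/*
+		 * Cycle through the DV timings presets, returning a different
+		 * preset on each call.
+		 */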
+ *timings = v4l2_dv_timings_presets[dev->query_dv_timings_last];
+ dev->query_dv_timings_last = (dev->query_dv_timings_last + 1) %
+ dev->query_dv_timings_size;
+ }
+ return 0;
+}
+
+int vidioc_s_edid(struct file *file, void *_fh,
+ struct v4l2_edid *edid)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ memset(edid->reserved, 0, sizeof(edid->reserved));
+ if (edid->pad >= dev->num_inputs)
+ return -EINVAL;
+ if (dev->input_type[edid->pad] != HDMI || edid->start_block)
+ return -EINVAL;
+ if (edid->blocks == 0) {
+ dev->edid_blocks = 0;
+ return 0;
+ }
+ if (edid->blocks > dev->edid_max_blocks) {
+ edid->blocks = dev->edid_max_blocks;
+ return -E2BIG;
+ }
+ dev->edid_blocks = edid->blocks;
+ memcpy(dev->edid, edid->edid, edid->blocks * 128);
+ return 0;
+}
+
+int vidioc_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!vivid_is_webcam(dev) && !dev->has_scaler_cap)
+ return -EINVAL;
+ if (vivid_get_format(dev, fsize->pixel_format) == NULL)
+ return -EINVAL;
+ if (vivid_is_webcam(dev)) {
+ if (fsize->index >= ARRAY_SIZE(webcam_sizes))
+ return -EINVAL;
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete = webcam_sizes[fsize->index];
+ return 0;
+ }
+ if (fsize->index)
+ return -EINVAL;
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise.min_width = MIN_WIDTH;
+ fsize->stepwise.max_width = MAX_WIDTH * MAX_ZOOM;
+ fsize->stepwise.step_width = 2;
+ fsize->stepwise.min_height = MIN_HEIGHT;
+ fsize->stepwise.max_height = MAX_HEIGHT * MAX_ZOOM;
+ fsize->stepwise.step_height = 2;
+ return 0;
+}
+
+/* timeperframe is arbitrary and continuous */
+int vidioc_enum_frameintervals(struct file *file, void *priv,
+ struct v4l2_frmivalenum *fival)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ const struct vivid_fmt *fmt;
+ int i;
+
+ fmt = vivid_get_format(dev, fival->pixel_format);
+ if (!fmt)
+ return -EINVAL;
+
+ if (!vivid_is_webcam(dev)) {
+ static const struct v4l2_fract step = { 1, 1 };
+
+ if (fival->index)
+ return -EINVAL;
+ if (fival->width < MIN_WIDTH || fival->width > MAX_WIDTH * MAX_ZOOM)
+ return -EINVAL;
+ if (fival->height < MIN_HEIGHT || fival->height > MAX_HEIGHT * MAX_ZOOM)
+ return -EINVAL;
+ fival->type = V4L2_FRMIVAL_TYPE_CONTINUOUS;
+ fival->stepwise.min = tpf_min;
+ fival->stepwise.max = tpf_max;
+ fival->stepwise.step = step;
+ return 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++)
+ if (fival->width == webcam_sizes[i].width &&
+ fival->height == webcam_sizes[i].height)
+ break;
+ if (i == ARRAY_SIZE(webcam_sizes))
+ return -EINVAL;
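+	/* Larger webcam frame sizes support fewer frame intervals. */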
+ if (fival->index >= 2 * (3 - i))
+ return -EINVAL;
+ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ fival->discrete = webcam_intervals[fival->index];
+ return 0;
+}
+
+int vivid_vid_cap_g_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *parm)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (parm->type != (dev->multiplanar ?
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
+ V4L2_BUF_TYPE_VIDEO_CAPTURE))
+ return -EINVAL;
+
+ parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ parm->parm.capture.timeperframe = dev->timeperframe_vid_cap;
+ parm->parm.capture.readbuffers = 1;
+ return 0;
+}
+
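+/* Compare two v4l2_fract values by cross-multiplying in 64 bits. */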
+#define FRACT_CMP(a, OP, b) \
+ ((u64)(a).numerator * (b).denominator OP (u64)(b).numerator * (a).denominator)
+
+int vivid_vid_cap_s_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *parm)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ unsigned ival_sz = 2 * (3 - dev->webcam_size_idx);
+ struct v4l2_fract tpf;
+ unsigned i;
+
+ if (parm->type != (dev->multiplanar ?
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
+ V4L2_BUF_TYPE_VIDEO_CAPTURE))
+ return -EINVAL;
+ if (!vivid_is_webcam(dev))
+ return vivid_vid_cap_g_parm(file, priv, parm);
+
+ tpf = parm->parm.capture.timeperframe;
+
+ if (tpf.denominator == 0)
+ tpf = webcam_intervals[ival_sz - 1];
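+	/*
+	 * Find the first supported interval that is not larger than the
+	 * requested one, falling back to the last entry in the list.
+	 */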
+ for (i = 0; i < ival_sz; i++)
+ if (FRACT_CMP(tpf, >=, webcam_intervals[i]))
+ break;
+ if (i == ival_sz)
+ i = ival_sz - 1;
+ dev->webcam_ival_idx = i;
+ tpf = webcam_intervals[dev->webcam_ival_idx];
+ tpf = FRACT_CMP(tpf, <, tpf_min) ? tpf_min : tpf;
+ tpf = FRACT_CMP(tpf, >, tpf_max) ? tpf_max : tpf;
+
+ /* resync the thread's timings */
+ dev->cap_seq_resync = true;
+ dev->timeperframe_vid_cap = tpf;
+ parm->parm.capture.timeperframe = tpf;
+ parm->parm.capture.readbuffers = 1;
+ return 0;
+}
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.h b/drivers/media/platform/vivid/vivid-vid-cap.h
new file mode 100644
index 000000000000..94079815dbc2
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-vid-cap.h
@@ -0,0 +1,71 @@
+/*
+ * vivid-vid-cap.h - video capture support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _VIVID_VID_CAP_H_
+#define _VIVID_VID_CAP_H_
+
+void vivid_update_quality(struct vivid_dev *dev);
+void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls);
+enum tpg_video_aspect vivid_get_video_aspect(const struct vivid_dev *dev);
+
+extern const v4l2_std_id vivid_standard[];
+extern const char * const vivid_ctrl_standard_strings[];
+
+extern const struct vb2_ops vivid_vid_cap_qops;
+
+int vivid_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f);
+int vivid_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f);
+int vivid_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f);
+int vidioc_g_fmt_vid_cap_mplane(struct file *file, void *priv, struct v4l2_format *f);
+int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv, struct v4l2_format *f);
+int vidioc_s_fmt_vid_cap_mplane(struct file *file, void *priv, struct v4l2_format *f);
+int vidioc_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f);
+int vidioc_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f);
+int vidioc_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f);
+int vivid_vid_cap_g_selection(struct file *file, void *priv, struct v4l2_selection *sel);
+int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection *s);
+int vivid_vid_cap_cropcap(struct file *file, void *priv, struct v4l2_cropcap *cap);
+int vidioc_enum_fmt_vid_overlay(struct file *file, void *priv, struct v4l2_fmtdesc *f);
+int vidioc_g_fmt_vid_overlay(struct file *file, void *priv, struct v4l2_format *f);
+int vidioc_try_fmt_vid_overlay(struct file *file, void *priv, struct v4l2_format *f);
+int vidioc_s_fmt_vid_overlay(struct file *file, void *priv, struct v4l2_format *f);
+int vivid_vid_cap_overlay(struct file *file, void *fh, unsigned i);
+int vivid_vid_cap_g_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *a);
+int vivid_vid_cap_s_fbuf(struct file *file, void *fh, const struct v4l2_framebuffer *a);
+int vidioc_enum_input(struct file *file, void *priv, struct v4l2_input *inp);
+int vidioc_g_input(struct file *file, void *priv, unsigned *i);
+int vidioc_s_input(struct file *file, void *priv, unsigned i);
+int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *vin);
+int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *vin);
+int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio *vin);
+int vivid_video_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf);
+int vivid_video_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf);
+int vivid_video_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt);
+int vivid_video_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt);
+int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *id);
+int vivid_vid_cap_s_std(struct file *file, void *priv, v4l2_std_id id);
+int vivid_vid_cap_s_dv_timings(struct file *file, void *_fh, struct v4l2_dv_timings *timings);
+int vidioc_query_dv_timings(struct file *file, void *_fh, struct v4l2_dv_timings *timings);
+int vidioc_s_edid(struct file *file, void *_fh, struct v4l2_edid *edid);
+int vidioc_enum_framesizes(struct file *file, void *fh, struct v4l2_frmsizeenum *fsize);
+int vidioc_enum_frameintervals(struct file *file, void *priv, struct v4l2_frmivalenum *fival);
+int vivid_vid_cap_g_parm(struct file *file, void *priv, struct v4l2_streamparm *parm);
+int vivid_vid_cap_s_parm(struct file *file, void *priv, struct v4l2_streamparm *parm);
+
+#endif
diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
new file mode 100644
index 000000000000..16cd6d2d2ed6
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-vid-common.c
@@ -0,0 +1,571 @@
+/*
+ * vivid-vid-common.c - common video support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/videodev2.h>
+#include <linux/v4l2-dv-timings.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-dv-timings.h>
+
+#include "vivid-core.h"
+#include "vivid-vid-common.h"
+
+const struct v4l2_dv_timings_cap vivid_dv_timings_cap = {
+ .type = V4L2_DV_BT_656_1120,
+ /* keep this initialization for compatibility with GCC < 4.4.6 */
+ .reserved = { 0 },
+ V4L2_INIT_BT_TIMINGS(0, MAX_WIDTH, 0, MAX_HEIGHT, 25000000, 600000000,
+ V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT,
+ V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_INTERLACED)
+};
+
+/* ------------------------------------------------------------------
+ Basic structures
+ ------------------------------------------------------------------*/
+
+struct vivid_fmt vivid_formats[] = {
+ {
+ .name = "4:2:2, packed, YUYV",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .depth = 16,
+ .is_yuv = true,
+ .planes = 1,
+ .data_offset = { PLANE0_DATA_OFFSET, 0 },
+ },
+ {
+ .name = "4:2:2, packed, UYVY",
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .depth = 16,
+ .is_yuv = true,
+ .planes = 1,
+ },
+ {
+ .name = "4:2:2, packed, YVYU",
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .depth = 16,
+ .is_yuv = true,
+ .planes = 1,
+ },
+ {
+ .name = "4:2:2, packed, VYUY",
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .depth = 16,
+ .is_yuv = true,
+ .planes = 1,
+ },
+ {
+ .name = "RGB565 (LE)",
+ .fourcc = V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */
+ .depth = 16,
+ .planes = 1,
+ .can_do_overlay = true,
+ },
+ {
+ .name = "RGB565 (BE)",
+ .fourcc = V4L2_PIX_FMT_RGB565X, /* rrrrrggg gggbbbbb */
+ .depth = 16,
+ .planes = 1,
+ .can_do_overlay = true,
+ },
+ {
+ .name = "RGB555 (LE)",
+ .fourcc = V4L2_PIX_FMT_RGB555, /* gggbbbbb arrrrrgg */
+ .depth = 16,
+ .planes = 1,
+ .can_do_overlay = true,
+ },
+ {
+ .name = "XRGB555 (LE)",
+ .fourcc = V4L2_PIX_FMT_XRGB555, /* gggbbbbb arrrrrgg */
+ .depth = 16,
+ .planes = 1,
+ .can_do_overlay = true,
+ },
+ {
+ .name = "ARGB555 (LE)",
+ .fourcc = V4L2_PIX_FMT_ARGB555, /* gggbbbbb arrrrrgg */
+ .depth = 16,
+ .planes = 1,
+ .can_do_overlay = true,
+ .alpha_mask = 0x8000,
+ },
+ {
+ .name = "RGB555 (BE)",
+ .fourcc = V4L2_PIX_FMT_RGB555X, /* arrrrrgg gggbbbbb */
+ .depth = 16,
+ .planes = 1,
+ .can_do_overlay = true,
+ },
+ {
+ .name = "RGB24 (LE)",
+ .fourcc = V4L2_PIX_FMT_RGB24, /* rgb */
+ .depth = 24,
+ .planes = 1,
+ },
+ {
+ .name = "RGB24 (BE)",
+ .fourcc = V4L2_PIX_FMT_BGR24, /* bgr */
+ .depth = 24,
+ .planes = 1,
+ },
+ {
+ .name = "RGB32 (LE)",
+ .fourcc = V4L2_PIX_FMT_RGB32, /* argb */
+ .depth = 32,
+ .planes = 1,
+ },
+ {
+ .name = "RGB32 (BE)",
+ .fourcc = V4L2_PIX_FMT_BGR32, /* bgra */
+ .depth = 32,
+ .planes = 1,
+ },
+ {
+ .name = "XRGB32 (LE)",
+ .fourcc = V4L2_PIX_FMT_XRGB32, /* argb */
+ .depth = 32,
+ .planes = 1,
+ },
+ {
+ .name = "XRGB32 (BE)",
+ .fourcc = V4L2_PIX_FMT_XBGR32, /* bgra */
+ .depth = 32,
+ .planes = 1,
+ },
+ {
+ .name = "ARGB32 (LE)",
+ .fourcc = V4L2_PIX_FMT_ARGB32, /* argb */
+ .depth = 32,
+ .planes = 1,
+ .alpha_mask = 0x000000ff,
+ },
+ {
+ .name = "ARGB32 (BE)",
+ .fourcc = V4L2_PIX_FMT_ABGR32, /* bgra */
+ .depth = 32,
+ .planes = 1,
+ .alpha_mask = 0xff000000,
+ },
+ {
+ .name = "4:2:2, planar, YUV",
+ .fourcc = V4L2_PIX_FMT_NV16M,
+ .depth = 8,
+ .is_yuv = true,
+ .planes = 2,
+ .data_offset = { PLANE0_DATA_OFFSET, 0 },
+ },
+ {
+ .name = "4:2:2, planar, YVU",
+ .fourcc = V4L2_PIX_FMT_NV61M,
+ .depth = 8,
+ .is_yuv = true,
+ .planes = 2,
+ .data_offset = { 0, PLANE0_DATA_OFFSET },
+ },
+};
+
+/* There are 2 multiplanar formats in the list */
+#define VIVID_MPLANAR_FORMATS 2
+
+const struct vivid_fmt *vivid_get_format(struct vivid_dev *dev, u32 pixelformat)
+{
+ const struct vivid_fmt *fmt;
+ unsigned k;
+
+ for (k = 0; k < ARRAY_SIZE(vivid_formats); k++) {
+ fmt = &vivid_formats[k];
+ if (fmt->fourcc == pixelformat)
+ if (fmt->planes == 1 || dev->multiplanar)
+ return fmt;
+ }
+
+ return NULL;
+}
+
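+/*
+ * Video loopback from output to capture is only possible if both sides use
+ * the same resolution, pixelformat and field setting, and either both are
+ * S-Video with matching 50/60 Hz standards or both are HDMI.
+ */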
+bool vivid_vid_can_loop(struct vivid_dev *dev)
+{
+ if (dev->src_rect.width != dev->sink_rect.width ||
+ dev->src_rect.height != dev->sink_rect.height)
+ return false;
+ if (dev->fmt_cap->fourcc != dev->fmt_out->fourcc)
+ return false;
+ if (dev->field_cap != dev->field_out)
+ return false;
+ if (vivid_is_svid_cap(dev) && vivid_is_svid_out(dev)) {
+ if (!(dev->std_cap & V4L2_STD_525_60) !=
+ !(dev->std_out & V4L2_STD_525_60))
+ return false;
+ return true;
+ }
+ if (vivid_is_hdmi_cap(dev) && vivid_is_hdmi_out(dev))
+ return true;
+ return false;
+}
+
+void vivid_send_source_change(struct vivid_dev *dev, unsigned type)
+{
+ struct v4l2_event ev = {
+ .type = V4L2_EVENT_SOURCE_CHANGE,
+ .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
+ };
+ unsigned i;
+
+ for (i = 0; i < dev->num_inputs; i++) {
+ ev.id = i;
+ if (dev->input_type[i] == type) {
+ if (video_is_registered(&dev->vid_cap_dev) && dev->has_vid_cap)
+ v4l2_event_queue(&dev->vid_cap_dev, &ev);
+ if (video_is_registered(&dev->vbi_cap_dev) && dev->has_vbi_cap)
+ v4l2_event_queue(&dev->vbi_cap_dev, &ev);
+ }
+ }
+}
+
+/*
+ * Conversion function that converts a single-planar format to a
+ * single-plane multiplanar format.
+ */
+void fmt_sp2mp(const struct v4l2_format *sp_fmt, struct v4l2_format *mp_fmt)
+{
+ struct v4l2_pix_format_mplane *mp = &mp_fmt->fmt.pix_mp;
+ struct v4l2_plane_pix_format *ppix = &mp->plane_fmt[0];
+ const struct v4l2_pix_format *pix = &sp_fmt->fmt.pix;
+ bool is_out = sp_fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT;
+
+ memset(mp->reserved, 0, sizeof(mp->reserved));
+ mp_fmt->type = is_out ? V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE :
+			   V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ mp->width = pix->width;
+ mp->height = pix->height;
+ mp->pixelformat = pix->pixelformat;
+ mp->field = pix->field;
+ mp->colorspace = pix->colorspace;
+ mp->num_planes = 1;
+ mp->flags = pix->flags;
+ ppix->sizeimage = pix->sizeimage;
+ ppix->bytesperline = pix->bytesperline;
+ memset(ppix->reserved, 0, sizeof(ppix->reserved));
+}
+
+int fmt_sp2mp_func(struct file *file, void *priv,
+ struct v4l2_format *f, fmtfunc func)
+{
+ struct v4l2_format fmt;
+ struct v4l2_pix_format_mplane *mp = &fmt.fmt.pix_mp;
+ struct v4l2_plane_pix_format *ppix = &mp->plane_fmt[0];
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ int ret;
+
+	/* Converts to an mplane format */
+ fmt_sp2mp(f, &fmt);
+ /* Passes it to the generic mplane format function */
+ ret = func(file, priv, &fmt);
+ /* Copies back the mplane data to the single plane format */
+ pix->width = mp->width;
+ pix->height = mp->height;
+ pix->pixelformat = mp->pixelformat;
+ pix->field = mp->field;
+ pix->colorspace = mp->colorspace;
+ pix->sizeimage = ppix->sizeimage;
+ pix->bytesperline = ppix->bytesperline;
+ pix->flags = mp->flags;
+ return ret;
+}
+
+/* v4l2_rect helper function: copy the width/height values */
+void rect_set_size_to(struct v4l2_rect *r, const struct v4l2_rect *size)
+{
+ r->width = size->width;
+ r->height = size->height;
+}
+
+/* v4l2_rect helper function: width and height of r should be >= min_size */
+void rect_set_min_size(struct v4l2_rect *r, const struct v4l2_rect *min_size)
+{
+ if (r->width < min_size->width)
+ r->width = min_size->width;
+ if (r->height < min_size->height)
+ r->height = min_size->height;
+}
+
+/* v4l2_rect helper function: width and height of r should be <= max_size */
+void rect_set_max_size(struct v4l2_rect *r, const struct v4l2_rect *max_size)
+{
+ if (r->width > max_size->width)
+ r->width = max_size->width;
+ if (r->height > max_size->height)
+ r->height = max_size->height;
+}
+
+/* v4l2_rect helper function: r should be inside boundary */
+void rect_map_inside(struct v4l2_rect *r, const struct v4l2_rect *boundary)
+{
+ rect_set_max_size(r, boundary);
+ if (r->left < boundary->left)
+ r->left = boundary->left;
+ if (r->top < boundary->top)
+ r->top = boundary->top;
+ if (r->left + r->width > boundary->width)
+ r->left = boundary->width - r->width;
+ if (r->top + r->height > boundary->height)
+ r->top = boundary->height - r->height;
+}
+
+/* v4l2_rect helper function: return true if r1 has the same size as r2 */
+bool rect_same_size(const struct v4l2_rect *r1, const struct v4l2_rect *r2)
+{
+ return r1->width == r2->width && r1->height == r2->height;
+}
+
+/* v4l2_rect helper function: calculate the intersection of two rects */
+struct v4l2_rect rect_intersect(const struct v4l2_rect *a, const struct v4l2_rect *b)
+{
+ struct v4l2_rect r;
+ int right, bottom;
+
+ r.top = max(a->top, b->top);
+ r.left = max(a->left, b->left);
+ bottom = min(a->top + a->height, b->top + b->height);
+ right = min(a->left + a->width, b->left + b->width);
+ r.height = max(0, bottom - r.top);
+ r.width = max(0, right - r.left);
+ return r;
+}
+
+/*
+ * v4l2_rect helper function: scale rect r by to->width / from->width and
+ * to->height / from->height.
+ */
+void rect_scale(struct v4l2_rect *r, const struct v4l2_rect *from,
+ const struct v4l2_rect *to)
+{
+ if (from->width == 0 || from->height == 0) {
+ r->left = r->top = r->width = r->height = 0;
+ return;
+ }
+ r->left = (((r->left - from->left) * to->width) / from->width) & ~1;
+ r->width = ((r->width * to->width) / from->width) & ~1;
+ r->top = ((r->top - from->top) * to->height) / from->height;
+ r->height = (r->height * to->height) / from->height;
+}
+
+bool rect_overlap(const struct v4l2_rect *r1, const struct v4l2_rect *r2)
+{
+ /*
+ * IF the left side of r1 is to the right of the right side of r2 OR
+ * the left side of r2 is to the right of the right side of r1 THEN
+ * they do not overlap.
+ */
+ if (r1->left >= r2->left + r2->width ||
+ r2->left >= r1->left + r1->width)
+ return false;
+ /*
+ * IF the top side of r1 is below the bottom of r2 OR
+ * the top side of r2 is below the bottom of r1 THEN
+ * they do not overlap.
+ */
+ if (r1->top >= r2->top + r2->height ||
+ r2->top >= r1->top + r1->height)
+ return false;
+ return true;
+}
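+
+/*
+ * Adjust the given selection rectangle to the driver's constraints: clamp
+ * the size to even values between 2x2 and MAX_WIDTHxMAX_HEIGHT, keep the
+ * rectangle within those bounds and honour the V4L2_SEL_FLAG_GE/LE flags,
+ * returning -ERANGE if the constraints cannot be met.
+ */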
+int vivid_vid_adjust_sel(unsigned flags, struct v4l2_rect *r)
+{
+ unsigned w = r->width;
+ unsigned h = r->height;
+
+ if (!(flags & V4L2_SEL_FLAG_LE)) {
+ w++;
+ h++;
+ if (w < 2)
+ w = 2;
+ if (h < 2)
+ h = 2;
+ }
+ if (!(flags & V4L2_SEL_FLAG_GE)) {
+ if (w > MAX_WIDTH)
+ w = MAX_WIDTH;
+ if (h > MAX_HEIGHT)
+ h = MAX_HEIGHT;
+ }
+ w = w & ~1;
+ h = h & ~1;
+ if (w < 2 || h < 2)
+ return -ERANGE;
+ if (w > MAX_WIDTH || h > MAX_HEIGHT)
+ return -ERANGE;
+ if (r->top < 0)
+ r->top = 0;
+ if (r->left < 0)
+ r->left = 0;
+ r->left &= ~1;
+ r->top &= ~1;
+ if (r->left + w > MAX_WIDTH)
+ r->left = MAX_WIDTH - w;
+ if (r->top + h > MAX_HEIGHT)
+ r->top = MAX_HEIGHT - h;
+ if ((flags & (V4L2_SEL_FLAG_GE | V4L2_SEL_FLAG_LE)) ==
+ (V4L2_SEL_FLAG_GE | V4L2_SEL_FLAG_LE) &&
+ (r->width != w || r->height != h))
+ return -ERANGE;
+ r->width = w;
+ r->height = h;
+ return 0;
+}
+
+int vivid_enum_fmt_vid(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ const struct vivid_fmt *fmt;
+
+ if (f->index >= ARRAY_SIZE(vivid_formats) -
+ (dev->multiplanar ? 0 : VIVID_MPLANAR_FORMATS))
+ return -EINVAL;
+
+ fmt = &vivid_formats[f->index];
+
+ strlcpy(f->description, fmt->name, sizeof(f->description));
+ f->pixelformat = fmt->fourcc;
+ return 0;
+}
+
+int vidioc_enum_fmt_vid_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!dev->multiplanar)
+ return -ENOTTY;
+ return vivid_enum_fmt_vid(file, priv, f);
+}
+
+int vidioc_enum_fmt_vid(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (dev->multiplanar)
+ return -ENOTTY;
+ return vivid_enum_fmt_vid(file, priv, f);
+}
+
+int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *id)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_dir == VFL_DIR_RX) {
+ if (!vivid_is_sdtv_cap(dev))
+ return -ENODATA;
+ *id = dev->std_cap;
+ } else {
+ if (!vivid_is_svid_out(dev))
+ return -ENODATA;
+ *id = dev->std_out;
+ }
+ return 0;
+}
+
+int vidioc_g_dv_timings(struct file *file, void *_fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_dir == VFL_DIR_RX) {
+ if (!vivid_is_hdmi_cap(dev))
+ return -ENODATA;
+ *timings = dev->dv_timings_cap;
+ } else {
+ if (!vivid_is_hdmi_out(dev))
+ return -ENODATA;
+ *timings = dev->dv_timings_out;
+ }
+ return 0;
+}
+
+int vidioc_enum_dv_timings(struct file *file, void *_fh,
+ struct v4l2_enum_dv_timings *timings)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_dir == VFL_DIR_RX) {
+ if (!vivid_is_hdmi_cap(dev))
+ return -ENODATA;
+ } else {
+ if (!vivid_is_hdmi_out(dev))
+ return -ENODATA;
+ }
+ return v4l2_enum_dv_timings_cap(timings, &vivid_dv_timings_cap,
+ NULL, NULL);
+}
+
+int vidioc_dv_timings_cap(struct file *file, void *_fh,
+ struct v4l2_dv_timings_cap *cap)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct video_device *vdev = video_devdata(file);
+
+ if (vdev->vfl_dir == VFL_DIR_RX) {
+ if (!vivid_is_hdmi_cap(dev))
+ return -ENODATA;
+ } else {
+ if (!vivid_is_hdmi_out(dev))
+ return -ENODATA;
+ }
+ *cap = vivid_dv_timings_cap;
+ return 0;
+}
+
+int vidioc_g_edid(struct file *file, void *_fh,
+ struct v4l2_edid *edid)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct video_device *vdev = video_devdata(file);
+
+ memset(edid->reserved, 0, sizeof(edid->reserved));
+ if (vdev->vfl_dir == VFL_DIR_RX) {
+ if (edid->pad >= dev->num_inputs)
+ return -EINVAL;
+ if (dev->input_type[edid->pad] != HDMI)
+ return -EINVAL;
+ } else {
+ if (edid->pad >= dev->num_outputs)
+ return -EINVAL;
+ if (dev->output_type[edid->pad] != HDMI)
+ return -EINVAL;
+ }
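+	/* A request for zero blocks reports the number of available EDID blocks. */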
+ if (edid->start_block == 0 && edid->blocks == 0) {
+ edid->blocks = dev->edid_blocks;
+ return 0;
+ }
+ if (dev->edid_blocks == 0)
+ return -ENODATA;
+ if (edid->start_block >= dev->edid_blocks)
+ return -EINVAL;
+ if (edid->start_block + edid->blocks > dev->edid_blocks)
+ edid->blocks = dev->edid_blocks - edid->start_block;
+ memcpy(edid->edid, dev->edid, edid->blocks * 128);
+ return 0;
+}
diff --git a/drivers/media/platform/vivid/vivid-vid-common.h b/drivers/media/platform/vivid/vivid-vid-common.h
new file mode 100644
index 000000000000..3ec4fa85c9b9
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-vid-common.h
@@ -0,0 +1,61 @@
+/*
+ * vivid-vid-common.h - common video support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _VIVID_VID_COMMON_H_
+#define _VIVID_VID_COMMON_H_
+
+typedef int (*fmtfunc)(struct file *file, void *priv, struct v4l2_format *f);
+
+/*
+ * Conversion function that converts a single-planar format to a
+ * single-plane multiplanar format.
+ */
+void fmt_sp2mp(const struct v4l2_format *sp_fmt, struct v4l2_format *mp_fmt);
+int fmt_sp2mp_func(struct file *file, void *priv,
+ struct v4l2_format *f, fmtfunc func);
+
+extern const struct v4l2_dv_timings_cap vivid_dv_timings_cap;
+
+const struct vivid_fmt *vivid_get_format(struct vivid_dev *dev, u32 pixelformat);
+
+bool vivid_vid_can_loop(struct vivid_dev *dev);
+void vivid_send_source_change(struct vivid_dev *dev, unsigned type);
+
+bool rect_overlap(const struct v4l2_rect *r1, const struct v4l2_rect *r2);
+void rect_set_size_to(struct v4l2_rect *r, const struct v4l2_rect *size);
+void rect_set_min_size(struct v4l2_rect *r, const struct v4l2_rect *min_size);
+void rect_set_max_size(struct v4l2_rect *r, const struct v4l2_rect *max_size);
+void rect_map_inside(struct v4l2_rect *r, const struct v4l2_rect *boundary);
+bool rect_same_size(const struct v4l2_rect *r1, const struct v4l2_rect *r2);
+struct v4l2_rect rect_intersect(const struct v4l2_rect *a, const struct v4l2_rect *b);
+void rect_scale(struct v4l2_rect *r, const struct v4l2_rect *from,
+ const struct v4l2_rect *to);
+int vivid_vid_adjust_sel(unsigned flags, struct v4l2_rect *r);
+
+int vivid_enum_fmt_vid(struct file *file, void *priv, struct v4l2_fmtdesc *f);
+int vidioc_enum_fmt_vid_mplane(struct file *file, void *priv, struct v4l2_fmtdesc *f);
+int vidioc_enum_fmt_vid(struct file *file, void *priv, struct v4l2_fmtdesc *f);
+int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *id);
+int vidioc_g_dv_timings(struct file *file, void *_fh, struct v4l2_dv_timings *timings);
+int vidioc_enum_dv_timings(struct file *file, void *_fh, struct v4l2_enum_dv_timings *timings);
+int vidioc_dv_timings_cap(struct file *file, void *_fh, struct v4l2_dv_timings_cap *cap);
+int vidioc_g_edid(struct file *file, void *_fh, struct v4l2_edid *edid);
+int vidioc_subscribe_event(struct v4l2_fh *fh, const struct v4l2_event_subscription *sub);
+
+#endif
diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c
new file mode 100644
index 000000000000..69c2dbd2d165
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-vid-out.c
@@ -0,0 +1,1146 @@
+/*
+ * vivid-vid-out.c - video output support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/videodev2.h>
+#include <linux/v4l2-dv-timings.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-dv-timings.h>
+
+#include "vivid-core.h"
+#include "vivid-vid-common.h"
+#include "vivid-kthread-out.h"
+#include "vivid-vid-out.h"
+
+static int vid_out_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+ unsigned *nbuffers, unsigned *nplanes,
+ unsigned sizes[], void *alloc_ctxs[])
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+ unsigned planes = dev->fmt_out->planes;
+ unsigned h = dev->fmt_out_rect.height;
+ unsigned size = dev->bytesperline_out[0] * h;
+
+ if (dev->field_out == V4L2_FIELD_ALTERNATE) {
+ /*
+ * You cannot use write() with FIELD_ALTERNATE since the field
+ * information (TOP/BOTTOM) cannot be passed to the kernel.
+ */
+ if (vb2_fileio_is_active(vq))
+ return -EINVAL;
+ }
+
+ if (dev->queue_setup_error) {
+ /*
+ * Error injection: test what happens if queue_setup() returns
+ * an error.
+ */
+ dev->queue_setup_error = false;
+ return -EINVAL;
+ }
+
+ if (fmt) {
+ const struct v4l2_pix_format_mplane *mp;
+ struct v4l2_format mp_fmt;
+
+ if (!V4L2_TYPE_IS_MULTIPLANAR(fmt->type)) {
+ fmt_sp2mp(fmt, &mp_fmt);
+ fmt = &mp_fmt;
+ }
+ mp = &fmt->fmt.pix_mp;
+ /*
+		 * Check that the number of planes in the specified format matches
+		 * the number of planes in the current format. You can't mix them.
+ */
+ if (mp->num_planes != planes)
+ return -EINVAL;
+ sizes[0] = mp->plane_fmt[0].sizeimage;
+ if (planes == 2) {
+ sizes[1] = mp->plane_fmt[1].sizeimage;
+ if (sizes[0] < dev->bytesperline_out[0] * h ||
+ sizes[1] < dev->bytesperline_out[1] * h)
+ return -EINVAL;
+ } else if (sizes[0] < size) {
+ return -EINVAL;
+ }
+ } else {
+ if (planes == 2) {
+ sizes[0] = dev->bytesperline_out[0] * h;
+ sizes[1] = dev->bytesperline_out[1] * h;
+ } else {
+ sizes[0] = size;
+ }
+ }
+
+ if (vq->num_buffers + *nbuffers < 2)
+ *nbuffers = 2 - vq->num_buffers;
+
+ *nplanes = planes;
+
+ /*
+	 * The videobuf2-vmalloc allocator is context-less, so there is no
+	 * need to set the alloc_ctxs array.
+ */
+
+ if (planes == 2)
+ dprintk(dev, 1, "%s, count=%d, sizes=%u, %u\n", __func__,
+ *nbuffers, sizes[0], sizes[1]);
+ else
+ dprintk(dev, 1, "%s, count=%d, size=%u\n", __func__,
+ *nbuffers, sizes[0]);
+ return 0;
+}
+
+static int vid_out_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long size;
+ unsigned planes = dev->fmt_out->planes;
+ unsigned p;
+
+ dprintk(dev, 1, "%s\n", __func__);
+
+ if (WARN_ON(NULL == dev->fmt_out))
+ return -EINVAL;
+
+ if (dev->buf_prepare_error) {
+ /*
+ * Error injection: test what happens if buf_prepare() returns
+ * an error.
+ */
+ dev->buf_prepare_error = false;
+ return -EINVAL;
+ }
+
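+	/* With FIELD_ALTERNATE the application must set TOP or BOTTOM per buffer. */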
+ if (dev->field_out != V4L2_FIELD_ALTERNATE)
+ vb->v4l2_buf.field = dev->field_out;
+ else if (vb->v4l2_buf.field != V4L2_FIELD_TOP &&
+ vb->v4l2_buf.field != V4L2_FIELD_BOTTOM)
+ return -EINVAL;
+
+ for (p = 0; p < planes; p++) {
+ size = dev->bytesperline_out[p] * dev->fmt_out_rect.height +
+ vb->v4l2_planes[p].data_offset;
+
+ if (vb2_get_plane_payload(vb, p) < size) {
+ dprintk(dev, 1, "%s the payload is too small for plane %u (%lu < %lu)\n",
+ __func__, p, vb2_get_plane_payload(vb, p), size);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static void vid_out_buf_queue(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct vivid_buffer *buf = container_of(vb, struct vivid_buffer, vb);
+
+ dprintk(dev, 1, "%s\n", __func__);
+
+ spin_lock(&dev->slock);
+ list_add_tail(&buf->list, &dev->vid_out_active);
+ spin_unlock(&dev->slock);
+}
+
+static int vid_out_start_streaming(struct vb2_queue *vq, unsigned count)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+ int err;
+
+ if (vb2_is_streaming(&dev->vb_vid_cap_q))
+ dev->can_loop_video = vivid_vid_can_loop(dev);
+
+ if (dev->kthread_vid_out)
+ return 0;
+
+ dev->vid_out_seq_count = 0;
+ dprintk(dev, 1, "%s\n", __func__);
+ if (dev->start_streaming_error) {
+ dev->start_streaming_error = false;
+ err = -EINVAL;
+ } else {
+ err = vivid_start_generating_vid_out(dev, &dev->vid_out_streaming);
+ }
+ if (err) {
+ struct vivid_buffer *buf, *tmp;
+
+ list_for_each_entry_safe(buf, tmp, &dev->vid_out_active, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+ }
+ }
+ return err;
+}
+
+/* abort streaming and wait for last buffer */
+static void vid_out_stop_streaming(struct vb2_queue *vq)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+
+ dprintk(dev, 1, "%s\n", __func__);
+ vivid_stop_generating_vid_out(dev, &dev->vid_out_streaming);
+ dev->can_loop_video = false;
+}
+
+const struct vb2_ops vivid_vid_out_qops = {
+ .queue_setup = vid_out_queue_setup,
+ .buf_prepare = vid_out_buf_prepare,
+ .buf_queue = vid_out_buf_queue,
+ .start_streaming = vid_out_start_streaming,
+ .stop_streaming = vid_out_stop_streaming,
+ .wait_prepare = vivid_unlock,
+ .wait_finish = vivid_lock,
+};
+
+/*
+ * Called whenever the format has to be reset which can occur when
+ * changing outputs, standard, timings, etc.
+ */
+void vivid_update_format_out(struct vivid_dev *dev)
+{
+ struct v4l2_bt_timings *bt = &dev->dv_timings_out.bt;
+ unsigned size;
+
+ switch (dev->output_type[dev->output]) {
+ case SVID:
+ default:
+ dev->field_out = dev->tv_field_out;
+ dev->sink_rect.width = 720;
+ if (dev->std_out & V4L2_STD_525_60) {
+ dev->sink_rect.height = 480;
+ dev->timeperframe_vid_out = (struct v4l2_fract) { 1001, 30000 };
+ dev->service_set_out = V4L2_SLICED_CAPTION_525;
+ } else {
+ dev->sink_rect.height = 576;
+ dev->timeperframe_vid_out = (struct v4l2_fract) { 1000, 25000 };
+ dev->service_set_out = V4L2_SLICED_WSS_625 | V4L2_SLICED_TELETEXT_B;
+ }
+ dev->colorspace_out = V4L2_COLORSPACE_SMPTE170M;
+ break;
+ case HDMI:
+ dev->sink_rect.width = bt->width;
+ dev->sink_rect.height = bt->height;
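+		/*
+		 * timeperframe = total frame size / pixelclock, with both
+		 * values divided by 100 so they fit in 32 bits.
+		 */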
+ size = V4L2_DV_BT_FRAME_WIDTH(bt) * V4L2_DV_BT_FRAME_HEIGHT(bt);
+ dev->timeperframe_vid_out = (struct v4l2_fract) {
+ size / 100, (u32)bt->pixelclock / 100
+ };
+ if (bt->interlaced)
+ dev->field_out = V4L2_FIELD_ALTERNATE;
+ else
+ dev->field_out = V4L2_FIELD_NONE;
+ if (!dev->dvi_d_out && (bt->standards & V4L2_DV_BT_STD_CEA861)) {
+ if (bt->width == 720 && bt->height <= 576)
+ dev->colorspace_out = V4L2_COLORSPACE_SMPTE170M;
+ else
+ dev->colorspace_out = V4L2_COLORSPACE_REC709;
+ } else {
+ dev->colorspace_out = V4L2_COLORSPACE_SRGB;
+ }
+ break;
+ }
+ dev->compose_out = dev->sink_rect;
+ dev->compose_bounds_out = dev->sink_rect;
+ dev->crop_out = dev->compose_out;
+ if (V4L2_FIELD_HAS_T_OR_B(dev->field_out))
+ dev->crop_out.height /= 2;
+ dev->fmt_out_rect = dev->crop_out;
+ dev->bytesperline_out[0] = (dev->sink_rect.width * dev->fmt_out->depth) / 8;
+ if (dev->fmt_out->planes == 2)
+ dev->bytesperline_out[1] = (dev->sink_rect.width * dev->fmt_out->depth) / 8;
+}
+
+/* Map the field to something that is valid for the current output */
+static enum v4l2_field vivid_field_out(struct vivid_dev *dev, enum v4l2_field field)
+{
+ if (vivid_is_svid_out(dev)) {
+ switch (field) {
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ case V4L2_FIELD_SEQ_TB:
+ case V4L2_FIELD_SEQ_BT:
+ case V4L2_FIELD_ALTERNATE:
+ return field;
+ case V4L2_FIELD_INTERLACED:
+ default:
+ return V4L2_FIELD_INTERLACED;
+ }
+ }
+ if (vivid_is_hdmi_out(dev))
+ return dev->dv_timings_out.bt.interlaced ? V4L2_FIELD_ALTERNATE :
+ V4L2_FIELD_NONE;
+ return V4L2_FIELD_NONE;
+}
+
+static enum tpg_pixel_aspect vivid_get_pixel_aspect(const struct vivid_dev *dev)
+{
+ if (vivid_is_svid_out(dev))
+ return (dev->std_out & V4L2_STD_525_60) ?
+ TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL;
+
+ if (vivid_is_hdmi_out(dev) &&
+ dev->sink_rect.width == 720 && dev->sink_rect.height <= 576)
+ return dev->sink_rect.height == 480 ?
+ TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL;
+
+ return TPG_PIXEL_ASPECT_SQUARE;
+}
+
+int vivid_g_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
+ unsigned p;
+
+ mp->width = dev->fmt_out_rect.width;
+ mp->height = dev->fmt_out_rect.height;
+ mp->field = dev->field_out;
+ mp->pixelformat = dev->fmt_out->fourcc;
+ mp->colorspace = dev->colorspace_out;
+ mp->num_planes = dev->fmt_out->planes;
+ for (p = 0; p < mp->num_planes; p++) {
+ mp->plane_fmt[p].bytesperline = dev->bytesperline_out[p];
+ mp->plane_fmt[p].sizeimage =
+ mp->plane_fmt[p].bytesperline * mp->height;
+ }
+ return 0;
+}
+
+int vivid_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_bt_timings *bt = &dev->dv_timings_out.bt;
+ struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
+ struct v4l2_plane_pix_format *pfmt = mp->plane_fmt;
+ const struct vivid_fmt *fmt;
+ unsigned bytesperline, max_bpl;
+ unsigned factor = 1;
+ unsigned w, h;
+ unsigned p;
+
+ fmt = vivid_get_format(dev, mp->pixelformat);
+ if (!fmt) {
+ dprintk(dev, 1, "Fourcc format (0x%08x) unknown.\n",
+ mp->pixelformat);
+ mp->pixelformat = V4L2_PIX_FMT_YUYV;
+ fmt = vivid_get_format(dev, mp->pixelformat);
+ }
+
+ mp->field = vivid_field_out(dev, mp->field);
+ if (vivid_is_svid_out(dev)) {
+ w = 720;
+ h = (dev->std_out & V4L2_STD_525_60) ? 480 : 576;
+ } else {
+ w = dev->sink_rect.width;
+ h = dev->sink_rect.height;
+ }
+ if (V4L2_FIELD_HAS_T_OR_B(mp->field))
+ factor = 2;
+ if (!dev->has_scaler_out && !dev->has_crop_out && !dev->has_compose_out) {
+ mp->width = w;
+ mp->height = h / factor;
+ } else {
+ struct v4l2_rect r = { 0, 0, mp->width, mp->height * factor };
+
+ rect_set_min_size(&r, &vivid_min_rect);
+ rect_set_max_size(&r, &vivid_max_rect);
+ if (dev->has_scaler_out && !dev->has_crop_out) {
+ struct v4l2_rect max_r = { 0, 0, MAX_ZOOM * w, MAX_ZOOM * h };
+
+ rect_set_max_size(&r, &max_r);
+ } else if (!dev->has_scaler_out && dev->has_compose_out && !dev->has_crop_out) {
+ rect_set_max_size(&r, &dev->sink_rect);
+ } else if (!dev->has_scaler_out && !dev->has_compose_out) {
+ rect_set_min_size(&r, &dev->sink_rect);
+ }
+ mp->width = r.width;
+ mp->height = r.height / factor;
+ }
+
+ /* This driver supports custom bytesperline values */
+
+ /* Calculate the minimum supported bytesperline value */
+ bytesperline = (mp->width * fmt->depth) >> 3;
+ /* Calculate the maximum supported bytesperline value */
+ max_bpl = (MAX_ZOOM * MAX_WIDTH * fmt->depth) >> 3;
+ mp->num_planes = fmt->planes;
+ for (p = 0; p < mp->num_planes; p++) {
+ if (pfmt[p].bytesperline > max_bpl)
+ pfmt[p].bytesperline = max_bpl;
+ if (pfmt[p].bytesperline < bytesperline)
+ pfmt[p].bytesperline = bytesperline;
+ pfmt[p].sizeimage = pfmt[p].bytesperline * mp->height;
+ memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved));
+ }
+ if (vivid_is_svid_out(dev))
+ mp->colorspace = V4L2_COLORSPACE_SMPTE170M;
+ else if (dev->dvi_d_out || !(bt->standards & V4L2_DV_BT_STD_CEA861))
+ mp->colorspace = V4L2_COLORSPACE_SRGB;
+ else if (bt->width == 720 && bt->height <= 576)
+ mp->colorspace = V4L2_COLORSPACE_SMPTE170M;
+ else if (mp->colorspace != V4L2_COLORSPACE_SMPTE170M &&
+ mp->colorspace != V4L2_COLORSPACE_REC709 &&
+ mp->colorspace != V4L2_COLORSPACE_SRGB)
+ mp->colorspace = V4L2_COLORSPACE_REC709;
+ memset(mp->reserved, 0, sizeof(mp->reserved));
+ return 0;
+}
+
+int vivid_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_rect *crop = &dev->crop_out;
+ struct v4l2_rect *compose = &dev->compose_out;
+ struct vb2_queue *q = &dev->vb_vid_out_q;
+ int ret = vivid_try_fmt_vid_out(file, priv, f);
+ unsigned factor = 1;
+
+ if (ret < 0)
+ return ret;
+
+ if (vb2_is_busy(q) &&
+ (vivid_is_svid_out(dev) ||
+ mp->width != dev->fmt_out_rect.width ||
+ mp->height != dev->fmt_out_rect.height ||
+ mp->pixelformat != dev->fmt_out->fourcc ||
+ mp->field != dev->field_out)) {
+ dprintk(dev, 1, "%s device busy\n", __func__);
+ return -EBUSY;
+ }
+
+ /*
+ * Allow for changing the colorspace on the fly. Useful for testing
+ * purposes, and it is something that HDMI transmitters are able
+ * to do.
+ */
+ if (vb2_is_busy(q))
+ goto set_colorspace;
+
+ dev->fmt_out = vivid_get_format(dev, mp->pixelformat);
+ if (V4L2_FIELD_HAS_T_OR_B(mp->field))
+ factor = 2;
+
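+	/*
+	 * Keep the crop and compose rectangles consistent with the new
+	 * format; how they are adjusted depends on which of the scaler,
+	 * crop and compose capabilities are enabled.
+	 */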
+ if (dev->has_scaler_out || dev->has_crop_out || dev->has_compose_out) {
+ struct v4l2_rect r = { 0, 0, mp->width, mp->height };
+
+ if (dev->has_scaler_out) {
+ if (dev->has_crop_out)
+ rect_map_inside(crop, &r);
+ else
+ *crop = r;
+ if (dev->has_compose_out && !dev->has_crop_out) {
+ struct v4l2_rect min_r = {
+ 0, 0,
+ r.width / MAX_ZOOM,
+ factor * r.height / MAX_ZOOM
+ };
+ struct v4l2_rect max_r = {
+ 0, 0,
+ r.width * MAX_ZOOM,
+ factor * r.height * MAX_ZOOM
+ };
+
+ rect_set_min_size(compose, &min_r);
+ rect_set_max_size(compose, &max_r);
+ rect_map_inside(compose, &dev->compose_bounds_out);
+ } else if (dev->has_compose_out) {
+ struct v4l2_rect min_r = {
+ 0, 0,
+ crop->width / MAX_ZOOM,
+ factor * crop->height / MAX_ZOOM
+ };
+ struct v4l2_rect max_r = {
+ 0, 0,
+ crop->width * MAX_ZOOM,
+ factor * crop->height * MAX_ZOOM
+ };
+
+ rect_set_min_size(compose, &min_r);
+ rect_set_max_size(compose, &max_r);
+ rect_map_inside(compose, &dev->compose_bounds_out);
+ }
+ } else if (dev->has_compose_out && !dev->has_crop_out) {
+ rect_set_size_to(crop, &r);
+ r.height *= factor;
+ rect_set_size_to(compose, &r);
+ rect_map_inside(compose, &dev->compose_bounds_out);
+ } else if (!dev->has_compose_out) {
+ rect_map_inside(crop, &r);
+ r.height /= factor;
+ rect_set_size_to(compose, &r);
+ } else {
+ r.height *= factor;
+ rect_set_max_size(compose, &r);
+ rect_map_inside(compose, &dev->compose_bounds_out);
+ crop->top *= factor;
+ crop->height *= factor;
+ rect_set_size_to(crop, compose);
+ rect_map_inside(crop, &r);
+ crop->top /= factor;
+ crop->height /= factor;
+ }
+ } else {
+ struct v4l2_rect r = { 0, 0, mp->width, mp->height };
+
+ rect_set_size_to(crop, &r);
+ r.height /= factor;
+ rect_set_size_to(compose, &r);
+ }
+
+ dev->fmt_out_rect.width = mp->width;
+ dev->fmt_out_rect.height = mp->height;
+ dev->bytesperline_out[0] = mp->plane_fmt[0].bytesperline;
+ if (mp->num_planes > 1)
+ dev->bytesperline_out[1] = mp->plane_fmt[1].bytesperline;
+ dev->field_out = mp->field;
+ if (vivid_is_svid_out(dev))
+ dev->tv_field_out = mp->field;
+
+set_colorspace:
+ dev->colorspace_out = mp->colorspace;
+ if (dev->loop_video) {
+ vivid_send_source_change(dev, SVID);
+ vivid_send_source_change(dev, HDMI);
+ }
+ return 0;
+}
+
+int vidioc_g_fmt_vid_out_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!dev->multiplanar)
+ return -ENOTTY;
+ return vivid_g_fmt_vid_out(file, priv, f);
+}
+
+int vidioc_try_fmt_vid_out_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!dev->multiplanar)
+ return -ENOTTY;
+ return vivid_try_fmt_vid_out(file, priv, f);
+}
+
+int vidioc_s_fmt_vid_out_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!dev->multiplanar)
+ return -ENOTTY;
+ return vivid_s_fmt_vid_out(file, priv, f);
+}
+
+int vidioc_g_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (dev->multiplanar)
+ return -ENOTTY;
+ return fmt_sp2mp_func(file, priv, f, vivid_g_fmt_vid_out);
+}
+
+int vidioc_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (dev->multiplanar)
+ return -ENOTTY;
+ return fmt_sp2mp_func(file, priv, f, vivid_try_fmt_vid_out);
+}
+
+int vidioc_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (dev->multiplanar)
+ return -ENOTTY;
+ return fmt_sp2mp_func(file, priv, f, vivid_s_fmt_vid_out);
+}
+
+int vivid_vid_out_g_selection(struct file *file, void *priv,
+ struct v4l2_selection *sel)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!dev->has_crop_out && !dev->has_compose_out)
+ return -ENOTTY;
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ sel->r.left = sel->r.top = 0;
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ if (!dev->has_crop_out)
+ return -EINVAL;
+ sel->r = dev->crop_out;
+ break;
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ if (!dev->has_crop_out)
+ return -EINVAL;
+ sel->r = dev->fmt_out_rect;
+ break;
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+		if (!dev->has_crop_out)
+ return -EINVAL;
+ sel->r = vivid_max_rect;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ if (!dev->has_compose_out)
+ return -EINVAL;
+ sel->r = dev->compose_out;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ if (!dev->has_compose_out)
+ return -EINVAL;
+ sel->r = dev->sink_rect;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int vivid_vid_out_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_rect *crop = &dev->crop_out;
+ struct v4l2_rect *compose = &dev->compose_out;
+ unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_out) ? 2 : 1;
+ int ret;
+
+ if (!dev->has_crop_out && !dev->has_compose_out)
+ return -ENOTTY;
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP:
+ if (!dev->has_crop_out)
+ return -EINVAL;
+ ret = vivid_vid_adjust_sel(s->flags, &s->r);
+ if (ret)
+ return ret;
+ rect_set_min_size(&s->r, &vivid_min_rect);
+ rect_set_max_size(&s->r, &dev->fmt_out_rect);
+ if (dev->has_scaler_out) {
+ struct v4l2_rect max_rect = {
+ 0, 0,
+ dev->sink_rect.width * MAX_ZOOM,
+ (dev->sink_rect.height / factor) * MAX_ZOOM
+ };
+
+ rect_set_max_size(&s->r, &max_rect);
+ if (dev->has_compose_out) {
+ struct v4l2_rect min_rect = {
+ 0, 0,
+ s->r.width / MAX_ZOOM,
+ (s->r.height * factor) / MAX_ZOOM
+ };
+ struct v4l2_rect max_rect = {
+ 0, 0,
+ s->r.width * MAX_ZOOM,
+ (s->r.height * factor) * MAX_ZOOM
+ };
+
+ rect_set_min_size(compose, &min_rect);
+ rect_set_max_size(compose, &max_rect);
+ rect_map_inside(compose, &dev->compose_bounds_out);
+ }
+ } else if (dev->has_compose_out) {
+ s->r.top *= factor;
+ s->r.height *= factor;
+ rect_set_max_size(&s->r, &dev->sink_rect);
+ rect_set_size_to(compose, &s->r);
+ rect_map_inside(compose, &dev->compose_bounds_out);
+ s->r.top /= factor;
+ s->r.height /= factor;
+ } else {
+ rect_set_size_to(&s->r, &dev->sink_rect);
+ s->r.height /= factor;
+ }
+ rect_map_inside(&s->r, &dev->fmt_out_rect);
+ *crop = s->r;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ if (!dev->has_compose_out)
+ return -EINVAL;
+ ret = vivid_vid_adjust_sel(s->flags, &s->r);
+ if (ret)
+ return ret;
+ rect_set_min_size(&s->r, &vivid_min_rect);
+ rect_set_max_size(&s->r, &dev->sink_rect);
+ rect_map_inside(&s->r, &dev->compose_bounds_out);
+ s->r.top /= factor;
+ s->r.height /= factor;
+ if (dev->has_scaler_out) {
+ struct v4l2_rect fmt = dev->fmt_out_rect;
+ struct v4l2_rect max_rect = {
+ 0, 0,
+ s->r.width * MAX_ZOOM,
+ s->r.height * MAX_ZOOM
+ };
+ struct v4l2_rect min_rect = {
+ 0, 0,
+ s->r.width / MAX_ZOOM,
+ s->r.height / MAX_ZOOM
+ };
+
+ rect_set_min_size(&fmt, &min_rect);
+ if (!dev->has_crop_out)
+ rect_set_max_size(&fmt, &max_rect);
+ if (!rect_same_size(&dev->fmt_out_rect, &fmt) &&
+ vb2_is_busy(&dev->vb_vid_out_q))
+ return -EBUSY;
+ if (dev->has_crop_out) {
+ rect_set_min_size(crop, &min_rect);
+ rect_set_max_size(crop, &max_rect);
+ }
+ dev->fmt_out_rect = fmt;
+ } else if (dev->has_crop_out) {
+ struct v4l2_rect fmt = dev->fmt_out_rect;
+
+ rect_set_min_size(&fmt, &s->r);
+ if (!rect_same_size(&dev->fmt_out_rect, &fmt) &&
+ vb2_is_busy(&dev->vb_vid_out_q))
+ return -EBUSY;
+ dev->fmt_out_rect = fmt;
+ rect_set_size_to(crop, &s->r);
+ rect_map_inside(crop, &dev->fmt_out_rect);
+ } else {
+ if (!rect_same_size(&s->r, &dev->fmt_out_rect) &&
+ vb2_is_busy(&dev->vb_vid_out_q))
+ return -EBUSY;
+ rect_set_size_to(&dev->fmt_out_rect, &s->r);
+ rect_set_size_to(crop, &s->r);
+ crop->height /= factor;
+ rect_map_inside(crop, &dev->fmt_out_rect);
+ }
+ s->r.top *= factor;
+ s->r.height *= factor;
+ if (dev->bitmap_out && (compose->width != s->r.width ||
+ compose->height != s->r.height)) {
+ kfree(dev->bitmap_out);
+ dev->bitmap_out = NULL;
+ }
+ *compose = s->r;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int vivid_vid_out_cropcap(struct file *file, void *priv,
+ struct v4l2_cropcap *cap)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (cap->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ switch (vivid_get_pixel_aspect(dev)) {
+ case TPG_PIXEL_ASPECT_NTSC:
+ cap->pixelaspect.numerator = 11;
+ cap->pixelaspect.denominator = 10;
+ break;
+ case TPG_PIXEL_ASPECT_PAL:
+ cap->pixelaspect.numerator = 54;
+ cap->pixelaspect.denominator = 59;
+ break;
+ case TPG_PIXEL_ASPECT_SQUARE:
+ cap->pixelaspect.numerator = 1;
+ cap->pixelaspect.denominator = 1;
+ break;
+ }
+ return 0;
+}
+
+int vidioc_g_fmt_vid_out_overlay(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ const struct v4l2_rect *compose = &dev->compose_out;
+ struct v4l2_window *win = &f->fmt.win;
+ unsigned clipcount = win->clipcount;
+
+ if (!dev->has_fb)
+ return -EINVAL;
+ win->w.top = dev->overlay_out_top;
+ win->w.left = dev->overlay_out_left;
+ win->w.width = compose->width;
+ win->w.height = compose->height;
+ win->clipcount = dev->clipcount_out;
+ win->field = V4L2_FIELD_ANY;
+ win->chromakey = dev->chromakey_out;
+ win->global_alpha = dev->global_alpha_out;
+ if (clipcount > dev->clipcount_out)
+ clipcount = dev->clipcount_out;
+ if (dev->bitmap_out == NULL)
+ win->bitmap = NULL;
+ else if (win->bitmap) {
+ if (copy_to_user(win->bitmap, dev->bitmap_out,
+ ((dev->compose_out.width + 7) / 8) * dev->compose_out.height))
+ return -EFAULT;
+ }
+ if (clipcount && win->clips) {
+ if (copy_to_user(win->clips, dev->clips_out,
+ clipcount * sizeof(dev->clips_out[0])))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+int vidioc_try_fmt_vid_out_overlay(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ const struct v4l2_rect *compose = &dev->compose_out;
+ struct v4l2_window *win = &f->fmt.win;
+ int i, j;
+
+ if (!dev->has_fb)
+ return -EINVAL;
+ win->w.left = clamp_t(int, win->w.left,
+ -dev->display_width, dev->display_width);
+ win->w.top = clamp_t(int, win->w.top,
+ -dev->display_height, dev->display_height);
+ win->w.width = compose->width;
+ win->w.height = compose->height;
+ /*
+ * It makes no sense for an OSD to overlay only top or bottom fields,
+ * so always set this to ANY.
+ */
+ win->field = V4L2_FIELD_ANY;
+ if (win->clipcount && !win->clips)
+ win->clipcount = 0;
+ if (win->clipcount > MAX_CLIPS)
+ win->clipcount = MAX_CLIPS;
+ if (win->clipcount) {
+ if (copy_from_user(dev->try_clips_out, win->clips,
+ win->clipcount * sizeof(dev->clips_out[0])))
+ return -EFAULT;
+ for (i = 0; i < win->clipcount; i++) {
+ struct v4l2_rect *r = &dev->try_clips_out[i].c;
+
+ r->top = clamp_t(s32, r->top, 0, dev->display_height - 1);
+ r->height = clamp_t(s32, r->height, 1, dev->display_height - r->top);
+ r->left = clamp_t(u32, r->left, 0, dev->display_width - 1);
+ r->width = clamp_t(u32, r->width, 1, dev->display_width - r->left);
+ }
+ /*
+ * Yeah, so sue me, it's an O(n^2) algorithm. But n is a small
+ * number and it's typically a one-time deal.
+ */
+ for (i = 0; i < win->clipcount - 1; i++) {
+ struct v4l2_rect *r1 = &dev->try_clips_out[i].c;
+
+ for (j = i + 1; j < win->clipcount; j++) {
+ struct v4l2_rect *r2 = &dev->try_clips_out[j].c;
+
+ if (rect_overlap(r1, r2))
+ return -EINVAL;
+ }
+ }
+ if (copy_to_user(win->clips, dev->try_clips_out,
+ win->clipcount * sizeof(dev->clips_out[0])))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+int vidioc_s_fmt_vid_out_overlay(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ const struct v4l2_rect *compose = &dev->compose_out;
+ struct v4l2_window *win = &f->fmt.win;
+ int ret = vidioc_try_fmt_vid_out_overlay(file, priv, f);
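+ /* clipping bitmap: one bit per pixel, each line padded to a whole byte */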
+ unsigned bitmap_size = ((compose->width + 7) / 8) * compose->height;
+ unsigned clips_size = win->clipcount * sizeof(dev->clips_out[0]);
+ void *new_bitmap = NULL;
+
+ if (ret)
+ return ret;
+
+ if (win->bitmap) {
+ new_bitmap = memdup_user(win->bitmap, bitmap_size);
+
+ if (IS_ERR(new_bitmap))
+ return PTR_ERR(new_bitmap);
+ }
+
+ dev->overlay_out_top = win->w.top;
+ dev->overlay_out_left = win->w.left;
+ kfree(dev->bitmap_out);
+ dev->bitmap_out = new_bitmap;
+ dev->clipcount_out = win->clipcount;
+ if (dev->clipcount_out)
+ memcpy(dev->clips_out, dev->try_clips_out, clips_size);
+ dev->chromakey_out = win->chromakey;
+ dev->global_alpha_out = win->global_alpha;
+ return ret;
+}
+
+int vivid_vid_out_overlay(struct file *file, void *fh, unsigned i)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (i && !dev->fmt_out->can_do_overlay) {
+ dprintk(dev, 1, "unsupported output format for output overlay\n");
+ return -EINVAL;
+ }
+
+ dev->overlay_out_enabled = i;
+ return 0;
+}
+
+int vivid_vid_out_g_fbuf(struct file *file, void *fh,
+ struct v4l2_framebuffer *a)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ a->capability = V4L2_FBUF_CAP_EXTERNOVERLAY |
+ V4L2_FBUF_CAP_BITMAP_CLIPPING |
+ V4L2_FBUF_CAP_LIST_CLIPPING |
+ V4L2_FBUF_CAP_CHROMAKEY |
+ V4L2_FBUF_CAP_SRC_CHROMAKEY |
+ V4L2_FBUF_CAP_GLOBAL_ALPHA |
+ V4L2_FBUF_CAP_LOCAL_ALPHA |
+ V4L2_FBUF_CAP_LOCAL_INV_ALPHA;
+ a->flags = V4L2_FBUF_FLAG_OVERLAY | dev->fbuf_out_flags;
+ a->base = (void *)dev->video_pbase;
+ a->fmt.width = dev->display_width;
+ a->fmt.height = dev->display_height;
+ if (dev->fb_defined.green.length == 5)
+ a->fmt.pixelformat = V4L2_PIX_FMT_ARGB555;
+ else
+ a->fmt.pixelformat = V4L2_PIX_FMT_RGB565;
+ a->fmt.bytesperline = dev->display_byte_stride;
+ a->fmt.sizeimage = a->fmt.height * a->fmt.bytesperline;
+ a->fmt.field = V4L2_FIELD_NONE;
+ a->fmt.colorspace = V4L2_COLORSPACE_SRGB;
+ a->fmt.priv = 0;
+ return 0;
+}
+
+int vivid_vid_out_s_fbuf(struct file *file, void *fh,
+ const struct v4l2_framebuffer *a)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ const unsigned chroma_flags = V4L2_FBUF_FLAG_CHROMAKEY |
+ V4L2_FBUF_FLAG_SRC_CHROMAKEY;
+ const unsigned alpha_flags = V4L2_FBUF_FLAG_GLOBAL_ALPHA |
+ V4L2_FBUF_FLAG_LOCAL_ALPHA |
+ V4L2_FBUF_FLAG_LOCAL_INV_ALPHA;
+
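+ /* the two chromakey flags are mutually exclusive */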
+ if ((a->flags & chroma_flags) == chroma_flags)
+ return -EINVAL;
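+ /* at most one alpha blending mode may be selected */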
+ switch (a->flags & alpha_flags) {
+ case 0:
+ case V4L2_FBUF_FLAG_GLOBAL_ALPHA:
+ case V4L2_FBUF_FLAG_LOCAL_ALPHA:
+ case V4L2_FBUF_FLAG_LOCAL_INV_ALPHA:
+ break;
+ default:
+ return -EINVAL;
+ }
+ dev->fbuf_out_flags &= ~(chroma_flags | alpha_flags);
+ dev->fbuf_out_flags |= a->flags & (chroma_flags | alpha_flags);
+ return 0;
+}
+
+static const struct v4l2_audioout vivid_audio_outputs[] = {
+ { 0, "Line-Out 1" },
+ { 1, "Line-Out 2" },
+};
+
+int vidioc_enum_output(struct file *file, void *priv,
+ struct v4l2_output *out)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (out->index >= dev->num_outputs)
+ return -EINVAL;
+
+ out->type = V4L2_OUTPUT_TYPE_ANALOG;
+ switch (dev->output_type[out->index]) {
+ case SVID:
+ snprintf(out->name, sizeof(out->name), "S-Video %u",
+ dev->output_name_counter[out->index]);
+ out->std = V4L2_STD_ALL;
+ if (dev->has_audio_outputs)
+ out->audioset = (1 << ARRAY_SIZE(vivid_audio_outputs)) - 1;
+ out->capabilities = V4L2_OUT_CAP_STD;
+ break;
+ case HDMI:
+ snprintf(out->name, sizeof(out->name), "HDMI %u",
+ dev->output_name_counter[out->index]);
+ out->capabilities = V4L2_OUT_CAP_DV_TIMINGS;
+ break;
+ }
+ return 0;
+}
+
+int vidioc_g_output(struct file *file, void *priv, unsigned *o)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ *o = dev->output;
+ return 0;
+}
+
+int vidioc_s_output(struct file *file, void *priv, unsigned o)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (o >= dev->num_outputs)
+ return -EINVAL;
+
+ if (o == dev->output)
+ return 0;
+
+ if (vb2_is_busy(&dev->vb_vid_out_q) || vb2_is_busy(&dev->vb_vbi_out_q))
+ return -EBUSY;
+
+ dev->output = o;
+ dev->tv_audio_output = 0;
+ if (dev->output_type[o] == SVID)
+ dev->vid_out_dev.tvnorms = V4L2_STD_ALL;
+ else
+ dev->vid_out_dev.tvnorms = 0;
+
+ dev->vbi_out_dev.tvnorms = dev->vid_out_dev.tvnorms;
+ vivid_update_format_out(dev);
+ return 0;
+}
+
+int vidioc_enumaudout(struct file *file, void *fh, struct v4l2_audioout *vout)
+{
+ if (vout->index >= ARRAY_SIZE(vivid_audio_outputs))
+ return -EINVAL;
+ *vout = vivid_audio_outputs[vout->index];
+ return 0;
+}
+
+int vidioc_g_audout(struct file *file, void *fh, struct v4l2_audioout *vout)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!vivid_is_svid_out(dev))
+ return -EINVAL;
+ *vout = vivid_audio_outputs[dev->tv_audio_output];
+ return 0;
+}
+
+int vidioc_s_audout(struct file *file, void *fh, const struct v4l2_audioout *vout)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!vivid_is_svid_out(dev))
+ return -EINVAL;
+ if (vout->index >= ARRAY_SIZE(vivid_audio_outputs))
+ return -EINVAL;
+ dev->tv_audio_output = vout->index;
+ return 0;
+}
+
+int vivid_vid_out_s_std(struct file *file, void *priv, v4l2_std_id id)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!vivid_is_svid_out(dev))
+ return -ENODATA;
+ if (dev->std_out == id)
+ return 0;
+ if (vb2_is_busy(&dev->vb_vid_out_q) || vb2_is_busy(&dev->vb_vbi_out_q))
+ return -EBUSY;
+ dev->std_out = id;
+ vivid_update_format_out(dev);
+ return 0;
+}
+
+int vivid_vid_out_s_dv_timings(struct file *file, void *_fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!vivid_is_hdmi_out(dev))
+ return -ENODATA;
+ if (vb2_is_busy(&dev->vb_vid_out_q))
+ return -EBUSY;
+ if (!v4l2_find_dv_timings_cap(timings, &vivid_dv_timings_cap,
+ 0, NULL, NULL))
+ return -EINVAL;
+ if (v4l2_match_dv_timings(timings, &dev->dv_timings_out, 0))
+ return 0;
+ dev->dv_timings_out = *timings;
+ vivid_update_format_out(dev);
+ return 0;
+}
+
+int vivid_vid_out_g_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *parm)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (parm->type != (dev->multiplanar ?
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE :
+ V4L2_BUF_TYPE_VIDEO_OUTPUT))
+ return -EINVAL;
+
+ parm->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
+ parm->parm.output.timeperframe = dev->timeperframe_vid_out;
+ parm->parm.output.writebuffers = 1;
+ return 0;
+}
+
+int vidioc_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_CTRL:
+ return v4l2_ctrl_subscribe_event(fh, sub);
+ case V4L2_EVENT_SOURCE_CHANGE:
+ if (fh->vdev->vfl_dir == VFL_DIR_RX)
+ return v4l2_src_change_event_subscribe(fh, sub);
+ break;
+ default:
+ break;
+ }
+ return -EINVAL;
+}
diff --git a/drivers/media/platform/vivid/vivid-vid-out.h b/drivers/media/platform/vivid/vivid-vid-out.h
new file mode 100644
index 000000000000..dfa84db184ed
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-vid-out.h
@@ -0,0 +1,56 @@
+/*
+ * vivid-vid-out.h - video output support functions.
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _VIVID_VID_OUT_H_
+#define _VIVID_VID_OUT_H_
+
+extern const struct vb2_ops vivid_vid_out_qops;
+
+void vivid_update_format_out(struct vivid_dev *dev);
+
+int vivid_g_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f);
+int vivid_try_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f);
+int vivid_s_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f);
+int vidioc_g_fmt_vid_out_mplane(struct file *file, void *priv, struct v4l2_format *f);
+int vidioc_try_fmt_vid_out_mplane(struct file *file, void *priv, struct v4l2_format *f);
+int vidioc_s_fmt_vid_out_mplane(struct file *file, void *priv, struct v4l2_format *f);
+int vidioc_g_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f);
+int vidioc_try_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f);
+int vidioc_s_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f);
+int vivid_vid_out_g_selection(struct file *file, void *priv, struct v4l2_selection *sel);
+int vivid_vid_out_s_selection(struct file *file, void *fh, struct v4l2_selection *s);
+int vivid_vid_out_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cap);
+int vidioc_enum_fmt_vid_out_overlay(struct file *file, void *priv, struct v4l2_fmtdesc *f);
+int vidioc_g_fmt_vid_out_overlay(struct file *file, void *priv, struct v4l2_format *f);
+int vidioc_try_fmt_vid_out_overlay(struct file *file, void *priv, struct v4l2_format *f);
+int vidioc_s_fmt_vid_out_overlay(struct file *file, void *priv, struct v4l2_format *f);
+int vivid_vid_out_overlay(struct file *file, void *fh, unsigned i);
+int vivid_vid_out_g_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *a);
+int vivid_vid_out_s_fbuf(struct file *file, void *fh, const struct v4l2_framebuffer *a);
+int vidioc_enum_output(struct file *file, void *priv, struct v4l2_output *out);
+int vidioc_g_output(struct file *file, void *priv, unsigned *i);
+int vidioc_s_output(struct file *file, void *priv, unsigned i);
+int vidioc_enumaudout(struct file *file, void *fh, struct v4l2_audioout *vout);
+int vidioc_g_audout(struct file *file, void *fh, struct v4l2_audioout *vout);
+int vidioc_s_audout(struct file *file, void *fh, const struct v4l2_audioout *vout);
+int vivid_vid_out_s_std(struct file *file, void *priv, v4l2_std_id id);
+int vivid_vid_out_s_dv_timings(struct file *file, void *_fh, struct v4l2_dv_timings *timings);
+int vivid_vid_out_g_parm(struct file *file, void *priv, struct v4l2_streamparm *parm);
+
+#endif
diff --git a/drivers/media/radio/radio-gemtek.c b/drivers/media/radio/radio-gemtek.c
index 235c0e349820..cff1eb144a5c 100644
--- a/drivers/media/radio/radio-gemtek.c
+++ b/drivers/media/radio/radio-gemtek.c
@@ -332,7 +332,7 @@ static int __init gemtek_init(void)
static void __exit gemtek_exit(void)
{
- hardmute = 1; /* Turn off PLL */
+ hardmute = true; /* Turn off PLL */
#ifdef CONFIG_PNP
pnp_unregister_driver(&gemtek_driver.pnp_driver);
#endif
diff --git a/drivers/media/radio/radio-sf16fmi.c b/drivers/media/radio/radio-sf16fmi.c
index d7ce8fe6b5ae..28a89466cddc 100644
--- a/drivers/media/radio/radio-sf16fmi.c
+++ b/drivers/media/radio/radio-sf16fmi.c
@@ -56,7 +56,7 @@ struct fmi
static struct fmi fmi_card;
static struct pnp_dev *dev;
-bool pnp_attached;
+static bool pnp_attached;
#define RSF16_MINFREQ (87U * 16000)
#define RSF16_MAXFREQ (108U * 16000)
@@ -285,7 +285,7 @@ static int __init fmi_init(void)
io = isapnp_fmi_probe();
if (io < 0)
continue;
- pnp_attached = 1;
+ pnp_attached = true;
}
if (!request_region(io, 2, "radio-sf16fmi")) {
if (pnp_attached)
@@ -349,7 +349,7 @@ static int __init fmi_init(void)
mutex_init(&fmi->lock);
/* mute card and set default frequency */
- fmi->mute = 1;
+ fmi->mute = true;
fmi->curfreq = RSF16_MINFREQ;
fmi_set_freq(fmi);
diff --git a/drivers/media/radio/radio-sf16fmr2.c b/drivers/media/radio/radio-sf16fmr2.c
index 93d864eb8306..b8d61cbc18cb 100644
--- a/drivers/media/radio/radio-sf16fmr2.c
+++ b/drivers/media/radio/radio-sf16fmr2.c
@@ -305,7 +305,7 @@ static void fmr2_pnp_remove(struct pnp_dev *pdev)
pnp_set_drvdata(pdev, NULL);
}
-struct isa_driver fmr2_isa_driver = {
+static struct isa_driver fmr2_isa_driver = {
.match = fmr2_isa_match,
.remove = fmr2_isa_remove,
.driver = {
@@ -313,7 +313,7 @@ struct isa_driver fmr2_isa_driver = {
},
};
-struct pnp_driver fmr2_pnp_driver = {
+static struct pnp_driver fmr2_pnp_driver = {
.name = "radio-sf16fmr2",
.id_table = fmr2_pnp_ids,
.probe = fmr2_pnp_probe,
diff --git a/drivers/media/radio/radio-tea5764.c b/drivers/media/radio/radio-tea5764.c
index 925049654c5b..cc3990111411 100644
--- a/drivers/media/radio/radio-tea5764.c
+++ b/drivers/media/radio/radio-tea5764.c
@@ -124,11 +124,11 @@ struct tea5764_regs {
struct tea5764_write_regs {
u8 intreg; /* INTMSK */
- u16 frqset; /* FRQSETMSB & FRQSETLSB */
- u16 tnctrl; /* TNCTRL1 & TNCTRL2 */
- u16 testreg; /* TESTBITS & TESTMODE */
- u16 rdsctrl; /* RDSCTRL1 & RDSCTRL2 */
- u16 rdsbbl; /* PAUSEDET & RDSBBL */
+ __be16 frqset; /* FRQSETMSB & FRQSETLSB */
+ __be16 tnctrl; /* TNCTRL1 & TNCTRL2 */
+ __be16 testreg; /* TESTBITS & TESTMODE */
+ __be16 rdsctrl; /* RDSCTRL1 & RDSCTRL2 */
+ __be16 rdsbbl; /* PAUSEDET & RDSBBL */
} __attribute__ ((packed));
#ifdef CONFIG_RADIO_TEA5764_XTAL
@@ -165,7 +165,7 @@ static int tea5764_i2c_read(struct tea5764_device *radio)
if (i2c_transfer(radio->i2c_client->adapter, msgs, 1) != 1)
return -EIO;
for (i = 0; i < sizeof(struct tea5764_regs) / sizeof(u16); i++)
- p[i] = __be16_to_cpu(p[i]);
+ p[i] = __be16_to_cpu((__force __be16)p[i]);
return 0;
}
diff --git a/drivers/media/radio/si470x/radio-si470x-common.c b/drivers/media/radio/si470x/radio-si470x-common.c
index 0e750aef656a..909c3f92d839 100644
--- a/drivers/media/radio/si470x/radio-si470x-common.c
+++ b/drivers/media/radio/si470x/radio-si470x-common.c
@@ -208,7 +208,7 @@ static int si470x_set_band(struct si470x_device *radio, int band)
static int si470x_set_chan(struct si470x_device *radio, unsigned short chan)
{
int retval;
- bool timed_out = 0;
+ bool timed_out = false;
/* start tuning */
radio->registers[CHANNEL] &= ~CHANNEL_CHAN;
@@ -300,7 +300,7 @@ static int si470x_set_seek(struct si470x_device *radio,
{
int band, retval;
unsigned int freq;
- bool timed_out = 0;
+ bool timed_out = false;
/* set band */
if (seek->rangelow || seek->rangehigh) {
diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
index 494fac061306..57f0bc3b60e7 100644
--- a/drivers/media/radio/si470x/radio-si470x-usb.c
+++ b/drivers/media/radio/si470x/radio-si470x-usb.c
@@ -607,9 +607,7 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
/* Set up interrupt endpoint information. */
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
endpoint = &iface_desc->endpoint[i].desc;
- if (((endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ==
- USB_DIR_IN) && ((endpoint->bmAttributes &
- USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_INT))
+ if (usb_endpoint_is_int_in(endpoint))
radio->int_in_endpoint = endpoint;
}
if (!radio->int_in_endpoint) {
diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
index 4b2e9e8298e1..6f28f6e02ea5 100644
--- a/drivers/media/radio/wl128x/fmdrv_common.c
+++ b/drivers/media/radio/wl128x/fmdrv_common.c
@@ -440,7 +440,7 @@ static int fm_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload,
* command with u16 payload - convert to be16
*/
if (payload != NULL)
- *(u16 *)payload = cpu_to_be16(*(u16 *)payload);
+ *(__be16 *)payload = cpu_to_be16(*(u16 *)payload);
} else if (payload != NULL) {
fm_cb(skb)->fm_op = *((u8 *)payload + 2);
@@ -595,7 +595,7 @@ static void fm_irq_handle_flag_getcmd_resp(struct fmdev *fmdev)
skb_pull(skb, sizeof(struct fm_event_msg_hdr));
memcpy(&fmdev->irq_info.flag, skb->data, fm_evt_hdr->dlen);
- fmdev->irq_info.flag = be16_to_cpu(fmdev->irq_info.flag);
+ fmdev->irq_info.flag = be16_to_cpu((__force __be16)fmdev->irq_info.flag);
fmdbg("irq: flag register(0x%x)\n", fmdev->irq_info.flag);
/* Continue next function in interrupt handler table */
@@ -764,7 +764,7 @@ static void fm_irq_handle_rdsdata_getcmd_resp(struct fmdev *fmdev)
* Extract PI code and store in local cache.
* We need this during AF switch processing.
*/
- cur_picode = be16_to_cpu(rds_fmt.data.groupgeneral.pidata);
+ cur_picode = be16_to_cpu((__force __be16)rds_fmt.data.groupgeneral.pidata);
if (fmdev->rx.stat_info.picode != cur_picode)
fmdev->rx.stat_info.picode = cur_picode;
@@ -989,7 +989,7 @@ static void fm_irq_afjump_rd_freq_resp(struct fmdev *fmdev)
/* Skip header info and copy only response data */
skb_pull(skb, sizeof(struct fm_event_msg_hdr));
memcpy(&read_freq, skb->data, sizeof(read_freq));
- read_freq = be16_to_cpu(read_freq);
+ read_freq = be16_to_cpu((__force __be16)read_freq);
curr_freq = fmdev->rx.region.bot_freq + ((u32)read_freq * FM_FREQ_MUL);
jumped_freq = fmdev->rx.stat_info.af_cache[fmdev->rx.afjump_idx];
@@ -1317,7 +1317,8 @@ static int load_default_rx_configuration(struct fmdev *fmdev)
/* Does FM power on sequence */
static int fm_power_up(struct fmdev *fmdev, u8 mode)
{
- u16 payload, asic_id, asic_ver;
+ u16 payload;
+ __be16 asic_id, asic_ver;
int resp_len, ret;
u8 fw_name[50];
diff --git a/drivers/media/radio/wl128x/fmdrv_rx.c b/drivers/media/radio/wl128x/fmdrv_rx.c
index ebf09a3927de..09632cb26cb6 100644
--- a/drivers/media/radio/wl128x/fmdrv_rx.c
+++ b/drivers/media/radio/wl128x/fmdrv_rx.c
@@ -116,7 +116,7 @@ int fm_rx_set_freq(struct fmdev *fmdev, u32 freq)
if (ret < 0)
goto exit;
- curr_frq = be16_to_cpu(curr_frq);
+ curr_frq = be16_to_cpu((__force __be16)curr_frq);
curr_frq_in_khz = (fmdev->rx.region.bot_freq + ((u32)curr_frq * FM_FREQ_MUL));
if (curr_frq_in_khz != freq) {
@@ -189,7 +189,7 @@ int fm_rx_seek(struct fmdev *fmdev, u32 seek_upward,
if (ret < 0)
return ret;
- curr_frq = be16_to_cpu(curr_frq);
+ curr_frq = be16_to_cpu((__force __be16)curr_frq);
last_frq = (fmdev->rx.region.top_freq - fmdev->rx.region.bot_freq) / FM_FREQ_MUL;
/* Check the offset in order to be aligned to the channel spacing*/
@@ -285,7 +285,7 @@ again:
if (ret < 0)
return ret;
- curr_frq = be16_to_cpu(curr_frq);
+ curr_frq = be16_to_cpu((__force __be16)curr_frq);
fmdev->rx.freq = (fmdev->rx.region.bot_freq +
((u32)curr_frq * FM_FREQ_MUL));
@@ -517,7 +517,7 @@ int fm_rx_set_rfdepend_softmute(struct fmdev *fmdev, u8 rfdepend_mute)
/* Returns the signal strength level of current channel */
int fm_rx_get_rssi_level(struct fmdev *fmdev, u16 *rssilvl)
{
- u16 curr_rssi_lel;
+ __be16 curr_rssi_lel;
u32 resp_len;
int ret;
@@ -608,7 +608,7 @@ int fm_rx_set_stereo_mono(struct fmdev *fmdev, u16 mode)
/* Gets current RX stereo/mono mode */
int fm_rx_get_stereo_mono(struct fmdev *fmdev, u16 *mode)
{
- u16 curr_mode;
+ __be16 curr_mode;
u32 resp_len;
int ret;
diff --git a/drivers/media/radio/wl128x/fmdrv_tx.c b/drivers/media/radio/wl128x/fmdrv_tx.c
index 6ea33e09d63b..839970b0f313 100644
--- a/drivers/media/radio/wl128x/fmdrv_tx.c
+++ b/drivers/media/radio/wl128x/fmdrv_tx.c
@@ -374,7 +374,7 @@ int fm_tx_get_tune_cap_val(struct fmdev *fmdev)
if (ret < 0)
return ret;
- curr_val = be16_to_cpu(curr_val);
+ curr_val = be16_to_cpu((__force __be16)curr_val);
return curr_val;
}
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index 5e626af8e313..8ce08107a69d 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -164,6 +164,16 @@ config IR_ENE
To compile this driver as a module, choose M here: the
module will be called ene_ir.
+config IR_HIX5HD2
+ tristate "Hisilicon hix5hd2 IR remote control"
+ depends on RC_CORE
+ help
+ Say Y here if you want to use the Hisilicon hix5hd2 remote control.
+ To compile this driver as a module, choose M here: the module will be
+ called ir-hix5hd2.
+
+ If you're not sure, say N here.
+
config IR_IMON
tristate "SoundGraph iMON Receiver and Display"
depends on USB_ARCH_HAS_HCD
@@ -333,7 +343,8 @@ config IR_GPIO_CIR
config RC_ST
tristate "ST remote control receiver"
- depends on ARCH_STI && RC_CORE
+ depends on RC_CORE
+ depends on ARCH_STI || COMPILE_TEST
help
Say Y here if you want support for ST remote control driver
which allows both IR and UHF RX.
@@ -344,7 +355,7 @@ config RC_ST
config IR_SUNXI
tristate "SUNXI IR remote control"
depends on RC_CORE
- depends on ARCH_SUNXI
+ depends on ARCH_SUNXI || COMPILE_TEST
---help---
Say Y if you want to use sunXi internal IR Controller
diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile
index 9f9843a1af5f..0989f940e9cf 100644
--- a/drivers/media/rc/Makefile
+++ b/drivers/media/rc/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_IR_XMP_DECODER) += ir-xmp-decoder.o
# stand-alone IR receivers/transmitters
obj-$(CONFIG_RC_ATI_REMOTE) += ati_remote.o
+obj-$(CONFIG_IR_HIX5HD2) += ir-hix5hd2.o
obj-$(CONFIG_IR_IMON) += imon.o
obj-$(CONFIG_IR_ITE_CIR) += ite-cir.o
obj-$(CONFIG_IR_MCEUSB) += mceusb.o
diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
index d16d9b496b92..e80f2c6c5f1a 100644
--- a/drivers/media/rc/ene_ir.c
+++ b/drivers/media/rc/ene_ir.c
@@ -979,7 +979,7 @@ static int ene_transmit(struct rc_dev *rdev, unsigned *buf, unsigned n)
dev->tx_reg = 0;
dev->tx_done = 0;
dev->tx_sample = 0;
- dev->tx_sample_pulse = 0;
+ dev->tx_sample_pulse = false;
dbg("TX: %d samples", dev->tx_len);
diff --git a/drivers/media/rc/fintek-cir.c b/drivers/media/rc/fintek-cir.c
index f0a1f7d31ee6..b5167573240e 100644
--- a/drivers/media/rc/fintek-cir.c
+++ b/drivers/media/rc/fintek-cir.c
@@ -148,7 +148,6 @@ static int fintek_hw_detect(struct fintek_dev *fintek)
u8 vendor_major, vendor_minor;
u8 portsel, ir_class;
u16 vendor, chip;
- int ret = 0;
fintek_config_mode_enable(fintek);
@@ -208,7 +207,7 @@ static int fintek_hw_detect(struct fintek_dev *fintek)
spin_unlock_irqrestore(&fintek->fintek_lock, flags);
- return ret;
+ return 0;
}
static void fintek_cir_ldev_init(struct fintek_dev *fintek)
@@ -644,7 +643,6 @@ static int fintek_suspend(struct pnp_dev *pdev, pm_message_t state)
static int fintek_resume(struct pnp_dev *pdev)
{
- int ret = 0;
struct fintek_dev *fintek = pnp_get_drvdata(pdev);
fit_dbg("%s called", __func__);
@@ -661,7 +659,7 @@ static int fintek_resume(struct pnp_dev *pdev)
fintek_cir_regs_init(fintek);
- return ret;
+ return 0;
}
static void fintek_shutdown(struct pnp_dev *pdev)
diff --git a/drivers/media/rc/img-ir/img-ir-hw.c b/drivers/media/rc/img-ir/img-ir-hw.c
index bfb282a714e8..ec49f94425fc 100644
--- a/drivers/media/rc/img-ir/img-ir-hw.c
+++ b/drivers/media/rc/img-ir/img-ir-hw.c
@@ -25,12 +25,6 @@
/* Decoders lock (only modified to preprocess them) */
static DEFINE_SPINLOCK(img_ir_decoders_lock);
-extern struct img_ir_decoder img_ir_nec;
-extern struct img_ir_decoder img_ir_jvc;
-extern struct img_ir_decoder img_ir_sony;
-extern struct img_ir_decoder img_ir_sharp;
-extern struct img_ir_decoder img_ir_sanyo;
-
static bool img_ir_decoders_preprocessed;
static struct img_ir_decoder *img_ir_decoders[] = {
#ifdef CONFIG_IR_IMG_NEC
diff --git a/drivers/media/rc/img-ir/img-ir-hw.h b/drivers/media/rc/img-ir/img-ir-hw.h
index 3e40ce87b898..8fcc16c32c5b 100644
--- a/drivers/media/rc/img-ir/img-ir-hw.h
+++ b/drivers/media/rc/img-ir/img-ir-hw.h
@@ -168,6 +168,12 @@ struct img_ir_decoder {
struct img_ir_filter *out, u64 protocols);
};
+extern struct img_ir_decoder img_ir_nec;
+extern struct img_ir_decoder img_ir_jvc;
+extern struct img_ir_decoder img_ir_sony;
+extern struct img_ir_decoder img_ir_sharp;
+extern struct img_ir_decoder img_ir_sanyo;
+
/**
* struct img_ir_reg_timings - Reg values for decoder timings at clock rate.
* @ctrl: Processed control register value.
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 7115e68ba697..b8837dd39bb2 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -87,6 +87,18 @@ static ssize_t lcd_write(struct file *file, const char __user *buf,
/*** G L O B A L S ***/
+struct imon_panel_key_table {
+ u64 hw_code;
+ u32 keycode;
+};
+
+struct imon_usb_dev_descr {
+ __u16 flags;
+#define IMON_NO_FLAGS 0
+#define IMON_NEED_20MS_PKT_DELAY 1
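+ /* zero-terminated: the last entry must be { 0, KEY_RESERVED } */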
+ struct imon_panel_key_table key_table[];
+};
+
struct imon_context {
struct device *dev;
/* Newer devices have two interfaces */
@@ -150,6 +162,8 @@ struct imon_context {
struct timer_list ttimer; /* touch screen timer */
int touch_x; /* x coordinate on touchscreen */
int touch_y; /* y coordinate on touchscreen */
+ struct imon_usb_dev_descr *dev_descr; /* device description with key
+ table for front panels */
};
#define TOUCH_TIMEOUT (HZ/30)
@@ -186,8 +200,132 @@ enum {
IMON_KEY_PANEL = 2,
};
-enum {
- IMON_NEED_20MS_PKT_DELAY = 1
+static struct usb_class_driver imon_vfd_class = {
+ .name = DEVICE_NAME,
+ .fops = &vfd_fops,
+ .minor_base = DISPLAY_MINOR_BASE,
+};
+
+static struct usb_class_driver imon_lcd_class = {
+ .name = DEVICE_NAME,
+ .fops = &lcd_fops,
+ .minor_base = DISPLAY_MINOR_BASE,
+};
+
+/* imon receiver front panel/knob key table */
+static const struct imon_usb_dev_descr imon_default_table = {
+ .flags = IMON_NO_FLAGS,
+ .key_table = {
+ { 0x000000000f00ffeell, KEY_MEDIA }, /* Go */
+ { 0x000000001200ffeell, KEY_UP },
+ { 0x000000001300ffeell, KEY_DOWN },
+ { 0x000000001400ffeell, KEY_LEFT },
+ { 0x000000001500ffeell, KEY_RIGHT },
+ { 0x000000001600ffeell, KEY_ENTER },
+ { 0x000000001700ffeell, KEY_ESC },
+ { 0x000000001f00ffeell, KEY_AUDIO },
+ { 0x000000002000ffeell, KEY_VIDEO },
+ { 0x000000002100ffeell, KEY_CAMERA },
+ { 0x000000002700ffeell, KEY_DVD },
+ { 0x000000002300ffeell, KEY_TV },
+ { 0x000000002b00ffeell, KEY_EXIT },
+ { 0x000000002c00ffeell, KEY_SELECT },
+ { 0x000000002d00ffeell, KEY_MENU },
+ { 0x000000000500ffeell, KEY_PREVIOUS },
+ { 0x000000000700ffeell, KEY_REWIND },
+ { 0x000000000400ffeell, KEY_STOP },
+ { 0x000000003c00ffeell, KEY_PLAYPAUSE },
+ { 0x000000000800ffeell, KEY_FASTFORWARD },
+ { 0x000000000600ffeell, KEY_NEXT },
+ { 0x000000010000ffeell, KEY_RIGHT },
+ { 0x000001000000ffeell, KEY_LEFT },
+ { 0x000000003d00ffeell, KEY_SELECT },
+ { 0x000100000000ffeell, KEY_VOLUMEUP },
+ { 0x010000000000ffeell, KEY_VOLUMEDOWN },
+ { 0x000000000100ffeell, KEY_MUTE },
+ /* 0xffdc iMON MCE VFD */
+ { 0x00010000ffffffeell, KEY_VOLUMEUP },
+ { 0x01000000ffffffeell, KEY_VOLUMEDOWN },
+ { 0x00000001ffffffeell, KEY_MUTE },
+ { 0x0000000fffffffeell, KEY_MEDIA },
+ { 0x00000012ffffffeell, KEY_UP },
+ { 0x00000013ffffffeell, KEY_DOWN },
+ { 0x00000014ffffffeell, KEY_LEFT },
+ { 0x00000015ffffffeell, KEY_RIGHT },
+ { 0x00000016ffffffeell, KEY_ENTER },
+ { 0x00000017ffffffeell, KEY_ESC },
+ /* iMON Knob values */
+ { 0x000100ffffffffeell, KEY_VOLUMEUP },
+ { 0x010000ffffffffeell, KEY_VOLUMEDOWN },
+ { 0x000008ffffffffeell, KEY_MUTE },
+ { 0, KEY_RESERVED },
+ }
+};
+
+static const struct imon_usb_dev_descr imon_OEM_VFD = {
+ .flags = IMON_NEED_20MS_PKT_DELAY,
+ .key_table = {
+ { 0x000000000f00ffeell, KEY_MEDIA }, /* Go */
+ { 0x000000001200ffeell, KEY_UP },
+ { 0x000000001300ffeell, KEY_DOWN },
+ { 0x000000001400ffeell, KEY_LEFT },
+ { 0x000000001500ffeell, KEY_RIGHT },
+ { 0x000000001600ffeell, KEY_ENTER },
+ { 0x000000001700ffeell, KEY_ESC },
+ { 0x000000001f00ffeell, KEY_AUDIO },
+ { 0x000000002b00ffeell, KEY_EXIT },
+ { 0x000000002c00ffeell, KEY_SELECT },
+ { 0x000000002d00ffeell, KEY_MENU },
+ { 0x000000000500ffeell, KEY_PREVIOUS },
+ { 0x000000000700ffeell, KEY_REWIND },
+ { 0x000000000400ffeell, KEY_STOP },
+ { 0x000000003c00ffeell, KEY_PLAYPAUSE },
+ { 0x000000000800ffeell, KEY_FASTFORWARD },
+ { 0x000000000600ffeell, KEY_NEXT },
+ { 0x000000010000ffeell, KEY_RIGHT },
+ { 0x000001000000ffeell, KEY_LEFT },
+ { 0x000000003d00ffeell, KEY_SELECT },
+ { 0x000100000000ffeell, KEY_VOLUMEUP },
+ { 0x010000000000ffeell, KEY_VOLUMEDOWN },
+ { 0x000000000100ffeell, KEY_MUTE },
+ /* 0xffdc iMON MCE VFD */
+ { 0x00010000ffffffeell, KEY_VOLUMEUP },
+ { 0x01000000ffffffeell, KEY_VOLUMEDOWN },
+ { 0x00000001ffffffeell, KEY_MUTE },
+ { 0x0000000fffffffeell, KEY_MEDIA },
+ { 0x00000012ffffffeell, KEY_UP },
+ { 0x00000013ffffffeell, KEY_DOWN },
+ { 0x00000014ffffffeell, KEY_LEFT },
+ { 0x00000015ffffffeell, KEY_RIGHT },
+ { 0x00000016ffffffeell, KEY_ENTER },
+ { 0x00000017ffffffeell, KEY_ESC },
+ /* iMON Knob values */
+ { 0x000100ffffffffeell, KEY_VOLUMEUP },
+ { 0x010000ffffffffeell, KEY_VOLUMEDOWN },
+ { 0x000008ffffffffeell, KEY_MUTE },
+ { 0, KEY_RESERVED },
+ }
+};
+
+/* imon receiver front panel/knob key table for DH102 */
+static const struct imon_usb_dev_descr imon_DH102 = {
+ .flags = IMON_NO_FLAGS,
+ .key_table = {
+ { 0x000100000000ffeell, KEY_VOLUMEUP },
+ { 0x010000000000ffeell, KEY_VOLUMEDOWN },
+ { 0x000000010000ffeell, KEY_MUTE },
+ { 0x0000000f0000ffeell, KEY_MEDIA },
+ { 0x000000120000ffeell, KEY_UP },
+ { 0x000000130000ffeell, KEY_DOWN },
+ { 0x000000140000ffeell, KEY_LEFT },
+ { 0x000000150000ffeell, KEY_RIGHT },
+ { 0x000000160000ffeell, KEY_ENTER },
+ { 0x000000170000ffeell, KEY_ESC },
+ { 0x0000002b0000ffeell, KEY_EXIT },
+ { 0x0000002c0000ffeell, KEY_SELECT },
+ { 0x0000002d0000ffeell, KEY_MENU },
+ { 0, KEY_RESERVED }
+ }
};
/*
@@ -208,7 +346,8 @@ static struct usb_device_id imon_usb_id_table[] = {
* SoundGraph iMON PAD (IR & LCD)
* SoundGraph iMON Knob (IR only)
*/
- { USB_DEVICE(0x15c2, 0xffdc) },
+ { USB_DEVICE(0x15c2, 0xffdc),
+ .driver_info = (unsigned long)&imon_default_table },
/*
* Newer devices, all driven by the latest iMON Windows driver, full
@@ -216,43 +355,62 @@ static struct usb_device_id imon_usb_id_table[] = {
* Need user input to fill in details on unknown devices.
*/
/* SoundGraph iMON OEM Touch LCD (IR & 7" VGA LCD) */
- { USB_DEVICE(0x15c2, 0x0034) },
+ { USB_DEVICE(0x15c2, 0x0034),
+ .driver_info = (unsigned long)&imon_DH102 },
/* SoundGraph iMON OEM Touch LCD (IR & 4.3" VGA LCD) */
- { USB_DEVICE(0x15c2, 0x0035) },
+ { USB_DEVICE(0x15c2, 0x0035),
+ .driver_info = (unsigned long)&imon_default_table},
/* SoundGraph iMON OEM VFD (IR & VFD) */
- { USB_DEVICE(0x15c2, 0x0036), .driver_info = IMON_NEED_20MS_PKT_DELAY },
+ { USB_DEVICE(0x15c2, 0x0036),
+ .driver_info = (unsigned long)&imon_OEM_VFD },
/* device specifics unknown */
- { USB_DEVICE(0x15c2, 0x0037) },
+ { USB_DEVICE(0x15c2, 0x0037),
+ .driver_info = (unsigned long)&imon_default_table},
/* SoundGraph iMON OEM LCD (IR & LCD) */
- { USB_DEVICE(0x15c2, 0x0038) },
+ { USB_DEVICE(0x15c2, 0x0038),
+ .driver_info = (unsigned long)&imon_default_table},
/* SoundGraph iMON UltraBay (IR & LCD) */
- { USB_DEVICE(0x15c2, 0x0039) },
+ { USB_DEVICE(0x15c2, 0x0039),
+ .driver_info = (unsigned long)&imon_default_table},
/* device specifics unknown */
- { USB_DEVICE(0x15c2, 0x003a) },
+ { USB_DEVICE(0x15c2, 0x003a),
+ .driver_info = (unsigned long)&imon_default_table},
/* device specifics unknown */
- { USB_DEVICE(0x15c2, 0x003b) },
+ { USB_DEVICE(0x15c2, 0x003b),
+ .driver_info = (unsigned long)&imon_default_table},
/* SoundGraph iMON OEM Inside (IR only) */
- { USB_DEVICE(0x15c2, 0x003c) },
+ { USB_DEVICE(0x15c2, 0x003c),
+ .driver_info = (unsigned long)&imon_default_table},
/* device specifics unknown */
- { USB_DEVICE(0x15c2, 0x003d) },
+ { USB_DEVICE(0x15c2, 0x003d),
+ .driver_info = (unsigned long)&imon_default_table},
/* device specifics unknown */
- { USB_DEVICE(0x15c2, 0x003e) },
+ { USB_DEVICE(0x15c2, 0x003e),
+ .driver_info = (unsigned long)&imon_default_table},
/* device specifics unknown */
- { USB_DEVICE(0x15c2, 0x003f) },
+ { USB_DEVICE(0x15c2, 0x003f),
+ .driver_info = (unsigned long)&imon_default_table},
/* device specifics unknown */
- { USB_DEVICE(0x15c2, 0x0040) },
+ { USB_DEVICE(0x15c2, 0x0040),
+ .driver_info = (unsigned long)&imon_default_table},
/* SoundGraph iMON MINI (IR only) */
- { USB_DEVICE(0x15c2, 0x0041) },
+ { USB_DEVICE(0x15c2, 0x0041),
+ .driver_info = (unsigned long)&imon_default_table},
/* Antec Veris Multimedia Station EZ External (IR only) */
- { USB_DEVICE(0x15c2, 0x0042) },
+ { USB_DEVICE(0x15c2, 0x0042),
+ .driver_info = (unsigned long)&imon_default_table},
/* Antec Veris Multimedia Station Basic Internal (IR only) */
- { USB_DEVICE(0x15c2, 0x0043) },
+ { USB_DEVICE(0x15c2, 0x0043),
+ .driver_info = (unsigned long)&imon_default_table},
/* Antec Veris Multimedia Station Elite (IR & VFD) */
- { USB_DEVICE(0x15c2, 0x0044) },
+ { USB_DEVICE(0x15c2, 0x0044),
+ .driver_info = (unsigned long)&imon_default_table},
/* Antec Veris Multimedia Station Premiere (IR & LCD) */
- { USB_DEVICE(0x15c2, 0x0045) },
+ { USB_DEVICE(0x15c2, 0x0045),
+ .driver_info = (unsigned long)&imon_default_table},
/* device specifics unknown */
- { USB_DEVICE(0x15c2, 0x0046) },
+ { USB_DEVICE(0x15c2, 0x0046),
+ .driver_info = (unsigned long)&imon_default_table},
{}
};
@@ -266,67 +424,6 @@ static struct usb_driver imon_driver = {
.id_table = imon_usb_id_table,
};
-static struct usb_class_driver imon_vfd_class = {
- .name = DEVICE_NAME,
- .fops = &vfd_fops,
- .minor_base = DISPLAY_MINOR_BASE,
-};
-
-static struct usb_class_driver imon_lcd_class = {
- .name = DEVICE_NAME,
- .fops = &lcd_fops,
- .minor_base = DISPLAY_MINOR_BASE,
-};
-
-/* imon receiver front panel/knob key table */
-static const struct {
- u64 hw_code;
- u32 keycode;
-} imon_panel_key_table[] = {
- { 0x000000000f00ffeell, KEY_MEDIA }, /* Go */
- { 0x000000001200ffeell, KEY_UP },
- { 0x000000001300ffeell, KEY_DOWN },
- { 0x000000001400ffeell, KEY_LEFT },
- { 0x000000001500ffeell, KEY_RIGHT },
- { 0x000000001600ffeell, KEY_ENTER },
- { 0x000000001700ffeell, KEY_ESC },
- { 0x000000001f00ffeell, KEY_AUDIO },
- { 0x000000002000ffeell, KEY_VIDEO },
- { 0x000000002100ffeell, KEY_CAMERA },
- { 0x000000002700ffeell, KEY_DVD },
- { 0x000000002300ffeell, KEY_TV },
- { 0x000000002b00ffeell, KEY_EXIT },
- { 0x000000002c00ffeell, KEY_SELECT },
- { 0x000000002d00ffeell, KEY_MENU },
- { 0x000000000500ffeell, KEY_PREVIOUS },
- { 0x000000000700ffeell, KEY_REWIND },
- { 0x000000000400ffeell, KEY_STOP },
- { 0x000000003c00ffeell, KEY_PLAYPAUSE },
- { 0x000000000800ffeell, KEY_FASTFORWARD },
- { 0x000000000600ffeell, KEY_NEXT },
- { 0x000000010000ffeell, KEY_RIGHT },
- { 0x000001000000ffeell, KEY_LEFT },
- { 0x000000003d00ffeell, KEY_SELECT },
- { 0x000100000000ffeell, KEY_VOLUMEUP },
- { 0x010000000000ffeell, KEY_VOLUMEDOWN },
- { 0x000000000100ffeell, KEY_MUTE },
- /* 0xffdc iMON MCE VFD */
- { 0x00010000ffffffeell, KEY_VOLUMEUP },
- { 0x01000000ffffffeell, KEY_VOLUMEDOWN },
- { 0x00000001ffffffeell, KEY_MUTE },
- { 0x0000000fffffffeell, KEY_MEDIA },
- { 0x00000012ffffffeell, KEY_UP },
- { 0x00000013ffffffeell, KEY_DOWN },
- { 0x00000014ffffffeell, KEY_LEFT },
- { 0x00000015ffffffeell, KEY_RIGHT },
- { 0x00000016ffffffeell, KEY_ENTER },
- { 0x00000017ffffffeell, KEY_ESC },
- /* iMON Knob values */
- { 0x000100ffffffffeell, KEY_VOLUMEUP },
- { 0x010000ffffffffeell, KEY_VOLUMEDOWN },
- { 0x000008ffffffffeell, KEY_MUTE },
-};
-
/* to prevent races between open() and disconnect(), probing, etc */
static DEFINE_MUTEX(driver_lock);
@@ -1210,18 +1307,19 @@ static u32 imon_mce_key_lookup(struct imon_context *ictx, u32 scancode)
return keycode;
}
-static u32 imon_panel_key_lookup(u64 code)
+static u32 imon_panel_key_lookup(struct imon_context *ictx, u64 code)
{
int i;
u32 keycode = KEY_RESERVED;
+ struct imon_panel_key_table *key_table = ictx->dev_descr->key_table;
- for (i = 0; i < ARRAY_SIZE(imon_panel_key_table); i++) {
- if (imon_panel_key_table[i].hw_code == (code | 0xffee)) {
- keycode = imon_panel_key_table[i].keycode;
+ for (i = 0; key_table[i].hw_code != 0; i++) {
+ if (key_table[i].hw_code == (code | 0xffee)) {
+ keycode = key_table[i].keycode;
break;
}
}
-
+ ictx->release_code = false;
return keycode;
}
@@ -1340,7 +1438,7 @@ static void imon_pad_to_keys(struct imon_context *ictx, unsigned char *buf)
}
buf[2] = dir & 0xFF;
buf[3] = (dir >> 8) & 0xFF;
- scancode = be32_to_cpu(*((u32 *)buf));
+ scancode = be32_to_cpu(*((__be32 *)buf));
}
} else {
/*
@@ -1404,7 +1502,7 @@ static void imon_pad_to_keys(struct imon_context *ictx, unsigned char *buf)
}
buf[2] = dir & 0xFF;
buf[3] = (dir >> 8) & 0xFF;
- scancode = be32_to_cpu(*((u32 *)buf));
+ scancode = be32_to_cpu(*((__be32 *)buf));
} else {
/*
* Hack alert: instead of using keycodes, we have
@@ -1509,11 +1607,12 @@ static void imon_incoming_packet(struct imon_context *ictx,
/* Figure out what key was pressed */
if (len == 8 && buf[7] == 0xee) {
- scancode = be64_to_cpu(*((u64 *)buf));
+ scancode = be64_to_cpu(*((__be64 *)buf));
ktype = IMON_KEY_PANEL;
- kc = imon_panel_key_lookup(scancode);
+ kc = imon_panel_key_lookup(ictx, scancode);
+ ictx->release_code = false;
} else {
- scancode = be32_to_cpu(*((u32 *)buf));
+ scancode = be32_to_cpu(*((__be32 *)buf));
if (ictx->rc_type == RC_BIT_RC6_MCE) {
ktype = IMON_KEY_IMON;
if (buf[0] == 0x80)
@@ -1908,6 +2007,7 @@ out:
static struct input_dev *imon_init_idev(struct imon_context *ictx)
{
+ struct imon_panel_key_table *key_table = ictx->dev_descr->key_table;
struct input_dev *idev;
int ret, i;
@@ -1933,8 +2033,8 @@ static struct input_dev *imon_init_idev(struct imon_context *ictx)
BIT_MASK(REL_WHEEL);
/* panel and/or knob code support */
- for (i = 0; i < ARRAY_SIZE(imon_panel_key_table); i++) {
- u32 kc = imon_panel_key_table[i].keycode;
+ for (i = 0; key_table[i].hw_code != 0; i++) {
+ u32 kc = key_table[i].keycode;
__set_bit(kc, idev->keybit);
}
@@ -2023,7 +2123,7 @@ static bool imon_find_endpoints(struct imon_context *ictx,
for (i = 0; i < num_endpts && !(ir_ep_found && display_ep_found); ++i) {
ep = &iface_desc->endpoint[i].desc;
ep_dir = ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK;
- ep_type = ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+ ep_type = usb_endpoint_type(ep);
if (!ir_ep_found && ep_dir == USB_DIR_IN &&
ep_type == USB_ENDPOINT_XFER_INT) {
@@ -2135,9 +2235,11 @@ static struct imon_context *imon_init_intf0(struct usb_interface *intf,
ictx->vendor = le16_to_cpu(ictx->usbdev_intf0->descriptor.idVendor);
ictx->product = le16_to_cpu(ictx->usbdev_intf0->descriptor.idProduct);
+ /* save the driver info for later access to the panel/knob key table */
+ ictx->dev_descr = (struct imon_usb_dev_descr *)id->driver_info;
/* default send_packet delay is 5ms but some devices need more */
- ictx->send_packet_delay = id->driver_info & IMON_NEED_20MS_PKT_DELAY ?
- 20 : 5;
+ ictx->send_packet_delay = ictx->dev_descr->flags &
+ IMON_NEED_20MS_PKT_DELAY ? 20 : 5;
ret = -ENODEV;
iface_desc = intf->cur_altsetting;
@@ -2181,6 +2283,7 @@ idev_setup_failed:
usb_kill_urb(ictx->rx_urb_intf0);
urb_submit_failed:
find_endpoint_failed:
+ usb_put_dev(ictx->usbdev_intf0);
mutex_unlock(&ictx->lock);
usb_free_urb(tx_urb);
tx_urb_alloc_failed:
@@ -2253,6 +2356,7 @@ urb_submit_failed:
input_unregister_device(ictx->touch);
touch_setup_failed:
find_endpoint_failed:
+ usb_put_dev(ictx->usbdev_intf1);
mutex_unlock(&ictx->lock);
usb_free_urb(rx_urb);
rx_urb_alloc_failed:
@@ -2366,11 +2470,13 @@ static int imon_probe(struct usb_interface *interface,
usbdev->bus->busnum, usbdev->devnum);
mutex_unlock(&driver_lock);
+ usb_put_dev(usbdev);
return 0;
fail:
mutex_unlock(&driver_lock);
+ usb_put_dev(usbdev);
dev_err(dev, "unable to register, err %d\n", ret);
return ret;
@@ -2410,6 +2516,7 @@ static void imon_disconnect(struct usb_interface *interface)
if (ifnum == 0) {
ictx->dev_present_intf0 = false;
usb_kill_urb(ictx->rx_urb_intf0);
+ usb_put_dev(ictx->usbdev_intf0);
input_unregister_device(ictx->idev);
rc_unregister_device(ictx->rdev);
if (ictx->display_supported) {
@@ -2421,6 +2528,7 @@ static void imon_disconnect(struct usb_interface *interface)
} else {
ictx->dev_present_intf1 = false;
usb_kill_urb(ictx->rx_urb_intf1);
+ usb_put_dev(ictx->usbdev_intf1);
if (ictx->display_type == IMON_DISPLAY_TYPE_VGA) {
input_unregister_device(ictx->touch);
del_timer_sync(&ictx->ttimer);
diff --git a/drivers/media/rc/ir-hix5hd2.c b/drivers/media/rc/ir-hix5hd2.c
new file mode 100644
index 000000000000..08bbd4f508cd
--- /dev/null
+++ b/drivers/media/rc/ir-hix5hd2.c
@@ -0,0 +1,351 @@
+/*
+ * Copyright (c) 2014 Linaro Ltd.
+ * Copyright (c) 2014 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <media/rc-core.h>
+
+/* Allow the driver to compile on all architectures */
+#ifndef writel_relaxed
+# define writel_relaxed writel
+#endif
+#ifndef readl_relaxed
+# define readl_relaxed readl
+#endif
+
+#define IR_ENABLE 0x00
+#define IR_CONFIG 0x04
+#define CNT_LEADS 0x08
+#define CNT_LEADE 0x0c
+#define CNT_SLEADE 0x10
+#define CNT0_B 0x14
+#define CNT1_B 0x18
+#define IR_BUSY 0x1c
+#define IR_DATAH 0x20
+#define IR_DATAL 0x24
+#define IR_INTM 0x28
+#define IR_INTS 0x2c
+#define IR_INTC 0x30
+#define IR_START 0x34
+
+/* interrupt mask */
+#define INTMS_SYMBRCV (BIT(24) | BIT(8))
+#define INTMS_TIMEOUT (BIT(25) | BIT(9))
+#define INTMS_OVERFLOW (BIT(26) | BIT(10))
+#define INT_CLR_OVERFLOW BIT(18)
+#define INT_CLR_TIMEOUT BIT(17)
+#define INT_CLR_RCV BIT(16)
+#define INT_CLR_RCVTIMEOUT (BIT(16) | BIT(17))
+
+#define IR_CLK 0x48
+#define IR_CLK_ENABLE BIT(4)
+#define IR_CLK_RESET BIT(5)
+
+#define IR_CFG_WIDTH_MASK 0xffff
+#define IR_CFG_WIDTH_SHIFT 16
+#define IR_CFG_FORMAT_MASK 0x3
+#define IR_CFG_FORMAT_SHIFT 14
+#define IR_CFG_INT_LEVEL_MASK 0x3f
+#define IR_CFG_INT_LEVEL_SHIFT 8
+/* only support raw mode */
+#define IR_CFG_MODE_RAW BIT(7)
+#define IR_CFG_FREQ_MASK 0x7f
+#define IR_CFG_FREQ_SHIFT 0
+#define IR_CFG_INT_THRESHOLD 1
+/* symbols start from low to high; the symbol stream ends at high */
+#define IR_CFG_SYMBOL_FMT 0
+#define IR_CFG_SYMBOL_MAXWIDTH 0x3e80
+
+#define IR_HIX5HD2_NAME "hix5hd2-ir"
+
+struct hix5hd2_ir_priv {
+ int irq;
+ void volatile __iomem *base;
+ struct device *dev;
+ struct rc_dev *rdev;
+ struct regmap *regmap;
+ struct clk *clock;
+ unsigned long rate;
+};
+
+static void hix5hd2_ir_enable(struct hix5hd2_ir_priv *dev, bool on)
+{
+ u32 val;
+
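+ /* ungate the IR clock and deassert reset to enable; do the opposite to disable */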
+ regmap_read(dev->regmap, IR_CLK, &val);
+ if (on) {
+ val &= ~IR_CLK_RESET;
+ val |= IR_CLK_ENABLE;
+ } else {
+ val &= ~IR_CLK_ENABLE;
+ val |= IR_CLK_RESET;
+ }
+ regmap_write(dev->regmap, IR_CLK, val);
+}
+
+static int hix5hd2_ir_config(struct hix5hd2_ir_priv *priv)
+{
+ int timeout = 10000;
+ u32 val, rate;
+
+ writel_relaxed(0x01, priv->base + IR_ENABLE);
+ while (readl_relaxed(priv->base + IR_BUSY)) {
+ if (timeout--) {
+ udelay(1);
+ } else {
+ dev_err(priv->dev, "IR_BUSY timeout\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ /* Only raw mode is supported for now, with symbols going from low to high */
+ rate = DIV_ROUND_CLOSEST(priv->rate, 1000000);
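+ /*
+ * Pack the maximum symbol width, symbol format, interrupt
+ * threshold, raw-mode flag and clock rate (in MHz) into IR_CONFIG.
+ */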
+ val = IR_CFG_SYMBOL_MAXWIDTH & IR_CFG_WIDTH_MASK << IR_CFG_WIDTH_SHIFT;
+ val |= IR_CFG_SYMBOL_FMT & IR_CFG_FORMAT_MASK << IR_CFG_FORMAT_SHIFT;
+ val |= (IR_CFG_INT_THRESHOLD - 1) & IR_CFG_INT_LEVEL_MASK
+ << IR_CFG_INT_LEVEL_SHIFT;
+ val |= IR_CFG_MODE_RAW;
+ val |= (rate - 1) & IR_CFG_FREQ_MASK << IR_CFG_FREQ_SHIFT;
+ writel_relaxed(val, priv->base + IR_CONFIG);
+
+ writel_relaxed(0x00, priv->base + IR_INTM);
+ /* write arbitrary value to start */
+ writel_relaxed(0x01, priv->base + IR_START);
+ return 0;
+}
+
+static int hix5hd2_ir_open(struct rc_dev *rdev)
+{
+ struct hix5hd2_ir_priv *priv = rdev->priv;
+
+ hix5hd2_ir_enable(priv, true);
+ return hix5hd2_ir_config(priv);
+}
+
+static void hix5hd2_ir_close(struct rc_dev *rdev)
+{
+ struct hix5hd2_ir_priv *priv = rdev->priv;
+
+ hix5hd2_ir_enable(priv, false);
+}
+
+static irqreturn_t hix5hd2_ir_rx_interrupt(int irq, void *data)
+{
+ u32 symb_num, symb_val, symb_time;
+ u32 data_l, data_h;
+ u32 irq_sr, i;
+ struct hix5hd2_ir_priv *priv = data;
+
+ irq_sr = readl_relaxed(priv->base + IR_INTS);
+ if (irq_sr & INTMS_OVERFLOW) {
+ /*
+ * We must read IR_DATAL first and only then clear
+ * IR_INTS: the hardware does not flush the FIFO on
+ * overflow, so the driver has to drain it itself.
+ */
+ ir_raw_event_reset(priv->rdev);
+ symb_num = readl_relaxed(priv->base + IR_DATAH);
+ for (i = 0; i < symb_num; i++)
+ readl_relaxed(priv->base + IR_DATAL);
+
+ writel_relaxed(INT_CLR_OVERFLOW, priv->base + IR_INTC);
+ dev_info(priv->dev, "overflow, level=%d\n",
+ IR_CFG_INT_THRESHOLD);
+ }
+
+ if ((irq_sr & INTMS_SYMBRCV) || (irq_sr & INTMS_TIMEOUT)) {
+ DEFINE_IR_RAW_EVENT(ev);
+
+ symb_num = readl_relaxed(priv->base + IR_DATAH);
+ for (i = 0; i < symb_num; i++) {
+ symb_val = readl_relaxed(priv->base + IR_DATAL);
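+ /* low 16 bits: mark, high 16 bits: space, both in 10us units */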
+ data_l = ((symb_val & 0xffff) * 10);
+ data_h = ((symb_val >> 16) & 0xffff) * 10;
+ symb_time = (data_l + data_h) / 10;
+
+ ev.duration = US_TO_NS(data_l);
+ ev.pulse = true;
+ ir_raw_event_store(priv->rdev, &ev);
+
+ if (symb_time < IR_CFG_SYMBOL_MAXWIDTH) {
+ ev.duration = US_TO_NS(data_h);
+ ev.pulse = false;
+ ir_raw_event_store(priv->rdev, &ev);
+ } else {
+ ir_raw_event_set_idle(priv->rdev, true);
+ }
+ }
+
+ if (irq_sr & INTMS_SYMBRCV)
+ writel_relaxed(INT_CLR_RCV, priv->base + IR_INTC);
+ if (irq_sr & INTMS_TIMEOUT)
+ writel_relaxed(INT_CLR_TIMEOUT, priv->base + IR_INTC);
+ }
+
+ /* Empty software fifo */
+ ir_raw_event_handle(priv->rdev);
+ return IRQ_HANDLED;
+}
+
+static int hix5hd2_ir_probe(struct platform_device *pdev)
+{
+ struct rc_dev *rdev;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct hix5hd2_ir_priv *priv;
+ struct device_node *node = pdev->dev.of_node;
+ const char *map_name;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regmap = syscon_regmap_lookup_by_phandle(node,
+ "hisilicon,power-syscon");
+ if (IS_ERR(priv->regmap)) {
+ dev_err(dev, "no power-reg\n");
+ return -EINVAL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR((__force void *)priv->base))
+ return PTR_ERR((__force void *)priv->base);
+
+ priv->irq = platform_get_irq(pdev, 0);
+ if (priv->irq < 0) {
+ dev_err(dev, "irq can not get\n");
+ return priv->irq;
+ }
+
+ rdev = rc_allocate_device();
+ if (!rdev)
+ return -ENOMEM;
+
+ priv->clock = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clock)) {
+ dev_err(dev, "clock not found\n");
+ ret = PTR_ERR(priv->clock);
+ goto err;
+ }
+ clk_prepare_enable(priv->clock);
+ priv->rate = clk_get_rate(priv->clock);
+
+ rdev->driver_type = RC_DRIVER_IR_RAW;
+ rdev->allowed_protocols = RC_BIT_ALL;
+ rdev->priv = priv;
+ rdev->open = hix5hd2_ir_open;
+ rdev->close = hix5hd2_ir_close;
+ rdev->driver_name = IR_HIX5HD2_NAME;
+ map_name = of_get_property(node, "linux,rc-map-name", NULL);
+ rdev->map_name = map_name ?: RC_MAP_EMPTY;
+ rdev->input_name = IR_HIX5HD2_NAME;
+ rdev->input_phys = IR_HIX5HD2_NAME "/input0";
+ rdev->input_id.bustype = BUS_HOST;
+ rdev->input_id.vendor = 0x0001;
+ rdev->input_id.product = 0x0001;
+ rdev->input_id.version = 0x0100;
+ rdev->rx_resolution = US_TO_NS(10);
+ rdev->timeout = US_TO_NS(IR_CFG_SYMBOL_MAXWIDTH * 10);
+
+ ret = rc_register_device(rdev);
+ if (ret < 0)
+ goto clkerr;
+
+ if (devm_request_irq(dev, priv->irq, hix5hd2_ir_rx_interrupt,
+ IRQF_NO_SUSPEND, pdev->name, priv) < 0) {
+ dev_err(dev, "IRQ %d register failed\n", priv->irq);
+ ret = -EINVAL;
+ goto regerr;
+ }
+
+ priv->rdev = rdev;
+ priv->dev = dev;
+ platform_set_drvdata(pdev, priv);
+
+ return ret;
+
+regerr:
+ rc_unregister_device(rdev);
+ rdev = NULL;
+clkerr:
+ clk_disable_unprepare(priv->clock);
+err:
+ rc_free_device(rdev);
+ dev_err(dev, "Unable to register device (%d)\n", ret);
+ return ret;
+}
+
+static int hix5hd2_ir_remove(struct platform_device *pdev)
+{
+ struct hix5hd2_ir_priv *priv = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(priv->clock);
+ rc_unregister_device(priv->rdev);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int hix5hd2_ir_suspend(struct device *dev)
+{
+ struct hix5hd2_ir_priv *priv = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(priv->clock);
+ hix5hd2_ir_enable(priv, false);
+
+ return 0;
+}
+
+static int hix5hd2_ir_resume(struct device *dev)
+{
+ struct hix5hd2_ir_priv *priv = dev_get_drvdata(dev);
+
+ hix5hd2_ir_enable(priv, true);
+ clk_prepare_enable(priv->clock);
+
+ writel_relaxed(0x01, priv->base + IR_ENABLE);
+ writel_relaxed(0x00, priv->base + IR_INTM);
+ writel_relaxed(0xff, priv->base + IR_INTC);
+ writel_relaxed(0x01, priv->base + IR_START);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(hix5hd2_ir_pm_ops, hix5hd2_ir_suspend,
+ hix5hd2_ir_resume);
+
+static struct of_device_id hix5hd2_ir_table[] = {
+ { .compatible = "hisilicon,hix5hd2-ir", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, hix5hd2_ir_table);
+
+static struct platform_driver hix5hd2_ir_driver = {
+ .driver = {
+ .name = IR_HIX5HD2_NAME,
+ .of_match_table = hix5hd2_ir_table,
+ .pm = &hix5hd2_ir_pm_ops,
+ },
+ .probe = hix5hd2_ir_probe,
+ .remove = hix5hd2_ir_remove,
+};
+
+module_platform_driver(hix5hd2_ir_driver);
+
+MODULE_DESCRIPTION("IR controller driver for hix5hd2 platforms");
+MODULE_AUTHOR("Guoxiong Yan <yanguoxiong@huawei.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:hix5hd2-ir");
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
index 447fe35862dc..56abf9120cc2 100644
--- a/drivers/media/rc/ite-cir.c
+++ b/drivers/media/rc/ite-cir.c
@@ -1666,7 +1666,6 @@ static int ite_suspend(struct pnp_dev *pdev, pm_message_t state)
static int ite_resume(struct pnp_dev *pdev)
{
- int ret = 0;
struct ite_dev *dev = pnp_get_drvdata(pdev);
unsigned long flags;
@@ -1681,7 +1680,7 @@ static int ite_resume(struct pnp_dev *pdev)
spin_unlock_irqrestore(&dev->lock, flags);
- return ret;
+ return 0;
}
static void ite_shutdown(struct pnp_dev *pdev)
diff --git a/drivers/media/rc/keymaps/Makefile b/drivers/media/rc/keymaps/Makefile
index 0b8c54919010..abf60794223d 100644
--- a/drivers/media/rc/keymaps/Makefile
+++ b/drivers/media/rc/keymaps/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-dm1105-nec.o \
rc-dntv-live-dvb-t.o \
rc-dntv-live-dvbt-pro.o \
+ rc-dvbsky.o \
rc-em-terratec.o \
rc-encore-enltv2.o \
rc-encore-enltv.o \
diff --git a/drivers/media/rc/keymaps/rc-dvbsky.c b/drivers/media/rc/keymaps/rc-dvbsky.c
new file mode 100644
index 000000000000..c5115a1165d1
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-dvbsky.c
@@ -0,0 +1,78 @@
+/* rc-dvbsky.c - Keytable for DVBSky Remote Controllers
+ *
+ * keymap imported from ir-keymaps.c
+ *
+ * Copyright (c) 2010-2012 by Nibble Max <nibble.max@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+#include <linux/module.h>
+/*
+ * This table contains the complete RC5 code, instead of just the data part
+ */
+
+static struct rc_map_table rc5_dvbsky[] = {
+ { 0x0000, KEY_0 },
+ { 0x0001, KEY_1 },
+ { 0x0002, KEY_2 },
+ { 0x0003, KEY_3 },
+ { 0x0004, KEY_4 },
+ { 0x0005, KEY_5 },
+ { 0x0006, KEY_6 },
+ { 0x0007, KEY_7 },
+ { 0x0008, KEY_8 },
+ { 0x0009, KEY_9 },
+ { 0x000a, KEY_MUTE },
+ { 0x000d, KEY_OK },
+ { 0x000b, KEY_STOP },
+ { 0x000c, KEY_EXIT },
+ { 0x000e, KEY_CAMERA }, /* Snapshot */
+ { 0x000f, KEY_SUBTITLE }, /* PIP */
+ { 0x0010, KEY_VOLUMEUP },
+ { 0x0011, KEY_VOLUMEDOWN },
+ { 0x0012, KEY_FAVORITES },
+ { 0x0013, KEY_LIST }, /* Info */
+ { 0x0016, KEY_PAUSE },
+ { 0x0017, KEY_PLAY },
+ { 0x001f, KEY_RECORD },
+ { 0x0020, KEY_CHANNELDOWN },
+ { 0x0021, KEY_CHANNELUP },
+ { 0x0025, KEY_POWER2 },
+ { 0x0026, KEY_REWIND },
+ { 0x0027, KEY_FASTFORWARD },
+ { 0x0029, KEY_LAST },
+ { 0x002b, KEY_MENU },
+ { 0x002c, KEY_EPG },
+ { 0x002d, KEY_ZOOM },
+};
+
+static struct rc_map_list rc5_dvbsky_map = {
+ .map = {
+ .scan = rc5_dvbsky,
+ .size = ARRAY_SIZE(rc5_dvbsky),
+ .rc_type = RC_TYPE_RC5,
+ .name = RC_MAP_DVBSKY,
+ }
+};
+
+static int __init init_rc_map_rc5_dvbsky(void)
+{
+ return rc_map_register(&rc5_dvbsky_map);
+}
+
+static void __exit exit_rc_map_rc5_dvbsky(void)
+{
+ rc_map_unregister(&rc5_dvbsky_map);
+}
+
+module_init(init_rc_map_rc5_dvbsky)
+module_exit(exit_rc_map_rc5_dvbsky)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nibble Max <nibble.max@gmail.com>");
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index dc5cbffcd5a2..249d2fbc8f37 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -595,7 +595,7 @@ long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
switch (cmd) {
case LIRC_GET_FEATURES:
- result = put_user(ir->d.features, (__u32 *)arg);
+ result = put_user(ir->d.features, (__u32 __user *)arg);
break;
case LIRC_GET_REC_MODE:
if (!(ir->d.features & LIRC_CAN_REC_MASK)) {
@@ -605,7 +605,7 @@ long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
result = put_user(LIRC_REC2MODE
(ir->d.features & LIRC_CAN_REC_MASK),
- (__u32 *)arg);
+ (__u32 __user *)arg);
break;
case LIRC_SET_REC_MODE:
if (!(ir->d.features & LIRC_CAN_REC_MASK)) {
@@ -613,7 +613,7 @@ long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
}
- result = get_user(mode, (__u32 *)arg);
+ result = get_user(mode, (__u32 __user *)arg);
if (!result && !(LIRC_MODE2REC(mode) & ir->d.features))
result = -EINVAL;
/*
@@ -622,7 +622,7 @@ long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
*/
break;
case LIRC_GET_LENGTH:
- result = put_user(ir->d.code_length, (__u32 *)arg);
+ result = put_user(ir->d.code_length, (__u32 __user *)arg);
break;
case LIRC_GET_MIN_TIMEOUT:
if (!(ir->d.features & LIRC_CAN_SET_REC_TIMEOUT) ||
@@ -631,7 +631,7 @@ long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
}
- result = put_user(ir->d.min_timeout, (__u32 *)arg);
+ result = put_user(ir->d.min_timeout, (__u32 __user *)arg);
break;
case LIRC_GET_MAX_TIMEOUT:
if (!(ir->d.features & LIRC_CAN_SET_REC_TIMEOUT) ||
@@ -640,7 +640,7 @@ long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
}
- result = put_user(ir->d.max_timeout, (__u32 *)arg);
+ result = put_user(ir->d.max_timeout, (__u32 __user *)arg);
break;
default:
result = -EINVAL;
@@ -736,7 +736,7 @@ ssize_t lirc_dev_fop_read(struct file *file,
}
} else {
lirc_buffer_read(ir->buf, buf);
- ret = copy_to_user((void *)buffer+written, buf,
+ ret = copy_to_user((void __user *)buffer+written, buf,
ir->buf->chunk_size);
if (!ret)
written += ir->buf->chunk_size;
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 45b0894288e5..2cdb740cde48 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -397,6 +397,10 @@ static struct usb_device_id mceusb_dev_table[] = {
.driver_info = HAUPPAUGE_CX_HYBRID_TV },
{ USB_DEVICE(VENDOR_HAUPPAUGE, 0xb131),
.driver_info = HAUPPAUGE_CX_HYBRID_TV },
+ { USB_DEVICE(VENDOR_HAUPPAUGE, 0xb138),
+ .driver_info = HAUPPAUGE_CX_HYBRID_TV },
+ { USB_DEVICE(VENDOR_HAUPPAUGE, 0xb139),
+ .driver_info = HAUPPAUGE_CX_HYBRID_TV },
{ USB_DEVICE(VENDOR_PCTV, 0x0259),
.driver_info = HAUPPAUGE_CX_HYBRID_TV },
{ USB_DEVICE(VENDOR_PCTV, 0x025e),
@@ -1198,10 +1202,9 @@ static void mceusb_flash_led(struct mceusb_dev *ir)
mce_async_out(ir, FLASH_LED, sizeof(FLASH_LED));
}
-static struct rc_dev *mceusb_init_rc_dev(struct mceusb_dev *ir,
- struct usb_interface *intf)
+static struct rc_dev *mceusb_init_rc_dev(struct mceusb_dev *ir)
{
- struct usb_device *udev = usb_get_dev(interface_to_usbdev(intf));
+ struct usb_device *udev = ir->usbdev;
struct device *dev = ir->dev;
struct rc_dev *rc;
int ret;
@@ -1341,7 +1344,7 @@ static int mceusb_dev_probe(struct usb_interface *intf,
if (!ir->urb_in)
goto urb_in_alloc_fail;
- ir->usbdev = dev;
+ ir->usbdev = usb_get_dev(dev);
ir->dev = &intf->dev;
ir->len_in = maxp;
ir->flags.microsoft_gen1 = is_microsoft_gen1;
@@ -1362,7 +1365,7 @@ static int mceusb_dev_probe(struct usb_interface *intf,
snprintf(name + strlen(name), sizeof(name) - strlen(name),
" %s", buf);
- ir->rc = mceusb_init_rc_dev(ir, intf);
+ ir->rc = mceusb_init_rc_dev(ir);
if (!ir->rc)
goto rc_dev_fail;
@@ -1408,6 +1411,7 @@ static int mceusb_dev_probe(struct usb_interface *intf,
/* Error-handling path */
rc_dev_fail:
+ usb_put_dev(ir->usbdev);
usb_free_urb(ir->urb_in);
urb_in_alloc_fail:
usb_free_coherent(dev, maxp, ir->buf_in, ir->dma_in);
@@ -1435,6 +1439,7 @@ static void mceusb_dev_disconnect(struct usb_interface *intf)
usb_kill_urb(ir->urb_in);
usb_free_urb(ir->urb_in);
usb_free_coherent(dev, ir->len_in, ir->buf_in, ir->dma_in);
+ usb_put_dev(dev);
kfree(ir);
}
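The mceusb change pairs the usb_get_dev() taken in probe with a usb_put_dev() on both the probe error path and in disconnect, keeping the struct usb_device reference count balanced for the lifetime of the driver data. A minimal sketch of that pairing, assuming a hypothetical foo_dev private structure:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/usb.h>

/* Hypothetical private data, mirroring the usbdev field used by mceusb. */
struct foo_dev {
        struct usb_device *usbdev;
};

static int foo_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
        struct foo_dev *foo;

        foo = kzalloc(sizeof(*foo), GFP_KERNEL);
        if (!foo)
                return -ENOMEM;

        /* take a reference on the underlying USB device ... */
        foo->usbdev = usb_get_dev(interface_to_usbdev(intf));
        usb_set_intfdata(intf, foo);
        return 0;
}

static void foo_disconnect(struct usb_interface *intf)
{
        struct foo_dev *foo = usb_get_intfdata(intf);

        /* ... and drop it exactly once when the interface goes away */
        usb_put_dev(foo->usbdev);
        kfree(foo);
}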
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index 7f4fd859bba5..9c2c8635ff33 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -229,7 +229,6 @@ static int nvt_hw_detect(struct nvt_dev *nvt)
{
unsigned long flags;
u8 chip_major, chip_minor;
- int ret = 0;
char chip_id[12];
bool chip_unknown = false;
@@ -285,7 +284,7 @@ static int nvt_hw_detect(struct nvt_dev *nvt)
nvt->chip_minor = chip_minor;
spin_unlock_irqrestore(&nvt->nvt_lock, flags);
- return ret;
+ return 0;
}
static void nvt_cir_ldev_init(struct nvt_dev *nvt)
@@ -1177,7 +1176,6 @@ static int nvt_suspend(struct pnp_dev *pdev, pm_message_t state)
static int nvt_resume(struct pnp_dev *pdev)
{
- int ret = 0;
struct nvt_dev *nvt = pnp_get_drvdata(pdev);
nvt_dbg("%s called", __func__);
@@ -1195,7 +1193,7 @@ static int nvt_resume(struct pnp_dev *pdev)
nvt_cir_regs_init(nvt);
nvt_cir_wake_regs_init(nvt);
- return ret;
+ return 0;
}
static void nvt_shutdown(struct pnp_dev *pdev)
diff --git a/drivers/media/rc/st_rc.c b/drivers/media/rc/st_rc.c
index 5c151351afa4..0e758ae2e529 100644
--- a/drivers/media/rc/st_rc.c
+++ b/drivers/media/rc/st_rc.c
@@ -22,8 +22,8 @@ struct st_rc_device {
int irq;
int irq_wake;
struct clk *sys_clock;
- void *base; /* Register base address */
- void *rx_base;/* RX Register base address */
+ volatile void __iomem *base; /* Register base address */
+ volatile void __iomem *rx_base;/* RX Register base address */
struct rc_dev *rdev;
bool overclocking;
int sample_mult;
@@ -267,8 +267,8 @@ static int st_rc_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
rc_dev->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(rc_dev->base)) {
- ret = PTR_ERR(rc_dev->base);
+ if (IS_ERR((__force void *)rc_dev->base)) {
+ ret = PTR_ERR((__force void *)rc_dev->base);
goto err;
}
@@ -278,7 +278,7 @@ static int st_rc_probe(struct platform_device *pdev)
rc_dev->rx_base = rc_dev->base;
- rc_dev->rstc = reset_control_get(dev, NULL);
+ rc_dev->rstc = reset_control_get_optional(dev, NULL);
if (IS_ERR(rc_dev->rstc))
rc_dev->rstc = NULL;
@@ -376,9 +376,10 @@ static int st_rc_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(st_rc_pm_ops, st_rc_suspend, st_rc_resume);
#endif
+static SIMPLE_DEV_PM_OPS(st_rc_pm_ops, st_rc_suspend, st_rc_resume);
+
#ifdef CONFIG_OF
static struct of_device_id st_rc_match[] = {
{ .compatible = "st,comms-irb", },
@@ -391,11 +392,8 @@ MODULE_DEVICE_TABLE(of, st_rc_match);
static struct platform_driver st_rc_driver = {
.driver = {
.name = IR_ST_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(st_rc_match),
-#ifdef CONFIG_PM
.pm = &st_rc_pm_ops,
-#endif
},
.probe = st_rc_probe,
.remove = st_rc_remove,
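The __force casts above are only needed because st_rc declares its register pointers as volatile void __iomem *; with the usual plain void __iomem * declaration, IS_ERR()/PTR_ERR() accept the pointer directly, since their parameters are already annotated __force in <linux/err.h>. A minimal sketch of that common idiom, assuming a hypothetical platform driver probe:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
        struct resource *res;
        void __iomem *base;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))               /* no __force cast needed here */
                return PTR_ERR(base);

        writel(0x1, base);              /* MMIO accessors take the __iomem pointer */
        return 0;
}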
diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c
index 80c4feeb01ea..bf4a44272f0e 100644
--- a/drivers/media/rc/streamzap.c
+++ b/drivers/media/rc/streamzap.c
@@ -362,16 +362,14 @@ static int streamzap_probe(struct usb_interface *intf,
}
sz->endpoint = &(iface_host->endpoint[0].desc);
- if ((sz->endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
- != USB_DIR_IN) {
+ if (!usb_endpoint_dir_in(sz->endpoint)) {
dev_err(&intf->dev, "%s: endpoint doesn't match input device "
"02%02x\n", __func__, sz->endpoint->bEndpointAddress);
retval = -ENODEV;
goto free_sz;
}
- if ((sz->endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
- != USB_ENDPOINT_XFER_INT) {
+ if (!usb_endpoint_xfer_int(sz->endpoint)) {
dev_err(&intf->dev, "%s: endpoint attributes don't match xfer "
"02%02x\n", __func__, sz->endpoint->bmAttributes);
retval = -ENODEV;
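usb_endpoint_dir_in() and usb_endpoint_xfer_int() are thin helpers from <linux/usb.h> that perform exactly the bEndpointAddress/bmAttributes mask tests the removed lines open-coded. A minimal sketch of validating an interrupt-IN endpoint with them, assuming a hypothetical probe helper:

#include <linux/usb.h>

/* Return 0 when the interface's first endpoint is interrupt-IN. */
static int foo_check_endpoint(struct usb_interface *intf)
{
        struct usb_host_interface *alt = intf->cur_altsetting;
        struct usb_endpoint_descriptor *ep;

        if (alt->desc.bNumEndpoints < 1)
                return -ENODEV;

        ep = &alt->endpoint[0].desc;

        /* same test as (bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN */
        if (!usb_endpoint_dir_in(ep))
                return -ENODEV;

        /* same test as (bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_INT */
        if (!usb_endpoint_xfer_int(ep))
                return -ENODEV;

        return 0;
}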
diff --git a/drivers/media/tuners/Kconfig b/drivers/media/tuners/Kconfig
index d79fd1ce5a18..f039dc2a21cf 100644
--- a/drivers/media/tuners/Kconfig
+++ b/drivers/media/tuners/Kconfig
@@ -204,6 +204,7 @@ config MEDIA_TUNER_FC0013
config MEDIA_TUNER_TDA18212
tristate "NXP TDA18212 silicon tuner"
depends on MEDIA_SUPPORT && I2C
+ select REGMAP_I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
NXP TDA18212 silicon tuner driver.
@@ -226,6 +227,7 @@ config MEDIA_TUNER_FC2580
config MEDIA_TUNER_M88TS2022
tristate "Montage M88TS2022 silicon tuner"
depends on MEDIA_SUPPORT && I2C
+ select REGMAP_I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
Montage M88TS2022 silicon tuner driver.
@@ -247,6 +249,7 @@ config MEDIA_TUNER_SI2157
config MEDIA_TUNER_IT913X
tristate "ITE Tech IT913x silicon tuner"
depends on MEDIA_SUPPORT && I2C
+ select REGMAP_I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
ITE Tech IT913x silicon tuner driver.
@@ -257,4 +260,18 @@ config MEDIA_TUNER_R820T
default m if !MEDIA_SUBDRV_AUTOSELECT
help
Rafael Micro R820T silicon tuner driver.
+
+config MEDIA_TUNER_MXL301RF
+ tristate "MaxLinear MxL301RF tuner"
+ depends on MEDIA_SUPPORT && I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ MaxLinear MxL301RF OFDM tuner driver.
+
+config MEDIA_TUNER_QM1D1C0042
+ tristate "Sharp QM1D1C0042 tuner"
+ depends on MEDIA_SUPPORT && I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ Sharp QM1D1C0042 trellis coded 8PSK tuner driver.
endmenu
diff --git a/drivers/media/tuners/Makefile b/drivers/media/tuners/Makefile
index 5591699755ba..49fcf8033848 100644
--- a/drivers/media/tuners/Makefile
+++ b/drivers/media/tuners/Makefile
@@ -37,8 +37,10 @@ obj-$(CONFIG_MEDIA_TUNER_M88TS2022) += m88ts2022.o
obj-$(CONFIG_MEDIA_TUNER_FC0011) += fc0011.o
obj-$(CONFIG_MEDIA_TUNER_FC0012) += fc0012.o
obj-$(CONFIG_MEDIA_TUNER_FC0013) += fc0013.o
-obj-$(CONFIG_MEDIA_TUNER_IT913X) += tuner_it913x.o
+obj-$(CONFIG_MEDIA_TUNER_IT913X) += it913x.o
obj-$(CONFIG_MEDIA_TUNER_R820T) += r820t.o
+obj-$(CONFIG_MEDIA_TUNER_MXL301RF) += mxl301rf.o
+obj-$(CONFIG_MEDIA_TUNER_QM1D1C0042) += qm1d1c0042.o
ccflags-y += -I$(srctree)/drivers/media/dvb-core
ccflags-y += -I$(srctree)/drivers/media/dvb-frontends
diff --git a/drivers/media/tuners/e4000.c b/drivers/media/tuners/e4000.c
index 90d93348f20c..510239f80c0d 100644
--- a/drivers/media/tuners/e4000.c
+++ b/drivers/media/tuners/e4000.c
@@ -26,7 +26,7 @@ static int e4000_init(struct dvb_frontend *fe)
struct e4000 *s = fe->tuner_priv;
int ret;
- dev_dbg(&s->client->dev, "%s:\n", __func__);
+ dev_dbg(&s->client->dev, "\n");
/* dummy I2C to ensure I2C wakes up */
ret = regmap_write(s->regmap, 0x02, 0x40);
@@ -87,7 +87,7 @@ static int e4000_init(struct dvb_frontend *fe)
s->active = true;
err:
if (ret)
- dev_dbg(&s->client->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&s->client->dev, "failed=%d\n", ret);
return ret;
}
@@ -97,7 +97,7 @@ static int e4000_sleep(struct dvb_frontend *fe)
struct e4000 *s = fe->tuner_priv;
int ret;
- dev_dbg(&s->client->dev, "%s:\n", __func__);
+ dev_dbg(&s->client->dev, "\n");
s->active = false;
@@ -106,7 +106,7 @@ static int e4000_sleep(struct dvb_frontend *fe)
goto err;
err:
if (ret)
- dev_dbg(&s->client->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&s->client->dev, "failed=%d\n", ret);
return ret;
}
@@ -121,9 +121,8 @@ static int e4000_set_params(struct dvb_frontend *fe)
u8 buf[5], i_data[4], q_data[4];
dev_dbg(&s->client->dev,
- "%s: delivery_system=%d frequency=%u bandwidth_hz=%u\n",
- __func__, c->delivery_system, c->frequency,
- c->bandwidth_hz);
+ "delivery_system=%d frequency=%u bandwidth_hz=%u\n",
+ c->delivery_system, c->frequency, c->bandwidth_hz);
/* gain control manual */
ret = regmap_write(s->regmap, 0x1a, 0x00);
@@ -150,9 +149,8 @@ static int e4000_set_params(struct dvb_frontend *fe)
buf[3] = 0x00;
buf[4] = e4000_pll_lut[i].div;
- dev_dbg(&s->client->dev,
- "%s: f_vco=%llu pll div=%d sigma_delta=%04x\n",
- __func__, f_vco, buf[0], sigma_delta);
+ dev_dbg(&s->client->dev, "f_vco=%llu pll div=%d sigma_delta=%04x\n",
+ f_vco, buf[0], sigma_delta);
ret = regmap_bulk_write(s->regmap, 0x09, buf, 5);
if (ret)
@@ -253,7 +251,7 @@ static int e4000_set_params(struct dvb_frontend *fe)
goto err;
err:
if (ret)
- dev_dbg(&s->client->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&s->client->dev, "failed=%d\n", ret);
return ret;
}
@@ -262,7 +260,7 @@ static int e4000_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
{
struct e4000 *s = fe->tuner_priv;
- dev_dbg(&s->client->dev, "%s:\n", __func__);
+ dev_dbg(&s->client->dev, "\n");
*frequency = 0; /* Zero-IF */
@@ -276,10 +274,9 @@ static int e4000_set_lna_gain(struct dvb_frontend *fe)
int ret;
u8 u8tmp;
- dev_dbg(&s->client->dev, "%s: lna auto=%d->%d val=%d->%d\n",
- __func__, s->lna_gain_auto->cur.val,
- s->lna_gain_auto->val, s->lna_gain->cur.val,
- s->lna_gain->val);
+ dev_dbg(&s->client->dev, "lna auto=%d->%d val=%d->%d\n",
+ s->lna_gain_auto->cur.val, s->lna_gain_auto->val,
+ s->lna_gain->cur.val, s->lna_gain->val);
if (s->lna_gain_auto->val && s->if_gain_auto->cur.val)
u8tmp = 0x17;
@@ -301,7 +298,7 @@ static int e4000_set_lna_gain(struct dvb_frontend *fe)
}
err:
if (ret)
- dev_dbg(&s->client->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&s->client->dev, "failed=%d\n", ret);
return ret;
}
@@ -312,10 +309,9 @@ static int e4000_set_mixer_gain(struct dvb_frontend *fe)
int ret;
u8 u8tmp;
- dev_dbg(&s->client->dev, "%s: mixer auto=%d->%d val=%d->%d\n",
- __func__, s->mixer_gain_auto->cur.val,
- s->mixer_gain_auto->val, s->mixer_gain->cur.val,
- s->mixer_gain->val);
+ dev_dbg(&s->client->dev, "mixer auto=%d->%d val=%d->%d\n",
+ s->mixer_gain_auto->cur.val, s->mixer_gain_auto->val,
+ s->mixer_gain->cur.val, s->mixer_gain->val);
if (s->mixer_gain_auto->val)
u8tmp = 0x15;
@@ -333,7 +329,7 @@ static int e4000_set_mixer_gain(struct dvb_frontend *fe)
}
err:
if (ret)
- dev_dbg(&s->client->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&s->client->dev, "failed=%d\n", ret);
return ret;
}
@@ -345,10 +341,9 @@ static int e4000_set_if_gain(struct dvb_frontend *fe)
u8 buf[2];
u8 u8tmp;
- dev_dbg(&s->client->dev, "%s: if auto=%d->%d val=%d->%d\n",
- __func__, s->if_gain_auto->cur.val,
- s->if_gain_auto->val, s->if_gain->cur.val,
- s->if_gain->val);
+ dev_dbg(&s->client->dev, "if auto=%d->%d val=%d->%d\n",
+ s->if_gain_auto->cur.val, s->if_gain_auto->val,
+ s->if_gain->cur.val, s->if_gain->val);
if (s->if_gain_auto->val && s->lna_gain_auto->cur.val)
u8tmp = 0x17;
@@ -372,7 +367,7 @@ static int e4000_set_if_gain(struct dvb_frontend *fe)
}
err:
if (ret)
- dev_dbg(&s->client->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&s->client->dev, "failed=%d\n", ret);
return ret;
}
@@ -390,7 +385,7 @@ static int e4000_pll_lock(struct dvb_frontend *fe)
s->pll_lock->val = (utmp & 0x01);
err:
if (ret)
- dev_dbg(&s->client->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&s->client->dev, "failed=%d\n", ret);
return ret;
}
@@ -400,7 +395,7 @@ static int e4000_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
struct e4000 *s = container_of(ctrl->handler, struct e4000, hdl);
int ret;
- if (s->active == false)
+ if (!s->active)
return 0;
switch (ctrl->id) {
@@ -408,8 +403,8 @@ static int e4000_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
ret = e4000_pll_lock(s->fe);
break;
default:
- dev_dbg(&s->client->dev, "%s: unknown ctrl: id=%d name=%s\n",
- __func__, ctrl->id, ctrl->name);
+ dev_dbg(&s->client->dev, "unknown ctrl: id=%d name=%s\n",
+ ctrl->id, ctrl->name);
ret = -EINVAL;
}
@@ -423,7 +418,7 @@ static int e4000_s_ctrl(struct v4l2_ctrl *ctrl)
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret;
- if (s->active == false)
+ if (!s->active)
return 0;
switch (ctrl->id) {
@@ -445,8 +440,8 @@ static int e4000_s_ctrl(struct v4l2_ctrl *ctrl)
ret = e4000_set_if_gain(s->fe);
break;
default:
- dev_dbg(&s->client->dev, "%s: unknown ctrl: id=%d name=%s\n",
- __func__, ctrl->id, ctrl->name);
+ dev_dbg(&s->client->dev, "unknown ctrl: id=%d name=%s\n",
+ ctrl->id, ctrl->name);
ret = -EINVAL;
}
@@ -494,7 +489,7 @@ static int e4000_probe(struct i2c_client *client,
s = kzalloc(sizeof(struct e4000), GFP_KERNEL);
if (!s) {
ret = -ENOMEM;
- dev_err(&client->dev, "%s: kzalloc() failed\n", KBUILD_MODNAME);
+ dev_err(&client->dev, "kzalloc() failed\n");
goto err;
}
@@ -512,7 +507,7 @@ static int e4000_probe(struct i2c_client *client,
if (ret)
goto err;
- dev_dbg(&s->client->dev, "%s: chip id=%02x\n", __func__, utmp);
+ dev_dbg(&s->client->dev, "chip id=%02x\n", utmp);
if (utmp != 0x40) {
ret = -ENODEV;
@@ -559,9 +554,7 @@ static int e4000_probe(struct i2c_client *client,
s->sd.ctrl_handler = &s->hdl;
#endif
- dev_info(&s->client->dev,
- "%s: Elonics E4000 successfully identified\n",
- KBUILD_MODNAME);
+ dev_info(&s->client->dev, "Elonics E4000 successfully identified\n");
fe->tuner_priv = s;
memcpy(&fe->ops.tuner_ops, &e4000_tuner_ops,
@@ -573,7 +566,7 @@ static int e4000_probe(struct i2c_client *client,
return 0;
err:
if (ret) {
- dev_dbg(&client->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
kfree(s);
}
@@ -586,7 +579,7 @@ static int e4000_remove(struct i2c_client *client)
struct e4000 *s = container_of(sd, struct e4000, sd);
struct dvb_frontend *fe = s->fe;
- dev_dbg(&client->dev, "%s:\n", __func__);
+ dev_dbg(&client->dev, "\n");
#if IS_ENABLED(CONFIG_VIDEO_V4L2)
v4l2_ctrl_handler_free(&s->hdl);
diff --git a/drivers/media/tuners/it913x.c b/drivers/media/tuners/it913x.c
new file mode 100644
index 000000000000..a076c87eda7a
--- /dev/null
+++ b/drivers/media/tuners/it913x.c
@@ -0,0 +1,478 @@
+/*
+ * ITE IT913X silicon tuner driver
+ *
+ * Copyright (C) 2011 Malcolm Priestley (tvboxspy@gmail.com)
+ * IT9137 Copyright (C) ITE Tech Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ *
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "it913x.h"
+#include <linux/regmap.h>
+
+struct it913x_dev {
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct dvb_frontend *fe;
+ u8 chip_ver:2;
+ u8 role:2;
+ u16 xtal;
+ u8 fdiv;
+ u8 clk_mode;
+ u32 fn_min;
+ bool active;
+};
+
+static int it913x_init(struct dvb_frontend *fe)
+{
+ struct it913x_dev *dev = fe->tuner_priv;
+ int ret;
+ unsigned int utmp;
+ u8 iqik_m_cal, nv_val, buf[2];
+ static const u8 nv[] = {48, 32, 24, 16, 12, 8, 6, 4, 2};
+ unsigned long timeout;
+
+ dev_dbg(&dev->client->dev, "role %u\n", dev->role);
+
+ ret = regmap_write(dev->regmap, 0x80ec4c, 0x68);
+ if (ret)
+ goto err;
+
+ usleep_range(10000, 100000);
+
+ ret = regmap_read(dev->regmap, 0x80ec86, &utmp);
+ if (ret)
+ goto err;
+
+ switch (utmp) {
+ case 0:
+ /* 12.000 MHz */
+ dev->clk_mode = utmp;
+ dev->xtal = 2000;
+ dev->fdiv = 3;
+ iqik_m_cal = 16;
+ break;
+ case 1:
+ /* 20.480 MHz */
+ dev->clk_mode = utmp;
+ dev->xtal = 640;
+ dev->fdiv = 1;
+ iqik_m_cal = 6;
+ break;
+ default:
+ dev_err(&dev->client->dev, "unknown clock identifier %d\n", utmp);
+ goto err;
+ }
+
+ ret = regmap_read(dev->regmap, 0x80ed03, &utmp);
+ if (ret)
+ goto err;
+
+ else if (utmp < ARRAY_SIZE(nv))
+ nv_val = nv[utmp];
+ else
+ nv_val = 2;
+
+ #define TIMEOUT 50
+ timeout = jiffies + msecs_to_jiffies(TIMEOUT);
+ while (!time_after(jiffies, timeout)) {
+ ret = regmap_bulk_read(dev->regmap, 0x80ed23, buf, 2);
+ if (ret)
+ goto err;
+
+ utmp = (buf[1] << 8) | (buf[0] << 0);
+ if (utmp)
+ break;
+ }
+
+ dev_dbg(&dev->client->dev, "r_fbc_m_bdry took %u ms, val %u\n",
+ jiffies_to_msecs(jiffies) -
+ (jiffies_to_msecs(timeout) - TIMEOUT), utmp);
+
+ dev->fn_min = dev->xtal * utmp;
+ dev->fn_min /= (dev->fdiv * nv_val);
+ dev->fn_min *= 1000;
+ dev_dbg(&dev->client->dev, "fn_min %u\n", dev->fn_min);
+
+ /*
+ * Chip version BX never sets that flag, so in that case we just wait
+ * 50 ms. It would be possible to poll BX the same way as AX and rely
+ * on the timeout to get the 50 ms delay, but that costs about 120
+ * extra I2C messages. For now we simply wait and keep the I/O down.
+ */
+ if (dev->chip_ver == 1) {
+ #define TIMEOUT 50
+ timeout = jiffies + msecs_to_jiffies(TIMEOUT);
+ while (!time_after(jiffies, timeout)) {
+ ret = regmap_read(dev->regmap, 0x80ec82, &utmp);
+ if (ret)
+ goto err;
+
+ if (utmp)
+ break;
+ }
+
+ dev_dbg(&dev->client->dev, "p_tsm_init_mode took %u ms, val %u\n",
+ jiffies_to_msecs(jiffies) -
+ (jiffies_to_msecs(timeout) - TIMEOUT), utmp);
+ } else {
+ msleep(50);
+ }
+
+ ret = regmap_write(dev->regmap, 0x80ed81, iqik_m_cal);
+ if (ret)
+ goto err;
+
+ ret = regmap_write(dev->regmap, 0x80ec57, 0x00);
+ if (ret)
+ goto err;
+
+ ret = regmap_write(dev->regmap, 0x80ec58, 0x00);
+ if (ret)
+ goto err;
+
+ ret = regmap_write(dev->regmap, 0x80ec40, 0x01);
+ if (ret)
+ goto err;
+
+ dev->active = true;
+
+ return 0;
+err:
+ dev_dbg(&dev->client->dev, "failed %d\n", ret);
+ return ret;
+}
+
+static int it913x_sleep(struct dvb_frontend *fe)
+{
+ struct it913x_dev *dev = fe->tuner_priv;
+ int ret, len;
+
+ dev_dbg(&dev->client->dev, "role %u\n", dev->role);
+
+ dev->active = false;
+
+ ret = regmap_bulk_write(dev->regmap, 0x80ec40, "\x00", 1);
+ if (ret)
+ goto err;
+
+ /*
+ * Writing '0x00' to master tuner register '0x80ec08' makes the slave
+ * tuner lose I2C communication. Because of that, we cannot put the
+ * master fully to sleep.
+ */
+ if (dev->role == IT913X_ROLE_DUAL_MASTER)
+ len = 4;
+ else
+ len = 15;
+
+ dev_dbg(&dev->client->dev, "role %u, len %d\n", dev->role, len);
+
+ ret = regmap_bulk_write(dev->regmap, 0x80ec02,
+ "\x3f\x1f\x3f\x3e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
+ len);
+ if (ret)
+ goto err;
+
+ ret = regmap_bulk_write(dev->regmap, 0x80ec12, "\x00\x00\x00\x00", 4);
+ if (ret)
+ goto err;
+
+ ret = regmap_bulk_write(dev->regmap, 0x80ec17,
+ "\x00\x00\x00\x00\x00\x00\x00\x00\x00", 9);
+ if (ret)
+ goto err;
+
+ ret = regmap_bulk_write(dev->regmap, 0x80ec22,
+ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 10);
+ if (ret)
+ goto err;
+
+ ret = regmap_bulk_write(dev->regmap, 0x80ec20, "\x00", 1);
+ if (ret)
+ goto err;
+
+ ret = regmap_bulk_write(dev->regmap, 0x80ec3f, "\x01", 1);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ dev_dbg(&dev->client->dev, "failed %d\n", ret);
+ return ret;
+}
+
+static int it913x_set_params(struct dvb_frontend *fe)
+{
+ struct it913x_dev *dev = fe->tuner_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int ret;
+ unsigned int utmp;
+ u32 pre_lo_freq, t_cal_freq;
+ u16 iqik_m_cal, n_div;
+ u8 u8tmp, n, l_band, lna_band;
+
+ dev_dbg(&dev->client->dev, "role=%u, frequency %u, bandwidth_hz %u\n",
+ dev->role, c->frequency, c->bandwidth_hz);
+
+ if (!dev->active) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (c->frequency <= 74000000) {
+ n_div = 48;
+ n = 0;
+ } else if (c->frequency <= 111000000) {
+ n_div = 32;
+ n = 1;
+ } else if (c->frequency <= 148000000) {
+ n_div = 24;
+ n = 2;
+ } else if (c->frequency <= 222000000) {
+ n_div = 16;
+ n = 3;
+ } else if (c->frequency <= 296000000) {
+ n_div = 12;
+ n = 4;
+ } else if (c->frequency <= 445000000) {
+ n_div = 8;
+ n = 5;
+ } else if (c->frequency <= dev->fn_min) {
+ n_div = 6;
+ n = 6;
+ } else if (c->frequency <= 950000000) {
+ n_div = 4;
+ n = 7;
+ } else {
+ n_div = 2;
+ n = 0;
+ }
+
+ ret = regmap_read(dev->regmap, 0x80ed81, &utmp);
+ if (ret)
+ goto err;
+
+ iqik_m_cal = utmp * n_div;
+
+ if (utmp < 0x20) {
+ if (dev->clk_mode == 0)
+ iqik_m_cal = (iqik_m_cal * 9) >> 5;
+ else
+ iqik_m_cal >>= 1;
+ } else {
+ iqik_m_cal = 0x40 - iqik_m_cal;
+ if (dev->clk_mode == 0)
+ iqik_m_cal = ~((iqik_m_cal * 9) >> 5);
+ else
+ iqik_m_cal = ~(iqik_m_cal >> 1);
+ }
+
+ t_cal_freq = (c->frequency / 1000) * n_div * dev->fdiv;
+ pre_lo_freq = t_cal_freq / dev->xtal;
+ utmp = pre_lo_freq * dev->xtal;
+
+ if ((t_cal_freq - utmp) >= (dev->xtal >> 1))
+ pre_lo_freq++;
+
+ pre_lo_freq += (u32) n << 13;
+ /* Frequency OMEGA_IQIK_M_CAL_MID */
+ t_cal_freq = pre_lo_freq + (u32)iqik_m_cal;
+ dev_dbg(&dev->client->dev, "t_cal_freq %u, pre_lo_freq %u\n",
+ t_cal_freq, pre_lo_freq);
+
+ if (c->frequency <= 440000000) {
+ l_band = 0;
+ lna_band = 0;
+ } else if (c->frequency <= 484000000) {
+ l_band = 1;
+ lna_band = 1;
+ } else if (c->frequency <= 533000000) {
+ l_band = 1;
+ lna_band = 2;
+ } else if (c->frequency <= 587000000) {
+ l_band = 1;
+ lna_band = 3;
+ } else if (c->frequency <= 645000000) {
+ l_band = 1;
+ lna_band = 4;
+ } else if (c->frequency <= 710000000) {
+ l_band = 1;
+ lna_band = 5;
+ } else if (c->frequency <= 782000000) {
+ l_band = 1;
+ lna_band = 6;
+ } else if (c->frequency <= 860000000) {
+ l_band = 1;
+ lna_band = 7;
+ } else if (c->frequency <= 1492000000) {
+ l_band = 1;
+ lna_band = 0;
+ } else if (c->frequency <= 1685000000) {
+ l_band = 1;
+ lna_band = 1;
+ } else {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* XXX: latest windows driver does not set that at all */
+ ret = regmap_write(dev->regmap, 0x80ee06, lna_band);
+ if (ret)
+ goto err;
+
+ if (c->bandwidth_hz <= 5000000)
+ u8tmp = 0;
+ else if (c->bandwidth_hz <= 6000000)
+ u8tmp = 2;
+ else if (c->bandwidth_hz <= 7000000)
+ u8tmp = 4;
+ else
+ u8tmp = 6; /* 8000000 */
+
+ ret = regmap_write(dev->regmap, 0x80ec56, u8tmp);
+ if (ret)
+ goto err;
+
+ /* XXX: latest windows driver sets different value (a8 != 68) */
+ ret = regmap_write(dev->regmap, 0x80ec4c, 0xa0 | (l_band << 3));
+ if (ret)
+ goto err;
+
+ ret = regmap_write(dev->regmap, 0x80ec4d, (t_cal_freq >> 0) & 0xff);
+ if (ret)
+ goto err;
+
+ ret = regmap_write(dev->regmap, 0x80ec4e, (t_cal_freq >> 8) & 0xff);
+ if (ret)
+ goto err;
+
+ ret = regmap_write(dev->regmap, 0x80011e, (pre_lo_freq >> 0) & 0xff);
+ if (ret)
+ goto err;
+
+ ret = regmap_write(dev->regmap, 0x80011f, (pre_lo_freq >> 8) & 0xff);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ dev_dbg(&dev->client->dev, "failed %d\n", ret);
+ return ret;
+}
+
+static const struct dvb_tuner_ops it913x_tuner_ops = {
+ .info = {
+ .name = "ITE IT913X",
+ .frequency_min = 174000000,
+ .frequency_max = 862000000,
+ },
+
+ .init = it913x_init,
+ .sleep = it913x_sleep,
+ .set_params = it913x_set_params,
+};
+
+static int it913x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct it913x_config *cfg = client->dev.platform_data;
+ struct dvb_frontend *fe = cfg->fe;
+ struct it913x_dev *dev;
+ int ret;
+ char *chip_ver_str;
+ static const struct regmap_config regmap_config = {
+ .reg_bits = 24,
+ .val_bits = 8,
+ };
+
+ dev = kzalloc(sizeof(struct it913x_dev), GFP_KERNEL);
+ if (dev == NULL) {
+ ret = -ENOMEM;
+ dev_err(&client->dev, "kzalloc() failed\n");
+ goto err;
+ }
+
+ dev->client = client;
+ dev->fe = cfg->fe;
+ dev->chip_ver = cfg->chip_ver;
+ dev->role = cfg->role;
+ dev->regmap = regmap_init_i2c(client, &regmap_config);
+ if (IS_ERR(dev->regmap)) {
+ ret = PTR_ERR(dev->regmap);
+ goto err_kfree;
+ }
+
+ fe->tuner_priv = dev;
+ memcpy(&fe->ops.tuner_ops, &it913x_tuner_ops,
+ sizeof(struct dvb_tuner_ops));
+ i2c_set_clientdata(client, dev);
+
+ if (dev->chip_ver == 1)
+ chip_ver_str = "AX";
+ else if (dev->chip_ver == 2)
+ chip_ver_str = "BX";
+ else
+ chip_ver_str = "??";
+
+ dev_info(&dev->client->dev, "ITE IT913X %s successfully attached\n",
+ chip_ver_str);
+ dev_dbg(&dev->client->dev, "chip_ver %u, role %u\n",
+ dev->chip_ver, dev->role);
+ return 0;
+
+err_kfree:
+ kfree(dev);
+err:
+ dev_dbg(&client->dev, "failed %d\n", ret);
+ return ret;
+}
+
+static int it913x_remove(struct i2c_client *client)
+{
+ struct it913x_dev *dev = i2c_get_clientdata(client);
+ struct dvb_frontend *fe = dev->fe;
+
+ dev_dbg(&client->dev, "\n");
+
+ memset(&fe->ops.tuner_ops, 0, sizeof(struct dvb_tuner_ops));
+ fe->tuner_priv = NULL;
+ regmap_exit(dev->regmap);
+ kfree(dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id it913x_id_table[] = {
+ {"it913x", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, it913x_id_table);
+
+static struct i2c_driver it913x_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "it913x",
+ },
+ .probe = it913x_probe,
+ .remove = it913x_remove,
+ .id_table = it913x_id_table,
+};
+
+module_i2c_driver(it913x_driver);
+
+MODULE_DESCRIPTION("ITE IT913X silicon tuner driver");
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_LICENSE("GPL");
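The two wait loops in it913x_init() follow the usual jiffies-based timeout pattern: compute a deadline with msecs_to_jiffies(), poll the status register until it reads non-zero or time_after() fires, and log how long the step actually took. A stand-alone sketch of a variant of that pattern (this one also sleeps between polls and reports -ETIMEDOUT), with a hypothetical read_flag() standing in for the regmap reads:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Hypothetical accessor standing in for the regmap_read() of 0x80ed23/0x80ec82. */
static int read_flag(unsigned int *val)
{
        *val = 1;                       /* pretend the hardware signalled completion */
        return 0;
}

static int wait_for_flag(void)
{
        unsigned long timeout = jiffies + msecs_to_jiffies(50);
        unsigned int val = 0;
        int ret;

        while (!time_after(jiffies, timeout)) {
                ret = read_flag(&val);
                if (ret)
                        return ret;
                if (val)                /* flag set, done */
                        return 0;
                usleep_range(1000, 2000);
        }
        return -ETIMEDOUT;
}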
diff --git a/drivers/media/tuners/tuner_it913x.h b/drivers/media/tuners/it913x.h
index 12dd36bd9e79..33de53d4a566 100644
--- a/drivers/media/tuners/tuner_it913x.h
+++ b/drivers/media/tuners/it913x.h
@@ -25,21 +25,30 @@
#include "dvb_frontend.h"
-#if defined(CONFIG_MEDIA_TUNER_IT913X) || \
- (defined(CONFIG_MEDIA_TUNER_IT913X_MODULE) && defined(MODULE))
-extern struct dvb_frontend *it913x_attach(struct dvb_frontend *fe,
- struct i2c_adapter *i2c_adap,
- u8 i2c_addr,
- u8 config);
-#else
-static inline struct dvb_frontend *it913x_attach(struct dvb_frontend *fe,
- struct i2c_adapter *i2c_adap,
- u8 i2c_addr,
- u8 config)
-{
- pr_warn("%s: driver disabled by Kconfig\n", __func__);
- return NULL;
-}
-#endif
+/*
+ * I2C address
+ * 0x38, 0x3a, 0x3c, 0x3e
+ */
+struct it913x_config {
+ /*
+ * pointer to DVB frontend
+ */
+ struct dvb_frontend *fe;
+
+ /*
+ * chip version
+ * 1 = IT9135 AX
+ * 2 = IT9135 BX
+ */
+ unsigned int chip_ver:2;
+
+ /*
+ * tuner role
+ */
+#define IT913X_ROLE_SINGLE 0
+#define IT913X_ROLE_DUAL_MASTER 1
+#define IT913X_ROLE_DUAL_SLAVE 2
+ unsigned int role:2;
+};
#endif
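With the it913x_attach() wrapper gone, a bridge driver now binds the tuner as an ordinary I2C client and hands it a struct it913x_config through platform data, which probe() copies field by field. A minimal sketch of such an attachment, assuming a hypothetical foo_attach_tuner() helper and ignoring error handling:

#include <linux/i2c.h>
#include <linux/kmod.h>

#include "it913x.h"

/* Hypothetical helper a bridge driver might use; error handling omitted. */
static struct i2c_client *foo_attach_tuner(struct i2c_adapter *adap,
                                           struct dvb_frontend *fe)
{
        struct it913x_config it913x_cfg = {
                .fe = fe,
                .chip_ver = 1,                  /* IT9135 AX */
                .role = IT913X_ROLE_SINGLE,
        };
        struct i2c_board_info info = {
                .type = "it913x",               /* matches it913x_id_table */
                .addr = 0x38,                   /* one of the documented addresses */
                .platform_data = &it913x_cfg,
        };

        /* make sure the tuner driver is loaded so probe() runs synchronously */
        request_module("%s", info.type);
        return i2c_new_device(adap, &info);
}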
diff --git a/drivers/media/tuners/m88ts2022.c b/drivers/media/tuners/m88ts2022.c
index 40c42dec721b..caa542346891 100644
--- a/drivers/media/tuners/m88ts2022.c
+++ b/drivers/media/tuners/m88ts2022.c
@@ -18,120 +18,11 @@
#include "m88ts2022_priv.h"
-/* write multiple registers */
-static int m88ts2022_wr_regs(struct m88ts2022_priv *priv,
- u8 reg, const u8 *val, int len)
+static int m88ts2022_cmd(struct m88ts2022_dev *dev, int op, int sleep, u8 reg,
+ u8 mask, u8 val, u8 *reg_val)
{
-#define MAX_WR_LEN 3
-#define MAX_WR_XFER_LEN (MAX_WR_LEN + 1)
- int ret;
- u8 buf[MAX_WR_XFER_LEN];
- struct i2c_msg msg[1] = {
- {
- .addr = priv->client->addr,
- .flags = 0,
- .len = 1 + len,
- .buf = buf,
- }
- };
-
- if (WARN_ON(len > MAX_WR_LEN))
- return -EINVAL;
-
- buf[0] = reg;
- memcpy(&buf[1], val, len);
-
- ret = i2c_transfer(priv->client->adapter, msg, 1);
- if (ret == 1) {
- ret = 0;
- } else {
- dev_warn(&priv->client->dev,
- "%s: i2c wr failed=%d reg=%02x len=%d\n",
- KBUILD_MODNAME, ret, reg, len);
- ret = -EREMOTEIO;
- }
-
- return ret;
-}
-
-/* read multiple registers */
-static int m88ts2022_rd_regs(struct m88ts2022_priv *priv, u8 reg,
- u8 *val, int len)
-{
-#define MAX_RD_LEN 1
-#define MAX_RD_XFER_LEN (MAX_RD_LEN)
- int ret;
- u8 buf[MAX_RD_XFER_LEN];
- struct i2c_msg msg[2] = {
- {
- .addr = priv->client->addr,
- .flags = 0,
- .len = 1,
- .buf = &reg,
- }, {
- .addr = priv->client->addr,
- .flags = I2C_M_RD,
- .len = len,
- .buf = buf,
- }
- };
-
- if (WARN_ON(len > MAX_RD_LEN))
- return -EINVAL;
-
- ret = i2c_transfer(priv->client->adapter, msg, 2);
- if (ret == 2) {
- memcpy(val, buf, len);
- ret = 0;
- } else {
- dev_warn(&priv->client->dev,
- "%s: i2c rd failed=%d reg=%02x len=%d\n",
- KBUILD_MODNAME, ret, reg, len);
- ret = -EREMOTEIO;
- }
-
- return ret;
-}
-
-/* write single register */
-static int m88ts2022_wr_reg(struct m88ts2022_priv *priv, u8 reg, u8 val)
-{
- return m88ts2022_wr_regs(priv, reg, &val, 1);
-}
-
-/* read single register */
-static int m88ts2022_rd_reg(struct m88ts2022_priv *priv, u8 reg, u8 *val)
-{
- return m88ts2022_rd_regs(priv, reg, val, 1);
-}
-
-/* write single register with mask */
-static int m88ts2022_wr_reg_mask(struct m88ts2022_priv *priv,
- u8 reg, u8 val, u8 mask)
-{
- int ret;
- u8 u8tmp;
-
- /* no need for read if whole reg is written */
- if (mask != 0xff) {
- ret = m88ts2022_rd_regs(priv, reg, &u8tmp, 1);
- if (ret)
- return ret;
-
- val &= mask;
- u8tmp &= ~mask;
- val |= u8tmp;
- }
-
- return m88ts2022_wr_regs(priv, reg, &val, 1);
-}
-
-static int m88ts2022_cmd(struct dvb_frontend *fe,
- int op, int sleep, u8 reg, u8 mask, u8 val, u8 *reg_val)
-{
- struct m88ts2022_priv *priv = fe->tuner_priv;
int ret, i;
- u8 u8tmp;
+ unsigned int utmp;
struct m88ts2022_reg_val reg_vals[] = {
{0x51, 0x1f - op},
{0x51, 0x1f},
@@ -140,12 +31,12 @@ static int m88ts2022_cmd(struct dvb_frontend *fe,
};
for (i = 0; i < 2; i++) {
- dev_dbg(&priv->client->dev,
- "%s: i=%d op=%02x reg=%02x mask=%02x val=%02x\n",
- __func__, i, op, reg, mask, val);
+ dev_dbg(&dev->client->dev,
+ "i=%d op=%02x reg=%02x mask=%02x val=%02x\n",
+ i, op, reg, mask, val);
for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
- ret = m88ts2022_wr_reg(priv, reg_vals[i].reg,
+ ret = regmap_write(dev->regmap, reg_vals[i].reg,
reg_vals[i].val);
if (ret)
goto err;
@@ -153,37 +44,38 @@ static int m88ts2022_cmd(struct dvb_frontend *fe,
usleep_range(sleep * 1000, sleep * 10000);
- ret = m88ts2022_rd_reg(priv, reg, &u8tmp);
+ ret = regmap_read(dev->regmap, reg, &utmp);
if (ret)
goto err;
- if ((u8tmp & mask) != val)
+ if ((utmp & mask) != val)
break;
}
if (reg_val)
- *reg_val = u8tmp;
+ *reg_val = utmp;
err:
return ret;
}
static int m88ts2022_set_params(struct dvb_frontend *fe)
{
- struct m88ts2022_priv *priv = fe->tuner_priv;
+ struct m88ts2022_dev *dev = fe->tuner_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret;
- unsigned int frequency_khz, frequency_offset_khz, f_3db_hz;
+ unsigned int utmp, frequency_khz, frequency_offset_khz, f_3db_hz;
unsigned int f_ref_khz, f_vco_khz, div_ref, div_out, pll_n, gdiv28;
u8 buf[3], u8tmp, cap_code, lpf_gm, lpf_mxdiv, div_max, div_min;
u16 u16tmp;
- dev_dbg(&priv->client->dev,
- "%s: frequency=%d symbol_rate=%d rolloff=%d\n",
- __func__, c->frequency, c->symbol_rate, c->rolloff);
+
+ dev_dbg(&dev->client->dev,
+ "frequency=%d symbol_rate=%d rolloff=%d\n",
+ c->frequency, c->symbol_rate, c->rolloff);
/*
* Integer-N PLL synthesizer
* kHz is used for all calculations to keep calculations within 32-bit
*/
- f_ref_khz = DIV_ROUND_CLOSEST(priv->cfg.clock, 1000);
+ f_ref_khz = DIV_ROUND_CLOSEST(dev->cfg.clock, 1000);
div_ref = DIV_ROUND_CLOSEST(f_ref_khz, 2000);
if (c->symbol_rate < 5000000)
@@ -203,14 +95,14 @@ static int m88ts2022_set_params(struct dvb_frontend *fe)
buf[0] = u8tmp;
buf[1] = 0x40;
- ret = m88ts2022_wr_regs(priv, 0x10, buf, 2);
+ ret = regmap_bulk_write(dev->regmap, 0x10, buf, 2);
if (ret)
goto err;
f_vco_khz = frequency_khz * div_out;
pll_n = f_vco_khz * div_ref / f_ref_khz;
pll_n += pll_n % 2;
- priv->frequency_khz = pll_n * f_ref_khz / div_ref / div_out;
+ dev->frequency_khz = pll_n * f_ref_khz / div_ref / div_out;
if (pll_n < 4095)
u16tmp = pll_n - 1024;
@@ -222,88 +114,87 @@ static int m88ts2022_set_params(struct dvb_frontend *fe)
buf[0] = (u16tmp >> 8) & 0x3f;
buf[1] = (u16tmp >> 0) & 0xff;
buf[2] = div_ref - 8;
- ret = m88ts2022_wr_regs(priv, 0x01, buf, 3);
+ ret = regmap_bulk_write(dev->regmap, 0x01, buf, 3);
if (ret)
goto err;
- dev_dbg(&priv->client->dev,
- "%s: frequency=%u offset=%d f_vco_khz=%u pll_n=%u div_ref=%u div_out=%u\n",
- __func__, priv->frequency_khz,
- priv->frequency_khz - c->frequency, f_vco_khz, pll_n,
- div_ref, div_out);
+ dev_dbg(&dev->client->dev,
+ "frequency=%u offset=%d f_vco_khz=%u pll_n=%u div_ref=%u div_out=%u\n",
+ dev->frequency_khz, dev->frequency_khz - c->frequency,
+ f_vco_khz, pll_n, div_ref, div_out);
- ret = m88ts2022_cmd(fe, 0x10, 5, 0x15, 0x40, 0x00, NULL);
+ ret = m88ts2022_cmd(dev, 0x10, 5, 0x15, 0x40, 0x00, NULL);
if (ret)
goto err;
- ret = m88ts2022_rd_reg(priv, 0x14, &u8tmp);
+ ret = regmap_read(dev->regmap, 0x14, &utmp);
if (ret)
goto err;
- u8tmp &= 0x7f;
- if (u8tmp < 64) {
- ret = m88ts2022_wr_reg_mask(priv, 0x10, 0x80, 0x80);
+ utmp &= 0x7f;
+ if (utmp < 64) {
+ ret = regmap_update_bits(dev->regmap, 0x10, 0x80, 0x80);
if (ret)
goto err;
- ret = m88ts2022_wr_reg(priv, 0x11, 0x6f);
+ ret = regmap_write(dev->regmap, 0x11, 0x6f);
if (ret)
goto err;
- ret = m88ts2022_cmd(fe, 0x10, 5, 0x15, 0x40, 0x00, NULL);
+ ret = m88ts2022_cmd(dev, 0x10, 5, 0x15, 0x40, 0x00, NULL);
if (ret)
goto err;
}
- ret = m88ts2022_rd_reg(priv, 0x14, &u8tmp);
+ ret = regmap_read(dev->regmap, 0x14, &utmp);
if (ret)
goto err;
- u8tmp &= 0x1f;
- if (u8tmp > 19) {
- ret = m88ts2022_wr_reg_mask(priv, 0x10, 0x00, 0x02);
+ utmp &= 0x1f;
+ if (utmp > 19) {
+ ret = regmap_update_bits(dev->regmap, 0x10, 0x02, 0x00);
if (ret)
goto err;
}
- ret = m88ts2022_cmd(fe, 0x08, 5, 0x3c, 0xff, 0x00, NULL);
+ ret = m88ts2022_cmd(dev, 0x08, 5, 0x3c, 0xff, 0x00, NULL);
if (ret)
goto err;
- ret = m88ts2022_wr_reg(priv, 0x25, 0x00);
+ ret = regmap_write(dev->regmap, 0x25, 0x00);
if (ret)
goto err;
- ret = m88ts2022_wr_reg(priv, 0x27, 0x70);
+ ret = regmap_write(dev->regmap, 0x27, 0x70);
if (ret)
goto err;
- ret = m88ts2022_wr_reg(priv, 0x41, 0x09);
+ ret = regmap_write(dev->regmap, 0x41, 0x09);
if (ret)
goto err;
- ret = m88ts2022_wr_reg(priv, 0x08, 0x0b);
+ ret = regmap_write(dev->regmap, 0x08, 0x0b);
if (ret)
goto err;
/* filters */
gdiv28 = DIV_ROUND_CLOSEST(f_ref_khz * 1694U, 1000000U);
- ret = m88ts2022_wr_reg(priv, 0x04, gdiv28);
+ ret = regmap_write(dev->regmap, 0x04, gdiv28);
if (ret)
goto err;
- ret = m88ts2022_cmd(fe, 0x04, 2, 0x26, 0xff, 0x00, &u8tmp);
+ ret = m88ts2022_cmd(dev, 0x04, 2, 0x26, 0xff, 0x00, &u8tmp);
if (ret)
goto err;
cap_code = u8tmp & 0x3f;
- ret = m88ts2022_wr_reg(priv, 0x41, 0x0d);
+ ret = regmap_write(dev->regmap, 0x41, 0x0d);
if (ret)
goto err;
- ret = m88ts2022_cmd(fe, 0x04, 2, 0x26, 0xff, 0x00, &u8tmp);
+ ret = m88ts2022_cmd(dev, 0x04, 2, 0x26, 0xff, 0x00, &u8tmp);
if (ret)
goto err;
@@ -314,7 +205,7 @@ static int m88ts2022_set_params(struct dvb_frontend *fe)
div_min = gdiv28 * 78 / 100;
div_max = clamp_val(div_max, 0U, 63U);
- f_3db_hz = c->symbol_rate * 135UL / 200UL;
+ f_3db_hz = mult_frac(c->symbol_rate, 135, 200);
f_3db_hz += 2000000U + (frequency_offset_khz * 1000U);
f_3db_hz = clamp(f_3db_hz, 7000000U, 40000000U);
@@ -327,25 +218,25 @@ static int m88ts2022_set_params(struct dvb_frontend *fe)
lpf_mxdiv = DIV_ROUND_CLOSEST(++lpf_gm * LPF_COEFF * f_ref_khz, f_3db_hz);
lpf_mxdiv = clamp_val(lpf_mxdiv, 0U, div_max);
- ret = m88ts2022_wr_reg(priv, 0x04, lpf_mxdiv);
+ ret = regmap_write(dev->regmap, 0x04, lpf_mxdiv);
if (ret)
goto err;
- ret = m88ts2022_wr_reg(priv, 0x06, lpf_gm);
+ ret = regmap_write(dev->regmap, 0x06, lpf_gm);
if (ret)
goto err;
- ret = m88ts2022_cmd(fe, 0x04, 2, 0x26, 0xff, 0x00, &u8tmp);
+ ret = m88ts2022_cmd(dev, 0x04, 2, 0x26, 0xff, 0x00, &u8tmp);
if (ret)
goto err;
cap_code = u8tmp & 0x3f;
- ret = m88ts2022_wr_reg(priv, 0x41, 0x09);
+ ret = regmap_write(dev->regmap, 0x41, 0x09);
if (ret)
goto err;
- ret = m88ts2022_cmd(fe, 0x04, 2, 0x26, 0xff, 0x00, &u8tmp);
+ ret = m88ts2022_cmd(dev, 0x04, 2, 0x26, 0xff, 0x00, &u8tmp);
if (ret)
goto err;
@@ -353,31 +244,31 @@ static int m88ts2022_set_params(struct dvb_frontend *fe)
cap_code = (cap_code + u8tmp) / 2;
u8tmp = cap_code | 0x80;
- ret = m88ts2022_wr_reg(priv, 0x25, u8tmp);
+ ret = regmap_write(dev->regmap, 0x25, u8tmp);
if (ret)
goto err;
- ret = m88ts2022_wr_reg(priv, 0x27, 0x30);
+ ret = regmap_write(dev->regmap, 0x27, 0x30);
if (ret)
goto err;
- ret = m88ts2022_wr_reg(priv, 0x08, 0x09);
+ ret = regmap_write(dev->regmap, 0x08, 0x09);
if (ret)
goto err;
- ret = m88ts2022_cmd(fe, 0x01, 20, 0x21, 0xff, 0x00, NULL);
+ ret = m88ts2022_cmd(dev, 0x01, 20, 0x21, 0xff, 0x00, NULL);
if (ret)
goto err;
err:
if (ret)
- dev_dbg(&priv->client->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&dev->client->dev, "failed=%d\n", ret);
return ret;
}
static int m88ts2022_init(struct dvb_frontend *fe)
{
- struct m88ts2022_priv *priv = fe->tuner_priv;
+ struct m88ts2022_dev *dev = fe->tuner_priv;
int ret, i;
u8 u8tmp;
static const struct m88ts2022_reg_val reg_vals[] = {
@@ -393,23 +284,24 @@ static int m88ts2022_init(struct dvb_frontend *fe)
{0x24, 0x02},
{0x12, 0xa0},
};
- dev_dbg(&priv->client->dev, "%s:\n", __func__);
- ret = m88ts2022_wr_reg(priv, 0x00, 0x01);
+ dev_dbg(&dev->client->dev, "\n");
+
+ ret = regmap_write(dev->regmap, 0x00, 0x01);
if (ret)
goto err;
- ret = m88ts2022_wr_reg(priv, 0x00, 0x03);
+ ret = regmap_write(dev->regmap, 0x00, 0x03);
if (ret)
goto err;
- switch (priv->cfg.clock_out) {
+ switch (dev->cfg.clock_out) {
case M88TS2022_CLOCK_OUT_DISABLED:
u8tmp = 0x60;
break;
case M88TS2022_CLOCK_OUT_ENABLED:
u8tmp = 0x70;
- ret = m88ts2022_wr_reg(priv, 0x05, priv->cfg.clock_out_div);
+ ret = regmap_write(dev->regmap, 0x05, dev->cfg.clock_out_div);
if (ret)
goto err;
break;
@@ -420,58 +312,61 @@ static int m88ts2022_init(struct dvb_frontend *fe)
goto err;
}
- ret = m88ts2022_wr_reg(priv, 0x42, u8tmp);
+ ret = regmap_write(dev->regmap, 0x42, u8tmp);
if (ret)
goto err;
- if (priv->cfg.loop_through)
+ if (dev->cfg.loop_through)
u8tmp = 0xec;
else
u8tmp = 0x6c;
- ret = m88ts2022_wr_reg(priv, 0x62, u8tmp);
+ ret = regmap_write(dev->regmap, 0x62, u8tmp);
if (ret)
goto err;
for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
- ret = m88ts2022_wr_reg(priv, reg_vals[i].reg, reg_vals[i].val);
+ ret = regmap_write(dev->regmap, reg_vals[i].reg, reg_vals[i].val);
if (ret)
goto err;
}
err:
if (ret)
- dev_dbg(&priv->client->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&dev->client->dev, "failed=%d\n", ret);
return ret;
}
static int m88ts2022_sleep(struct dvb_frontend *fe)
{
- struct m88ts2022_priv *priv = fe->tuner_priv;
+ struct m88ts2022_dev *dev = fe->tuner_priv;
int ret;
- dev_dbg(&priv->client->dev, "%s:\n", __func__);
- ret = m88ts2022_wr_reg(priv, 0x00, 0x00);
+ dev_dbg(&dev->client->dev, "\n");
+
+ ret = regmap_write(dev->regmap, 0x00, 0x00);
if (ret)
goto err;
err:
if (ret)
- dev_dbg(&priv->client->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&dev->client->dev, "failed=%d\n", ret);
return ret;
}
static int m88ts2022_get_frequency(struct dvb_frontend *fe, u32 *frequency)
{
- struct m88ts2022_priv *priv = fe->tuner_priv;
- dev_dbg(&priv->client->dev, "%s:\n", __func__);
+ struct m88ts2022_dev *dev = fe->tuner_priv;
- *frequency = priv->frequency_khz;
+ dev_dbg(&dev->client->dev, "\n");
+
+ *frequency = dev->frequency_khz;
return 0;
}
static int m88ts2022_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
{
- struct m88ts2022_priv *priv = fe->tuner_priv;
- dev_dbg(&priv->client->dev, "%s:\n", __func__);
+ struct m88ts2022_dev *dev = fe->tuner_priv;
+
+ dev_dbg(&dev->client->dev, "\n");
*frequency = 0; /* Zero-IF */
return 0;
@@ -479,31 +374,30 @@ static int m88ts2022_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
static int m88ts2022_get_rf_strength(struct dvb_frontend *fe, u16 *strength)
{
- struct m88ts2022_priv *priv = fe->tuner_priv;
+ struct m88ts2022_dev *dev = fe->tuner_priv;
int ret;
- u8 u8tmp;
u16 gain, u16tmp;
- unsigned int gain1, gain2, gain3;
+ unsigned int utmp, gain1, gain2, gain3;
- ret = m88ts2022_rd_reg(priv, 0x3d, &u8tmp);
+ ret = regmap_read(dev->regmap, 0x3d, &utmp);
if (ret)
goto err;
- gain1 = (u8tmp >> 0) & 0x1f;
+ gain1 = (utmp >> 0) & 0x1f;
gain1 = clamp(gain1, 0U, 15U);
- ret = m88ts2022_rd_reg(priv, 0x21, &u8tmp);
+ ret = regmap_read(dev->regmap, 0x21, &utmp);
if (ret)
goto err;
- gain2 = (u8tmp >> 0) & 0x1f;
+ gain2 = (utmp >> 0) & 0x1f;
gain2 = clamp(gain2, 2U, 16U);
- ret = m88ts2022_rd_reg(priv, 0x66, &u8tmp);
+ ret = regmap_read(dev->regmap, 0x66, &utmp);
if (ret)
goto err;
- gain3 = (u8tmp >> 3) & 0x07;
+ gain3 = (utmp >> 3) & 0x07;
gain3 = clamp(gain3, 0U, 6U);
gain = gain1 * 265 + gain2 * 338 + gain3 * 285;
@@ -515,7 +409,7 @@ static int m88ts2022_get_rf_strength(struct dvb_frontend *fe, u16 *strength)
*strength = (u16tmp - 59000) * 0xffff / (61500 - 59000);
err:
if (ret)
- dev_dbg(&priv->client->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&dev->client->dev, "failed=%d\n", ret);
return ret;
}
@@ -540,46 +434,56 @@ static int m88ts2022_probe(struct i2c_client *client,
{
struct m88ts2022_config *cfg = client->dev.platform_data;
struct dvb_frontend *fe = cfg->fe;
- struct m88ts2022_priv *priv;
+ struct m88ts2022_dev *dev;
int ret;
- u8 chip_id, u8tmp;
+ u8 u8tmp;
+ unsigned int utmp;
+ static const struct regmap_config regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ };
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
ret = -ENOMEM;
- dev_err(&client->dev, "%s: kzalloc() failed\n", KBUILD_MODNAME);
+ dev_err(&client->dev, "kzalloc() failed\n");
goto err;
}
- memcpy(&priv->cfg, cfg, sizeof(struct m88ts2022_config));
- priv->client = client;
+ memcpy(&dev->cfg, cfg, sizeof(struct m88ts2022_config));
+ dev->client = client;
+ dev->regmap = devm_regmap_init_i2c(client, &regmap_config);
+ if (IS_ERR(dev->regmap)) {
+ ret = PTR_ERR(dev->regmap);
+ goto err;
+ }
/* check if the tuner is there */
- ret = m88ts2022_rd_reg(priv, 0x00, &u8tmp);
+ ret = regmap_read(dev->regmap, 0x00, &utmp);
if (ret)
goto err;
- if ((u8tmp & 0x03) == 0x00) {
- ret = m88ts2022_wr_reg(priv, 0x00, 0x01);
- if (ret < 0)
+ if ((utmp & 0x03) == 0x00) {
+ ret = regmap_write(dev->regmap, 0x00, 0x01);
+ if (ret)
goto err;
usleep_range(2000, 50000);
}
- ret = m88ts2022_wr_reg(priv, 0x00, 0x03);
+ ret = regmap_write(dev->regmap, 0x00, 0x03);
if (ret)
goto err;
usleep_range(2000, 50000);
- ret = m88ts2022_rd_reg(priv, 0x00, &chip_id);
+ ret = regmap_read(dev->regmap, 0x00, &utmp);
if (ret)
goto err;
- dev_dbg(&priv->client->dev, "%s: chip_id=%02x\n", __func__, chip_id);
+ dev_dbg(&dev->client->dev, "chip_id=%02x\n", utmp);
- switch (chip_id) {
+ switch (utmp) {
case 0xc3:
case 0x83:
break;
@@ -587,13 +491,13 @@ static int m88ts2022_probe(struct i2c_client *client,
goto err;
}
- switch (priv->cfg.clock_out) {
+ switch (dev->cfg.clock_out) {
case M88TS2022_CLOCK_OUT_DISABLED:
u8tmp = 0x60;
break;
case M88TS2022_CLOCK_OUT_ENABLED:
u8tmp = 0x70;
- ret = m88ts2022_wr_reg(priv, 0x05, priv->cfg.clock_out_div);
+ ret = regmap_write(dev->regmap, 0x05, dev->cfg.clock_out_div);
if (ret)
goto err;
break;
@@ -604,49 +508,48 @@ static int m88ts2022_probe(struct i2c_client *client,
goto err;
}
- ret = m88ts2022_wr_reg(priv, 0x42, u8tmp);
+ ret = regmap_write(dev->regmap, 0x42, u8tmp);
if (ret)
goto err;
- if (priv->cfg.loop_through)
+ if (dev->cfg.loop_through)
u8tmp = 0xec;
else
u8tmp = 0x6c;
- ret = m88ts2022_wr_reg(priv, 0x62, u8tmp);
+ ret = regmap_write(dev->regmap, 0x62, u8tmp);
if (ret)
goto err;
/* sleep */
- ret = m88ts2022_wr_reg(priv, 0x00, 0x00);
+ ret = regmap_write(dev->regmap, 0x00, 0x00);
if (ret)
goto err;
- dev_info(&priv->client->dev,
- "%s: Montage M88TS2022 successfully identified\n",
- KBUILD_MODNAME);
+ dev_info(&dev->client->dev, "Montage M88TS2022 successfully identified\n");
- fe->tuner_priv = priv;
+ fe->tuner_priv = dev;
memcpy(&fe->ops.tuner_ops, &m88ts2022_tuner_ops,
sizeof(struct dvb_tuner_ops));
- i2c_set_clientdata(client, priv);
+ i2c_set_clientdata(client, dev);
return 0;
err:
- dev_dbg(&client->dev, "%s: failed=%d\n", __func__, ret);
- kfree(priv);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
+ kfree(dev);
return ret;
}
static int m88ts2022_remove(struct i2c_client *client)
{
- struct m88ts2022_priv *priv = i2c_get_clientdata(client);
- struct dvb_frontend *fe = priv->cfg.fe;
- dev_dbg(&client->dev, "%s:\n", __func__);
+ struct m88ts2022_dev *dev = i2c_get_clientdata(client);
+ struct dvb_frontend *fe = dev->cfg.fe;
+
+ dev_dbg(&client->dev, "\n");
memset(&fe->ops.tuner_ops, 0, sizeof(struct dvb_tuner_ops));
fe->tuner_priv = NULL;
- kfree(priv);
+ kfree(dev);
return 0;
}
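Note the argument order: the removed m88ts2022_wr_reg_mask() took (reg, val, mask) while regmap_update_bits() takes (reg, mask, val), which is why the 0x10/0x02 update above swaps its last two arguments. As an illustration of what the regmap one-liner replaces, here is an open-coded read-modify-write sketch with a hypothetical helper name (the real regmap_update_bits() additionally holds the regmap lock and skips the write when nothing changes):

#include <linux/regmap.h>

static int foo_update_bits(struct regmap *map, unsigned int reg,
                           unsigned int mask, unsigned int val)
{
        unsigned int tmp;
        int ret;

        ret = regmap_read(map, reg, &tmp);      /* read current register value */
        if (ret)
                return ret;

        tmp &= ~mask;                           /* clear the masked bits */
        tmp |= val & mask;                      /* set the requested bits */

        return regmap_write(map, reg, tmp);
}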
diff --git a/drivers/media/tuners/m88ts2022_priv.h b/drivers/media/tuners/m88ts2022_priv.h
index 0363dd866a2d..feeb5ad6beef 100644
--- a/drivers/media/tuners/m88ts2022_priv.h
+++ b/drivers/media/tuners/m88ts2022_priv.h
@@ -18,11 +18,12 @@
#define M88TS2022_PRIV_H
#include "m88ts2022.h"
+#include <linux/regmap.h>
-struct m88ts2022_priv {
+struct m88ts2022_dev {
struct m88ts2022_config cfg;
struct i2c_client *client;
- struct dvb_frontend *fe;
+ struct regmap *regmap;
u32 frequency_khz;
};
diff --git a/drivers/media/tuners/msi001.c b/drivers/media/tuners/msi001.c
index ee99e372c943..26019e731993 100644
--- a/drivers/media/tuners/msi001.c
+++ b/drivers/media/tuners/msi001.c
@@ -67,7 +67,8 @@ static int msi001_set_gain(struct msi001 *s, int lna_gain, int mixer_gain,
{
int ret;
u32 reg;
- dev_dbg(&s->spi->dev, "%s: lna=%d mixer=%d if=%d\n", __func__,
+
+ dev_dbg(&s->spi->dev, "lna=%d mixer=%d if=%d\n",
lna_gain, mixer_gain, if_gain);
reg = 1 << 0;
@@ -83,7 +84,7 @@ static int msi001_set_gain(struct msi001 *s, int lna_gain, int mixer_gain,
return 0;
err:
- dev_dbg(&s->spi->dev, "%s: failed %d\n", __func__, ret);
+ dev_dbg(&s->spi->dev, "failed %d\n", ret);
return ret;
};
@@ -94,6 +95,7 @@ static int msi001_set_tuner(struct msi001 *s)
u32 reg;
u64 f_vco, tmp64;
u8 mode, filter_mode, lo_div;
+
static const struct {
u32 rf;
u8 mode;
@@ -145,9 +147,7 @@ static int msi001_set_tuner(struct msi001 *s)
#define R_REF 4
#define F_OUT_STEP 1
- dev_dbg(&s->spi->dev,
- "%s: f_rf=%d f_if=%d\n",
- __func__, f_rf, f_if);
+ dev_dbg(&s->spi->dev, "f_rf=%d f_if=%d\n", f_rf, f_if);
for (i = 0; i < ARRAY_SIZE(band_lut); i++) {
if (f_rf <= band_lut[i].rf) {
@@ -198,8 +198,7 @@ static int msi001_set_tuner(struct msi001 *s)
s->bandwidth->val = bandwidth_lut[i].freq;
- dev_dbg(&s->spi->dev, "%s: bandwidth selected=%d\n",
- __func__, bandwidth_lut[i].freq);
+ dev_dbg(&s->spi->dev, "bandwidth selected=%d\n", bandwidth_lut[i].freq);
f_vco = (u64) (f_rf + f_if + f_if1) * lo_div;
tmp64 = f_vco;
@@ -225,9 +224,8 @@ static int msi001_set_tuner(struct msi001 *s)
tmp += 1ul * F_REF * R_REF * frac / thresh;
tmp /= lo_div;
- dev_dbg(&s->spi->dev,
- "%s: rf=%u:%u n=%d thresh=%d frac=%d\n",
- __func__, f_rf, tmp, n, thresh, frac);
+ dev_dbg(&s->spi->dev, "rf=%u:%u n=%d thresh=%d frac=%d\n",
+ f_rf, tmp, n, thresh, frac);
ret = msi001_wreg(s, 0x00000e);
if (ret)
@@ -276,7 +274,7 @@ static int msi001_set_tuner(struct msi001 *s)
return 0;
err:
- dev_dbg(&s->spi->dev, "%s: failed %d\n", __func__, ret);
+ dev_dbg(&s->spi->dev, "failed %d\n", ret);
return ret;
};
@@ -284,7 +282,8 @@ static int msi001_s_power(struct v4l2_subdev *sd, int on)
{
struct msi001 *s = sd_to_msi001(sd);
int ret;
- dev_dbg(&s->spi->dev, "%s: on=%d\n", __func__, on);
+
+ dev_dbg(&s->spi->dev, "on=%d\n", on);
if (on)
ret = 0;
@@ -301,7 +300,8 @@ static const struct v4l2_subdev_core_ops msi001_core_ops = {
static int msi001_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *v)
{
struct msi001 *s = sd_to_msi001(sd);
- dev_dbg(&s->spi->dev, "%s: index=%d\n", __func__, v->index);
+
+ dev_dbg(&s->spi->dev, "index=%d\n", v->index);
strlcpy(v->name, "Mirics MSi001", sizeof(v->name));
v->type = V4L2_TUNER_RF;
@@ -315,14 +315,16 @@ static int msi001_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *v)
static int msi001_s_tuner(struct v4l2_subdev *sd, const struct v4l2_tuner *v)
{
struct msi001 *s = sd_to_msi001(sd);
- dev_dbg(&s->spi->dev, "%s: index=%d\n", __func__, v->index);
+
+ dev_dbg(&s->spi->dev, "index=%d\n", v->index);
return 0;
}
static int msi001_g_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *f)
{
struct msi001 *s = sd_to_msi001(sd);
- dev_dbg(&s->spi->dev, "%s: tuner=%d\n", __func__, f->tuner);
+
+ dev_dbg(&s->spi->dev, "tuner=%d\n", f->tuner);
f->frequency = s->f_tuner;
return 0;
}
@@ -332,8 +334,9 @@ static int msi001_s_frequency(struct v4l2_subdev *sd,
{
struct msi001 *s = sd_to_msi001(sd);
unsigned int band;
- dev_dbg(&s->spi->dev, "%s: tuner=%d type=%d frequency=%u\n",
- __func__, f->tuner, f->type, f->frequency);
+
+ dev_dbg(&s->spi->dev, "tuner=%d type=%d frequency=%u\n",
+ f->tuner, f->type, f->frequency);
if (f->frequency < ((bands[0].rangehigh + bands[1].rangelow) / 2))
band = 0;
@@ -349,8 +352,9 @@ static int msi001_enum_freq_bands(struct v4l2_subdev *sd,
struct v4l2_frequency_band *band)
{
struct msi001 *s = sd_to_msi001(sd);
- dev_dbg(&s->spi->dev, "%s: tuner=%d type=%d index=%d\n",
- __func__, band->tuner, band->type, band->index);
+
+ dev_dbg(&s->spi->dev, "tuner=%d type=%d index=%d\n",
+ band->tuner, band->type, band->index);
if (band->index >= ARRAY_SIZE(bands))
return -EINVAL;
@@ -380,9 +384,10 @@ static int msi001_s_ctrl(struct v4l2_ctrl *ctrl)
struct msi001 *s = container_of(ctrl->handler, struct msi001, hdl);
int ret;
+
dev_dbg(&s->spi->dev,
- "%s: id=%d name=%s val=%d min=%lld max=%lld step=%lld\n",
- __func__, ctrl->id, ctrl->name, ctrl->val,
+ "id=%d name=%s val=%d min=%lld max=%lld step=%lld\n",
+ ctrl->id, ctrl->name, ctrl->val,
ctrl->minimum, ctrl->maximum, ctrl->step);
switch (ctrl->id) {
@@ -403,8 +408,7 @@ static int msi001_s_ctrl(struct v4l2_ctrl *ctrl)
s->mixer_gain->cur.val, s->if_gain->val);
break;
default:
- dev_dbg(&s->spi->dev, "%s: unkown control %d\n",
- __func__, ctrl->id);
+ dev_dbg(&s->spi->dev, "unkown control %d\n", ctrl->id);
ret = -EINVAL;
}
@@ -419,7 +423,8 @@ static int msi001_probe(struct spi_device *spi)
{
struct msi001 *s;
int ret;
- dev_dbg(&spi->dev, "%s:\n", __func__);
+
+ dev_dbg(&spi->dev, "\n");
s = kzalloc(sizeof(struct msi001), GFP_KERNEL);
if (s == NULL) {
@@ -466,7 +471,8 @@ static int msi001_remove(struct spi_device *spi)
{
struct v4l2_subdev *sd = spi_get_drvdata(spi);
struct msi001 *s = sd_to_msi001(sd);
- dev_dbg(&spi->dev, "%s:\n", __func__);
+
+ dev_dbg(&spi->dev, "\n");
/*
* Registered by v4l2_spi_new_subdev() from master driver, but we must
diff --git a/drivers/media/tuners/mt2060.c b/drivers/media/tuners/mt2060.c
index 13381de58a84..b87b2549d58d 100644
--- a/drivers/media/tuners/mt2060.c
+++ b/drivers/media/tuners/mt2060.c
@@ -157,7 +157,6 @@ static int mt2060_set_params(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct mt2060_priv *priv;
- int ret=0;
int i=0;
u32 freq;
u8 lnaband;
@@ -240,7 +239,7 @@ static int mt2060_set_params(struct dvb_frontend *fe)
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0); /* close i2c_gate */
- return ret;
+ return 0;
}
static void mt2060_calibrate(struct mt2060_priv *priv)
diff --git a/drivers/media/tuners/mt2063.c b/drivers/media/tuners/mt2063.c
index f640dcf4a81d..9e9c5eb4cb66 100644
--- a/drivers/media/tuners/mt2063.c
+++ b/drivers/media/tuners/mt2063.c
@@ -1216,7 +1216,7 @@ static u32 MT2063_SetReceiverMode(struct mt2063_state *state,
if (status >= 0) {
val =
(state->
- reg[MT2063_REG_PD1_TGT] & (u8) ~0x40) | (RFAGCEN[Mode]
+ reg[MT2063_REG_PD1_TGT] & ~0x40) | (RFAGCEN[Mode]
? 0x40 :
0x00);
if (state->reg[MT2063_REG_PD1_TGT] != val)
@@ -1225,7 +1225,7 @@ static u32 MT2063_SetReceiverMode(struct mt2063_state *state,
/* LNARin */
if (status >= 0) {
- u8 val = (state->reg[MT2063_REG_CTRL_2C] & (u8) ~0x03) |
+ u8 val = (state->reg[MT2063_REG_CTRL_2C] & ~0x03) |
(LNARIN[Mode] & 0x03);
if (state->reg[MT2063_REG_CTRL_2C] != val)
status |= mt2063_setreg(state, MT2063_REG_CTRL_2C, val);
@@ -1235,19 +1235,19 @@ static u32 MT2063_SetReceiverMode(struct mt2063_state *state,
if (status >= 0) {
val =
(state->
- reg[MT2063_REG_FIFF_CTRL2] & (u8) ~0xF0) |
+ reg[MT2063_REG_FIFF_CTRL2] & ~0xF0) |
(FIFFQEN[Mode] << 7) | (FIFFQ[Mode] << 4);
if (state->reg[MT2063_REG_FIFF_CTRL2] != val) {
status |=
mt2063_setreg(state, MT2063_REG_FIFF_CTRL2, val);
/* trigger FIFF calibration, needed after changing FIFFQ */
val =
- (state->reg[MT2063_REG_FIFF_CTRL] | (u8) 0x01);
+ (state->reg[MT2063_REG_FIFF_CTRL] | 0x01);
status |=
mt2063_setreg(state, MT2063_REG_FIFF_CTRL, val);
val =
(state->
- reg[MT2063_REG_FIFF_CTRL] & (u8) ~0x01);
+ reg[MT2063_REG_FIFF_CTRL] & ~0x01);
status |=
mt2063_setreg(state, MT2063_REG_FIFF_CTRL, val);
}
@@ -1259,7 +1259,7 @@ static u32 MT2063_SetReceiverMode(struct mt2063_state *state,
/* acLNAmax */
if (status >= 0) {
- u8 val = (state->reg[MT2063_REG_LNA_OV] & (u8) ~0x1F) |
+ u8 val = (state->reg[MT2063_REG_LNA_OV] & ~0x1F) |
(ACLNAMAX[Mode] & 0x1F);
if (state->reg[MT2063_REG_LNA_OV] != val)
status |= mt2063_setreg(state, MT2063_REG_LNA_OV, val);
@@ -1267,7 +1267,7 @@ static u32 MT2063_SetReceiverMode(struct mt2063_state *state,
/* LNATGT */
if (status >= 0) {
- u8 val = (state->reg[MT2063_REG_LNA_TGT] & (u8) ~0x3F) |
+ u8 val = (state->reg[MT2063_REG_LNA_TGT] & ~0x3F) |
(LNATGT[Mode] & 0x3F);
if (state->reg[MT2063_REG_LNA_TGT] != val)
status |= mt2063_setreg(state, MT2063_REG_LNA_TGT, val);
@@ -1275,7 +1275,7 @@ static u32 MT2063_SetReceiverMode(struct mt2063_state *state,
/* ACRF */
if (status >= 0) {
- u8 val = (state->reg[MT2063_REG_RF_OV] & (u8) ~0x1F) |
+ u8 val = (state->reg[MT2063_REG_RF_OV] & ~0x1F) |
(ACRFMAX[Mode] & 0x1F);
if (state->reg[MT2063_REG_RF_OV] != val)
status |= mt2063_setreg(state, MT2063_REG_RF_OV, val);
@@ -1283,7 +1283,7 @@ static u32 MT2063_SetReceiverMode(struct mt2063_state *state,
/* PD1TGT */
if (status >= 0) {
- u8 val = (state->reg[MT2063_REG_PD1_TGT] & (u8) ~0x3F) |
+ u8 val = (state->reg[MT2063_REG_PD1_TGT] & ~0x3F) |
(PD1TGT[Mode] & 0x3F);
if (state->reg[MT2063_REG_PD1_TGT] != val)
status |= mt2063_setreg(state, MT2063_REG_PD1_TGT, val);
@@ -1294,7 +1294,7 @@ static u32 MT2063_SetReceiverMode(struct mt2063_state *state,
u8 val = ACFIFMAX[Mode];
if (state->reg[MT2063_REG_PART_REV] != MT2063_B3 && val > 5)
val = 5;
- val = (state->reg[MT2063_REG_FIF_OV] & (u8) ~0x1F) |
+ val = (state->reg[MT2063_REG_FIF_OV] & ~0x1F) |
(val & 0x1F);
if (state->reg[MT2063_REG_FIF_OV] != val)
status |= mt2063_setreg(state, MT2063_REG_FIF_OV, val);
@@ -1302,7 +1302,7 @@ static u32 MT2063_SetReceiverMode(struct mt2063_state *state,
/* PD2TGT */
if (status >= 0) {
- u8 val = (state->reg[MT2063_REG_PD2_TGT] & (u8) ~0x3F) |
+ u8 val = (state->reg[MT2063_REG_PD2_TGT] & ~0x3F) |
(PD2TGT[Mode] & 0x3F);
if (state->reg[MT2063_REG_PD2_TGT] != val)
status |= mt2063_setreg(state, MT2063_REG_PD2_TGT, val);
@@ -1310,7 +1310,7 @@ static u32 MT2063_SetReceiverMode(struct mt2063_state *state,
/* Ignore ATN Overload */
if (status >= 0) {
- val = (state->reg[MT2063_REG_LNA_TGT] & (u8) ~0x80) |
+ val = (state->reg[MT2063_REG_LNA_TGT] & ~0x80) |
(RFOVDIS[Mode] ? 0x80 : 0x00);
if (state->reg[MT2063_REG_LNA_TGT] != val)
status |= mt2063_setreg(state, MT2063_REG_LNA_TGT, val);
@@ -1318,7 +1318,7 @@ static u32 MT2063_SetReceiverMode(struct mt2063_state *state,
/* Ignore FIF Overload */
if (status >= 0) {
- val = (state->reg[MT2063_REG_PD1_TGT] & (u8) ~0x80) |
+ val = (state->reg[MT2063_REG_PD1_TGT] & ~0x80) |
(FIFOVDIS[Mode] ? 0x80 : 0x00);
if (state->reg[MT2063_REG_PD1_TGT] != val)
status |= mt2063_setreg(state, MT2063_REG_PD1_TGT, val);
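The deleted (u8) casts were redundant: both operands of & are promoted to int, so ~0x40 is computed as an int either way, and assigning the result back to a u8 truncates it to eight bits regardless of the cast. A minimal sketch illustrating the equivalence:

#include <linux/types.h>

static u8 clear_bit6_with_cast(u8 reg)
{
        return reg & (u8) ~0x40;        /* cast truncates ~0x40 to 0xbf first */
}

static u8 clear_bit6_without_cast(u8 reg)
{
        return reg & ~0x40;             /* promote to int, truncate on return */
}

/* Both functions return the same value for every possible 8-bit input. */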
diff --git a/drivers/media/tuners/mxl301rf.c b/drivers/media/tuners/mxl301rf.c
new file mode 100644
index 000000000000..1575a5db776a
--- /dev/null
+++ b/drivers/media/tuners/mxl301rf.c
@@ -0,0 +1,349 @@
+/*
+ * MaxLinear MxL301RF OFDM tuner driver
+ *
+ * Copyright (C) 2014 Akihiro Tsukada <tskd08@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * NOTICE:
+ * This driver is incomplete and lacks init/config of the chips,
+ * as the necessary info is not disclosed.
+ * Other features like get_if_frequency() are missing as well.
+ * It assumes that users of this driver (such as a PCI bridge of
+ * DTV receiver cards) properly init and configure the chip
+ * via I2C *before* calling this driver's init() function.
+ *
+ * Currently, PT3 driver is the only one that uses this driver,
+ * and contains init/config code in its firmware.
+ * Thus some part of the code might be dependent on PT3 specific config.
+ */
+
+#include <linux/kernel.h>
+#include "mxl301rf.h"
+
+struct mxl301rf_state {
+ struct mxl301rf_config cfg;
+ struct i2c_client *i2c;
+};
+
+static struct mxl301rf_state *cfg_to_state(struct mxl301rf_config *c)
+{
+ return container_of(c, struct mxl301rf_state, cfg);
+}
+
+static int raw_write(struct mxl301rf_state *state, const u8 *buf, int len)
+{
+ int ret;
+
+ ret = i2c_master_send(state->i2c, buf, len);
+ if (ret >= 0 && ret < len)
+ ret = -EIO;
+ return (ret == len) ? 0 : ret;
+}
+
+static int reg_write(struct mxl301rf_state *state, u8 reg, u8 val)
+{
+ u8 buf[2] = { reg, val };
+
+ return raw_write(state, buf, 2);
+}
+
+static int reg_read(struct mxl301rf_state *state, u8 reg, u8 *val)
+{
+ u8 wbuf[2] = { 0xfb, reg };
+ int ret;
+
+ ret = raw_write(state, wbuf, sizeof(wbuf));
+ if (ret == 0)
+ ret = i2c_master_recv(state->i2c, val, 1);
+ if (ret >= 0 && ret < 1)
+ ret = -EIO;
+ return (ret == 1) ? 0 : ret;
+}
+
+/* tuner_ops */
+
+/* get RSSI and update the property cache; set *out in % */
+static int mxl301rf_get_rf_strength(struct dvb_frontend *fe, u16 *out)
+{
+ struct mxl301rf_state *state;
+ int ret;
+ u8 rf_in1, rf_in2, rf_off1, rf_off2;
+ u16 rf_in, rf_off;
+ s64 level;
+ struct dtv_fe_stats *rssi;
+
+ rssi = &fe->dtv_property_cache.strength;
+ rssi->len = 1;
+ rssi->stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ *out = 0;
+
+ state = fe->tuner_priv;
+ ret = reg_write(state, 0x14, 0x01);
+ if (ret < 0)
+ return ret;
+ usleep_range(1000, 2000);
+
+ ret = reg_read(state, 0x18, &rf_in1);
+ if (ret == 0)
+ ret = reg_read(state, 0x19, &rf_in2);
+ if (ret == 0)
+ ret = reg_read(state, 0xd6, &rf_off1);
+ if (ret == 0)
+ ret = reg_read(state, 0xd7, &rf_off2);
+ if (ret != 0)
+ return ret;
+
+ rf_in = (rf_in2 & 0x07) << 8 | rf_in1;
+ rf_off = (rf_off2 & 0x0f) << 5 | (rf_off1 >> 3);
+ level = rf_in - rf_off - (113 << 3); /* x8 dBm */
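+ /* convert to 0.001 dBm units, as expected for FE_SCALE_DECIBEL */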
+ level = level * 1000 / 8;
+ rssi->stat[0].svalue = level;
+ rssi->stat[0].scale = FE_SCALE_DECIBEL;
+ /* *out = (level - min) * 100 / (max - min) */
+ *out = (rf_in - rf_off + (1 << 9) - 1) * 100 / ((5 << 9) - 2);
+ return 0;
+}
+
+/* spur shift parameters */
+struct shf {
+ u32 freq; /* Channel center frequency [kHz] */
+ u32 ofst_th; /* Offset frequency threshold [kHz] */
+ u8 shf_val; /* Spur shift value */
+ u8 shf_dir; /* Spur shift direction */
+};
+
+static const struct shf shf_tab[] = {
+ { 64500, 500, 0x92, 0x07 },
+ { 191500, 300, 0xe2, 0x07 },
+ { 205500, 500, 0x2c, 0x04 },
+ { 212500, 500, 0x1e, 0x04 },
+ { 226500, 500, 0xd4, 0x07 },
+ { 99143, 500, 0x9c, 0x07 },
+ { 173143, 500, 0xd4, 0x07 },
+ { 191143, 300, 0xd4, 0x07 },
+ { 207143, 500, 0xce, 0x07 },
+ { 225143, 500, 0xce, 0x07 },
+ { 243143, 500, 0xd4, 0x07 },
+ { 261143, 500, 0xd4, 0x07 },
+ { 291143, 500, 0xd4, 0x07 },
+ { 339143, 500, 0x2c, 0x04 },
+ { 117143, 500, 0x7a, 0x07 },
+ { 135143, 300, 0x7a, 0x07 },
+ { 153143, 500, 0x01, 0x07 }
+};
+
+struct reg_val {
+ u8 reg;
+ u8 val;
+} __attribute__ ((__packed__));
+
+static const struct reg_val set_idac[] = {
+ { 0x0d, 0x00 },
+ { 0x0c, 0x67 },
+ { 0x6f, 0x89 },
+ { 0x70, 0x0c },
+ { 0x6f, 0x8a },
+ { 0x70, 0x0e },
+ { 0x6f, 0x8b },
+ { 0x70, 0x1c },
+};
+
+static int mxl301rf_set_params(struct dvb_frontend *fe)
+{
+ struct reg_val tune0[] = {
+ { 0x13, 0x00 }, /* abort tuning */
+ { 0x3b, 0xc0 },
+ { 0x3b, 0x80 },
+ { 0x10, 0x95 }, /* BW */
+ { 0x1a, 0x05 },
+ { 0x61, 0x00 }, /* spur shift value (placeholder) */
+ { 0x62, 0xa0 } /* spur shift direction (placeholder) */
+ };
+
+ struct reg_val tune1[] = {
+ { 0x11, 0x40 }, /* RF frequency L (placeholder) */
+ { 0x12, 0x0e }, /* RF frequency H (placeholder) */
+ { 0x13, 0x01 } /* start tune */
+ };
+
+ struct mxl301rf_state *state;
+ u32 freq;
+ u16 f;
+ u32 tmp, div;
+ int i, ret;
+
+ state = fe->tuner_priv;
+ freq = fe->dtv_property_cache.frequency;
+
+ /* spur shift function (for analog) */
+ for (i = 0; i < ARRAY_SIZE(shf_tab); i++) {
+ if (freq >= (shf_tab[i].freq - shf_tab[i].ofst_th) * 1000 &&
+ freq <= (shf_tab[i].freq + shf_tab[i].ofst_th) * 1000) {
+ tune0[5].val = shf_tab[i].shf_val;
+ tune0[6].val = 0xa0 | shf_tab[i].shf_dir;
+ break;
+ }
+ }
+ ret = raw_write(state, (u8 *) tune0, sizeof(tune0));
+ if (ret < 0)
+ goto failed;
+ usleep_range(3000, 4000);
+
+ /* convert freq to a 10.6 fixed-point value [MHz] */
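+ /* e.g. 473142857 Hz -> 473.142857 MHz -> (473 << 6) + 9 = 30281 */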
+ f = freq / 1000000;
+ tmp = freq % 1000000;
+ div = 1000000;
+ for (i = 0; i < 6; i++) {
+ f <<= 1;
+ div >>= 1;
+ if (tmp > div) {
+ tmp -= div;
+ f |= 1;
+ }
+ }
+ if (tmp > 7812)
+ f++;
+ tune1[0].val = f & 0xff;
+ tune1[1].val = f >> 8;
+ ret = raw_write(state, (u8 *) tune1, sizeof(tune1));
+ if (ret < 0)
+ goto failed;
+ msleep(31);
+
+ ret = reg_write(state, 0x1a, 0x0d);
+ if (ret < 0)
+ goto failed;
+ ret = raw_write(state, (u8 *) set_idac, sizeof(set_idac));
+ if (ret < 0)
+ goto failed;
+ return 0;
+
+failed:
+ dev_warn(&state->i2c->dev, "(%s) failed. [adap%d-fe%d]\n",
+ __func__, fe->dvb->num, fe->id);
+ return ret;
+}
+
+static const struct reg_val standby_data[] = {
+ { 0x01, 0x00 },
+ { 0x13, 0x00 }
+};
+
+static int mxl301rf_sleep(struct dvb_frontend *fe)
+{
+ struct mxl301rf_state *state;
+ int ret;
+
+ state = fe->tuner_priv;
+ ret = raw_write(state, (u8 *)standby_data, sizeof(standby_data));
+ if (ret < 0)
+ dev_warn(&state->i2c->dev, "(%s) failed. [adap%d-fe%d]\n",
+ __func__, fe->dvb->num, fe->id);
+ return ret;
+}
+
+
+/*
+ * The init sequence is not public.
+ * The parent driver must have initialized the device already;
+ * just wake it up here.
+ */
+static int mxl301rf_init(struct dvb_frontend *fe)
+{
+ struct mxl301rf_state *state;
+ int ret;
+
+ state = fe->tuner_priv;
+
+ ret = reg_write(state, 0x01, 0x01);
+ if (ret < 0) {
+ dev_warn(&state->i2c->dev, "(%s) failed. [adap%d-fe%d]\n",
+ __func__, fe->dvb->num, fe->id);
+ return ret;
+ }
+ return 0;
+}
+
+/* I2C driver functions */
+
+static const struct dvb_tuner_ops mxl301rf_ops = {
+ .info = {
+ .name = "MaxLinear MxL301RF",
+
+ .frequency_min = 93000000,
+ .frequency_max = 803142857,
+ },
+
+ .init = mxl301rf_init,
+ .sleep = mxl301rf_sleep,
+
+ .set_params = mxl301rf_set_params,
+ .get_rf_strength = mxl301rf_get_rf_strength,
+};
+
+
+static int mxl301rf_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct mxl301rf_state *state;
+ struct mxl301rf_config *cfg;
+ struct dvb_frontend *fe;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ state->i2c = client;
+ cfg = client->dev.platform_data;
+
+ memcpy(&state->cfg, cfg, sizeof(state->cfg));
+ fe = cfg->fe;
+ fe->tuner_priv = state;
+ memcpy(&fe->ops.tuner_ops, &mxl301rf_ops, sizeof(mxl301rf_ops));
+
+ i2c_set_clientdata(client, &state->cfg);
+ dev_info(&client->dev, "MaxLinear MxL301RF attached.\n");
+ return 0;
+}
+
+static int mxl301rf_remove(struct i2c_client *client)
+{
+ struct mxl301rf_state *state;
+
+ state = cfg_to_state(i2c_get_clientdata(client));
+ state->cfg.fe->tuner_priv = NULL;
+ kfree(state);
+ return 0;
+}
+
+
+static const struct i2c_device_id mxl301rf_id[] = {
+ {"mxl301rf", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, mxl301rf_id);
+
+static struct i2c_driver mxl301rf_driver = {
+ .driver = {
+ .name = "mxl301rf",
+ },
+ .probe = mxl301rf_probe,
+ .remove = mxl301rf_remove,
+ .id_table = mxl301rf_id,
+};
+
+module_i2c_driver(mxl301rf_driver);
+
+MODULE_DESCRIPTION("MaxLinear MXL301RF tuner");
+MODULE_AUTHOR("Akihiro TSUKADA");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/tuners/mxl301rf.h b/drivers/media/tuners/mxl301rf.h
new file mode 100644
index 000000000000..19e68405f00d
--- /dev/null
+++ b/drivers/media/tuners/mxl301rf.h
@@ -0,0 +1,26 @@
+/*
+ * MaxLinear MxL301RF OFDM tuner driver
+ *
+ * Copyright (C) 2014 Akihiro Tsukada <tskd08@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MXL301RF_H
+#define MXL301RF_H
+
+#include "dvb_frontend.h"
+
+struct mxl301rf_config {
+ struct dvb_frontend *fe;
+};
+
+#endif /* MXL301RF_H */
diff --git a/drivers/media/tuners/mxl5005s.c b/drivers/media/tuners/mxl5005s.c
index b473b76cb278..92a3be4fde87 100644
--- a/drivers/media/tuners/mxl5005s.c
+++ b/drivers/media/tuners/mxl5005s.c
@@ -1692,7 +1692,6 @@ static u16 MXL5005_TunerConfig(struct dvb_frontend *fe,
)
{
struct mxl5005s_state *state = fe->tuner_priv;
- u16 status = 0;
state->Mode = Mode;
state->IF_Mode = IF_mode;
@@ -1715,7 +1714,7 @@ static u16 MXL5005_TunerConfig(struct dvb_frontend *fe,
/* Synthesizer LO frequency calculation */
MXL_SynthIFLO_Calc(fe);
- return status;
+ return 0;
}
static void MXL_SynthIFLO_Calc(struct dvb_frontend *fe)
diff --git a/drivers/media/tuners/qm1d1c0042.c b/drivers/media/tuners/qm1d1c0042.c
new file mode 100644
index 000000000000..18bc745ed108
--- /dev/null
+++ b/drivers/media/tuners/qm1d1c0042.c
@@ -0,0 +1,448 @@
+/*
+ * Sharp QM1D1C0042 8PSK tuner driver
+ *
+ * Copyright (C) 2014 Akihiro Tsukada <tskd08@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * NOTICE:
+ * As the disclosed information on the chip is very limited,
+ * this driver lacks some features, including chip configuration
+ * such as the IF frequency.
+ * It assumes that users of this driver (such as the PCI bridge driver
+ * of a DTV receiver card) know the relevant information and
+ * configure the chip via I2C if necessary.
+ *
+ * Currently, the PT3 driver is the only one that uses this driver,
+ * and it contains the init/config code in its firmware.
+ * Thus some parts of the code might depend on PT3-specific configuration.
+ */
+
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include "qm1d1c0042.h"
+
+#define QM1D1C0042_NUM_REGS 0x20
+
+static const u8 reg_initval[QM1D1C0042_NUM_REGS] = {
+ 0x48, 0x1c, 0xa0, 0x10, 0xbc, 0xc5, 0x20, 0x33,
+ 0x06, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
+ 0x00, 0xff, 0xf3, 0x00, 0x2a, 0x64, 0xa6, 0x86,
+ 0x8c, 0xcf, 0xb8, 0xf1, 0xa8, 0xf2, 0x89, 0x00
+};
+
+static const struct qm1d1c0042_config default_cfg = {
+ .xtal_freq = 16000,
+ .lpf = 1,
+ .fast_srch = 0,
+ .lpf_wait = 20,
+ .fast_srch_wait = 4,
+ .normal_srch_wait = 15,
+};
+
+struct qm1d1c0042_state {
+ struct qm1d1c0042_config cfg;
+ struct i2c_client *i2c;
+ u8 regs[QM1D1C0042_NUM_REGS];
+};
+
+static struct qm1d1c0042_state *cfg_to_state(struct qm1d1c0042_config *c)
+{
+ return container_of(c, struct qm1d1c0042_state, cfg);
+}
+
+static int reg_write(struct qm1d1c0042_state *state, u8 reg, u8 val)
+{
+ u8 wbuf[2] = { reg, val };
+ int ret;
+
+ ret = i2c_master_send(state->i2c, wbuf, sizeof(wbuf));
+ if (ret >= 0 && ret < sizeof(wbuf))
+ ret = -EIO;
+ return (ret == sizeof(wbuf)) ? 0 : ret;
+}
+
+static int reg_read(struct qm1d1c0042_state *state, u8 reg, u8 *val)
+{
+ struct i2c_msg msgs[2] = {
+ {
+ .addr = state->i2c->addr,
+ .flags = 0,
+ .buf = &reg,
+ .len = 1,
+ },
+ {
+ .addr = state->i2c->addr,
+ .flags = I2C_M_RD,
+ .buf = val,
+ .len = 1,
+ },
+ };
+ int ret;
+
+ ret = i2c_transfer(state->i2c->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret >= 0 && ret < ARRAY_SIZE(msgs))
+ ret = -EIO;
+ return (ret == ARRAY_SIZE(msgs)) ? 0 : ret;
+}
+
+
+static int qm1d1c0042_set_srch_mode(struct qm1d1c0042_state *state, bool fast)
+{
+ if (fast)
+ state->regs[0x03] |= 0x01; /* set fast search mode */
+ else
+ state->regs[0x03] &= ~0x01 & 0xff;
+
+ return reg_write(state, 0x03, state->regs[0x03]);
+}
+
+static int qm1d1c0042_wakeup(struct qm1d1c0042_state *state)
+{
+ int ret;
+
+ state->regs[0x01] |= 1 << 3; /* BB_Reg_enable */
+ state->regs[0x01] &= (~(1 << 0)) & 0xff; /* NORMAL (wake-up) */
+ state->regs[0x05] &= (~(1 << 3)) & 0xff; /* pfd_rst NORMAL */
+ ret = reg_write(state, 0x01, state->regs[0x01]);
+ if (ret == 0)
+ ret = reg_write(state, 0x05, state->regs[0x05]);
+
+ if (ret < 0)
+ dev_warn(&state->i2c->dev, "(%s) failed. [adap%d-fe%d]\n",
+ __func__, state->cfg.fe->dvb->num, state->cfg.fe->id);
+ return ret;
+}
+
+/* tuner_ops */
+
+static int qm1d1c0042_set_config(struct dvb_frontend *fe, void *priv_cfg)
+{
+ struct qm1d1c0042_state *state;
+ struct qm1d1c0042_config *cfg;
+
+ state = fe->tuner_priv;
+ cfg = priv_cfg;
+
+ if (cfg->fe)
+ state->cfg.fe = cfg->fe;
+
+ if (cfg->xtal_freq != QM1D1C0042_CFG_XTAL_DFLT)
+ dev_warn(&state->i2c->dev,
+ "(%s) changing xtal_freq not supported. ", __func__);
+ state->cfg.xtal_freq = default_cfg.xtal_freq;
+
+ state->cfg.lpf = cfg->lpf;
+ state->cfg.fast_srch = cfg->fast_srch;
+
+ if (cfg->lpf_wait != QM1D1C0042_CFG_WAIT_DFLT)
+ state->cfg.lpf_wait = cfg->lpf_wait;
+ else
+ state->cfg.lpf_wait = default_cfg.lpf_wait;
+
+ if (cfg->fast_srch_wait != QM1D1C0042_CFG_WAIT_DFLT)
+ state->cfg.fast_srch_wait = cfg->fast_srch_wait;
+ else
+ state->cfg.fast_srch_wait = default_cfg.fast_srch_wait;
+
+ if (cfg->normal_srch_wait != QM1D1C0042_CFG_WAIT_DFLT)
+ state->cfg.normal_srch_wait = cfg->normal_srch_wait;
+ else
+ state->cfg.normal_srch_wait = default_cfg.normal_srch_wait;
+ return 0;
+}
+
+/* divisor, vco_band parameters */
+/* {maxfreq, param1(band?), param2(div?)} */
+static const u32 conv_table[9][3] = {
+ { 2151000, 1, 7 },
+ { 1950000, 1, 6 },
+ { 1800000, 1, 5 },
+ { 1600000, 1, 4 },
+ { 1450000, 1, 3 },
+ { 1250000, 1, 2 },
+ { 1200000, 0, 7 },
+ { 975000, 0, 6 },
+ { 950000, 0, 0 }
+};
+
+static int qm1d1c0042_set_params(struct dvb_frontend *fe)
+{
+ struct qm1d1c0042_state *state;
+ u32 freq;
+ int i, ret;
+ u8 val, mask;
+ u32 a, sd;
+ s32 b;
+
+ state = fe->tuner_priv;
+ freq = fe->dtv_property_cache.frequency;
+
+ state->regs[0x08] &= 0xf0;
+ state->regs[0x08] |= 0x09;
+
+ state->regs[0x13] &= 0x9f;
+ state->regs[0x13] |= 0x20;
+
+ /* div2/vco_band */
+ val = state->regs[0x02] & 0x0f;
+ for (i = 0; i < 8; i++)
+ if (freq < conv_table[i][0] && freq >= conv_table[i + 1][0]) {
+ val |= conv_table[i][1] << 7;
+ val |= conv_table[i][2] << 4;
+ break;
+ }
+ ret = reg_write(state, 0x02, val);
+ if (ret < 0)
+ return ret;
+
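+ /* a = freq / xtal_freq, rounded to the nearest integer */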
+ a = (freq + state->cfg.xtal_freq / 2) / state->cfg.xtal_freq;
+
+ state->regs[0x06] &= 0x40;
+ state->regs[0x06] |= (a - 12) / 4;
+ ret = reg_write(state, 0x06, state->regs[0x06]);
+ if (ret < 0)
+ return ret;
+
+ state->regs[0x07] &= 0xf0;
+ state->regs[0x07] |= (a - 4 * ((a - 12) / 4 + 1) - 5) & 0x0f;
+ ret = reg_write(state, 0x07, state->regs[0x07]);
+ if (ret < 0)
+ return ret;
+
+ /* LPF */
+ val = state->regs[0x08];
+ if (state->cfg.lpf) {
+ /* LPF_CLK, LPF_FC */
+ val &= 0xf0;
+ val |= 0x02;
+ }
+ ret = reg_write(state, 0x08, val);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * b = (freq / state->cfg.xtal_freq - a) << 20;
+ * sd = b (b >= 0)
+ * 1<<22 + b (b < 0)
+ */
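+ /* e.g. freq = 1318000, xtal_freq = 16000: a = 82, b = 0.375 << 20 = 393216 */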
+ b = (s32)div64_s64(((s64) freq) << 20, state->cfg.xtal_freq)
+ - (((s64) a) << 20);
+
+ if (b >= 0)
+ sd = b;
+ else
+ sd = (1 << 22) + b;
+
+ state->regs[0x09] &= 0xc0;
+ state->regs[0x09] |= (sd >> 16) & 0x3f;
+ state->regs[0x0a] = (sd >> 8) & 0xff;
+ state->regs[0x0b] = sd & 0xff;
+ ret = reg_write(state, 0x09, state->regs[0x09]);
+ if (ret == 0)
+ ret = reg_write(state, 0x0a, state->regs[0x0a]);
+ if (ret == 0)
+ ret = reg_write(state, 0x0b, state->regs[0x0b]);
+ if (ret != 0)
+ return ret;
+
+ if (!state->cfg.lpf) {
+ /* CSEL_Offset */
+ ret = reg_write(state, 0x13, state->regs[0x13]);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* VCO_TM, LPF_TM */
+ mask = state->cfg.lpf ? 0x3f : 0x7f;
+ val = state->regs[0x0c] & mask;
+ ret = reg_write(state, 0x0c, val);
+ if (ret < 0)
+ return ret;
+ usleep_range(2000, 3000);
+ val = state->regs[0x0c] | ~mask;
+ ret = reg_write(state, 0x0c, val);
+ if (ret < 0)
+ return ret;
+
+ if (state->cfg.lpf)
+ msleep(state->cfg.lpf_wait);
+ else if (state->regs[0x03] & 0x01)
+ msleep(state->cfg.fast_srch_wait);
+ else
+ msleep(state->cfg.normal_srch_wait);
+
+ if (state->cfg.lpf) {
+ /* LPF_FC */
+ ret = reg_write(state, 0x08, 0x09);
+ if (ret < 0)
+ return ret;
+
+ /* CSEL_Offset */
+ ret = reg_write(state, 0x13, state->regs[0x13]);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+static int qm1d1c0042_sleep(struct dvb_frontend *fe)
+{
+ struct qm1d1c0042_state *state;
+ int ret;
+
+ state = fe->tuner_priv;
+ state->regs[0x01] &= (~(1 << 3)) & 0xff; /* BB_Reg_disable */
+ state->regs[0x01] |= 1 << 0; /* STDBY */
+ state->regs[0x05] |= 1 << 3; /* pfd_rst STANDBY */
+ ret = reg_write(state, 0x05, state->regs[0x05]);
+ if (ret == 0)
+ ret = reg_write(state, 0x01, state->regs[0x01]);
+ if (ret < 0)
+ dev_warn(&state->i2c->dev, "(%s) failed. [adap%d-fe%d]\n",
+ __func__, fe->dvb->num, fe->id);
+ return ret;
+}
+
+static int qm1d1c0042_init(struct dvb_frontend *fe)
+{
+ struct qm1d1c0042_state *state;
+ u8 val;
+ int i, ret;
+
+ state = fe->tuner_priv;
+ memcpy(state->regs, reg_initval, sizeof(reg_initval));
+
+ reg_write(state, 0x01, 0x0c);
+ reg_write(state, 0x01, 0x0c);
+
+ ret = reg_write(state, 0x01, 0x0c); /* soft reset on */
+ if (ret < 0)
+ goto failed;
+ usleep_range(2000, 3000);
+
+ val = state->regs[0x01] | 0x10;
+ ret = reg_write(state, 0x01, val); /* soft reset off */
+ if (ret < 0)
+ goto failed;
+
+ /* check ID */
+ ret = reg_read(state, 0x00, &val);
+ if (ret < 0 || val != 0x48)
+ goto failed;
+ usleep_range(2000, 3000);
+
+ state->regs[0x0c] |= 0x40;
+ ret = reg_write(state, 0x0c, state->regs[0x0c]);
+ if (ret < 0)
+ goto failed;
+ msleep(state->cfg.lpf_wait);
+
+ /* set all writable registers */
+ for (i = 1; i <= 0x0c ; i++) {
+ ret = reg_write(state, i, state->regs[i]);
+ if (ret < 0)
+ goto failed;
+ }
+ for (i = 0x11; i < QM1D1C0042_NUM_REGS; i++) {
+ ret = reg_write(state, i, state->regs[i]);
+ if (ret < 0)
+ goto failed;
+ }
+
+ ret = qm1d1c0042_wakeup(state);
+ if (ret < 0)
+ goto failed;
+
+ ret = qm1d1c0042_set_srch_mode(state, state->cfg.fast_srch);
+ if (ret < 0)
+ goto failed;
+
+ return ret;
+
+failed:
+ dev_warn(&state->i2c->dev, "(%s) failed. [adap%d-fe%d]\n",
+ __func__, fe->dvb->num, fe->id);
+ return ret;
+}
+
+/* I2C driver functions */
+
+static const struct dvb_tuner_ops qm1d1c0042_ops = {
+ .info = {
+ .name = "Sharp QM1D1C0042",
+
+ .frequency_min = 950000,
+ .frequency_max = 2150000,
+ },
+
+ .init = qm1d1c0042_init,
+ .sleep = qm1d1c0042_sleep,
+ .set_config = qm1d1c0042_set_config,
+ .set_params = qm1d1c0042_set_params,
+};
+
+
+static int qm1d1c0042_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct qm1d1c0042_state *state;
+ struct qm1d1c0042_config *cfg;
+ struct dvb_frontend *fe;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+ state->i2c = client;
+
+ cfg = client->dev.platform_data;
+ fe = cfg->fe;
+ fe->tuner_priv = state;
+ qm1d1c0042_set_config(fe, cfg);
+ memcpy(&fe->ops.tuner_ops, &qm1d1c0042_ops, sizeof(qm1d1c0042_ops));
+
+ i2c_set_clientdata(client, &state->cfg);
+ dev_info(&client->dev, "Sharp QM1D1C0042 attached.\n");
+ return 0;
+}
+
+static int qm1d1c0042_remove(struct i2c_client *client)
+{
+ struct qm1d1c0042_state *state;
+
+ state = cfg_to_state(i2c_get_clientdata(client));
+ state->cfg.fe->tuner_priv = NULL;
+ kfree(state);
+ return 0;
+}
+
+
+static const struct i2c_device_id qm1d1c0042_id[] = {
+ {"qm1d1c0042", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, qm1d1c0042_id);
+
+static struct i2c_driver qm1d1c0042_driver = {
+ .driver = {
+ .name = "qm1d1c0042",
+ },
+ .probe = qm1d1c0042_probe,
+ .remove = qm1d1c0042_remove,
+ .id_table = qm1d1c0042_id,
+};
+
+module_i2c_driver(qm1d1c0042_driver);
+
+MODULE_DESCRIPTION("Sharp QM1D1C0042 tuner");
+MODULE_AUTHOR("Akihiro TSUKADA");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/tuners/qm1d1c0042.h b/drivers/media/tuners/qm1d1c0042.h
new file mode 100644
index 000000000000..4f5c18816c44
--- /dev/null
+++ b/drivers/media/tuners/qm1d1c0042.h
@@ -0,0 +1,37 @@
+/*
+ * Sharp QM1D1C0042 8PSK tuner driver
+ *
+ * Copyright (C) 2014 Akihiro Tsukada <tskd08@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef QM1D1C0042_H
+#define QM1D1C0042_H
+
+#include "dvb_frontend.h"
+
+
+struct qm1d1c0042_config {
+ struct dvb_frontend *fe;
+
+ u32 xtal_freq; /* [kHz], currently ignored */
+ bool lpf; /* enable LPF */
+ bool fast_srch; /* enable fast search mode, no LPF */
+ u32 lpf_wait; /* wait in tuning with LPF enabled. [ms] */
+ u32 fast_srch_wait; /* with fast-search mode, no LPF. [ms] */
+ u32 normal_srch_wait; /* with no LPF/fast-search mode. [ms] */
+};
+/* special values for qm1d1c0042_config fields that select the driver defaults */
+#define QM1D1C0042_CFG_XTAL_DFLT 0
+#define QM1D1C0042_CFG_WAIT_DFLT 0
+
+#endif /* QM1D1C0042_H */
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
index 6c53edb73a63..cf97142e01e6 100644
--- a/drivers/media/tuners/si2157.c
+++ b/drivers/media/tuners/si2157.c
@@ -1,5 +1,5 @@
/*
- * Silicon Labs Si2157/2158 silicon tuner driver
+ * Silicon Labs Si2147/2157/2158 silicon tuner driver
*
* Copyright (C) 2014 Antti Palosaari <crope@iki.fi>
*
@@ -55,8 +55,7 @@ static int si2157_cmd_execute(struct si2157 *s, struct si2157_cmd *cmd)
break;
}
- dev_dbg(&s->client->dev, "%s: cmd execution took %d ms\n",
- __func__,
+ dev_dbg(&s->client->dev, "cmd execution took %d ms\n",
jiffies_to_msecs(jiffies) -
(jiffies_to_msecs(timeout) - TIMEOUT));
@@ -75,7 +74,7 @@ err_mutex_unlock:
return 0;
err:
- dev_dbg(&s->client->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&s->client->dev, "failed=%d\n", ret);
return ret;
}
@@ -88,9 +87,12 @@ static int si2157_init(struct dvb_frontend *fe)
u8 *fw_file;
unsigned int chip_id;
- dev_dbg(&s->client->dev, "%s:\n", __func__);
+ dev_dbg(&s->client->dev, "\n");
- /* configure? */
+ if (s->fw_loaded)
+ goto warm;
+
+ /* power up */
memcpy(cmd.args, "\xc0\x00\x0c\x00\x00\x01\x01\x01\x01\x01\x01\x02\x00\x00\x01", 15);
cmd.wlen = 15;
cmd.rlen = 1;
@@ -111,45 +113,47 @@ static int si2157_init(struct dvb_frontend *fe)
#define SI2158_A20 ('A' << 24 | 58 << 16 | '2' << 8 | '0' << 0)
#define SI2157_A30 ('A' << 24 | 57 << 16 | '3' << 8 | '0' << 0)
+ #define SI2147_A30 ('A' << 24 | 47 << 16 | '3' << 8 | '0' << 0)
switch (chip_id) {
case SI2158_A20:
fw_file = SI2158_A20_FIRMWARE;
break;
case SI2157_A30:
+ case SI2147_A30:
goto skip_fw_download;
break;
default:
dev_err(&s->client->dev,
- "%s: unkown chip version Si21%d-%c%c%c\n",
- KBUILD_MODNAME, cmd.args[2], cmd.args[1],
+ "unknown chip version Si21%d-%c%c%c\n",
+ cmd.args[2], cmd.args[1],
cmd.args[3], cmd.args[4]);
ret = -EINVAL;
goto err;
}
/* cold state - try to download firmware */
- dev_info(&s->client->dev, "%s: found a '%s' in cold state\n",
- KBUILD_MODNAME, si2157_ops.info.name);
+ dev_info(&s->client->dev, "found a '%s' in cold state\n",
+ si2157_ops.info.name);
/* request the firmware, this will block and timeout */
ret = request_firmware(&fw, fw_file, &s->client->dev);
if (ret) {
- dev_err(&s->client->dev, "%s: firmware file '%s' not found\n",
- KBUILD_MODNAME, fw_file);
+ dev_err(&s->client->dev, "firmware file '%s' not found\n",
+ fw_file);
goto err;
}
/* firmware should be n chunks of 17 bytes */
if (fw->size % 17 != 0) {
- dev_err(&s->client->dev, "%s: firmware file '%s' is invalid\n",
- KBUILD_MODNAME, fw_file);
+ dev_err(&s->client->dev, "firmware file '%s' is invalid\n",
+ fw_file);
ret = -EINVAL;
goto err;
}
- dev_info(&s->client->dev, "%s: downloading firmware from file '%s'\n",
- KBUILD_MODNAME, fw_file);
+ dev_info(&s->client->dev, "downloading firmware from file '%s'\n",
+ fw_file);
for (remaining = fw->size; remaining > 0; remaining -= 17) {
len = fw->data[fw->size - remaining];
@@ -159,8 +163,8 @@ static int si2157_init(struct dvb_frontend *fe)
ret = si2157_cmd_execute(s, &cmd);
if (ret) {
dev_err(&s->client->dev,
- "%s: firmware download failed=%d\n",
- KBUILD_MODNAME, ret);
+ "firmware download failed=%d\n",
+ ret);
goto err;
}
}
@@ -177,14 +181,17 @@ skip_fw_download:
if (ret)
goto err;
- s->active = true;
+ s->fw_loaded = true;
+warm:
+ s->active = true;
return 0;
+
err:
if (fw)
release_firmware(fw);
- dev_dbg(&s->client->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&s->client->dev, "failed=%d\n", ret);
return ret;
}
@@ -194,20 +201,21 @@ static int si2157_sleep(struct dvb_frontend *fe)
int ret;
struct si2157_cmd cmd;
- dev_dbg(&s->client->dev, "%s:\n", __func__);
+ dev_dbg(&s->client->dev, "\n");
s->active = false;
- memcpy(cmd.args, "\x13", 1);
- cmd.wlen = 1;
- cmd.rlen = 0;
+ /* standby */
+ memcpy(cmd.args, "\x16\x00", 2);
+ cmd.wlen = 2;
+ cmd.rlen = 1;
ret = si2157_cmd_execute(s, &cmd);
if (ret)
goto err;
return 0;
err:
- dev_dbg(&s->client->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&s->client->dev, "failed=%d\n", ret);
return ret;
}
@@ -220,8 +228,8 @@ static int si2157_set_params(struct dvb_frontend *fe)
u8 bandwidth, delivery_system;
dev_dbg(&s->client->dev,
- "%s: delivery_system=%d frequency=%u bandwidth_hz=%u\n",
- __func__, c->delivery_system, c->frequency,
+ "delivery_system=%d frequency=%u bandwidth_hz=%u\n",
+ c->delivery_system, c->frequency,
c->bandwidth_hz);
if (!s->active) {
@@ -239,6 +247,9 @@ static int si2157_set_params(struct dvb_frontend *fe)
bandwidth = 0x0f;
switch (c->delivery_system) {
+ case SYS_ATSC:
+ delivery_system = 0x00;
+ break;
case SYS_DVBT:
case SYS_DVBT2: /* it seems DVB-T and DVB-T2 both are 0x20 here */
delivery_system = 0x20;
@@ -256,7 +267,14 @@ static int si2157_set_params(struct dvb_frontend *fe)
if (s->inversion)
cmd.args[5] = 0x01;
cmd.wlen = 6;
- cmd.rlen = 1;
+ cmd.rlen = 4;
+ ret = si2157_cmd_execute(s, &cmd);
+ if (ret)
+ goto err;
+
+ memcpy(cmd.args, "\x14\x00\x02\x07\x01\x00", 6);
+ cmd.wlen = 6;
+ cmd.rlen = 4;
ret = si2157_cmd_execute(s, &cmd);
if (ret)
goto err;
@@ -275,7 +293,7 @@ static int si2157_set_params(struct dvb_frontend *fe)
return 0;
err:
- dev_dbg(&s->client->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&s->client->dev, "failed=%d\n", ret);
return ret;
}
@@ -310,13 +328,14 @@ static int si2157_probe(struct i2c_client *client,
s = kzalloc(sizeof(struct si2157), GFP_KERNEL);
if (!s) {
ret = -ENOMEM;
- dev_err(&client->dev, "%s: kzalloc() failed\n", KBUILD_MODNAME);
+ dev_err(&client->dev, "kzalloc() failed\n");
goto err;
}
s->client = client;
s->fe = cfg->fe;
s->inversion = cfg->inversion;
+ s->fw_loaded = false;
mutex_init(&s->i2c_mutex);
/* check if the tuner is there */
@@ -333,11 +352,10 @@ static int si2157_probe(struct i2c_client *client,
i2c_set_clientdata(client, s);
dev_info(&s->client->dev,
- "%s: Silicon Labs Si2157/Si2158 successfully attached\n",
- KBUILD_MODNAME);
+ "Silicon Labs Si2157/Si2158 successfully attached\n");
return 0;
err:
- dev_dbg(&client->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
kfree(s);
return ret;
@@ -348,7 +366,7 @@ static int si2157_remove(struct i2c_client *client)
struct si2157 *s = i2c_get_clientdata(client);
struct dvb_frontend *fe = s->fe;
- dev_dbg(&client->dev, "%s:\n", __func__);
+ dev_dbg(&client->dev, "\n");
memset(&fe->ops.tuner_ops, 0, sizeof(struct dvb_tuner_ops));
fe->tuner_priv = NULL;
diff --git a/drivers/media/tuners/si2157.h b/drivers/media/tuners/si2157.h
index 6da4d5d1c817..d3b19cadb4a1 100644
--- a/drivers/media/tuners/si2157.h
+++ b/drivers/media/tuners/si2157.h
@@ -1,5 +1,5 @@
/*
- * Silicon Labs Si2157/2158 silicon tuner driver
+ * Silicon Labs Si2147/2157/2158 silicon tuner driver
*
* Copyright (C) 2014 Antti Palosaari <crope@iki.fi>
*
diff --git a/drivers/media/tuners/si2157_priv.h b/drivers/media/tuners/si2157_priv.h
index 3ddab5e6b500..e71ffafed951 100644
--- a/drivers/media/tuners/si2157_priv.h
+++ b/drivers/media/tuners/si2157_priv.h
@@ -1,5 +1,5 @@
/*
- * Silicon Labs Si2157/2158 silicon tuner driver
+ * Silicon Labs Si2147/2157/2158 silicon tuner driver
*
* Copyright (C) 2014 Antti Palosaari <crope@iki.fi>
*
@@ -26,6 +26,7 @@ struct si2157 {
struct i2c_client *client;
struct dvb_frontend *fe;
bool active;
+ bool fw_loaded;
bool inversion;
};
diff --git a/drivers/media/tuners/tda18212.c b/drivers/media/tuners/tda18212.c
index 05a4ac9edb6b..d93e0667b46b 100644
--- a/drivers/media/tuners/tda18212.c
+++ b/drivers/media/tuners/tda18212.c
@@ -19,125 +19,19 @@
*/
#include "tda18212.h"
+#include <linux/regmap.h>
-/* Max transfer size done by I2C transfer functions */
-#define MAX_XFER_SIZE 64
-
-struct tda18212_priv {
- struct tda18212_config *cfg;
- struct i2c_adapter *i2c;
+struct tda18212_dev {
+ struct tda18212_config cfg;
+ struct i2c_client *client;
+ struct regmap *regmap;
u32 if_frequency;
};
-/* write multiple registers */
-static int tda18212_wr_regs(struct tda18212_priv *priv, u8 reg, u8 *val,
- int len)
-{
- int ret;
- u8 buf[MAX_XFER_SIZE];
- struct i2c_msg msg[1] = {
- {
- .addr = priv->cfg->i2c_address,
- .flags = 0,
- .len = 1 + len,
- .buf = buf,
- }
- };
-
- if (1 + len > sizeof(buf)) {
- dev_warn(&priv->i2c->dev,
- "%s: i2c wr reg=%04x: len=%d is too big!\n",
- KBUILD_MODNAME, reg, len);
- return -EINVAL;
- }
-
- buf[0] = reg;
- memcpy(&buf[1], val, len);
-
- ret = i2c_transfer(priv->i2c, msg, 1);
- if (ret == 1) {
- ret = 0;
- } else {
- dev_warn(&priv->i2c->dev, "%s: i2c wr failed=%d reg=%02x " \
- "len=%d\n", KBUILD_MODNAME, ret, reg, len);
- ret = -EREMOTEIO;
- }
- return ret;
-}
-
-/* read multiple registers */
-static int tda18212_rd_regs(struct tda18212_priv *priv, u8 reg, u8 *val,
- int len)
-{
- int ret;
- u8 buf[MAX_XFER_SIZE];
- struct i2c_msg msg[2] = {
- {
- .addr = priv->cfg->i2c_address,
- .flags = 0,
- .len = 1,
- .buf = &reg,
- }, {
- .addr = priv->cfg->i2c_address,
- .flags = I2C_M_RD,
- .len = len,
- .buf = buf,
- }
- };
-
- if (len > sizeof(buf)) {
- dev_warn(&priv->i2c->dev,
- "%s: i2c rd reg=%04x: len=%d is too big!\n",
- KBUILD_MODNAME, reg, len);
- return -EINVAL;
- }
-
- ret = i2c_transfer(priv->i2c, msg, 2);
- if (ret == 2) {
- memcpy(val, buf, len);
- ret = 0;
- } else {
- dev_warn(&priv->i2c->dev, "%s: i2c rd failed=%d reg=%02x " \
- "len=%d\n", KBUILD_MODNAME, ret, reg, len);
- ret = -EREMOTEIO;
- }
-
- return ret;
-}
-
-/* write single register */
-static int tda18212_wr_reg(struct tda18212_priv *priv, u8 reg, u8 val)
-{
- return tda18212_wr_regs(priv, reg, &val, 1);
-}
-
-/* read single register */
-static int tda18212_rd_reg(struct tda18212_priv *priv, u8 reg, u8 *val)
-{
- return tda18212_rd_regs(priv, reg, val, 1);
-}
-
-#if 0 /* keep, useful when developing driver */
-static void tda18212_dump_regs(struct tda18212_priv *priv)
-{
- int i;
- u8 buf[256];
-
- #define TDA18212_RD_LEN 32
- for (i = 0; i < sizeof(buf); i += TDA18212_RD_LEN)
- tda18212_rd_regs(priv, i, &buf[i], TDA18212_RD_LEN);
-
- print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 32, 1, buf,
- sizeof(buf), true);
-
- return;
-}
-#endif
-
static int tda18212_set_params(struct dvb_frontend *fe)
{
- struct tda18212_priv *priv = fe->tuner_priv;
+ struct tda18212_dev *dev = fe->tuner_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret, i;
u32 if_khz;
@@ -166,9 +60,9 @@ static int tda18212_set_params(struct dvb_frontend *fe)
[ATSC_QAM] = { 0x7d, 0x20, 0x63 },
};
- dev_dbg(&priv->i2c->dev,
- "%s: delivery_system=%d frequency=%d bandwidth_hz=%d\n",
- __func__, c->delivery_system, c->frequency,
+ dev_dbg(&dev->client->dev,
+ "delivery_system=%d frequency=%d bandwidth_hz=%d\n",
+ c->delivery_system, c->frequency,
c->bandwidth_hz);
if (fe->ops.i2c_gate_ctrl)
@@ -176,25 +70,25 @@ static int tda18212_set_params(struct dvb_frontend *fe)
switch (c->delivery_system) {
case SYS_ATSC:
- if_khz = priv->cfg->if_atsc_vsb;
+ if_khz = dev->cfg.if_atsc_vsb;
i = ATSC_VSB;
break;
case SYS_DVBC_ANNEX_B:
- if_khz = priv->cfg->if_atsc_qam;
+ if_khz = dev->cfg.if_atsc_qam;
i = ATSC_QAM;
break;
case SYS_DVBT:
switch (c->bandwidth_hz) {
case 6000000:
- if_khz = priv->cfg->if_dvbt_6;
+ if_khz = dev->cfg.if_dvbt_6;
i = DVBT_6;
break;
case 7000000:
- if_khz = priv->cfg->if_dvbt_7;
+ if_khz = dev->cfg.if_dvbt_7;
i = DVBT_7;
break;
case 8000000:
- if_khz = priv->cfg->if_dvbt_8;
+ if_khz = dev->cfg.if_dvbt_8;
i = DVBT_8;
break;
default:
@@ -205,15 +99,15 @@ static int tda18212_set_params(struct dvb_frontend *fe)
case SYS_DVBT2:
switch (c->bandwidth_hz) {
case 6000000:
- if_khz = priv->cfg->if_dvbt2_6;
+ if_khz = dev->cfg.if_dvbt2_6;
i = DVBT2_6;
break;
case 7000000:
- if_khz = priv->cfg->if_dvbt2_7;
+ if_khz = dev->cfg.if_dvbt2_7;
i = DVBT2_7;
break;
case 8000000:
- if_khz = priv->cfg->if_dvbt2_8;
+ if_khz = dev->cfg.if_dvbt2_8;
i = DVBT2_8;
break;
default:
@@ -223,7 +117,7 @@ static int tda18212_set_params(struct dvb_frontend *fe)
break;
case SYS_DVBC_ANNEX_A:
case SYS_DVBC_ANNEX_C:
- if_khz = priv->cfg->if_dvbc;
+ if_khz = dev->cfg.if_dvbc;
i = DVBC_8;
break;
default:
@@ -231,15 +125,15 @@ static int tda18212_set_params(struct dvb_frontend *fe)
goto error;
}
- ret = tda18212_wr_reg(priv, 0x23, bw_params[i][2]);
+ ret = regmap_write(dev->regmap, 0x23, bw_params[i][2]);
if (ret)
goto error;
- ret = tda18212_wr_reg(priv, 0x06, 0x00);
+ ret = regmap_write(dev->regmap, 0x06, 0x00);
if (ret)
goto error;
- ret = tda18212_wr_reg(priv, 0x0f, bw_params[i][0]);
+ ret = regmap_write(dev->regmap, 0x0f, bw_params[i][0]);
if (ret)
goto error;
@@ -252,12 +146,12 @@ static int tda18212_set_params(struct dvb_frontend *fe)
buf[6] = ((c->frequency / 1000) >> 0) & 0xff;
buf[7] = 0xc1;
buf[8] = 0x01;
- ret = tda18212_wr_regs(priv, 0x12, buf, sizeof(buf));
+ ret = regmap_bulk_write(dev->regmap, 0x12, buf, sizeof(buf));
if (ret)
goto error;
/* actual IF rounded as it is on register */
- priv->if_frequency = buf[3] * 50 * 1000;
+ dev->if_frequency = buf[3] * 50 * 1000;
exit:
if (fe->ops.i2c_gate_ctrl)
@@ -266,26 +160,19 @@ exit:
return ret;
error:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&dev->client->dev, "failed=%d\n", ret);
goto exit;
}
static int tda18212_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
{
- struct tda18212_priv *priv = fe->tuner_priv;
+ struct tda18212_dev *dev = fe->tuner_priv;
- *frequency = priv->if_frequency;
+ *frequency = dev->if_frequency;
return 0;
}
-static int tda18212_release(struct dvb_frontend *fe)
-{
- kfree(fe->tuner_priv);
- fe->tuner_priv = NULL;
- return 0;
-}
-
static const struct dvb_tuner_ops tda18212_tuner_ops = {
.info = {
.name = "NXP TDA18212",
@@ -295,53 +182,110 @@ static const struct dvb_tuner_ops tda18212_tuner_ops = {
.frequency_step = 1000,
},
- .release = tda18212_release,
-
.set_params = tda18212_set_params,
.get_if_frequency = tda18212_get_if_frequency,
};
-struct dvb_frontend *tda18212_attach(struct dvb_frontend *fe,
- struct i2c_adapter *i2c, struct tda18212_config *cfg)
+static int tda18212_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
- struct tda18212_priv *priv = NULL;
+ struct tda18212_config *cfg = client->dev.platform_data;
+ struct dvb_frontend *fe = cfg->fe;
+ struct tda18212_dev *dev;
int ret;
- u8 val;
+ unsigned int chip_id;
+ char *version;
+ static const struct regmap_config regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ };
- priv = kzalloc(sizeof(struct tda18212_priv), GFP_KERNEL);
- if (priv == NULL)
- return NULL;
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (dev == NULL) {
+ ret = -ENOMEM;
+ dev_err(&client->dev, "kzalloc() failed\n");
+ goto err;
+ }
- priv->cfg = cfg;
- priv->i2c = i2c;
- fe->tuner_priv = priv;
+ memcpy(&dev->cfg, cfg, sizeof(struct tda18212_config));
+ dev->client = client;
+ dev->regmap = devm_regmap_init_i2c(client, &regmap_config);
+ if (IS_ERR(dev->regmap)) {
+ ret = PTR_ERR(dev->regmap);
+ goto err;
+ }
+ /* check if the tuner is there */
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */
- /* check if the tuner is there */
- ret = tda18212_rd_reg(priv, 0x00, &val);
+ ret = regmap_read(dev->regmap, 0x00, &chip_id);
+ dev_dbg(&dev->client->dev, "chip_id=%02x\n", chip_id);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
- if (!ret)
- dev_dbg(&priv->i2c->dev, "%s: chip id=%02x\n", __func__, val);
- if (ret || val != 0xc7) {
- kfree(priv);
- return NULL;
+ if (ret)
+ goto err;
+
+ switch (chip_id) {
+ case 0xc7:
+ version = "M"; /* master */
+ break;
+ case 0x47:
+ version = "S"; /* slave */
+ break;
+ default:
+ ret = -ENODEV;
+ goto err;
}
- dev_info(&priv->i2c->dev,
- "%s: NXP TDA18212HN successfully identified\n",
- KBUILD_MODNAME);
+ dev_info(&dev->client->dev,
+ "NXP TDA18212HN/%s successfully identified\n", version);
+ fe->tuner_priv = dev;
memcpy(&fe->ops.tuner_ops, &tda18212_tuner_ops,
- sizeof(struct dvb_tuner_ops));
+ sizeof(struct dvb_tuner_ops));
+ i2c_set_clientdata(client, dev);
- return fe;
+ return 0;
+err:
+ dev_dbg(&client->dev, "failed=%d\n", ret);
+ kfree(dev);
+ return ret;
}
-EXPORT_SYMBOL(tda18212_attach);
+
+static int tda18212_remove(struct i2c_client *client)
+{
+ struct tda18212_dev *dev = i2c_get_clientdata(client);
+ struct dvb_frontend *fe = dev->cfg.fe;
+
+ dev_dbg(&client->dev, "\n");
+
+ memset(&fe->ops.tuner_ops, 0, sizeof(struct dvb_tuner_ops));
+ fe->tuner_priv = NULL;
+ kfree(dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id tda18212_id[] = {
+ {"tda18212", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, tda18212_id);
+
+static struct i2c_driver tda18212_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "tda18212",
+ },
+ .probe = tda18212_probe,
+ .remove = tda18212_remove,
+ .id_table = tda18212_id,
+};
+
+module_i2c_driver(tda18212_driver);
MODULE_DESCRIPTION("NXP TDA18212HN silicon tuner driver");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
diff --git a/drivers/media/tuners/tda18212.h b/drivers/media/tuners/tda18212.h
index c36b49e4b274..e58c9096d79c 100644
--- a/drivers/media/tuners/tda18212.h
+++ b/drivers/media/tuners/tda18212.h
@@ -25,8 +25,6 @@
#include "dvb_frontend.h"
struct tda18212_config {
- u8 i2c_address;
-
u16 if_dvbt_6;
u16 if_dvbt_7;
u16 if_dvbt_8;
@@ -37,18 +35,11 @@ struct tda18212_config {
u16 if_dvbc;
u16 if_atsc_vsb;
u16 if_atsc_qam;
-};
-#if IS_ENABLED(CONFIG_MEDIA_TUNER_TDA18212)
-extern struct dvb_frontend *tda18212_attach(struct dvb_frontend *fe,
- struct i2c_adapter *i2c, struct tda18212_config *cfg);
-#else
-static inline struct dvb_frontend *tda18212_attach(struct dvb_frontend *fe,
- struct i2c_adapter *i2c, struct tda18212_config *cfg)
-{
- printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
- return NULL;
-}
-#endif
+ /*
+ * pointer to DVB frontend
+ */
+ struct dvb_frontend *fe;
+};
#endif
diff --git a/drivers/media/tuners/tda18271-common.c b/drivers/media/tuners/tda18271-common.c
index 18c77afe2e4f..86e5e3110118 100644
--- a/drivers/media/tuners/tda18271-common.c
+++ b/drivers/media/tuners/tda18271-common.c
@@ -714,12 +714,11 @@ fail:
return ret;
}
-int _tda_printk(struct tda18271_priv *state, const char *level,
- const char *func, const char *fmt, ...)
+void _tda_printk(struct tda18271_priv *state, const char *level,
+ const char *func, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
- int rtn;
va_start(args, fmt);
@@ -727,15 +726,13 @@ int _tda_printk(struct tda18271_priv *state, const char *level,
vaf.va = &args;
if (state)
- rtn = printk("%s%s: [%d-%04x|%c] %pV",
- level, func, i2c_adapter_id(state->i2c_props.adap),
- state->i2c_props.addr,
- (state->role == TDA18271_MASTER) ? 'M' : 'S',
- &vaf);
+ printk("%s%s: [%d-%04x|%c] %pV",
+ level, func, i2c_adapter_id(state->i2c_props.adap),
+ state->i2c_props.addr,
+ (state->role == TDA18271_MASTER) ? 'M' : 'S',
+ &vaf);
else
- rtn = printk("%s%s: %pV", level, func, &vaf);
+ printk("%s%s: %pV", level, func, &vaf);
va_end(args);
-
- return rtn;
}
diff --git a/drivers/media/tuners/tda18271-priv.h b/drivers/media/tuners/tda18271-priv.h
index 454c152ccaa0..b36a7b754772 100644
--- a/drivers/media/tuners/tda18271-priv.h
+++ b/drivers/media/tuners/tda18271-priv.h
@@ -139,8 +139,8 @@ extern int tda18271_debug;
#define DBG_CAL 16
__attribute__((format(printf, 4, 5)))
-int _tda_printk(struct tda18271_priv *state, const char *level,
- const char *func, const char *fmt, ...);
+void _tda_printk(struct tda18271_priv *state, const char *level,
+ const char *func, const char *fmt, ...);
#define tda_printk(st, lvl, fmt, arg...) \
_tda_printk(st, lvl, __func__, fmt, ##arg)
diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c
index 565eeebb3aeb..d12f5e4ad8bf 100644
--- a/drivers/media/tuners/tuner-xc2028.c
+++ b/drivers/media/tuners/tuner-xc2028.c
@@ -178,67 +178,67 @@ static int xc2028_get_reg(struct xc2028_data *priv, u16 reg, u16 *val)
#define dump_firm_type(t) dump_firm_type_and_int_freq(t, 0)
static void dump_firm_type_and_int_freq(unsigned int type, u16 int_freq)
{
- if (type & BASE)
+ if (type & BASE)
printk("BASE ");
- if (type & INIT1)
+ if (type & INIT1)
printk("INIT1 ");
- if (type & F8MHZ)
+ if (type & F8MHZ)
printk("F8MHZ ");
- if (type & MTS)
+ if (type & MTS)
printk("MTS ");
- if (type & D2620)
+ if (type & D2620)
printk("D2620 ");
- if (type & D2633)
+ if (type & D2633)
printk("D2633 ");
- if (type & DTV6)
+ if (type & DTV6)
printk("DTV6 ");
- if (type & QAM)
+ if (type & QAM)
printk("QAM ");
- if (type & DTV7)
+ if (type & DTV7)
printk("DTV7 ");
- if (type & DTV78)
+ if (type & DTV78)
printk("DTV78 ");
- if (type & DTV8)
+ if (type & DTV8)
printk("DTV8 ");
- if (type & FM)
+ if (type & FM)
printk("FM ");
- if (type & INPUT1)
+ if (type & INPUT1)
printk("INPUT1 ");
- if (type & LCD)
+ if (type & LCD)
printk("LCD ");
- if (type & NOGD)
+ if (type & NOGD)
printk("NOGD ");
- if (type & MONO)
+ if (type & MONO)
printk("MONO ");
- if (type & ATSC)
+ if (type & ATSC)
printk("ATSC ");
- if (type & IF)
+ if (type & IF)
printk("IF ");
- if (type & LG60)
+ if (type & LG60)
printk("LG60 ");
- if (type & ATI638)
+ if (type & ATI638)
printk("ATI638 ");
- if (type & OREN538)
+ if (type & OREN538)
printk("OREN538 ");
- if (type & OREN36)
+ if (type & OREN36)
printk("OREN36 ");
- if (type & TOYOTA388)
+ if (type & TOYOTA388)
printk("TOYOTA388 ");
- if (type & TOYOTA794)
+ if (type & TOYOTA794)
printk("TOYOTA794 ");
- if (type & DIBCOM52)
+ if (type & DIBCOM52)
printk("DIBCOM52 ");
- if (type & ZARLINK456)
+ if (type & ZARLINK456)
printk("ZARLINK456 ");
- if (type & CHINA)
+ if (type & CHINA)
printk("CHINA ");
- if (type & F6MHZ)
+ if (type & F6MHZ)
printk("F6MHZ ");
- if (type & INPUT2)
+ if (type & INPUT2)
printk("INPUT2 ");
- if (type & SCODE)
+ if (type & SCODE)
printk("SCODE ");
- if (type & HAS_IF)
+ if (type & HAS_IF)
printk("HAS_IF_%d ", int_freq);
}
diff --git a/drivers/media/tuners/tuner_it913x.c b/drivers/media/tuners/tuner_it913x.c
deleted file mode 100644
index 3d83c425bccf..000000000000
--- a/drivers/media/tuners/tuner_it913x.c
+++ /dev/null
@@ -1,453 +0,0 @@
-/*
- * ITE Tech IT9137 silicon tuner driver
- *
- * Copyright (C) 2011 Malcolm Priestley (tvboxspy@gmail.com)
- * IT9137 Copyright (C) ITE Tech Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- *
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.=
- */
-
-#include "tuner_it913x_priv.h"
-
-struct it913x_state {
- struct i2c_adapter *i2c_adap;
- u8 i2c_addr;
- u8 chip_ver;
- u8 tuner_type;
- u8 firmware_ver;
- u16 tun_xtal;
- u8 tun_fdiv;
- u8 tun_clk_mode;
- u32 tun_fn_min;
-};
-
-/* read multiple registers */
-static int it913x_rd_regs(struct it913x_state *state,
- u32 reg, u8 *data, u8 count)
-{
- int ret;
- u8 b[3];
- struct i2c_msg msg[2] = {
- { .addr = state->i2c_addr, .flags = 0,
- .buf = b, .len = sizeof(b) },
- { .addr = state->i2c_addr, .flags = I2C_M_RD,
- .buf = data, .len = count }
- };
- b[0] = (u8)(reg >> 16) & 0xff;
- b[1] = (u8)(reg >> 8) & 0xff;
- b[2] = (u8) reg & 0xff;
- b[0] |= 0x80; /* All reads from demodulator */
-
- ret = i2c_transfer(state->i2c_adap, msg, 2);
-
- return ret;
-}
-
-/* read single register */
-static int it913x_rd_reg(struct it913x_state *state, u32 reg)
-{
- int ret;
- u8 b[1];
- ret = it913x_rd_regs(state, reg, &b[0], sizeof(b));
- return (ret < 0) ? -ENODEV : b[0];
-}
-
-/* write multiple registers */
-static int it913x_wr_regs(struct it913x_state *state,
- u8 pro, u32 reg, u8 buf[], u8 count)
-{
- u8 b[256];
- struct i2c_msg msg[1] = {
- { .addr = state->i2c_addr, .flags = 0,
- .buf = b, .len = 3 + count }
- };
- int ret;
- b[0] = (u8)(reg >> 16) & 0xff;
- b[1] = (u8)(reg >> 8) & 0xff;
- b[2] = (u8) reg & 0xff;
- memcpy(&b[3], buf, count);
-
- if (pro == PRO_DMOD)
- b[0] |= 0x80;
-
- ret = i2c_transfer(state->i2c_adap, msg, 1);
-
- if (ret < 0)
- return -EIO;
-
- return 0;
-}
-
-/* write single register */
-static int it913x_wr_reg(struct it913x_state *state,
- u8 pro, u32 reg, u32 data)
-{
- int ret;
- u8 b[4];
- u8 s;
-
- b[0] = data >> 24;
- b[1] = (data >> 16) & 0xff;
- b[2] = (data >> 8) & 0xff;
- b[3] = data & 0xff;
- /* expand write as needed */
- if (data < 0x100)
- s = 3;
- else if (data < 0x1000)
- s = 2;
- else if (data < 0x100000)
- s = 1;
- else
- s = 0;
-
- ret = it913x_wr_regs(state, pro, reg, &b[s], sizeof(b) - s);
-
- return ret;
-}
-
-static int it913x_script_loader(struct it913x_state *state,
- struct it913xset *loadscript)
-{
- int ret, i;
- if (loadscript == NULL)
- return -EINVAL;
-
- for (i = 0; i < 1000; ++i) {
- if (loadscript[i].pro == 0xff)
- break;
- ret = it913x_wr_regs(state, loadscript[i].pro,
- loadscript[i].address,
- loadscript[i].reg, loadscript[i].count);
- if (ret < 0)
- return -ENODEV;
- }
- return 0;
-}
-
-static int it913x_init(struct dvb_frontend *fe)
-{
- struct it913x_state *state = fe->tuner_priv;
- int ret, i, reg;
- u8 val, nv_val;
- u8 nv[] = {48, 32, 24, 16, 12, 8, 6, 4, 2};
- u8 b[2];
-
- reg = it913x_rd_reg(state, 0xec86);
- switch (reg) {
- case 0:
- state->tun_clk_mode = reg;
- state->tun_xtal = 2000;
- state->tun_fdiv = 3;
- val = 16;
- break;
- case -ENODEV:
- return -ENODEV;
- case 1:
- default:
- state->tun_clk_mode = reg;
- state->tun_xtal = 640;
- state->tun_fdiv = 1;
- val = 6;
- break;
- }
-
- reg = it913x_rd_reg(state, 0xed03);
-
- if (reg < 0)
- return -ENODEV;
- else if (reg < ARRAY_SIZE(nv))
- nv_val = nv[reg];
- else
- nv_val = 2;
-
- for (i = 0; i < 50; i++) {
- ret = it913x_rd_regs(state, 0xed23, &b[0], sizeof(b));
- reg = (b[1] << 8) + b[0];
- if (reg > 0)
- break;
- if (ret < 0)
- return -ENODEV;
- udelay(2000);
- }
- state->tun_fn_min = state->tun_xtal * reg;
- state->tun_fn_min /= (state->tun_fdiv * nv_val);
- dev_dbg(&state->i2c_adap->dev, "%s: Tuner fn_min %d\n", __func__,
- state->tun_fn_min);
-
- if (state->chip_ver > 1)
- msleep(50);
- else {
- for (i = 0; i < 50; i++) {
- reg = it913x_rd_reg(state, 0xec82);
- if (reg > 0)
- break;
- if (reg < 0)
- return -ENODEV;
- udelay(2000);
- }
- }
-
- /* Power Up Tuner - common all versions */
- ret = it913x_wr_reg(state, PRO_DMOD, 0xec40, 0x1);
- ret |= it913x_wr_reg(state, PRO_DMOD, 0xfba8, 0x0);
- ret |= it913x_wr_reg(state, PRO_DMOD, 0xec57, 0x0);
- ret |= it913x_wr_reg(state, PRO_DMOD, 0xec58, 0x0);
-
- return it913x_wr_reg(state, PRO_DMOD, 0xed81, val);
-}
-
-static int it9137_set_params(struct dvb_frontend *fe)
-{
- struct it913x_state *state = fe->tuner_priv;
- struct it913xset *set_tuner = set_it9137_template;
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
- u32 bandwidth = p->bandwidth_hz;
- u32 frequency_m = p->frequency;
- int ret, reg;
- u32 frequency = frequency_m / 1000;
- u32 freq, temp_f, tmp;
- u16 iqik_m_cal;
- u16 n_div;
- u8 n;
- u8 l_band;
- u8 lna_band;
- u8 bw;
-
- if (state->firmware_ver == 1)
- set_tuner = set_it9135_template;
- else
- set_tuner = set_it9137_template;
-
- dev_dbg(&state->i2c_adap->dev, "%s: Tuner Frequency %d Bandwidth %d\n",
- __func__, frequency, bandwidth);
-
- if (frequency >= 51000 && frequency <= 440000) {
- l_band = 0;
- lna_band = 0;
- } else if (frequency > 440000 && frequency <= 484000) {
- l_band = 1;
- lna_band = 1;
- } else if (frequency > 484000 && frequency <= 533000) {
- l_band = 1;
- lna_band = 2;
- } else if (frequency > 533000 && frequency <= 587000) {
- l_band = 1;
- lna_band = 3;
- } else if (frequency > 587000 && frequency <= 645000) {
- l_band = 1;
- lna_band = 4;
- } else if (frequency > 645000 && frequency <= 710000) {
- l_band = 1;
- lna_band = 5;
- } else if (frequency > 710000 && frequency <= 782000) {
- l_band = 1;
- lna_band = 6;
- } else if (frequency > 782000 && frequency <= 860000) {
- l_band = 1;
- lna_band = 7;
- } else if (frequency > 1450000 && frequency <= 1492000) {
- l_band = 1;
- lna_band = 0;
- } else if (frequency > 1660000 && frequency <= 1685000) {
- l_band = 1;
- lna_band = 1;
- } else
- return -EINVAL;
- set_tuner[0].reg[0] = lna_band;
-
- switch (bandwidth) {
- case 5000000:
- bw = 0;
- break;
- case 6000000:
- bw = 2;
- break;
- case 7000000:
- bw = 4;
- break;
- default:
- case 8000000:
- bw = 6;
- break;
- }
-
- set_tuner[1].reg[0] = bw;
- set_tuner[2].reg[0] = 0xa0 | (l_band << 3);
-
- if (frequency > 53000 && frequency <= 74000) {
- n_div = 48;
- n = 0;
- } else if (frequency > 74000 && frequency <= 111000) {
- n_div = 32;
- n = 1;
- } else if (frequency > 111000 && frequency <= 148000) {
- n_div = 24;
- n = 2;
- } else if (frequency > 148000 && frequency <= 222000) {
- n_div = 16;
- n = 3;
- } else if (frequency > 222000 && frequency <= 296000) {
- n_div = 12;
- n = 4;
- } else if (frequency > 296000 && frequency <= 445000) {
- n_div = 8;
- n = 5;
- } else if (frequency > 445000 && frequency <= state->tun_fn_min) {
- n_div = 6;
- n = 6;
- } else if (frequency > state->tun_fn_min && frequency <= 950000) {
- n_div = 4;
- n = 7;
- } else if (frequency > 1450000 && frequency <= 1680000) {
- n_div = 2;
- n = 0;
- } else
- return -EINVAL;
-
- reg = it913x_rd_reg(state, 0xed81);
- iqik_m_cal = (u16)reg * n_div;
-
- if (reg < 0x20) {
- if (state->tun_clk_mode == 0)
- iqik_m_cal = (iqik_m_cal * 9) >> 5;
- else
- iqik_m_cal >>= 1;
- } else {
- iqik_m_cal = 0x40 - iqik_m_cal;
- if (state->tun_clk_mode == 0)
- iqik_m_cal = ~((iqik_m_cal * 9) >> 5);
- else
- iqik_m_cal = ~(iqik_m_cal >> 1);
- }
-
- temp_f = frequency * (u32)n_div * (u32)state->tun_fdiv;
- freq = temp_f / state->tun_xtal;
- tmp = freq * state->tun_xtal;
-
- if ((temp_f - tmp) >= (state->tun_xtal >> 1))
- freq++;
-
- freq += (u32) n << 13;
- /* Frequency OMEGA_IQIK_M_CAL_MID*/
- temp_f = freq + (u32)iqik_m_cal;
-
- set_tuner[3].reg[0] = temp_f & 0xff;
- set_tuner[4].reg[0] = (temp_f >> 8) & 0xff;
-
- dev_dbg(&state->i2c_adap->dev, "%s: High Frequency = %04x\n",
- __func__, temp_f);
-
- /* Lower frequency */
- set_tuner[5].reg[0] = freq & 0xff;
- set_tuner[6].reg[0] = (freq >> 8) & 0xff;
-
- dev_dbg(&state->i2c_adap->dev, "%s: low Frequency = %04x\n",
- __func__, freq);
-
- ret = it913x_script_loader(state, set_tuner);
-
- return (ret < 0) ? -ENODEV : 0;
-}
-
-/* Power sequence */
-/* Power Up Tuner on -> Frontend suspend off -> Tuner clk on */
-/* Power Down Frontend suspend on -> Tuner clk off -> Tuner off */
-
-static int it913x_sleep(struct dvb_frontend *fe)
-{
- struct it913x_state *state = fe->tuner_priv;
- return it913x_script_loader(state, it9137_tuner_off);
-}
-
-static int it913x_release(struct dvb_frontend *fe)
-{
- kfree(fe->tuner_priv);
- return 0;
-}
-
-static const struct dvb_tuner_ops it913x_tuner_ops = {
- .info = {
- .name = "ITE Tech IT913X",
- .frequency_min = 174000000,
- .frequency_max = 862000000,
- },
-
- .release = it913x_release,
-
- .init = it913x_init,
- .sleep = it913x_sleep,
- .set_params = it9137_set_params,
-};
-
-struct dvb_frontend *it913x_attach(struct dvb_frontend *fe,
- struct i2c_adapter *i2c_adap, u8 i2c_addr, u8 config)
-{
- struct it913x_state *state = NULL;
- int ret;
-
- /* allocate memory for the internal state */
- state = kzalloc(sizeof(struct it913x_state), GFP_KERNEL);
- if (state == NULL)
- return NULL;
-
- state->i2c_adap = i2c_adap;
- state->i2c_addr = i2c_addr;
-
- switch (config) {
- case AF9033_TUNER_IT9135_38:
- case AF9033_TUNER_IT9135_51:
- case AF9033_TUNER_IT9135_52:
- state->chip_ver = 0x01;
- break;
- case AF9033_TUNER_IT9135_60:
- case AF9033_TUNER_IT9135_61:
- case AF9033_TUNER_IT9135_62:
- state->chip_ver = 0x02;
- break;
- default:
- dev_dbg(&i2c_adap->dev,
- "%s: invalid config=%02x\n", __func__, config);
- goto error;
- }
-
- state->tuner_type = config;
- state->firmware_ver = 1;
-
- /* tuner RF initial */
- ret = it913x_wr_reg(state, PRO_DMOD, 0xec4c, 0x68);
- if (ret < 0)
- goto error;
-
- fe->tuner_priv = state;
- memcpy(&fe->ops.tuner_ops, &it913x_tuner_ops,
- sizeof(struct dvb_tuner_ops));
-
- dev_info(&i2c_adap->dev,
- "%s: ITE Tech IT913X successfully attached\n",
- KBUILD_MODNAME);
- dev_dbg(&i2c_adap->dev, "%s: config=%02x chip_ver=%02x\n",
- __func__, config, state->chip_ver);
-
- return fe;
-error:
- kfree(state);
- return NULL;
-}
-EXPORT_SYMBOL(it913x_attach);
-
-MODULE_DESCRIPTION("ITE Tech IT913X silicon tuner driver");
-MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/media/tuners/tuner_it913x_priv.h b/drivers/media/tuners/tuner_it913x_priv.h
deleted file mode 100644
index ce652108aa5d..000000000000
--- a/drivers/media/tuners/tuner_it913x_priv.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * ITE Tech IT9137 silicon tuner driver
- *
- * Copyright (C) 2011 Malcolm Priestley (tvboxspy@gmail.com)
- * IT9137 Copyright (C) ITE Tech Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- *
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.=
- */
-
-#ifndef IT913X_PRIV_H
-#define IT913X_PRIV_H
-
-#include "tuner_it913x.h"
-#include "af9033.h"
-
-#define PRO_LINK 0x0
-#define PRO_DMOD 0x1
-#define TRIGGER_OFSM 0x0000
-
-struct it913xset { u32 pro;
- u32 address;
- u8 reg[15];
- u8 count;
-};
-
-/* Tuner setting scripts (still keeping it9137) */
-static struct it913xset it9137_tuner_off[] = {
- {PRO_DMOD, 0xfba8, {0x01}, 0x01}, /* Tuner Clock Off */
- {PRO_DMOD, 0xec40, {0x00}, 0x01}, /* Power Down Tuner */
- {PRO_DMOD, 0xec02, {0x3f, 0x1f, 0x3f, 0x3f}, 0x04},
- {PRO_DMOD, 0xec06, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}, 0x0c},
- {PRO_DMOD, 0xec12, {0x00, 0x00, 0x00, 0x00}, 0x04},
- {PRO_DMOD, 0xec17, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00}, 0x09},
- {PRO_DMOD, 0xec22, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00}, 0x0a},
- {PRO_DMOD, 0xec20, {0x00}, 0x01},
- {PRO_DMOD, 0xec3f, {0x01}, 0x01},
- {0xff, 0x0000, {0x00}, 0x00}, /* Terminating Entry */
-};
-
-static struct it913xset set_it9135_template[] = {
- {PRO_DMOD, 0xee06, {0x00}, 0x01},
- {PRO_DMOD, 0xec56, {0x00}, 0x01},
- {PRO_DMOD, 0xec4c, {0x00}, 0x01},
- {PRO_DMOD, 0xec4d, {0x00}, 0x01},
- {PRO_DMOD, 0xec4e, {0x00}, 0x01},
- {PRO_DMOD, 0x011e, {0x00}, 0x01}, /* Older Devices */
- {PRO_DMOD, 0x011f, {0x00}, 0x01},
- {0xff, 0x0000, {0x00}, 0x00}, /* Terminating Entry */
-};
-
-static struct it913xset set_it9137_template[] = {
- {PRO_DMOD, 0xee06, {0x00}, 0x01},
- {PRO_DMOD, 0xec56, {0x00}, 0x01},
- {PRO_DMOD, 0xec4c, {0x00}, 0x01},
- {PRO_DMOD, 0xec4d, {0x00}, 0x01},
- {PRO_DMOD, 0xec4e, {0x00}, 0x01},
- {PRO_DMOD, 0xec4f, {0x00}, 0x01},
- {PRO_DMOD, 0xec50, {0x00}, 0x01},
- {0xff, 0x0000, {0x00}, 0x00}, /* Terminating Entry */
-};
-
-#endif
diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c
index f9ab79e3432d..219ebafae70f 100644
--- a/drivers/media/tuners/xc4000.c
+++ b/drivers/media/tuners/xc4000.c
@@ -569,67 +569,67 @@ static int xc4000_readreg(struct xc4000_priv *priv, u16 reg, u16 *val)
#define dump_firm_type(t) dump_firm_type_and_int_freq(t, 0)
static void dump_firm_type_and_int_freq(unsigned int type, u16 int_freq)
{
- if (type & BASE)
+ if (type & BASE)
printk(KERN_CONT "BASE ");
- if (type & INIT1)
+ if (type & INIT1)
printk(KERN_CONT "INIT1 ");
- if (type & F8MHZ)
+ if (type & F8MHZ)
printk(KERN_CONT "F8MHZ ");
- if (type & MTS)
+ if (type & MTS)
printk(KERN_CONT "MTS ");
- if (type & D2620)
+ if (type & D2620)
printk(KERN_CONT "D2620 ");
- if (type & D2633)
+ if (type & D2633)
printk(KERN_CONT "D2633 ");
- if (type & DTV6)
+ if (type & DTV6)
printk(KERN_CONT "DTV6 ");
- if (type & QAM)
+ if (type & QAM)
printk(KERN_CONT "QAM ");
- if (type & DTV7)
+ if (type & DTV7)
printk(KERN_CONT "DTV7 ");
- if (type & DTV78)
+ if (type & DTV78)
printk(KERN_CONT "DTV78 ");
- if (type & DTV8)
+ if (type & DTV8)
printk(KERN_CONT "DTV8 ");
- if (type & FM)
+ if (type & FM)
printk(KERN_CONT "FM ");
- if (type & INPUT1)
+ if (type & INPUT1)
printk(KERN_CONT "INPUT1 ");
- if (type & LCD)
+ if (type & LCD)
printk(KERN_CONT "LCD ");
- if (type & NOGD)
+ if (type & NOGD)
printk(KERN_CONT "NOGD ");
- if (type & MONO)
+ if (type & MONO)
printk(KERN_CONT "MONO ");
- if (type & ATSC)
+ if (type & ATSC)
printk(KERN_CONT "ATSC ");
- if (type & IF)
+ if (type & IF)
printk(KERN_CONT "IF ");
- if (type & LG60)
+ if (type & LG60)
printk(KERN_CONT "LG60 ");
- if (type & ATI638)
+ if (type & ATI638)
printk(KERN_CONT "ATI638 ");
- if (type & OREN538)
+ if (type & OREN538)
printk(KERN_CONT "OREN538 ");
- if (type & OREN36)
+ if (type & OREN36)
printk(KERN_CONT "OREN36 ");
- if (type & TOYOTA388)
+ if (type & TOYOTA388)
printk(KERN_CONT "TOYOTA388 ");
- if (type & TOYOTA794)
+ if (type & TOYOTA794)
printk(KERN_CONT "TOYOTA794 ");
- if (type & DIBCOM52)
+ if (type & DIBCOM52)
printk(KERN_CONT "DIBCOM52 ");
- if (type & ZARLINK456)
+ if (type & ZARLINK456)
printk(KERN_CONT "ZARLINK456 ");
- if (type & CHINA)
+ if (type & CHINA)
printk(KERN_CONT "CHINA ");
- if (type & F6MHZ)
+ if (type & F6MHZ)
printk(KERN_CONT "F6MHZ ");
- if (type & INPUT2)
+ if (type & INPUT2)
printk(KERN_CONT "INPUT2 ");
- if (type & SCODE)
+ if (type & SCODE)
printk(KERN_CONT "SCODE ");
- if (type & HAS_IF)
+ if (type & HAS_IF)
printk(KERN_CONT "HAS_IF_%d ", int_freq);
}
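The hunk above is indentation cleanup only; the function itself shows the flag-dump idiom of testing each firmware-type bit and emitting its name with printk(KERN_CONT) so everything lands on one console line. For comparison, the same idiom in table-driven form (a sketch only, not part of this patch; just a few of the flags are listed):

/* Sketch: table-driven equivalent of the if-chain above. */
static const struct {
	unsigned int mask;
	const char *name;
} firm_flags[] = {
	{ BASE,  "BASE"  }, { INIT1, "INIT1" }, { F8MHZ, "F8MHZ" },
	{ MTS,   "MTS"   }, { DTV6,  "DTV6"  }, { QAM,   "QAM"   },
};

static void dump_firm_flags(unsigned int type)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(firm_flags); i++)
		if (type & firm_flags[i].mask)
			printk(KERN_CONT "%s ", firm_flags[i].name);
}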
diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c
index e135760f7d48..e44c8aba6074 100644
--- a/drivers/media/tuners/xc5000.c
+++ b/drivers/media/tuners/xc5000.c
@@ -59,6 +59,7 @@ struct xc5000_priv {
u32 freq_hz, freq_offset;
u32 bandwidth;
u8 video_standard;
+ unsigned int mode;
u8 rf_mode;
u8 radio_input;
@@ -69,6 +70,8 @@ struct xc5000_priv {
struct dvb_frontend *fe;
struct delayed_work timer_sleep;
+
+ const struct firmware *firmware;
};
/* Misc Defines */
@@ -712,9 +715,50 @@ static void xc_debug_dump(struct xc5000_priv *priv)
}
}
-static int xc5000_set_params(struct dvb_frontend *fe)
+static int xc5000_tune_digital(struct dvb_frontend *fe)
+{
+ struct xc5000_priv *priv = fe->tuner_priv;
+ int ret;
+ u32 bw = fe->dtv_property_cache.bandwidth_hz;
+
+ ret = xc_set_signal_source(priv, priv->rf_mode);
+ if (ret != 0) {
+ printk(KERN_ERR
+ "xc5000: xc_set_signal_source(%d) failed\n",
+ priv->rf_mode);
+ return -EREMOTEIO;
+ }
+
+ ret = xc_set_tv_standard(priv,
+ xc5000_standard[priv->video_standard].video_mode,
+ xc5000_standard[priv->video_standard].audio_mode, 0);
+ if (ret != 0) {
+ printk(KERN_ERR "xc5000: xc_set_tv_standard failed\n");
+ return -EREMOTEIO;
+ }
+
+ ret = xc_set_IF_frequency(priv, priv->if_khz);
+ if (ret != 0) {
+ printk(KERN_ERR "xc5000: xc_Set_IF_frequency(%d) failed\n",
+ priv->if_khz);
+ return -EIO;
+ }
+
+ xc_write_reg(priv, XREG_OUTPUT_AMP, 0x8a);
+
+ xc_tune_channel(priv, priv->freq_hz, XC_TUNE_DIGITAL);
+
+ if (debug)
+ xc_debug_dump(priv);
+
+ priv->bandwidth = bw;
+
+ return 0;
+}
+
+static int xc5000_set_digital_params(struct dvb_frontend *fe)
{
- int ret, b;
+ int b;
struct xc5000_priv *priv = fe->tuner_priv;
u32 bw = fe->dtv_property_cache.bandwidth_hz;
u32 freq = fe->dtv_property_cache.frequency;
@@ -794,43 +838,12 @@ static int xc5000_set_params(struct dvb_frontend *fe)
}
priv->freq_hz = freq - priv->freq_offset;
+ priv->mode = V4L2_TUNER_DIGITAL_TV;
dprintk(1, "%s() frequency=%d (compensated to %d)\n",
__func__, freq, priv->freq_hz);
- ret = xc_set_signal_source(priv, priv->rf_mode);
- if (ret != 0) {
- printk(KERN_ERR
- "xc5000: xc_set_signal_source(%d) failed\n",
- priv->rf_mode);
- return -EREMOTEIO;
- }
-
- ret = xc_set_tv_standard(priv,
- xc5000_standard[priv->video_standard].video_mode,
- xc5000_standard[priv->video_standard].audio_mode, 0);
- if (ret != 0) {
- printk(KERN_ERR "xc5000: xc_set_tv_standard failed\n");
- return -EREMOTEIO;
- }
-
- ret = xc_set_IF_frequency(priv, priv->if_khz);
- if (ret != 0) {
- printk(KERN_ERR "xc5000: xc_Set_IF_frequency(%d) failed\n",
- priv->if_khz);
- return -EIO;
- }
-
- xc_write_reg(priv, XREG_OUTPUT_AMP, 0x8a);
-
- xc_tune_channel(priv, priv->freq_hz, XC_TUNE_DIGITAL);
-
- if (debug)
- xc_debug_dump(priv);
-
- priv->bandwidth = bw;
-
- return 0;
+ return xc5000_tune_digital(fe);
}
static int xc5000_is_firmware_loaded(struct dvb_frontend *fe)
@@ -852,12 +865,10 @@ static int xc5000_is_firmware_loaded(struct dvb_frontend *fe)
return ret;
}
-static int xc5000_set_tv_freq(struct dvb_frontend *fe,
- struct analog_parameters *params)
+static void xc5000_config_tv(struct dvb_frontend *fe,
+ struct analog_parameters *params)
{
struct xc5000_priv *priv = fe->tuner_priv;
- u16 pll_lock_status;
- int ret;
dprintk(1, "%s() frequency=%d (in units of 62.5khz)\n",
__func__, params->frequency);
@@ -876,42 +887,49 @@ static int xc5000_set_tv_freq(struct dvb_frontend *fe,
if (params->std & V4L2_STD_MN) {
/* default to BTSC audio standard */
priv->video_standard = MN_NTSC_PAL_BTSC;
- goto tune_channel;
+ return;
}
if (params->std & V4L2_STD_PAL_BG) {
/* default to NICAM audio standard */
priv->video_standard = BG_PAL_NICAM;
- goto tune_channel;
+ return;
}
if (params->std & V4L2_STD_PAL_I) {
/* default to NICAM audio standard */
priv->video_standard = I_PAL_NICAM;
- goto tune_channel;
+ return;
}
if (params->std & V4L2_STD_PAL_DK) {
/* default to NICAM audio standard */
priv->video_standard = DK_PAL_NICAM;
- goto tune_channel;
+ return;
}
if (params->std & V4L2_STD_SECAM_DK) {
/* default to A2 DK1 audio standard */
priv->video_standard = DK_SECAM_A2DK1;
- goto tune_channel;
+ return;
}
if (params->std & V4L2_STD_SECAM_L) {
priv->video_standard = L_SECAM_NICAM;
- goto tune_channel;
+ return;
}
if (params->std & V4L2_STD_SECAM_LC) {
priv->video_standard = LC_SECAM_NICAM;
- goto tune_channel;
+ return;
}
+}
+
+static int xc5000_set_tv_freq(struct dvb_frontend *fe)
+{
+ struct xc5000_priv *priv = fe->tuner_priv;
+ u16 pll_lock_status;
+ int ret;
tune_channel:
ret = xc_set_signal_source(priv, priv->rf_mode);
@@ -955,12 +973,11 @@ tune_channel:
return 0;
}
-static int xc5000_set_radio_freq(struct dvb_frontend *fe,
- struct analog_parameters *params)
+static int xc5000_config_radio(struct dvb_frontend *fe,
+ struct analog_parameters *params)
+
{
struct xc5000_priv *priv = fe->tuner_priv;
- int ret = -EINVAL;
- u8 radio_input;
dprintk(1, "%s() frequency=%d (in units of khz)\n",
__func__, params->frequency);
@@ -970,6 +987,18 @@ static int xc5000_set_radio_freq(struct dvb_frontend *fe,
return -EINVAL;
}
+ priv->freq_hz = params->frequency * 125 / 2;
+ priv->rf_mode = XC_RF_MODE_AIR;
+
+ return 0;
+}
+
+static int xc5000_set_radio_freq(struct dvb_frontend *fe)
+{
+ struct xc5000_priv *priv = fe->tuner_priv;
+ int ret;
+ u8 radio_input;
+
if (priv->radio_input == XC5000_RADIO_FM1)
radio_input = FM_RADIO_INPUT1;
else if (priv->radio_input == XC5000_RADIO_FM2)
@@ -982,10 +1011,6 @@ static int xc5000_set_radio_freq(struct dvb_frontend *fe,
return -EINVAL;
}
- priv->freq_hz = params->frequency * 125 / 2;
-
- priv->rf_mode = XC_RF_MODE_AIR;
-
ret = xc_set_tv_standard(priv, xc5000_standard[radio_input].video_mode,
xc5000_standard[radio_input].audio_mode, radio_input);
@@ -1013,34 +1038,53 @@ static int xc5000_set_radio_freq(struct dvb_frontend *fe,
return 0;
}
-static int xc5000_set_analog_params(struct dvb_frontend *fe,
- struct analog_parameters *params)
+static int xc5000_set_params(struct dvb_frontend *fe)
{
struct xc5000_priv *priv = fe->tuner_priv;
- int ret = -EINVAL;
-
- if (priv->i2c_props.adap == NULL)
- return -EINVAL;
if (xc_load_fw_and_init_tuner(fe, 0) != 0) {
dprintk(1, "Unable to load firmware and init tuner\n");
return -EINVAL;
}
+ switch (priv->mode) {
+ case V4L2_TUNER_RADIO:
+ return xc5000_set_radio_freq(fe);
+ case V4L2_TUNER_ANALOG_TV:
+ return xc5000_set_tv_freq(fe);
+ case V4L2_TUNER_DIGITAL_TV:
+ return xc5000_tune_digital(fe);
+ }
+
+ return 0;
+}
+
+static int xc5000_set_analog_params(struct dvb_frontend *fe,
+ struct analog_parameters *params)
+{
+ struct xc5000_priv *priv = fe->tuner_priv;
+ int ret;
+
+ if (priv->i2c_props.adap == NULL)
+ return -EINVAL;
+
switch (params->mode) {
case V4L2_TUNER_RADIO:
- ret = xc5000_set_radio_freq(fe, params);
+ ret = xc5000_config_radio(fe, params);
+ if (ret)
+ return ret;
break;
case V4L2_TUNER_ANALOG_TV:
- case V4L2_TUNER_DIGITAL_TV:
- ret = xc5000_set_tv_freq(fe, params);
+ xc5000_config_tv(fe, params);
+ break;
+ default:
break;
}
+ priv->mode = params->mode;
- return ret;
+ return xc5000_set_params(fe);
}
-
static int xc5000_get_frequency(struct dvb_frontend *fe, u32 *freq)
{
struct xc5000_priv *priv = fe->tuner_priv;
@@ -1094,20 +1138,23 @@ static int xc_load_fw_and_init_tuner(struct dvb_frontend *fe, int force)
if (!force && xc5000_is_firmware_loaded(fe) == 0)
return 0;
- ret = request_firmware(&fw, desired_fw->name,
- priv->i2c_props.adap->dev.parent);
- if (ret) {
- printk(KERN_ERR "xc5000: Upload failed. (file not found?)\n");
- return ret;
- }
-
- dprintk(1, "firmware read %Zu bytes.\n", fw->size);
+ if (!priv->firmware) {
+ ret = request_firmware(&fw, desired_fw->name,
+ priv->i2c_props.adap->dev.parent);
+ if (ret) {
+ pr_err("xc5000: Upload failed. rc %d\n", ret);
+ return ret;
+ }
+ dprintk(1, "firmware read %Zu bytes.\n", fw->size);
- if (fw->size != desired_fw->size) {
- printk(KERN_ERR "xc5000: Firmware file with incorrect size\n");
- ret = -EINVAL;
- goto err;
- }
+ if (fw->size != desired_fw->size) {
+ pr_err("xc5000: Firmware file with incorrect size\n");
+ release_firmware(fw);
+ return -EINVAL;
+ }
+ priv->firmware = fw;
+ } else
+ fw = priv->firmware;
/* Try up to 5 times to load firmware */
for (i = 0; i < 5; i++) {
@@ -1190,7 +1237,6 @@ err:
else
printk(KERN_CONT " - too many retries. Giving up\n");
- release_firmware(fw);
return ret;
}
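The two hunks above change the firmware lifetime: the blob is requested from user space only once, validated against the expected size, and then cached in priv->firmware instead of being released after every load attempt, which is what lets the resume path re-program the tuner without touching the filesystem. The same logic as a self-contained sketch (the helper name and size parameter are illustrative; the fields mirror struct xc5000_priv as changed above):

/* Sketch of the request-once pattern; the blob is freed in xc5000_release(). */
static int xc5000_get_firmware_sketch(struct xc5000_priv *priv,
				      const char *name, size_t expected_size,
				      const struct firmware **fw)
{
	int ret;

	if (priv->firmware) {
		*fw = priv->firmware;	/* reuse cached copy, e.g. on resume */
		return 0;
	}

	ret = request_firmware(fw, name, priv->i2c_props.adap->dev.parent);
	if (ret)
		return ret;

	if ((*fw)->size != expected_size) {
		release_firmware(*fw);
		return -EINVAL;
	}

	priv->firmware = *fw;		/* kept until the tuner is released */
	return 0;
}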
@@ -1229,6 +1275,38 @@ static int xc5000_sleep(struct dvb_frontend *fe)
return 0;
}
+static int xc5000_suspend(struct dvb_frontend *fe)
+{
+ struct xc5000_priv *priv = fe->tuner_priv;
+ int ret;
+
+ dprintk(1, "%s()\n", __func__);
+
+ cancel_delayed_work(&priv->timer_sleep);
+
+ ret = xc5000_tuner_reset(fe);
+ if (ret != 0)
+ printk(KERN_ERR
+ "xc5000: %s() unable to shutdown tuner\n",
+ __func__);
+
+ return 0;
+}
+
+static int xc5000_resume(struct dvb_frontend *fe)
+{
+ struct xc5000_priv *priv = fe->tuner_priv;
+
+ dprintk(1, "%s()\n", __func__);
+
+ /* suspended before firmware is loaded.
+ Avoid firmware load in resume path. */
+ if (!priv->firmware)
+ return 0;
+
+ return xc5000_set_params(fe);
+}
+
static int xc5000_init(struct dvb_frontend *fe)
{
struct xc5000_priv *priv = fe->tuner_priv;
@@ -1256,6 +1334,8 @@ static int xc5000_release(struct dvb_frontend *fe)
if (priv) {
cancel_delayed_work(&priv->timer_sleep);
hybrid_tuner_release_state(priv);
+ if (priv->firmware)
+ release_firmware(priv->firmware);
}
mutex_unlock(&xc5000_list_mutex);
@@ -1293,9 +1373,11 @@ static const struct dvb_tuner_ops xc5000_tuner_ops = {
.release = xc5000_release,
.init = xc5000_init,
.sleep = xc5000_sleep,
+ .suspend = xc5000_suspend,
+ .resume = xc5000_resume,
.set_config = xc5000_set_config,
- .set_params = xc5000_set_params,
+ .set_params = xc5000_set_digital_params,
.set_analog_params = xc5000_set_analog_params,
.get_frequency = xc5000_get_frequency,
.get_if_frequency = xc5000_get_if_frequency,
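The new .suspend/.resume hooks are meant to be reached through the dvb-core helpers rather than called directly: a bridge driver's power-management callbacks typically just delegate to dvb_frontend_suspend()/dvb_frontend_resume(), which in this series prefer these tuner hooks over .sleep/.init when they are present. A hedged sketch of that caller side (struct bridge_dev and its fe pointer are hypothetical):

/* Hypothetical bridge PM callbacks; the dvb-core helpers end up invoking
 * xc5000_suspend()/xc5000_resume() through the tuner_ops table above. */
static int bridge_suspend(struct usb_interface *intf, pm_message_t msg)
{
	struct bridge_dev *dev = usb_get_intfdata(intf);

	return dvb_frontend_suspend(dev->fe);
}

static int bridge_resume(struct usb_interface *intf)
{
	struct bridge_dev *dev = usb_get_intfdata(intf);

	return dvb_frontend_resume(dev->fe);
}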
diff --git a/drivers/media/usb/Kconfig b/drivers/media/usb/Kconfig
index 94d51e092db3..056181f2f569 100644
--- a/drivers/media/usb/Kconfig
+++ b/drivers/media/usb/Kconfig
@@ -46,6 +46,7 @@ source "drivers/media/usb/ttusb-budget/Kconfig"
source "drivers/media/usb/ttusb-dec/Kconfig"
source "drivers/media/usb/siano/Kconfig"
source "drivers/media/usb/b2c2/Kconfig"
+source "drivers/media/usb/as102/Kconfig"
endif
if (MEDIA_CAMERA_SUPPORT || MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT)
@@ -55,8 +56,9 @@ endif
if MEDIA_SDR_SUPPORT
comment "Software defined radio USB devices"
-source "drivers/media/usb/msi2500/Kconfig"
source "drivers/media/usb/airspy/Kconfig"
+source "drivers/media/usb/hackrf/Kconfig"
+source "drivers/media/usb/msi2500/Kconfig"
endif
endif #MEDIA_USB_SUPPORT
diff --git a/drivers/media/usb/Makefile b/drivers/media/usb/Makefile
index f438efffefc5..6f2eb7c8416c 100644
--- a/drivers/media/usb/Makefile
+++ b/drivers/media/usb/Makefile
@@ -9,8 +9,9 @@ obj-y += zr364xx/ stkwebcam/ s2255/
obj-$(CONFIG_USB_VIDEO_CLASS) += uvc/
obj-$(CONFIG_USB_GSPCA) += gspca/
obj-$(CONFIG_USB_PWC) += pwc/
-obj-$(CONFIG_USB_MSI2500) += msi2500/
obj-$(CONFIG_USB_AIRSPY) += airspy/
+obj-$(CONFIG_USB_HACKRF) += hackrf/
+obj-$(CONFIG_USB_MSI2500) += msi2500/
obj-$(CONFIG_VIDEO_CPIA2) += cpia2/
obj-$(CONFIG_VIDEO_AU0828) += au0828/
obj-$(CONFIG_VIDEO_HDPVR) += hdpvr/
@@ -23,3 +24,4 @@ obj-$(CONFIG_VIDEO_TM6000) += tm6000/
obj-$(CONFIG_VIDEO_EM28XX) += em28xx/
obj-$(CONFIG_VIDEO_USBTV) += usbtv/
obj-$(CONFIG_VIDEO_GO7007) += go7007/
+obj-$(CONFIG_DVB_AS102) += as102/
diff --git a/drivers/media/usb/airspy/airspy.c b/drivers/media/usb/airspy/airspy.c
index cb0e515d80ae..4069234abed5 100644
--- a/drivers/media/usb/airspy/airspy.c
+++ b/drivers/media/usb/airspy/airspy.c
@@ -107,6 +107,7 @@ struct airspy {
#define USB_STATE_URB_BUF (1 << 3)
unsigned long flags;
+ struct device *dev;
struct usb_device *udev;
struct video_device vdev;
struct v4l2_device v4l2_dev;
@@ -154,16 +155,15 @@ struct airspy {
unsigned int sample_measured;
};
-#define airspy_dbg_usb_control_msg(_udev, _r, _t, _v, _i, _b, _l) { \
+#define airspy_dbg_usb_control_msg(_dev, _r, _t, _v, _i, _b, _l) { \
char *_direction; \
if (_t & USB_DIR_IN) \
_direction = "<<<"; \
else \
_direction = ">>>"; \
- dev_dbg(&_udev->dev, "%s: %02x %02x %02x %02x %02x %02x %02x %02x " \
- "%s %*ph\n", __func__, _t, _r, _v & 0xff, _v >> 8, \
- _i & 0xff, _i >> 8, _l & 0xff, _l >> 8, _direction, \
- _l, _b); \
+ dev_dbg(_dev, "%02x %02x %02x %02x %02x %02x %02x %02x %s %*ph\n", \
+ _t, _r, _v & 0xff, _v >> 8, _i & 0xff, _i >> 8, \
+ _l & 0xff, _l >> 8, _direction, _l, _b); \
}
/* execute firmware command */
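The macro rewrite above relies on two things: a struct device pointer cached in the state (s->dev), so callers stop spelling out &udev->dev everywhere, and the fact that dev_dbg() under dynamic debug can prepend the function name on request, so hard-coding __func__ into every format string is redundant. The %*ph specifier then hex-dumps the control-transfer payload (up to 64 bytes) without a manual loop. A minimal usage sketch with an illustrative buffer:

/* Illustrative only: trace a control transfer in one line via %*ph. */
static void trace_ctrl_xfer(struct device *dev, u8 request, u8 requesttype,
			    const u8 *buf, int len)
{
	dev_dbg(dev, "%02x %02x %s %*ph\n", requesttype, request,
		(requesttype & USB_DIR_IN) ? "<<<" : ">>>", len, buf);
}

At runtime the messages are enabled through the dynamic-debug control file, e.g. echo 'func airspy_ctrl_msg +pf' > /sys/kernel/debug/dynamic_debug/control, where the f flag adds the function name that the format strings no longer carry.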
@@ -192,7 +192,7 @@ static int airspy_ctrl_msg(struct airspy *s, u8 request, u16 value, u16 index,
requesttype = (USB_TYPE_VENDOR | USB_DIR_IN);
break;
default:
- dev_err(&s->udev->dev, "Unknown command %02x\n", request);
+ dev_err(s->dev, "Unknown command %02x\n", request);
ret = -EINVAL;
goto err;
}
@@ -203,11 +203,10 @@ static int airspy_ctrl_msg(struct airspy *s, u8 request, u16 value, u16 index,
ret = usb_control_msg(s->udev, pipe, request, requesttype, value,
index, s->buf, size, 1000);
- airspy_dbg_usb_control_msg(s->udev, request, requesttype, value,
+ airspy_dbg_usb_control_msg(s->dev, request, requesttype, value,
index, s->buf, size);
if (ret < 0) {
- dev_err(&s->udev->dev,
- "usb_control_msg() failed %d request %02x\n",
+ dev_err(s->dev, "usb_control_msg() failed %d request %02x\n",
ret, request);
goto err;
}
@@ -224,7 +223,7 @@ err:
/* Private functions */
static struct airspy_frame_buf *airspy_get_next_fill_buf(struct airspy *s)
{
- unsigned long flags = 0;
+ unsigned long flags;
struct airspy_frame_buf *buf = NULL;
spin_lock_irqsave(&s->queued_bufs_lock, flags);
@@ -251,16 +250,18 @@ static unsigned int airspy_convert_stream(struct airspy *s,
dst_len = 0;
}
- /* calculate samping rate and output it in 10 seconds intervals */
+	/* calculate sample rate and output it in 10 second intervals */
if (unlikely(time_is_before_jiffies(s->jiffies_next))) {
#define MSECS 10000UL
+ unsigned int msecs = jiffies_to_msecs(jiffies -
+ s->jiffies_next + msecs_to_jiffies(MSECS));
unsigned int samples = s->sample - s->sample_measured;
+
s->jiffies_next = jiffies + msecs_to_jiffies(MSECS);
s->sample_measured = s->sample;
- dev_dbg(&s->udev->dev,
- "slen=%d samples=%u msecs=%lu sample rate=%lu\n",
- src_len, samples, MSECS,
- samples * 1000UL / MSECS);
+ dev_dbg(s->dev, "slen=%u samples=%u msecs=%u sample rate=%lu\n",
+ src_len, samples, msecs,
+ samples * 1000UL / msecs);
}
/* total number of samples */
@@ -278,9 +279,8 @@ static void airspy_urb_complete(struct urb *urb)
struct airspy *s = urb->context;
struct airspy_frame_buf *fbuf;
- dev_dbg_ratelimited(&s->udev->dev,
- "%s: status=%d length=%d/%d errors=%d\n",
- __func__, urb->status, urb->actual_length,
+ dev_dbg_ratelimited(s->dev, "status=%d length=%d/%d errors=%d\n",
+ urb->status, urb->actual_length,
urb->transfer_buffer_length, urb->error_count);
switch (urb->status) {
@@ -292,8 +292,7 @@ static void airspy_urb_complete(struct urb *urb)
case -ESHUTDOWN:
return;
default: /* error */
- dev_err_ratelimited(&s->udev->dev, "URB failed %d\n",
- urb->status);
+ dev_err_ratelimited(s->dev, "URB failed %d\n", urb->status);
break;
}
@@ -304,7 +303,7 @@ static void airspy_urb_complete(struct urb *urb)
fbuf = airspy_get_next_fill_buf(s);
if (unlikely(fbuf == NULL)) {
s->vb_full++;
- dev_notice_ratelimited(&s->udev->dev,
+ dev_notice_ratelimited(s->dev,
"videobuf is full, %d packets dropped\n",
s->vb_full);
goto skip;
@@ -328,7 +327,7 @@ static int airspy_kill_urbs(struct airspy *s)
int i;
for (i = s->urbs_submitted - 1; i >= 0; i--) {
- dev_dbg(&s->udev->dev, "%s: kill urb=%d\n", __func__, i);
+ dev_dbg(s->dev, "kill urb=%d\n", i);
/* stop the URB */
usb_kill_urb(s->urb_list[i]);
}
@@ -342,11 +341,10 @@ static int airspy_submit_urbs(struct airspy *s)
int i, ret;
for (i = 0; i < s->urbs_initialized; i++) {
- dev_dbg(&s->udev->dev, "%s: submit urb=%d\n", __func__, i);
+ dev_dbg(s->dev, "submit urb=%d\n", i);
ret = usb_submit_urb(s->urb_list[i], GFP_ATOMIC);
if (ret) {
- dev_err(&s->udev->dev,
- "Could not submit URB no. %d - get them all back\n",
+ dev_err(s->dev, "Could not submit URB no. %d - get them all back\n",
i);
airspy_kill_urbs(s);
return ret;
@@ -362,8 +360,7 @@ static int airspy_free_stream_bufs(struct airspy *s)
if (s->flags & USB_STATE_URB_BUF) {
while (s->buf_num) {
s->buf_num--;
- dev_dbg(&s->udev->dev, "%s: free buf=%d\n",
- __func__, s->buf_num);
+ dev_dbg(s->dev, "free buf=%d\n", s->buf_num);
usb_free_coherent(s->udev, s->buf_size,
s->buf_list[s->buf_num],
s->dma_addr[s->buf_num]);
@@ -379,23 +376,20 @@ static int airspy_alloc_stream_bufs(struct airspy *s)
s->buf_num = 0;
s->buf_size = BULK_BUFFER_SIZE;
- dev_dbg(&s->udev->dev,
- "%s: all in all I will use %u bytes for streaming\n",
- __func__, MAX_BULK_BUFS * BULK_BUFFER_SIZE);
+ dev_dbg(s->dev, "all in all I will use %u bytes for streaming\n",
+ MAX_BULK_BUFS * BULK_BUFFER_SIZE);
for (s->buf_num = 0; s->buf_num < MAX_BULK_BUFS; s->buf_num++) {
s->buf_list[s->buf_num] = usb_alloc_coherent(s->udev,
BULK_BUFFER_SIZE, GFP_ATOMIC,
&s->dma_addr[s->buf_num]);
if (!s->buf_list[s->buf_num]) {
- dev_dbg(&s->udev->dev, "%s: alloc buf=%d failed\n",
- __func__, s->buf_num);
+ dev_dbg(s->dev, "alloc buf=%d failed\n", s->buf_num);
airspy_free_stream_bufs(s);
return -ENOMEM;
}
- dev_dbg(&s->udev->dev, "%s: alloc buf=%d %p (dma %llu)\n",
- __func__, s->buf_num,
+ dev_dbg(s->dev, "alloc buf=%d %p (dma %llu)\n", s->buf_num,
s->buf_list[s->buf_num],
(long long)s->dma_addr[s->buf_num]);
s->flags |= USB_STATE_URB_BUF;
@@ -412,8 +406,7 @@ static int airspy_free_urbs(struct airspy *s)
for (i = s->urbs_initialized - 1; i >= 0; i--) {
if (s->urb_list[i]) {
- dev_dbg(&s->udev->dev, "%s: free urb=%d\n",
- __func__, i);
+ dev_dbg(s->dev, "free urb=%d\n", i);
/* free the URBs */
usb_free_urb(s->urb_list[i]);
}
@@ -429,10 +422,10 @@ static int airspy_alloc_urbs(struct airspy *s)
/* allocate the URBs */
for (i = 0; i < MAX_BULK_BUFS; i++) {
- dev_dbg(&s->udev->dev, "%s: alloc urb=%d\n", __func__, i);
+ dev_dbg(s->dev, "alloc urb=%d\n", i);
s->urb_list[i] = usb_alloc_urb(0, GFP_ATOMIC);
if (!s->urb_list[i]) {
- dev_dbg(&s->udev->dev, "%s: failed\n", __func__);
+ dev_dbg(s->dev, "failed\n");
for (j = 0; j < i; j++)
usb_free_urb(s->urb_list[j]);
return -ENOMEM;
@@ -455,13 +448,14 @@ static int airspy_alloc_urbs(struct airspy *s)
/* Must be called with vb_queue_lock hold */
static void airspy_cleanup_queued_bufs(struct airspy *s)
{
- unsigned long flags = 0;
+ unsigned long flags;
- dev_dbg(&s->udev->dev, "%s:\n", __func__);
+ dev_dbg(s->dev, "\n");
spin_lock_irqsave(&s->queued_bufs_lock, flags);
while (!list_empty(&s->queued_bufs)) {
struct airspy_frame_buf *buf;
+
buf = list_entry(s->queued_bufs.next,
struct airspy_frame_buf, list);
list_del(&buf->list);
@@ -476,7 +470,7 @@ static void airspy_disconnect(struct usb_interface *intf)
struct v4l2_device *v = usb_get_intfdata(intf);
struct airspy *s = container_of(v, struct airspy, v4l2_dev);
- dev_dbg(&s->udev->dev, "%s:\n", __func__);
+ dev_dbg(s->dev, "\n");
mutex_lock(&s->vb_queue_lock);
mutex_lock(&s->v4l2_lock);
@@ -497,7 +491,7 @@ static int airspy_queue_setup(struct vb2_queue *vq,
{
struct airspy *s = vb2_get_drv_priv(vq);
- dev_dbg(&s->udev->dev, "%s: *nbuffers=%d\n", __func__, *nbuffers);
+ dev_dbg(s->dev, "nbuffers=%d\n", *nbuffers);
/* Need at least 8 buffers */
if (vq->num_buffers + *nbuffers < 8)
@@ -505,8 +499,7 @@ static int airspy_queue_setup(struct vb2_queue *vq,
*nplanes = 1;
sizes[0] = PAGE_ALIGN(s->buffersize);
- dev_dbg(&s->udev->dev, "%s: nbuffers=%d sizes[0]=%d\n",
- __func__, *nbuffers, sizes[0]);
+ dev_dbg(s->dev, "nbuffers=%d sizes[0]=%d\n", *nbuffers, sizes[0]);
return 0;
}
@@ -515,7 +508,7 @@ static void airspy_buf_queue(struct vb2_buffer *vb)
struct airspy *s = vb2_get_drv_priv(vb->vb2_queue);
struct airspy_frame_buf *buf =
container_of(vb, struct airspy_frame_buf, vb);
- unsigned long flags = 0;
+ unsigned long flags;
/* Check the device has not disconnected between prep and queuing */
if (unlikely(!s->udev)) {
@@ -533,34 +526,56 @@ static int airspy_start_streaming(struct vb2_queue *vq, unsigned int count)
struct airspy *s = vb2_get_drv_priv(vq);
int ret;
- dev_dbg(&s->udev->dev, "%s:\n", __func__);
+ dev_dbg(s->dev, "\n");
if (!s->udev)
return -ENODEV;
mutex_lock(&s->v4l2_lock);
- set_bit(POWER_ON, &s->flags);
-
s->sequence = 0;
+ set_bit(POWER_ON, &s->flags);
+
ret = airspy_alloc_stream_bufs(s);
if (ret)
- goto err;
+ goto err_clear_bit;
ret = airspy_alloc_urbs(s);
if (ret)
- goto err;
+ goto err_free_stream_bufs;
ret = airspy_submit_urbs(s);
if (ret)
- goto err;
+ goto err_free_urbs;
/* start hardware streaming */
ret = airspy_ctrl_msg(s, CMD_RECEIVER_MODE, 1, 0, NULL, 0);
if (ret)
- goto err;
-err:
+ goto err_kill_urbs;
+
+ goto exit_mutex_unlock;
+
+err_kill_urbs:
+ airspy_kill_urbs(s);
+err_free_urbs:
+ airspy_free_urbs(s);
+err_free_stream_bufs:
+ airspy_free_stream_bufs(s);
+err_clear_bit:
+ clear_bit(POWER_ON, &s->flags);
+
+ /* return all queued buffers to vb2 */
+ {
+ struct airspy_frame_buf *buf, *tmp;
+
+ list_for_each_entry_safe(buf, tmp, &s->queued_bufs, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+ }
+ }
+
+exit_mutex_unlock:
mutex_unlock(&s->v4l2_lock);
return ret;
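The rewritten error path above replaces the old single catch-all label with one label per acquired resource, placed in reverse order of acquisition, so a failure at any step unwinds exactly what was set up before it and nothing else, while the success path jumps over the unwind block entirely. Reduced to its shape, with hypothetical step names:

/* Shape of the unwind pattern above (all names are illustrative). */
static int start_capture(struct cap_dev *d)
{
	int ret;

	ret = alloc_buffers(d);
	if (ret)
		return ret;

	ret = alloc_urbs(d);
	if (ret)
		goto err_free_buffers;

	ret = submit_urbs(d);
	if (ret)
		goto err_free_urbs;

	return 0;

err_free_urbs:
	free_urbs(d);
err_free_buffers:
	free_buffers(d);
	return ret;
}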
@@ -570,7 +585,7 @@ static void airspy_stop_streaming(struct vb2_queue *vq)
{
struct airspy *s = vb2_get_drv_priv(vq);
- dev_dbg(&s->udev->dev, "%s:\n", __func__);
+ dev_dbg(s->dev, "\n");
mutex_lock(&s->v4l2_lock);
@@ -602,8 +617,6 @@ static int airspy_querycap(struct file *file, void *fh,
{
struct airspy *s = video_drvdata(file);
- dev_dbg(&s->udev->dev, "%s:\n", __func__);
-
strlcpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
strlcpy(cap->card, s->vdev.name, sizeof(cap->card));
usb_make_path(s->udev, cap->bus_info, sizeof(cap->bus_info));
@@ -617,10 +630,6 @@ static int airspy_querycap(struct file *file, void *fh,
static int airspy_enum_fmt_sdr_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct airspy *s = video_drvdata(file);
-
- dev_dbg(&s->udev->dev, "%s: index=%d\n", __func__, f->index);
-
if (f->index >= NUM_FORMATS)
return -EINVAL;
@@ -635,9 +644,6 @@ static int airspy_g_fmt_sdr_cap(struct file *file, void *priv,
{
struct airspy *s = video_drvdata(file);
- dev_dbg(&s->udev->dev, "%s: pixelformat fourcc %4.4s\n", __func__,
- (char *)&s->pixelformat);
-
f->fmt.sdr.pixelformat = s->pixelformat;
f->fmt.sdr.buffersize = s->buffersize;
memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
@@ -652,9 +658,6 @@ static int airspy_s_fmt_sdr_cap(struct file *file, void *priv,
struct vb2_queue *q = &s->vb_queue;
int i;
- dev_dbg(&s->udev->dev, "%s: pixelformat fourcc %4.4s\n", __func__,
- (char *)&f->fmt.sdr.pixelformat);
-
if (vb2_is_busy(q))
return -EBUSY;
@@ -679,12 +682,8 @@ static int airspy_s_fmt_sdr_cap(struct file *file, void *priv,
static int airspy_try_fmt_sdr_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct airspy *s = video_drvdata(file);
int i;
- dev_dbg(&s->udev->dev, "%s: pixelformat fourcc %4.4s\n", __func__,
- (char *)&f->fmt.sdr.pixelformat);
-
memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
for (i = 0; i < NUM_FORMATS; i++) {
if (formats[i].pixelformat == f->fmt.sdr.pixelformat) {
@@ -702,11 +701,8 @@ static int airspy_try_fmt_sdr_cap(struct file *file, void *priv,
static int airspy_s_tuner(struct file *file, void *priv,
const struct v4l2_tuner *v)
{
- struct airspy *s = video_drvdata(file);
int ret;
- dev_dbg(&s->udev->dev, "%s: index=%d\n", __func__, v->index);
-
if (v->index == 0)
ret = 0;
else if (v->index == 1)
@@ -719,11 +715,8 @@ static int airspy_s_tuner(struct file *file, void *priv,
static int airspy_g_tuner(struct file *file, void *priv, struct v4l2_tuner *v)
{
- struct airspy *s = video_drvdata(file);
int ret;
- dev_dbg(&s->udev->dev, "%s: index=%d\n", __func__, v->index);
-
if (v->index == 0) {
strlcpy(v->name, "AirSpy ADC", sizeof(v->name));
v->type = V4L2_TUNER_ADC;
@@ -749,17 +742,18 @@ static int airspy_g_frequency(struct file *file, void *priv,
struct v4l2_frequency *f)
{
struct airspy *s = video_drvdata(file);
- int ret = 0;
- dev_dbg(&s->udev->dev, "%s: tuner=%d type=%d\n",
- __func__, f->tuner, f->type);
+ int ret;
if (f->tuner == 0) {
f->type = V4L2_TUNER_ADC;
f->frequency = s->f_adc;
+ dev_dbg(s->dev, "ADC frequency=%u Hz\n", s->f_adc);
ret = 0;
} else if (f->tuner == 1) {
f->type = V4L2_TUNER_RF;
f->frequency = s->f_rf;
+ dev_dbg(s->dev, "RF frequency=%u Hz\n", s->f_rf);
+ ret = 0;
} else {
ret = -EINVAL;
}
@@ -774,22 +768,17 @@ static int airspy_s_frequency(struct file *file, void *priv,
int ret;
u8 buf[4];
- dev_dbg(&s->udev->dev, "%s: tuner=%d type=%d frequency=%u\n",
- __func__, f->tuner, f->type, f->frequency);
-
if (f->tuner == 0) {
s->f_adc = clamp_t(unsigned int, f->frequency,
bands[0].rangelow,
bands[0].rangehigh);
- dev_dbg(&s->udev->dev, "%s: ADC frequency=%u Hz\n",
- __func__, s->f_adc);
+ dev_dbg(s->dev, "ADC frequency=%u Hz\n", s->f_adc);
ret = 0;
} else if (f->tuner == 1) {
s->f_rf = clamp_t(unsigned int, f->frequency,
bands_rf[0].rangelow,
bands_rf[0].rangehigh);
- dev_dbg(&s->udev->dev, "%s: RF frequency=%u Hz\n",
- __func__, s->f_rf);
+ dev_dbg(s->dev, "RF frequency=%u Hz\n", s->f_rf);
buf[0] = (s->f_rf >> 0) & 0xff;
buf[1] = (s->f_rf >> 8) & 0xff;
buf[2] = (s->f_rf >> 16) & 0xff;
@@ -805,10 +794,7 @@ static int airspy_s_frequency(struct file *file, void *priv,
static int airspy_enum_freq_bands(struct file *file, void *priv,
struct v4l2_frequency_band *band)
{
- struct airspy *s = video_drvdata(file);
int ret;
- dev_dbg(&s->udev->dev, "%s: tuner=%d type=%d index=%d\n",
- __func__, band->tuner, band->type, band->index);
if (band->tuner == 0) {
if (band->index >= ARRAY_SIZE(bands)) {
@@ -892,10 +878,9 @@ static int airspy_set_lna_gain(struct airspy *s)
int ret;
u8 u8tmp;
- dev_dbg(&s->udev->dev, "%s: lna auto=%d->%d val=%d->%d\n",
- __func__, s->lna_gain_auto->cur.val,
- s->lna_gain_auto->val, s->lna_gain->cur.val,
- s->lna_gain->val);
+ dev_dbg(s->dev, "lna auto=%d->%d val=%d->%d\n",
+ s->lna_gain_auto->cur.val, s->lna_gain_auto->val,
+ s->lna_gain->cur.val, s->lna_gain->val);
ret = airspy_ctrl_msg(s, CMD_SET_LNA_AGC, 0, s->lna_gain_auto->val,
&u8tmp, 1);
@@ -910,7 +895,7 @@ static int airspy_set_lna_gain(struct airspy *s)
}
err:
if (ret)
- dev_dbg(&s->udev->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(s->dev, "failed=%d\n", ret);
return ret;
}
@@ -920,10 +905,9 @@ static int airspy_set_mixer_gain(struct airspy *s)
int ret;
u8 u8tmp;
- dev_dbg(&s->udev->dev, "%s: mixer auto=%d->%d val=%d->%d\n",
- __func__, s->mixer_gain_auto->cur.val,
- s->mixer_gain_auto->val, s->mixer_gain->cur.val,
- s->mixer_gain->val);
+ dev_dbg(s->dev, "mixer auto=%d->%d val=%d->%d\n",
+ s->mixer_gain_auto->cur.val, s->mixer_gain_auto->val,
+ s->mixer_gain->cur.val, s->mixer_gain->val);
ret = airspy_ctrl_msg(s, CMD_SET_MIXER_AGC, 0, s->mixer_gain_auto->val,
&u8tmp, 1);
@@ -938,7 +922,7 @@ static int airspy_set_mixer_gain(struct airspy *s)
}
err:
if (ret)
- dev_dbg(&s->udev->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(s->dev, "failed=%d\n", ret);
return ret;
}
@@ -948,8 +932,7 @@ static int airspy_set_if_gain(struct airspy *s)
int ret;
u8 u8tmp;
- dev_dbg(&s->udev->dev, "%s: val=%d->%d\n",
- __func__, s->if_gain->cur.val, s->if_gain->val);
+ dev_dbg(s->dev, "val=%d->%d\n", s->if_gain->cur.val, s->if_gain->val);
ret = airspy_ctrl_msg(s, CMD_SET_VGA_GAIN, 0, s->if_gain->val,
&u8tmp, 1);
@@ -957,7 +940,7 @@ static int airspy_set_if_gain(struct airspy *s)
goto err;
err:
if (ret)
- dev_dbg(&s->udev->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(s->dev, "failed=%d\n", ret);
return ret;
}
@@ -980,8 +963,8 @@ static int airspy_s_ctrl(struct v4l2_ctrl *ctrl)
ret = airspy_set_if_gain(s);
break;
default:
- dev_dbg(&s->udev->dev, "%s: unknown ctrl: id=%d name=%s\n",
- __func__, ctrl->id, ctrl->name);
+ dev_dbg(s->dev, "unknown ctrl: id=%d name=%s\n",
+ ctrl->id, ctrl->name);
ret = -EINVAL;
}
@@ -995,15 +978,13 @@ static const struct v4l2_ctrl_ops airspy_ctrl_ops = {
static int airspy_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
- struct usb_device *udev = interface_to_usbdev(intf);
- struct airspy *s = NULL;
+ struct airspy *s;
int ret;
u8 u8tmp, buf[BUF_SIZE];
s = kzalloc(sizeof(struct airspy), GFP_KERNEL);
if (s == NULL) {
- dev_err(&udev->dev,
- "Could not allocate memory for airspy state\n");
+ dev_err(&intf->dev, "Could not allocate memory for state\n");
return -ENOMEM;
}
@@ -1011,7 +992,8 @@ static int airspy_probe(struct usb_interface *intf,
mutex_init(&s->vb_queue_lock);
spin_lock_init(&s->queued_bufs_lock);
INIT_LIST_HEAD(&s->queued_bufs);
- s->udev = udev;
+ s->dev = &intf->dev;
+ s->udev = interface_to_usbdev(intf);
s->f_adc = bands[0].rangelow;
s->f_rf = bands_rf[0].rangelow;
s->pixelformat = formats[0].pixelformat;
@@ -1023,14 +1005,14 @@ static int airspy_probe(struct usb_interface *intf,
ret = airspy_ctrl_msg(s, CMD_VERSION_STRING_READ, 0, 0,
buf, BUF_SIZE);
if (ret) {
- dev_err(&s->udev->dev, "Could not detect board\n");
+ dev_err(s->dev, "Could not detect board\n");
goto err_free_mem;
}
buf[BUF_SIZE - 1] = '\0';
- dev_info(&s->udev->dev, "Board ID: %02x\n", u8tmp);
- dev_info(&s->udev->dev, "Firmware version: %s\n", buf);
+ dev_info(s->dev, "Board ID: %02x\n", u8tmp);
+ dev_info(s->dev, "Firmware version: %s\n", buf);
/* Init videobuf2 queue structure */
s->vb_queue.type = V4L2_BUF_TYPE_SDR_CAPTURE;
@@ -1042,7 +1024,7 @@ static int airspy_probe(struct usb_interface *intf,
s->vb_queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
ret = vb2_queue_init(&s->vb_queue);
if (ret) {
- dev_err(&s->udev->dev, "Could not initialize vb2 queue\n");
+ dev_err(s->dev, "Could not initialize vb2 queue\n");
goto err_free_mem;
}
@@ -1056,8 +1038,7 @@ static int airspy_probe(struct usb_interface *intf,
s->v4l2_dev.release = airspy_video_release;
ret = v4l2_device_register(&intf->dev, &s->v4l2_dev);
if (ret) {
- dev_err(&s->udev->dev,
- "Failed to register v4l2-device (%d)\n", ret);
+ dev_err(s->dev, "Failed to register v4l2-device (%d)\n", ret);
goto err_free_mem;
}
@@ -1077,7 +1058,7 @@ static int airspy_probe(struct usb_interface *intf,
V4L2_CID_RF_TUNER_IF_GAIN, 0, 15, 1, 0);
if (s->hdl.error) {
ret = s->hdl.error;
- dev_err(&s->udev->dev, "Could not initialize controls\n");
+ dev_err(s->dev, "Could not initialize controls\n");
goto err_free_controls;
}
@@ -1089,16 +1070,13 @@ static int airspy_probe(struct usb_interface *intf,
ret = video_register_device(&s->vdev, VFL_TYPE_SDR, -1);
if (ret) {
- dev_err(&s->udev->dev,
- "Failed to register as video device (%d)\n",
+ dev_err(s->dev, "Failed to register as video device (%d)\n",
ret);
goto err_unregister_v4l2_dev;
}
- dev_info(&s->udev->dev, "Registered as %s\n",
+ dev_info(s->dev, "Registered as %s\n",
video_device_node_name(&s->vdev));
- dev_notice(&s->udev->dev,
- "%s: SDR API is still slightly experimental and functionality changes may follow\n",
- KBUILD_MODNAME);
+ dev_notice(s->dev, "SDR API is still slightly experimental and functionality changes may follow\n");
return 0;
err_free_controls:
diff --git a/drivers/staging/media/as102/Kconfig b/drivers/media/usb/as102/Kconfig
index 28aba00dc629..28aba00dc629 100644
--- a/drivers/staging/media/as102/Kconfig
+++ b/drivers/media/usb/as102/Kconfig
diff --git a/drivers/staging/media/as102/Makefile b/drivers/media/usb/as102/Makefile
index 8916d8a909bc..22f43eee4a3b 100644
--- a/drivers/staging/media/as102/Makefile
+++ b/drivers/media/usb/as102/Makefile
@@ -1,6 +1,7 @@
dvb-as102-objs := as102_drv.o as102_fw.o as10x_cmd.o as10x_cmd_stream.o \
- as102_fe.o as102_usb_drv.o as10x_cmd_cfg.o
+ as102_usb_drv.o as10x_cmd_cfg.o
obj-$(CONFIG_DVB_AS102) += dvb-as102.o
ccflags-y += -Idrivers/media/dvb-core
+ccflags-y += -Idrivers/media/dvb-frontends
diff --git a/drivers/staging/media/as102/as102_drv.c b/drivers/media/usb/as102/as102_drv.c
index 09d64cd67502..8be1474b2c36 100644
--- a/drivers/staging/media/as102/as102_drv.c
+++ b/drivers/media/usb/as102/as102_drv.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -28,13 +24,11 @@
/* header file for usb device driver*/
#include "as102_drv.h"
+#include "as10x_cmd.h"
+#include "as102_fe.h"
#include "as102_fw.h"
#include "dvbdev.h"
-int as102_debug;
-module_param_named(debug, as102_debug, int, 0644);
-MODULE_PARM_DESC(debug, "Turn on/off debugging (default: off)");
-
int dual_tuner;
module_param_named(dual_tuner, dual_tuner, int, 0644);
MODULE_PARM_DESC(dual_tuner, "Activate Dual-Tuner config (default: off)");
@@ -74,7 +68,8 @@ static void as102_stop_stream(struct as102_dev_t *dev)
return;
if (as10x_cmd_stop_streaming(bus_adap) < 0)
- dprintk(debug, "as10x_cmd_stop_streaming failed\n");
+ dev_dbg(&dev->bus_adap.usb_dev->dev,
+ "as10x_cmd_stop_streaming failed\n");
mutex_unlock(&dev->bus_adap.lock);
}
@@ -112,14 +107,16 @@ static int as10x_pid_filter(struct as102_dev_t *dev,
int ret = -EFAULT;
if (mutex_lock_interruptible(&dev->bus_adap.lock)) {
- dprintk(debug, "mutex_lock_interruptible(lock) failed !\n");
+ dev_dbg(&dev->bus_adap.usb_dev->dev,
+			"mutex_lock_interruptible(lock) failed !\n");
return -EBUSY;
}
switch (onoff) {
case 0:
ret = as10x_cmd_del_PID_filter(bus_adap, (uint16_t) pid);
- dprintk(debug, "DEL_PID_FILTER([%02d] 0x%04x) ret = %d\n",
+ dev_dbg(&dev->bus_adap.usb_dev->dev,
+ "DEL_PID_FILTER([%02d] 0x%04x) ret = %d\n",
index, pid, ret);
break;
case 1:
@@ -131,7 +128,7 @@ static int as10x_pid_filter(struct as102_dev_t *dev,
filter.pid = pid;
ret = as10x_cmd_add_PID_filter(bus_adap, &filter);
- dprintk(debug,
+ dev_dbg(&dev->bus_adap.usb_dev->dev,
"ADD_PID_FILTER([%02d -> %02d], 0x%04x) ret = %d\n",
index, filter.idx, filter.pid, ret);
break;
@@ -181,6 +178,119 @@ static int as102_dvb_dmx_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
return 0;
}
+static int as102_set_tune(void *priv, struct as10x_tune_args *tune_args)
+{
+ struct as10x_bus_adapter_t *bus_adap = priv;
+ int ret;
+
+ /* Set frontend arguments */
+ if (mutex_lock_interruptible(&bus_adap->lock))
+ return -EBUSY;
+
+ ret = as10x_cmd_set_tune(bus_adap, tune_args);
+ if (ret != 0)
+ dev_dbg(&bus_adap->usb_dev->dev,
+ "as10x_cmd_set_tune failed. (err = %d)\n", ret);
+
+ mutex_unlock(&bus_adap->lock);
+
+ return ret;
+}
+
+static int as102_get_tps(void *priv, struct as10x_tps *tps)
+{
+ struct as10x_bus_adapter_t *bus_adap = priv;
+ int ret;
+
+ if (mutex_lock_interruptible(&bus_adap->lock))
+ return -EBUSY;
+
+ /* send abilis command: GET_TPS */
+ ret = as10x_cmd_get_tps(bus_adap, tps);
+
+ mutex_unlock(&bus_adap->lock);
+
+ return ret;
+}
+
+static int as102_get_status(void *priv, struct as10x_tune_status *tstate)
+{
+ struct as10x_bus_adapter_t *bus_adap = priv;
+ int ret;
+
+ if (mutex_lock_interruptible(&bus_adap->lock))
+ return -EBUSY;
+
+ /* send abilis command: GET_TUNE_STATUS */
+ ret = as10x_cmd_get_tune_status(bus_adap, tstate);
+ if (ret < 0) {
+ dev_dbg(&bus_adap->usb_dev->dev,
+ "as10x_cmd_get_tune_status failed (err = %d)\n",
+ ret);
+ }
+
+ mutex_unlock(&bus_adap->lock);
+
+ return ret;
+}
+
+static int as102_get_stats(void *priv, struct as10x_demod_stats *demod_stats)
+{
+ struct as10x_bus_adapter_t *bus_adap = priv;
+ int ret;
+
+ if (mutex_lock_interruptible(&bus_adap->lock))
+ return -EBUSY;
+
+	/* send abilis command: GET_DEMOD_STATS */
+ ret = as10x_cmd_get_demod_stats(bus_adap, demod_stats);
+ if (ret < 0) {
+ dev_dbg(&bus_adap->usb_dev->dev,
+ "as10x_cmd_get_demod_stats failed (probably not tuned)\n");
+ } else {
+ dev_dbg(&bus_adap->usb_dev->dev,
+ "demod status: fc: 0x%08x, bad fc: 0x%08x, bytes corrected: 0x%08x , MER: 0x%04x\n",
+ demod_stats->frame_count,
+ demod_stats->bad_frame_count,
+ demod_stats->bytes_fixed_by_rs,
+ demod_stats->mer);
+ }
+ mutex_unlock(&bus_adap->lock);
+
+ return ret;
+}
+
+static int as102_stream_ctrl(void *priv, int acquire, uint32_t elna_cfg)
+{
+ struct as10x_bus_adapter_t *bus_adap = priv;
+ int ret;
+
+ if (mutex_lock_interruptible(&bus_adap->lock))
+ return -EBUSY;
+
+ if (acquire) {
+ if (elna_enable)
+ as10x_cmd_set_context(bus_adap,
+ CONTEXT_LNA, elna_cfg);
+
+ ret = as10x_cmd_turn_on(bus_adap);
+ } else {
+ ret = as10x_cmd_turn_off(bus_adap);
+ }
+
+ mutex_unlock(&bus_adap->lock);
+
+ return ret;
+}
+
+static const struct as102_fe_ops as102_fe_ops = {
+ .set_tune = as102_set_tune,
+ .get_tps = as102_get_tps,
+ .get_status = as102_get_status,
+ .get_stats = as102_get_stats,
+ .stream_ctrl = as102_stream_ctrl,
+};
+
int as102_dvb_register(struct as102_dev_t *as102_dev)
{
struct device *dev = &as102_dev->bus_adap.usb_dev->dev;
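The new as102_fe_ops table is the seam that lets the demodulator code move out to drivers/media/dvb-frontends (pulled in via as102_fe.h above) while the USB transport stays in this file: the frontend only holds an opaque priv pointer, the bus adapter here, and calls back through these five hooks. On the frontend side the calls would take roughly this shape (the state layout and function below are illustrative, not quoted from as102_fe.c):

/* Illustrative consumer of as102_fe_ops from the frontend side. */
struct as102_fe_state_sketch {
	const struct as102_fe_ops *ops;
	void *priv;	/* bus adapter handed in at attach time */
};

static int as102_fe_sketch_set_frontend(struct dvb_frontend *fe)
{
	struct as102_fe_state_sketch *state = fe->demodulator_priv;
	struct as10x_tune_args tune_args = { };

	/* fill tune_args from fe->dtv_property_cache before tuning */
	return state->ops->set_tune(state->priv, &tune_args);
}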
@@ -221,7 +331,18 @@ int as102_dvb_register(struct as102_dev_t *as102_dev)
goto edmxdinit;
}
- ret = as102_dvb_register_fe(as102_dev, &as102_dev->dvb_fe);
+ /* Attach the frontend */
+ as102_dev->dvb_fe = dvb_attach(as102_attach, as102_dev->name,
+ &as102_fe_ops,
+ &as102_dev->bus_adap,
+ as102_dev->elna_cfg);
+ if (!as102_dev->dvb_fe) {
+		dev_err(dev, "%s: as102_attach() failed\n",
+			__func__);
+ goto efereg;
+ }
+
+ ret = dvb_register_frontend(&as102_dev->dvb_adap, as102_dev->dvb_fe);
if (ret < 0) {
dev_err(dev, "%s: as102_dvb_register_frontend() failed: %d",
__func__, ret);
@@ -257,7 +378,10 @@ edmxinit:
void as102_dvb_unregister(struct as102_dev_t *as102_dev)
{
/* unregister as102 frontend */
- as102_dvb_unregister_fe(&as102_dev->dvb_fe);
+ dvb_unregister_frontend(as102_dev->dvb_fe);
+
+ /* detach frontend */
+ dvb_frontend_detach(as102_dev->dvb_fe);
/* unregister demux device */
dvb_dmxdev_release(&as102_dev->dvb_dmxdev);
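The registration hunk above moves to the standard dvb_attach() idiom: the macro resolves the attach symbol, takes a reference on the module that provides it, and returns NULL on failure, so the error path is a plain NULL check. The unregister hunk shows the matching teardown order, dvb_unregister_frontend() first, then dvb_frontend_detach() to drop that module reference. A generic sketch of the pairing (demod_attach, cfg and struct bridge_adap are hypothetical):

/* Generic attach/register pairing and its teardown (names invented). */
static int bridge_frontend_init(struct bridge_adap *adap)
{
	adap->fe = dvb_attach(demod_attach, &adap->cfg, &adap->i2c);
	if (!adap->fe)
		return -ENODEV;

	if (dvb_register_frontend(&adap->dvb_adap, adap->fe)) {
		dvb_frontend_detach(adap->fe);
		adap->fe = NULL;
		return -ENODEV;
	}
	return 0;
}

static void bridge_frontend_exit(struct bridge_adap *adap)
{
	dvb_unregister_frontend(adap->fe);
	dvb_frontend_detach(adap->fe);
}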
diff --git a/drivers/staging/media/as102/as102_drv.h b/drivers/media/usb/as102/as102_drv.h
index a06837dcc05d..aee2d76e8dfc 100644
--- a/drivers/staging/media/as102/as102_drv.h
+++ b/drivers/media/usb/as102/as102_drv.h
@@ -11,33 +11,25 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#ifndef _AS102_DRV_H
+#define _AS102_DRV_H
#include <linux/usb.h>
#include <dvb_demux.h>
#include <dvb_frontend.h>
#include <dmxdev.h>
+#include "as10x_handle.h"
#include "as10x_cmd.h"
#include "as102_usb_drv.h"
#define DRIVER_FULL_NAME "Abilis Systems as10x usb driver"
#define DRIVER_NAME "as10x_usb"
-extern int as102_debug;
#define debug as102_debug
extern struct usb_driver as102_usb_driver;
extern int elna_enable;
-#define dprintk(debug, args...) \
- do { if (debug) { \
- pr_debug("%s: ", __func__); \
- printk(args); \
- } } while (0)
-
#define AS102_DEVICE_MAJOR 192
#define AS102_USB_BUF_SIZE 512
@@ -71,17 +63,10 @@ struct as102_dev_t {
uint8_t elna_cfg;
struct dvb_adapter dvb_adap;
- struct dvb_frontend dvb_fe;
+ struct dvb_frontend *dvb_fe;
struct dvb_demux dvb_dmx;
struct dmxdev dvb_dmxdev;
- /* demodulator stats */
- struct as10x_demod_stats demod_stats;
- /* signal strength */
- uint16_t signal_strength;
- /* bit error rate */
- uint32_t ber;
-
/* timer handle to trig ts stream download */
struct timer_list timer_handle;
@@ -95,5 +80,4 @@ struct as102_dev_t {
int as102_dvb_register(struct as102_dev_t *dev);
void as102_dvb_unregister(struct as102_dev_t *dev);
-int as102_dvb_register_fe(struct as102_dev_t *dev, struct dvb_frontend *fe);
-int as102_dvb_unregister_fe(struct dvb_frontend *dev);
+#endif
diff --git a/drivers/staging/media/as102/as102_fw.c b/drivers/media/usb/as102/as102_fw.c
index f33f752c0aad..07d08c49f4d4 100644
--- a/drivers/staging/media/as102/as102_fw.c
+++ b/drivers/media/usb/as102/as102_fw.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
diff --git a/drivers/staging/media/as102/as102_fw.h b/drivers/media/usb/as102/as102_fw.h
index 4bfc6849d95a..2732b784216d 100644
--- a/drivers/staging/media/as102/as102_fw.h
+++ b/drivers/media/usb/as102/as102_fw.h
@@ -11,10 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define MAX_FW_PKT_SIZE 64
diff --git a/drivers/staging/media/as102/as102_usb_drv.c b/drivers/media/usb/as102/as102_usb_drv.c
index e6f6278e97d6..3f669066ccf6 100644
--- a/drivers/staging/media/as102/as102_usb_drv.c
+++ b/drivers/media/usb/as102/as102_usb_drv.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -104,21 +100,22 @@ static int as102_usb_xfer_cmd(struct as10x_bus_adapter_t *bus_adap,
send_buf, send_buf_len,
USB_CTRL_SET_TIMEOUT /* 200 */);
if (ret < 0) {
- dprintk(debug, "usb_control_msg(send) failed, err %i\n",
- ret);
+ dev_dbg(&bus_adap->usb_dev->dev,
+ "usb_control_msg(send) failed, err %i\n", ret);
return ret;
}
if (ret != send_buf_len) {
- dprintk(debug, "only wrote %d of %d bytes\n",
- ret, send_buf_len);
+ dev_dbg(&bus_adap->usb_dev->dev,
+ "only wrote %d of %d bytes\n", ret, send_buf_len);
return -1;
}
}
if (recv_buf != NULL) {
#ifdef TRACE
- dprintk(debug, "want to read: %d bytes\n", recv_buf_len);
+		dev_dbg(&bus_adap->usb_dev->dev,
+ "want to read: %d bytes\n", recv_buf_len);
#endif
ret = usb_control_msg(bus_adap->usb_dev,
usb_rcvctrlpipe(bus_adap->usb_dev, 0),
@@ -130,12 +127,13 @@ static int as102_usb_xfer_cmd(struct as10x_bus_adapter_t *bus_adap,
recv_buf, recv_buf_len,
USB_CTRL_GET_TIMEOUT /* 200 */);
if (ret < 0) {
- dprintk(debug, "usb_control_msg(recv) failed, err %i\n",
- ret);
+ dev_dbg(&bus_adap->usb_dev->dev,
+ "usb_control_msg(recv) failed, err %i\n", ret);
return ret;
}
#ifdef TRACE
- dprintk(debug, "read %d bytes\n", recv_buf_len);
+		dev_dbg(&bus_adap->usb_dev->dev,
+ "read %d bytes\n", recv_buf_len);
#endif
}
@@ -147,28 +145,29 @@ static int as102_send_ep1(struct as10x_bus_adapter_t *bus_adap,
int send_buf_len,
int swap32)
{
- int ret = 0, actual_len;
+ int ret, actual_len;
ret = usb_bulk_msg(bus_adap->usb_dev,
usb_sndbulkpipe(bus_adap->usb_dev, 1),
send_buf, send_buf_len, &actual_len, 200);
if (ret) {
- dprintk(debug, "usb_bulk_msg(send) failed, err %i\n", ret);
+ dev_dbg(&bus_adap->usb_dev->dev,
+ "usb_bulk_msg(send) failed, err %i\n", ret);
return ret;
}
if (actual_len != send_buf_len) {
- dprintk(debug, "only wrote %d of %d bytes\n",
- actual_len, send_buf_len);
+ dev_dbg(&bus_adap->usb_dev->dev, "only wrote %d of %d bytes\n",
+ actual_len, send_buf_len);
return -1;
}
- return ret ? ret : actual_len;
+ return actual_len;
}
static int as102_read_ep2(struct as10x_bus_adapter_t *bus_adap,
unsigned char *recv_buf, int recv_buf_len)
{
- int ret = 0, actual_len;
+ int ret, actual_len;
if (recv_buf == NULL)
return -EINVAL;
@@ -177,16 +176,17 @@ static int as102_read_ep2(struct as10x_bus_adapter_t *bus_adap,
usb_rcvbulkpipe(bus_adap->usb_dev, 2),
recv_buf, recv_buf_len, &actual_len, 200);
if (ret) {
- dprintk(debug, "usb_bulk_msg(recv) failed, err %i\n", ret);
+ dev_dbg(&bus_adap->usb_dev->dev,
+ "usb_bulk_msg(recv) failed, err %i\n", ret);
return ret;
}
if (actual_len != recv_buf_len) {
- dprintk(debug, "only read %d of %d bytes\n",
- actual_len, recv_buf_len);
+ dev_dbg(&bus_adap->usb_dev->dev, "only read %d of %d bytes\n",
+ actual_len, recv_buf_len);
return -1;
}
- return ret ? ret : actual_len;
+ return actual_len;
}
static struct as102_priv_ops_t as102_priv_ops = {
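usb_bulk_msg() reports success as a zero return with the transferred byte count in actual_len; since both helpers above have already returned on any non-zero ret or on a short transfer, the old "return ret ? ret : actual_len;" tail collapses to "return actual_len;". The general shape of such a checked bulk transfer (the endpoint number, timeout and error code are illustrative):

/* Checked bulk write: fail on transfer error or short transfer. */
static int bulk_write_checked(struct usb_device *udev, void *buf, int len)
{
	int actual_len, ret;

	ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, 1),
			   buf, len, &actual_len, 200);
	if (ret)
		return ret;		/* URB/transfer level error */
	if (actual_len != len)
		return -EIO;		/* device accepted fewer bytes */
	return actual_len;
}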
@@ -211,7 +211,8 @@ static int as102_submit_urb_stream(struct as102_dev_t *dev, struct urb *urb)
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err)
- dprintk(debug, "%s: usb_submit_urb failed\n", __func__);
+ dev_dbg(&urb->dev->dev,
+ "%s: usb_submit_urb failed\n", __func__);
return err;
}
@@ -256,7 +257,8 @@ static int as102_alloc_usb_stream_buffer(struct as102_dev_t *dev)
GFP_KERNEL,
&dev->dma_addr);
if (!dev->stream) {
- dprintk(debug, "%s: usb_buffer_alloc failed\n", __func__);
+ dev_dbg(&dev->bus_adap.usb_dev->dev,
+ "%s: usb_buffer_alloc failed\n", __func__);
return -ENOMEM;
}
@@ -268,7 +270,8 @@ static int as102_alloc_usb_stream_buffer(struct as102_dev_t *dev)
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (urb == NULL) {
- dprintk(debug, "%s: usb_alloc_urb failed\n", __func__);
+ dev_dbg(&dev->bus_adap.usb_dev->dev,
+ "%s: usb_alloc_urb failed\n", __func__);
as102_free_usb_stream_buffer(dev);
return -ENOMEM;
}
diff --git a/drivers/staging/media/as102/as102_usb_drv.h b/drivers/media/usb/as102/as102_usb_drv.h
index 1ad1ec52b11e..4fb1baa8cac0 100644
--- a/drivers/staging/media/as102/as102_usb_drv.h
+++ b/drivers/media/usb/as102/as102_usb_drv.h
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _AS102_USB_DRV_H_
#define _AS102_USB_DRV_H_
diff --git a/drivers/staging/media/as102/as10x_cmd.c b/drivers/media/usb/as102/as10x_cmd.c
index 9e49f15a7c9f..870617994410 100644
--- a/drivers/staging/media/as102/as10x_cmd.c
+++ b/drivers/media/usb/as102/as10x_cmd.c
@@ -12,15 +12,10 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include "as102_drv.h"
-#include "as10x_types.h"
#include "as10x_cmd.h"
/**
@@ -126,7 +121,7 @@ int as10x_cmd_set_tune(struct as10x_bus_adapter_t *adap,
/* fill command */
preq->body.set_tune.req.proc_id = cpu_to_le16(CONTROL_PROC_SETTUNE);
- preq->body.set_tune.req.args.freq = cpu_to_le32(ptune->freq);
+ preq->body.set_tune.req.args.freq = (__force __u32)cpu_to_le32(ptune->freq);
preq->body.set_tune.req.args.bandwidth = ptune->bandwidth;
preq->body.set_tune.req.args.hier_select = ptune->hier_select;
preq->body.set_tune.req.args.modulation = ptune->modulation;
@@ -204,9 +199,9 @@ int as10x_cmd_get_tune_status(struct as10x_bus_adapter_t *adap,
/* Response OK -> get response data */
pstatus->tune_state = prsp->body.get_tune_status.rsp.sts.tune_state;
pstatus->signal_strength =
- le16_to_cpu(prsp->body.get_tune_status.rsp.sts.signal_strength);
- pstatus->PER = le16_to_cpu(prsp->body.get_tune_status.rsp.sts.PER);
- pstatus->BER = le16_to_cpu(prsp->body.get_tune_status.rsp.sts.BER);
+ le16_to_cpu((__force __le16)prsp->body.get_tune_status.rsp.sts.signal_strength);
+ pstatus->PER = le16_to_cpu((__force __le16)prsp->body.get_tune_status.rsp.sts.PER);
+ pstatus->BER = le16_to_cpu((__force __le16)prsp->body.get_tune_status.rsp.sts.BER);
out:
return error;
@@ -264,7 +259,7 @@ int as10x_cmd_get_tps(struct as10x_bus_adapter_t *adap, struct as10x_tps *ptps)
ptps->transmission_mode = prsp->body.get_tps.rsp.tps.transmission_mode;
ptps->DVBH_mask_HP = prsp->body.get_tps.rsp.tps.DVBH_mask_HP;
ptps->DVBH_mask_LP = prsp->body.get_tps.rsp.tps.DVBH_mask_LP;
- ptps->cell_ID = le16_to_cpu(prsp->body.get_tps.rsp.tps.cell_ID);
+ ptps->cell_ID = le16_to_cpu((__force __le16)prsp->body.get_tps.rsp.tps.cell_ID);
out:
return error;
@@ -315,13 +310,13 @@ int as10x_cmd_get_demod_stats(struct as10x_bus_adapter_t *adap,
/* Response OK -> get response data */
pdemod_stats->frame_count =
- le32_to_cpu(prsp->body.get_demod_stats.rsp.stats.frame_count);
+ le32_to_cpu((__force __le32)prsp->body.get_demod_stats.rsp.stats.frame_count);
pdemod_stats->bad_frame_count =
- le32_to_cpu(prsp->body.get_demod_stats.rsp.stats.bad_frame_count);
+ le32_to_cpu((__force __le32)prsp->body.get_demod_stats.rsp.stats.bad_frame_count);
pdemod_stats->bytes_fixed_by_rs =
- le32_to_cpu(prsp->body.get_demod_stats.rsp.stats.bytes_fixed_by_rs);
+ le32_to_cpu((__force __le32)prsp->body.get_demod_stats.rsp.stats.bytes_fixed_by_rs);
pdemod_stats->mer =
- le16_to_cpu(prsp->body.get_demod_stats.rsp.stats.mer);
+ le16_to_cpu((__force __le16)prsp->body.get_demod_stats.rsp.stats.mer);
pdemod_stats->has_started =
prsp->body.get_demod_stats.rsp.stats.has_started;
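The __force casts above silence sparse where the wire-format structures still declare plain uint16_t/uint32_t fields; the header hunks that follow convert the command and response identifiers to proper __le16/__le32 types, and wherever a field carries the right annotation, le16_to_cpu()/le32_to_cpu() type-check without any cast. A minimal illustration with a made-up wire-format struct:

#include <linux/kernel.h>
#include <asm/byteorder.h>

/* Made-up response layout: fields are stored little-endian on the wire. */
struct demo_rsp {
	__le16 proc_id;
	__le32 frame_count;
	u8 error;
} __packed;

static u32 demo_frame_count(const struct demo_rsp *rsp)
{
	/* sparse warns if a non-__le32 value is passed to le32_to_cpu() */
	return le32_to_cpu(rsp->frame_count);
}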
diff --git a/drivers/staging/media/as102/as10x_cmd.h b/drivers/media/usb/as102/as10x_cmd.h
index e21ec6c702a9..e06b84e2ff79 100644
--- a/drivers/staging/media/as102/as10x_cmd.h
+++ b/drivers/media/usb/as102/as10x_cmd.h
@@ -11,19 +11,13 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _AS10X_CMD_H_
#define _AS10X_CMD_H_
-#ifdef __KERNEL__
#include <linux/kernel.h>
-#endif
-#include "as10x_types.h"
+#include "as102_fe_types.h"
/*********************************/
/* MACRO DEFINITIONS */
@@ -98,12 +92,12 @@ union as10x_turn_on {
/* request */
struct {
/* request identifier */
- uint16_t proc_id;
+ __le16 proc_id;
} __packed req;
/* response */
struct {
/* response identifier */
- uint16_t proc_id;
+ __le16 proc_id;
/* error */
uint8_t error;
} __packed rsp;
@@ -113,12 +107,12 @@ union as10x_turn_off {
/* request */
struct {
/* request identifier */
- uint16_t proc_id;
+ __le16 proc_id;
} __packed req;
/* response */
struct {
/* response identifier */
- uint16_t proc_id;
+ __le16 proc_id;
/* error */
uint8_t err;
} __packed rsp;
@@ -128,14 +122,14 @@ union as10x_set_tune {
/* request */
struct {
/* request identifier */
- uint16_t proc_id;
+ __le16 proc_id;
/* tune params */
struct as10x_tune_args args;
} __packed req;
/* response */
struct {
/* response identifier */
- uint16_t proc_id;
+ __le16 proc_id;
/* response error */
uint8_t error;
} __packed rsp;
@@ -145,12 +139,12 @@ union as10x_get_tune_status {
/* request */
struct {
/* request identifier */
- uint16_t proc_id;
+ __le16 proc_id;
} __packed req;
/* response */
struct {
/* response identifier */
- uint16_t proc_id;
+ __le16 proc_id;
/* response error */
uint8_t error;
/* tune status */
@@ -162,12 +156,12 @@ union as10x_get_tps {
/* request */
struct {
/* request identifier */
- uint16_t proc_id;
+ __le16 proc_id;
} __packed req;
/* response */
struct {
/* response identifier */
- uint16_t proc_id;
+ __le16 proc_id;
/* response error */
uint8_t error;
/* tps details */
@@ -179,12 +173,12 @@ union as10x_common {
/* request */
struct {
/* request identifier */
- uint16_t proc_id;
+ __le16 proc_id;
} __packed req;
/* response */
struct {
/* response identifier */
- uint16_t proc_id;
+ __le16 proc_id;
/* response error */
uint8_t error;
} __packed rsp;
@@ -194,9 +188,9 @@ union as10x_add_pid_filter {
/* request */
struct {
/* request identifier */
- uint16_t proc_id;
+ __le16 proc_id;
/* PID to filter */
- uint16_t pid;
+ __le16 pid;
/* stream type (MPE, PSI/SI or PES )*/
uint8_t stream_type;
/* PID index in filter table */
@@ -205,7 +199,7 @@ union as10x_add_pid_filter {
/* response */
struct {
/* response identifier */
- uint16_t proc_id;
+ __le16 proc_id;
/* response error */
uint8_t error;
/* Filter id */
@@ -217,14 +211,14 @@ union as10x_del_pid_filter {
/* request */
struct {
/* request identifier */
- uint16_t proc_id;
+ __le16 proc_id;
/* PID to remove */
- uint16_t pid;
+ __le16 pid;
} __packed req;
/* response */
struct {
/* response identifier */
- uint16_t proc_id;
+ __le16 proc_id;
/* response error */
uint8_t error;
} __packed rsp;
@@ -234,12 +228,12 @@ union as10x_start_streaming {
/* request */
struct {
/* request identifier */
- uint16_t proc_id;
+ __le16 proc_id;
} __packed req;
/* response */
struct {
/* response identifier */
- uint16_t proc_id;
+ __le16 proc_id;
/* error */
uint8_t error;
} __packed rsp;
@@ -249,12 +243,12 @@ union as10x_stop_streaming {
/* request */
struct {
/* request identifier */
- uint16_t proc_id;
+ __le16 proc_id;
} __packed req;
/* response */
struct {
/* response identifier */
- uint16_t proc_id;
+ __le16 proc_id;
/* error */
uint8_t error;
} __packed rsp;
@@ -264,12 +258,12 @@ union as10x_get_demod_stats {
/* request */
struct {
/* request identifier */
- uint16_t proc_id;
+ __le16 proc_id;
} __packed req;
/* response */
struct {
/* response identifier */
- uint16_t proc_id;
+ __le16 proc_id;
/* error */
uint8_t error;
/* demod stats */
@@ -281,12 +275,12 @@ union as10x_get_impulse_resp {
/* request */
struct {
/* request identifier */
- uint16_t proc_id;
+ __le16 proc_id;
} __packed req;
/* response */
struct {
/* response identifier */
- uint16_t proc_id;
+ __le16 proc_id;
/* error */
uint8_t error;
/* impulse response ready */
@@ -298,22 +292,22 @@ union as10x_fw_context {
/* request */
struct {
/* request identifier */
- uint16_t proc_id;
+ __le16 proc_id;
/* value to write (for set context)*/
struct as10x_register_value reg_val;
/* context tag */
- uint16_t tag;
+ __le16 tag;
/* context request type */
- uint16_t type;
+ __le16 type;
} __packed req;
/* response */
struct {
/* response identifier */
- uint16_t proc_id;
+ __le16 proc_id;
/* value read (for get context) */
struct as10x_register_value reg_val;
/* context request type */
- uint16_t type;
+ __le16 type;
/* error */
uint8_t error;
} __packed rsp;
@@ -323,7 +317,7 @@ union as10x_set_register {
/* request */
struct {
/* response identifier */
- uint16_t proc_id;
+ __le16 proc_id;
/* register description */
struct as10x_register_addr reg_addr;
/* register content */
@@ -332,7 +326,7 @@ union as10x_set_register {
/* response */
struct {
/* response identifier */
- uint16_t proc_id;
+ __le16 proc_id;
/* error */
uint8_t error;
} __packed rsp;
@@ -342,14 +336,14 @@ union as10x_get_register {
/* request */
struct {
/* response identifier */
- uint16_t proc_id;
+ __le16 proc_id;
/* register description */
struct as10x_register_addr reg_addr;
} __packed req;
/* response */
struct {
/* response identifier */
- uint16_t proc_id;
+ __le16 proc_id;
/* error */
uint8_t error;
/* register content */
@@ -361,24 +355,24 @@ union as10x_cfg_change_mode {
/* request */
struct {
/* request identifier */
- uint16_t proc_id;
+ __le16 proc_id;
/* mode */
uint8_t mode;
} __packed req;
/* response */
struct {
/* response identifier */
- uint16_t proc_id;
+ __le16 proc_id;
/* error */
uint8_t error;
} __packed rsp;
} __packed;
struct as10x_cmd_header_t {
- uint16_t req_id;
- uint16_t prog;
- uint16_t version;
- uint16_t data_len;
+ __le16 req_id;
+ __le16 prog;
+ __le16 version;
+ __le16 data_len;
} __packed;
#define DUMP_BLOCK_SIZE 16
@@ -387,18 +381,18 @@ union as10x_dump_memory {
/* request */
struct {
/* request identifier */
- uint16_t proc_id;
+ __le16 proc_id;
/* dump memory type request */
uint8_t dump_req;
/* register description */
struct as10x_register_addr reg_addr;
/* nb blocks to read */
- uint16_t num_blocks;
+ __le16 num_blocks;
} __packed req;
/* response */
struct {
/* response identifier */
- uint16_t proc_id;
+ __le16 proc_id;
/* error */
uint8_t error;
/* dump response */
@@ -406,8 +400,8 @@ union as10x_dump_memory {
/* data */
union {
uint8_t data8[DUMP_BLOCK_SIZE];
- uint16_t data16[DUMP_BLOCK_SIZE / sizeof(uint16_t)];
- uint32_t data32[DUMP_BLOCK_SIZE / sizeof(uint32_t)];
+ __le16 data16[DUMP_BLOCK_SIZE / sizeof(__le16)];
+ __le32 data32[DUMP_BLOCK_SIZE / sizeof(__le32)];
} __packed u;
} __packed rsp;
} __packed;
@@ -415,13 +409,13 @@ union as10x_dump_memory {
union as10x_dumplog_memory {
struct {
/* request identifier */
- uint16_t proc_id;
+ __le16 proc_id;
/* dump memory type request */
uint8_t dump_req;
} __packed req;
struct {
/* request identifier */
- uint16_t proc_id;
+ __le16 proc_id;
/* error */
uint8_t error;
/* dump response */
@@ -434,13 +428,13 @@ union as10x_dumplog_memory {
union as10x_raw_data {
/* request */
struct {
- uint16_t proc_id;
+ __le16 proc_id;
uint8_t data[64 - sizeof(struct as10x_cmd_header_t)
- 2 /* proc_id */];
} __packed req;
/* response */
struct {
- uint16_t proc_id;
+ __le16 proc_id;
uint8_t error;
uint8_t data[64 - sizeof(struct as10x_cmd_header_t)
- 2 /* proc_id */ - 1 /* rc */];
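Note: the hunks above switch every on-the-wire 16/32-bit field from uint16_t/uint32_t to __le16/__le32 so that sparse can type-check endianness conversions. A minimal sketch of the pattern, assuming a hypothetical wire struct (names are illustrative, not from the driver):

    #include <linux/types.h>
    #include <linux/compiler.h>
    #include <asm/byteorder.h>

    struct demo_req {
    	__le16 proc_id;		/* little-endian on the wire */
    	u8 error;
    } __packed;

    static void demo_fill(struct demo_req *req, u16 id)
    {
    	/* host-order value must be converted before it goes on the bus */
    	req->proc_id = cpu_to_le16(id);
    }

    static u16 demo_read(const struct demo_req *req)
    {
    	/* and converted back to host order when read */
    	return le16_to_cpu(req->proc_id);
    }

With plain uint16_t fields these conversions compile either way and sparse stays silent; with __le16 a missing cpu_to_le16()/le16_to_cpu() produces a warning.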
diff --git a/drivers/staging/media/as102/as10x_cmd_cfg.c b/drivers/media/usb/as102/as10x_cmd_cfg.c
index b1e300d88753..c87f2ca223a2 100644
--- a/drivers/staging/media/as102/as10x_cmd_cfg.c
+++ b/drivers/media/usb/as102/as10x_cmd_cfg.c
@@ -11,15 +11,10 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include "as102_drv.h"
-#include "as10x_types.h"
#include "as10x_cmd.h"
/***************************/
@@ -74,7 +69,7 @@ int as10x_cmd_get_context(struct as10x_bus_adapter_t *adap, uint16_t tag,
if (error == 0) {
/* Response OK -> get response data */
- *pvalue = le32_to_cpu(prsp->body.context.rsp.reg_val.u.value32);
+ *pvalue = le32_to_cpu((__force __le32)prsp->body.context.rsp.reg_val.u.value32);
/* value returned is always a 32-bit value */
}
@@ -106,7 +101,7 @@ int as10x_cmd_set_context(struct as10x_bus_adapter_t *adap, uint16_t tag,
/* fill command */
pcmd->body.context.req.proc_id = cpu_to_le16(CONTROL_PROC_CONTEXT);
/* pcmd->body.context.req.reg_val.mode initialization is not required */
- pcmd->body.context.req.reg_val.u.value32 = cpu_to_le32(value);
+ pcmd->body.context.req.reg_val.u.value32 = (__force u32)cpu_to_le32(value);
pcmd->body.context.req.tag = cpu_to_le16(tag);
pcmd->body.context.req.type = cpu_to_le16(SET_CONTEXT_DATA);
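Note: reg_val.u.value32 remains a plain u32 because the same union member is reused for raw data elsewhere, so the two conversion sites above add __force casts to tell sparse the endianness mismatch is intentional. The casts do not change the generated code. A standalone sketch of the idiom, with a hypothetical struct:

    #include <linux/types.h>
    #include <linux/compiler.h>
    #include <asm/byteorder.h>

    struct demo_regval {
    	u32 value32;	/* declared plain, but holds an LE value in this path */
    };

    static void demo_store_le(struct demo_regval *rv, u32 host_val)
    {
    	/* __force: we know value32 carries little-endian data here */
    	rv->value32 = (__force u32)cpu_to_le32(host_val);
    }

    static u32 demo_load_le(const struct demo_regval *rv)
    {
    	return le32_to_cpu((__force __le32)rv->value32);
    }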
diff --git a/drivers/staging/media/as102/as10x_cmd_stream.c b/drivers/media/usb/as102/as10x_cmd_stream.c
index 1088ca1fe92f..126aea976639 100644
--- a/drivers/staging/media/as102/as10x_cmd_stream.c
+++ b/drivers/media/usb/as102/as10x_cmd_stream.c
@@ -11,10 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
diff --git a/drivers/staging/media/as102/as10x_handle.h b/drivers/media/usb/as102/as10x_handle.h
index 5638b191b780..d6b58c770500 100644
--- a/drivers/staging/media/as102/as10x_handle.h
+++ b/drivers/media/usb/as102/as10x_handle.h
@@ -11,12 +11,9 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#ifdef __KERNEL__
+#ifndef _AS10X_HANDLE_H
+#define _AS10X_HANDLE_H
struct as10x_bus_adapter_t;
struct as102_dev_t;
diff --git a/drivers/media/usb/au0828/au0828-cards.c b/drivers/media/usb/au0828/au0828-cards.c
index 2c6b7da137ed..9eb77ac2153b 100644
--- a/drivers/media/usb/au0828/au0828-cards.c
+++ b/drivers/media/usb/au0828/au0828-cards.c
@@ -46,6 +46,8 @@ struct au0828_board au0828_boards[] = {
.name = "Hauppauge HVR850",
.tuner_type = TUNER_XC5000,
.tuner_addr = 0x61,
+ .has_ir_i2c = 1,
+ .has_analog = 1,
.i2c_clk_divider = AU0828_I2C_CLK_250KHZ,
.input = {
{
@@ -72,12 +74,7 @@ struct au0828_board au0828_boards[] = {
.tuner_type = TUNER_XC5000,
.tuner_addr = 0x61,
.has_ir_i2c = 1,
- /* The au0828 hardware i2c implementation does not properly
- support the xc5000's i2c clock stretching. So we need to
- lower the clock frequency enough where the 15us clock
- stretch fits inside of a normal clock cycle, or else the
- au0828 fails to set the STOP bit. A 30 KHz clock puts the
- clock pulse width at 18us */
+ .has_analog = 1,
.i2c_clk_divider = AU0828_I2C_CLK_250KHZ,
.input = {
{
@@ -101,20 +98,20 @@ struct au0828_board au0828_boards[] = {
},
[AU0828_BOARD_HAUPPAUGE_HVR950Q_MXL] = {
.name = "Hauppauge HVR950Q rev xxF8",
- .tuner_type = UNSET,
- .tuner_addr = ADDR_UNSET,
+ .tuner_type = TUNER_XC5000,
+ .tuner_addr = 0x61,
.i2c_clk_divider = AU0828_I2C_CLK_250KHZ,
},
[AU0828_BOARD_DVICO_FUSIONHDTV7] = {
.name = "DViCO FusionHDTV USB",
- .tuner_type = UNSET,
- .tuner_addr = ADDR_UNSET,
+ .tuner_type = TUNER_XC5000,
+ .tuner_addr = 0x61,
.i2c_clk_divider = AU0828_I2C_CLK_250KHZ,
},
[AU0828_BOARD_HAUPPAUGE_WOODBURY] = {
.name = "Hauppauge Woodbury",
- .tuner_type = UNSET,
- .tuner_addr = ADDR_UNSET,
+ .tuner_type = TUNER_NXP_TDA18271,
+ .tuner_addr = 0x60,
.i2c_clk_divider = AU0828_I2C_CLK_250KHZ,
},
};
@@ -142,8 +139,7 @@ int au0828_tuner_callback(void *priv, int component, int command, int arg)
mdelay(10);
return 0;
} else {
- printk(KERN_ERR
- "%s(): Unknown command.\n", __func__);
+ pr_err("%s(): Unknown command.\n", __func__);
return -EINVAL;
}
break;
@@ -177,12 +173,12 @@ static void hauppauge_eeprom(struct au0828_dev *dev, u8 *eeprom_data)
case 72500: /* WinTV-HVR950q (OEM, No IR, ATSC/QAM */
break;
default:
- printk(KERN_WARNING "%s: warning: "
- "unknown hauppauge model #%d\n", __func__, tv.model);
+ pr_warn("%s: warning: unknown hauppauge model #%d\n",
+ __func__, tv.model);
break;
}
- printk(KERN_INFO "%s: hauppauge eeprom: model=%d\n",
+ pr_info("%s: hauppauge eeprom: model=%d\n",
__func__, tv.model);
}
@@ -228,16 +224,16 @@ void au0828_card_analog_fe_setup(struct au0828_dev *dev)
sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
"au8522", 0x8e >> 1, NULL);
if (sd == NULL)
- printk(KERN_ERR "analog subdev registration failed\n");
+ pr_err("analog subdev registration failed\n");
}
/* Setup tuners */
- if (dev->board.tuner_type != TUNER_ABSENT) {
+ if (dev->board.tuner_type != TUNER_ABSENT && dev->board.has_analog) {
/* Load the tuner module, which does the attach */
sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
"tuner", dev->board.tuner_addr, NULL);
if (sd == NULL)
- printk(KERN_ERR "tuner subdev registration fail\n");
+ pr_err("tuner subdev registration fail\n");
tun_setup.mode_mask = mode_mask;
tun_setup.type = dev->board.tuner_type;
diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
index 56025e689442..bc064803b6c7 100644
--- a/drivers/media/usb/au0828/au0828-core.c
+++ b/drivers/media/usb/au0828/au0828-core.c
@@ -19,14 +19,14 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "au0828.h"
+
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
#include <linux/mutex.h>
-#include "au0828.h"
-
/*
* 1 = General debug messages
* 2 = USB handling
@@ -90,7 +90,7 @@ static int send_control_msg(struct au0828_dev *dev, u16 request, u32 value,
status = min(status, 0);
if (status < 0) {
- printk(KERN_ERR "%s() Failed sending control message, error %d.\n",
+ pr_err("%s() Failed sending control message, error %d.\n",
__func__, status);
}
@@ -115,7 +115,7 @@ static int recv_control_msg(struct au0828_dev *dev, u16 request, u32 value,
status = min(status, 0);
if (status < 0) {
- printk(KERN_ERR "%s() Failed receiving control message, error %d.\n",
+ pr_err("%s() Failed receiving control message, error %d.\n",
__func__, status);
}
@@ -153,9 +153,7 @@ static void au0828_usb_disconnect(struct usb_interface *interface)
dprintk(1, "%s()\n", __func__);
-#ifdef CONFIG_VIDEO_AU0828_RC
au0828_rc_unregister(dev);
-#endif
/* Digital TV */
au0828_dvb_unregister(dev);
@@ -199,15 +197,14 @@ static int au0828_usb_probe(struct usb_interface *interface,
* not enough even for most Digital TV streams.
*/
if (usbdev->speed != USB_SPEED_HIGH && disable_usb_speed_check == 0) {
- printk(KERN_ERR "au0828: Device initialization failed.\n");
- printk(KERN_ERR "au0828: Device must be connected to a "
- "high-speed USB 2.0 port.\n");
+ pr_err("au0828: Device initialization failed.\n");
+ pr_err("au0828: Device must be connected to a high-speed USB 2.0 port.\n");
return -ENODEV;
}
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (dev == NULL) {
- printk(KERN_ERR "%s() Unable to allocate memory\n", __func__);
+ pr_err("%s() Unable to allocate memory\n", __func__);
return -ENOMEM;
}
@@ -266,10 +263,8 @@ static int au0828_usb_probe(struct usb_interface *interface,
pr_err("%s() au0282_dev_register failed\n",
__func__);
-#ifdef CONFIG_VIDEO_AU0828_RC
/* Remote controller */
au0828_rc_register(dev);
-#endif
/*
* Store the pointer to the au0828_dev so it can be accessed in
@@ -277,7 +272,7 @@ static int au0828_usb_probe(struct usb_interface *interface,
*/
usb_set_intfdata(interface, dev);
- printk(KERN_INFO "Registered device AU0828 [%s]\n",
+ pr_info("Registered device AU0828 [%s]\n",
dev->board.name == NULL ? "Unset" : dev->board.name);
mutex_unlock(&dev->lock);
@@ -285,13 +280,56 @@ static int au0828_usb_probe(struct usb_interface *interface,
return retval;
}
+static int au0828_suspend(struct usb_interface *interface,
+ pm_message_t message)
+{
+ struct au0828_dev *dev = usb_get_intfdata(interface);
+
+ if (!dev)
+ return 0;
+
+ pr_info("Suspend\n");
+
+ au0828_rc_suspend(dev);
+ au0828_v4l2_suspend(dev);
+ au0828_dvb_suspend(dev);
+
+ /* FIXME: should suspend also ATV/DTV */
+
+ return 0;
+}
+
+static int au0828_resume(struct usb_interface *interface)
+{
+ struct au0828_dev *dev = usb_get_intfdata(interface);
+ if (!dev)
+ return 0;
+
+ pr_info("Resume\n");
+
+ /* Power Up the bridge */
+ au0828_write(dev, REG_600, 1 << 4);
+
+ /* Bring up the GPIO's and supporting devices */
+ au0828_gpio_setup(dev);
+
+ au0828_rc_resume(dev);
+ au0828_v4l2_resume(dev);
+ au0828_dvb_resume(dev);
+
+ /* FIXME: should resume also ATV/DTV */
+
+ return 0;
+}
+
static struct usb_driver au0828_usb_driver = {
- .name = DRIVER_NAME,
+ .name = KBUILD_MODNAME,
.probe = au0828_usb_probe,
.disconnect = au0828_usb_disconnect,
.id_table = au0828_usb_id_table,
-
- /* FIXME: Add suspend and resume functions */
+ .suspend = au0828_suspend,
+ .resume = au0828_resume,
+ .reset_resume = au0828_resume,
};
static int __init au0828_init(void)
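Note: the new au0828_suspend()/au0828_resume() replace the old "FIXME: Add suspend and resume functions" by hooking into the standard USB power-management callbacks. A minimal sketch of that hookup, under the assumption of a hypothetical demo_* driver (handler bodies are placeholders; probe/disconnect/id_table are elided):

    #include <linux/module.h>
    #include <linux/usb.h>

    struct demo_dev;	/* driver-private state, details irrelevant here */

    static int demo_suspend(struct usb_interface *intf, pm_message_t msg)
    {
    	struct demo_dev *dev = usb_get_intfdata(intf);

    	if (!dev)
    		return 0;
    	/* quiesce IR polling, analog streaming and the DVB frontend here */
    	return 0;
    }

    static int demo_resume(struct usb_interface *intf)
    {
    	struct demo_dev *dev = usb_get_intfdata(intf);

    	if (!dev)
    		return 0;
    	/* re-program the bridge and GPIOs, then restart whatever was active */
    	return 0;
    }

    static struct usb_driver demo_usb_driver = {
    	.name		= KBUILD_MODNAME,
    	/* .probe, .disconnect and .id_table omitted for brevity */
    	.suspend	= demo_suspend,
    	.resume		= demo_resume,
    	/* the device loses register state across a bus reset, so the
    	 * normal resume path doubles as reset_resume */
    	.reset_resume	= demo_resume,
    };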
@@ -299,27 +337,27 @@ static int __init au0828_init(void)
int ret;
if (au0828_debug & 1)
- printk(KERN_INFO "%s() Debugging is enabled\n", __func__);
+ pr_info("%s() Debugging is enabled\n", __func__);
if (au0828_debug & 2)
- printk(KERN_INFO "%s() USB Debugging is enabled\n", __func__);
+ pr_info("%s() USB Debugging is enabled\n", __func__);
if (au0828_debug & 4)
- printk(KERN_INFO "%s() I2C Debugging is enabled\n", __func__);
+ pr_info("%s() I2C Debugging is enabled\n", __func__);
if (au0828_debug & 8)
- printk(KERN_INFO "%s() Bridge Debugging is enabled\n",
+ pr_info("%s() Bridge Debugging is enabled\n",
__func__);
if (au0828_debug & 16)
- printk(KERN_INFO "%s() IR Debugging is enabled\n",
+ pr_info("%s() IR Debugging is enabled\n",
__func__);
- printk(KERN_INFO "au0828 driver loaded\n");
+ pr_info("au0828 driver loaded\n");
ret = usb_register(&au0828_usb_driver);
if (ret)
- printk(KERN_ERR "usb_register failed, error = %d\n", ret);
+ pr_err("usb_register failed, error = %d\n", ret);
return ret;
}
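Note: the printk(KERN_xxx ...) calls in this file collapse into pr_err()/pr_info()/pr_warn(); the messages keep their module prefix only because au0828.h (see further down) defines pr_fmt before any header that pulls in printk.h. The idiom in isolation, with an illustrative function name:

    /* must appear before the first include that brings in <linux/printk.h> */
    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

    #include <linux/printk.h>

    static void demo_report(int err)
    {
    	/* expands to printk(KERN_ERR "<modname>: request failed, error %d\n", err) */
    	pr_err("request failed, error %d\n", err);
    }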
diff --git a/drivers/media/usb/au0828/au0828-dvb.c b/drivers/media/usb/au0828/au0828-dvb.c
index d8b5d9480279..00ab1563d142 100644
--- a/drivers/media/usb/au0828/au0828-dvb.c
+++ b/drivers/media/usb/au0828/au0828-dvb.c
@@ -19,15 +19,15 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "au0828.h"
+
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
-#include <linux/suspend.h>
#include <media/v4l2-common.h>
#include <media/tuner.h>
-#include "au0828.h"
#include "au8522.h"
#include "xc5000.h"
#include "mxl5007t.h"
@@ -121,13 +121,13 @@ static void urb_completion(struct urb *purb)
return;
}
- if (dev->urb_streaming == 0) {
+ if (!dev->urb_streaming) {
dprintk(2, "%s: not streaming!\n", __func__);
return;
}
if (ptype != PIPE_BULK) {
- printk(KERN_ERR "%s: Unsupported URB type %d\n",
+ pr_err("%s: Unsupported URB type %d\n",
__func__, ptype);
return;
}
@@ -159,7 +159,10 @@ static int stop_urb_transfer(struct au0828_dev *dev)
dprintk(2, "%s()\n", __func__);
- dev->urb_streaming = 0;
+ if (!dev->urb_streaming)
+ return 0;
+
+ dev->urb_streaming = false;
for (i = 0; i < URB_COUNT; i++) {
if (dev->urbs[i]) {
usb_kill_urb(dev->urbs[i]);
@@ -202,8 +205,7 @@ static int start_urb_transfer(struct au0828_dev *dev)
if (!purb->transfer_buffer) {
usb_free_urb(purb);
dev->urbs[i] = NULL;
- printk(KERN_ERR
- "%s: failed big buffer allocation, err = %d\n",
+ pr_err("%s: failed big buffer allocation, err = %d\n",
__func__, ret);
goto err;
}
@@ -224,13 +226,13 @@ static int start_urb_transfer(struct au0828_dev *dev)
ret = usb_submit_urb(dev->urbs[i], GFP_ATOMIC);
if (ret != 0) {
stop_urb_transfer(dev);
- printk(KERN_ERR "%s: failed urb submission, "
- "err = %d\n", __func__, ret);
+ pr_err("%s: failed urb submission, err = %d\n",
+ __func__, ret);
return ret;
}
}
- dev->urb_streaming = 1;
+ dev->urb_streaming = true;
ret = 0;
err:
@@ -268,7 +270,7 @@ static int au0828_dvb_start_feed(struct dvb_demux_feed *feed)
if (!demux->dmx.frontend)
return -EINVAL;
- if (dvb) {
+ if (dvb->frontend) {
mutex_lock(&dvb->lock);
dvb->start_count++;
dprintk(1, "%s(), start_count: %d, stop_count: %d\n", __func__,
@@ -297,7 +299,7 @@ static int au0828_dvb_stop_feed(struct dvb_demux_feed *feed)
dprintk(1, "%s()\n", __func__);
- if (dvb) {
+ if (dvb->frontend) {
cancel_work_sync(&dev->restart_streaming);
mutex_lock(&dvb->lock);
@@ -324,7 +326,7 @@ static void au0828_restart_dvb_streaming(struct work_struct *work)
restart_streaming);
struct au0828_dvb *dvb = &dev->dvb;
- if (dev->urb_streaming == 0)
+ if (!dev->urb_streaming)
return;
dprintk(1, "Restarting streaming...!\n");
@@ -393,9 +395,8 @@ static int dvb_register(struct au0828_dev *dev)
if (!dev->dig_transfer_buffer[i]) {
result = -ENOMEM;
- printk(KERN_ERR
- "%s: failed buffer allocation (errno = %d)\n",
- DRIVER_NAME, result);
+ pr_err("failed buffer allocation (errno = %d)\n",
+ result);
goto fail_adapter;
}
}
@@ -404,11 +405,12 @@ static int dvb_register(struct au0828_dev *dev)
INIT_WORK(&dev->restart_streaming, au0828_restart_dvb_streaming);
/* register adapter */
- result = dvb_register_adapter(&dvb->adapter, DRIVER_NAME, THIS_MODULE,
+ result = dvb_register_adapter(&dvb->adapter,
+ KBUILD_MODNAME, THIS_MODULE,
&dev->usbdev->dev, adapter_nr);
if (result < 0) {
- printk(KERN_ERR "%s: dvb_register_adapter failed "
- "(errno = %d)\n", DRIVER_NAME, result);
+ pr_err("dvb_register_adapter failed (errno = %d)\n",
+ result);
goto fail_adapter;
}
dvb->adapter.priv = dev;
@@ -416,8 +418,8 @@ static int dvb_register(struct au0828_dev *dev)
/* register frontend */
result = dvb_register_frontend(&dvb->adapter, dvb->frontend);
if (result < 0) {
- printk(KERN_ERR "%s: dvb_register_frontend failed "
- "(errno = %d)\n", DRIVER_NAME, result);
+ pr_err("dvb_register_frontend failed (errno = %d)\n",
+ result);
goto fail_frontend;
}
@@ -436,8 +438,7 @@ static int dvb_register(struct au0828_dev *dev)
dvb->demux.stop_feed = au0828_dvb_stop_feed;
result = dvb_dmx_init(&dvb->demux);
if (result < 0) {
- printk(KERN_ERR "%s: dvb_dmx_init failed (errno = %d)\n",
- DRIVER_NAME, result);
+ pr_err("dvb_dmx_init failed (errno = %d)\n", result);
goto fail_dmx;
}
@@ -446,31 +447,29 @@ static int dvb_register(struct au0828_dev *dev)
dvb->dmxdev.capabilities = 0;
result = dvb_dmxdev_init(&dvb->dmxdev, &dvb->adapter);
if (result < 0) {
- printk(KERN_ERR "%s: dvb_dmxdev_init failed (errno = %d)\n",
- DRIVER_NAME, result);
+ pr_err("dvb_dmxdev_init failed (errno = %d)\n", result);
goto fail_dmxdev;
}
dvb->fe_hw.source = DMX_FRONTEND_0;
result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_hw);
if (result < 0) {
- printk(KERN_ERR "%s: add_frontend failed "
- "(DMX_FRONTEND_0, errno = %d)\n", DRIVER_NAME, result);
+ pr_err("add_frontend failed (DMX_FRONTEND_0, errno = %d)\n",
+ result);
goto fail_fe_hw;
}
dvb->fe_mem.source = DMX_MEMORY_FE;
result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_mem);
if (result < 0) {
- printk(KERN_ERR "%s: add_frontend failed "
- "(DMX_MEMORY_FE, errno = %d)\n", DRIVER_NAME, result);
+ pr_err("add_frontend failed (DMX_MEMORY_FE, errno = %d)\n",
+ result);
goto fail_fe_mem;
}
result = dvb->demux.dmx.connect_frontend(&dvb->demux.dmx, &dvb->fe_hw);
if (result < 0) {
- printk(KERN_ERR "%s: connect_frontend failed (errno = %d)\n",
- DRIVER_NAME, result);
+ pr_err("connect_frontend failed (errno = %d)\n", result);
goto fail_fe_conn;
}
@@ -530,8 +529,7 @@ void au0828_dvb_unregister(struct au0828_dev *dev)
for (i = 0; i < URB_COUNT; i++)
kfree(dev->dig_transfer_buffer[i]);
}
-
-
+ dvb->frontend = NULL;
}
/* All the DVB attach calls go here, this function get's modified
@@ -596,12 +594,11 @@ int au0828_dvb_register(struct au0828_dev *dev)
}
break;
default:
- printk(KERN_WARNING "The frontend of your DVB/ATSC card "
- "isn't supported yet\n");
+ pr_warn("The frontend of your DVB/ATSC card isn't supported yet\n");
break;
}
if (NULL == dvb->frontend) {
- printk(KERN_ERR "%s() Frontend initialization failed\n",
+ pr_err("%s() Frontend initialization failed\n",
__func__);
return -1;
}
@@ -613,8 +610,49 @@ int au0828_dvb_register(struct au0828_dev *dev)
if (ret < 0) {
if (dvb->frontend->ops.release)
dvb->frontend->ops.release(dvb->frontend);
+ dvb->frontend = NULL;
return ret;
}
return 0;
}
+
+void au0828_dvb_suspend(struct au0828_dev *dev)
+{
+ struct au0828_dvb *dvb = &dev->dvb;
+ int rc;
+
+ if (dvb->frontend) {
+ if (dev->urb_streaming) {
+ cancel_work_sync(&dev->restart_streaming);
+ /* Stop transport */
+ mutex_lock(&dvb->lock);
+ stop_urb_transfer(dev);
+ au0828_stop_transport(dev, 1);
+ mutex_unlock(&dvb->lock);
+ dev->need_urb_start = true;
+ }
+ /* suspend frontend - does tuner and fe to sleep */
+ rc = dvb_frontend_suspend(dvb->frontend);
+ pr_info("au0828_dvb_suspend(): Suspending DVB fe %d\n", rc);
+ }
+}
+
+void au0828_dvb_resume(struct au0828_dev *dev)
+{
+ struct au0828_dvb *dvb = &dev->dvb;
+ int rc;
+
+ if (dvb->frontend) {
+ /* resume frontend - does fe and tuner init */
+ rc = dvb_frontend_resume(dvb->frontend);
+ pr_info("au0828_dvb_resume(): Resuming DVB fe %d\n", rc);
+ if (dev->need_urb_start) {
+ /* Start transport */
+ mutex_lock(&dvb->lock);
+ au0828_start_transport(dev);
+ start_urb_transfer(dev);
+ mutex_unlock(&dvb->lock);
+ }
+ }
+}
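Note: the DVB suspend/resume paths above lean on the dvb-core helpers dvb_frontend_suspend() and dvb_frontend_resume(), so the tuner/demod sleep and re-init ordering lives in the core instead of in each bridge driver; the bridge only brackets the calls with its own URB stop/start. A condensed sketch of just the frontend part (device wrapper and locking omitted):

    #include <linux/kernel.h>
    #include "dvb_frontend.h"

    static void demo_dvb_suspend(struct dvb_frontend *fe)
    {
    	/* puts the tuner and the demod to sleep through their ops */
    	int ret = dvb_frontend_suspend(fe);

    	pr_info("DVB frontend suspended, ret=%d\n", ret);
    }

    static void demo_dvb_resume(struct dvb_frontend *fe)
    {
    	/* re-runs demod init and tuner init */
    	int ret = dvb_frontend_resume(fe);

    	pr_info("DVB frontend resumed, ret=%d\n", ret);
    }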
diff --git a/drivers/media/usb/au0828/au0828-i2c.c b/drivers/media/usb/au0828/au0828-i2c.c
index daaeaf1b089c..ae7ac6669769 100644
--- a/drivers/media/usb/au0828/au0828-i2c.c
+++ b/drivers/media/usb/au0828/au0828-i2c.c
@@ -19,13 +19,14 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "au0828.h"
+
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/io.h>
-#include "au0828.h"
#include "media/tuner.h"
#include <media/v4l2-common.h>
@@ -340,7 +341,7 @@ static struct i2c_algorithm au0828_i2c_algo_template = {
/* ----------------------------------------------------------------------- */
static struct i2c_adapter au0828_i2c_adap_template = {
- .name = DRIVER_NAME,
+ .name = KBUILD_MODNAME,
.owner = THIS_MODULE,
.algo = &au0828_i2c_algo_template,
};
@@ -365,7 +366,7 @@ static void do_i2c_scan(char *name, struct i2c_client *c)
rc = i2c_master_recv(c, &buf, 0);
if (rc < 0)
continue;
- printk(KERN_INFO "%s: i2c scan: found device @ 0x%x [%s]\n",
+ pr_info("%s: i2c scan: found device @ 0x%x [%s]\n",
name, i << 1, i2c_devs[i] ? i2c_devs[i] : "???");
}
}
@@ -381,7 +382,7 @@ int au0828_i2c_register(struct au0828_dev *dev)
dev->i2c_adap.dev.parent = &dev->usbdev->dev;
- strlcpy(dev->i2c_adap.name, DRIVER_NAME,
+ strlcpy(dev->i2c_adap.name, KBUILD_MODNAME,
sizeof(dev->i2c_adap.name));
dev->i2c_adap.algo = &dev->i2c_algo;
@@ -396,11 +397,11 @@ int au0828_i2c_register(struct au0828_dev *dev)
dev->i2c_client.adapter = &dev->i2c_adap;
if (0 == dev->i2c_rc) {
- printk(KERN_INFO "%s: i2c bus registered\n", DRIVER_NAME);
+ pr_info("i2c bus registered\n");
if (i2c_scan)
- do_i2c_scan(DRIVER_NAME, &dev->i2c_client);
+ do_i2c_scan(KBUILD_MODNAME, &dev->i2c_client);
} else
- printk(KERN_INFO "%s: i2c bus register FAILED\n", DRIVER_NAME);
+ pr_info("i2c bus register FAILED\n");
return dev->i2c_rc;
}
diff --git a/drivers/media/usb/au0828/au0828-input.c b/drivers/media/usb/au0828/au0828-input.c
index fd0d3a90ce7d..63995f97dc65 100644
--- a/drivers/media/usb/au0828/au0828-input.c
+++ b/drivers/media/usb/au0828/au0828-input.c
@@ -17,6 +17,8 @@
GNU General Public License for more details.
*/
+#include "au0828.h"
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
@@ -25,7 +27,9 @@
#include <linux/slab.h>
#include <media/rc-core.h>
-#include "au0828.h"
+static int disable_ir;
+module_param(disable_ir, int, 0444);
+MODULE_PARM_DESC(disable_ir, "disable infrared remote support");
struct au0828_rc {
struct au0828_dev *dev;
@@ -90,14 +94,19 @@ static int au8522_rc_read(struct au0828_rc *ir, u16 reg, int val,
static int au8522_rc_andor(struct au0828_rc *ir, u16 reg, u8 mask, u8 value)
{
int rc;
- char buf;
+ char buf, oldbuf;
rc = au8522_rc_read(ir, reg, -1, &buf, 1);
if (rc < 0)
return rc;
+ oldbuf = buf;
buf = (buf & ~mask) | (value & mask);
+ /* Nothing to do, just return */
+ if (buf == oldbuf)
+ return 0;
+
return au8522_rc_write(ir, reg, buf);
}
@@ -122,8 +131,11 @@ static int au0828_get_key_au8522(struct au0828_rc *ir)
/* Check IR int */
rc = au8522_rc_read(ir, 0xe1, -1, buf, 1);
- if (rc < 0 || !(buf[0] & (1 << 4)))
+ if (rc < 0 || !(buf[0] & (1 << 4))) {
+ /* Be sure that IR is enabled */
+ au8522_rc_set(ir, 0xe0, 1 << 4);
return 0;
+ }
/* Something arrived. Get the data */
rc = au8522_rc_read(ir, 0xe3, 0x11, buf, sizeof(buf));
@@ -135,8 +147,6 @@ static int au0828_get_key_au8522(struct au0828_rc *ir)
/* Disable IR */
au8522_rc_clear(ir, 0xe0, 1 << 4);
- usleep_range(45000, 46000);
-
/* Enable IR */
au8522_rc_set(ir, 0xe0, 1 << 4);
@@ -243,10 +253,10 @@ static void au0828_rc_stop(struct rc_dev *rc)
{
struct au0828_rc *ir = rc->priv;
+ cancel_delayed_work_sync(&ir->work);
+
/* Disable IR */
au8522_rc_clear(ir, 0xe0, 1 << 4);
-
- cancel_delayed_work_sync(&ir->work);
}
static int au0828_probe_i2c_ir(struct au0828_dev *dev)
@@ -273,7 +283,7 @@ int au0828_rc_register(struct au0828_dev *dev)
int err = -ENOMEM;
u16 i2c_rc_dev_addr = 0;
- if (!dev->board.has_ir_i2c)
+ if (!dev->board.has_ir_i2c || disable_ir)
return 0;
i2c_rc_dev_addr = au0828_probe_i2c_ir(dev);
@@ -368,8 +378,13 @@ int au0828_rc_suspend(struct au0828_dev *dev)
if (!ir)
return 0;
+ pr_info("Stopping RC\n");
+
cancel_delayed_work_sync(&ir->work);
+ /* Disable IR */
+ au8522_rc_clear(ir, 0xe0, 1 << 4);
+
return 0;
}
@@ -380,6 +395,11 @@ int au0828_rc_resume(struct au0828_dev *dev)
if (!ir)
return 0;
+ pr_info("Restarting RC\n");
+
+ /* Enable IR */
+ au8522_rc_set(ir, 0xe0, 1 << 4);
+
schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling));
return 0;
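Note: au8522_rc_andor() above now keeps the old register value and returns early when the masked update is a no-op, which saves one I2C write per polling cycle in the common case. The general read-modify-write shape, assuming hypothetical bus accessors:

    #include <linux/types.h>

    struct demo_chip;
    int demo_read_reg(struct demo_chip *chip, u16 reg, u8 *val);	/* assumed accessor */
    int demo_write_reg(struct demo_chip *chip, u16 reg, u8 val);	/* assumed accessor */

    static int demo_update_bits(struct demo_chip *chip, u16 reg, u8 mask, u8 val)
    {
    	u8 old, new;
    	int ret;

    	ret = demo_read_reg(chip, reg, &old);
    	if (ret < 0)
    		return ret;

    	new = (old & ~mask) | (val & mask);
    	if (new == old)
    		return 0;	/* nothing changed, skip the bus transaction */

    	return demo_write_reg(chip, reg, new);
    }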
diff --git a/drivers/media/usb/au0828/au0828-vbi.c b/drivers/media/usb/au0828/au0828-vbi.c
index 63f593070ee8..932d24f42b24 100644
--- a/drivers/media/usb/au0828/au0828-vbi.c
+++ b/drivers/media/usb/au0828/au0828-vbi.c
@@ -21,13 +21,13 @@
02110-1301, USA.
*/
+#include "au0828.h"
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
-#include "au0828.h"
-
static unsigned int vbibufs = 5;
module_param(vbibufs, int, 0644);
MODULE_PARM_DESC(vbibufs, "number of vbi buffers, range 2-32");
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 98f7ea1d6d63..5f337b118bff 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -28,16 +28,16 @@
*
*/
+#include "au0828.h"
+
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
-#include <linux/suspend.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-event.h>
#include <media/tuner.h>
-#include "au0828.h"
#include "au0828-reg.h"
static DEFINE_MUTEX(au0828_sysfs_lock);
@@ -53,7 +53,7 @@ MODULE_PARM_DESC(isoc_debug, "enable debug messages [isoc transfers]");
#define au0828_isocdbg(fmt, arg...) \
do {\
if (isoc_debug) { \
- printk(KERN_INFO "au0828 %s :"fmt, \
+ pr_info("au0828 %s :"fmt, \
__func__ , ##arg); \
} \
} while (0)
@@ -106,12 +106,12 @@ static inline void print_err_status(struct au0828_dev *dev,
static int check_dev(struct au0828_dev *dev)
{
if (dev->dev_state & DEV_DISCONNECTED) {
- printk(KERN_INFO "v4l2 ioctl: device not present\n");
+ pr_info("v4l2 ioctl: device not present\n");
return -ENODEV;
}
if (dev->dev_state & DEV_MISCONFIGURED) {
- printk(KERN_INFO "v4l2 ioctl: device is misconfigured; "
+ pr_info("v4l2 ioctl: device is misconfigured; "
"close and open it again\n");
return -EIO;
}
@@ -159,6 +159,7 @@ static void au0828_irq_callback(struct urb *urb)
au0828_isocdbg("urb resubmit failed (error=%i)\n",
urb->status);
}
+ dev->stream_state = STREAM_ON;
}
/*
@@ -198,6 +199,8 @@ static void au0828_uninit_isoc(struct au0828_dev *dev)
dev->isoc_ctl.urb = NULL;
dev->isoc_ctl.transfer_buffer = NULL;
dev->isoc_ctl.num_bufs = 0;
+
+ dev->stream_state = STREAM_OFF;
}
/*
@@ -717,7 +720,7 @@ buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
rc = videobuf_iolock(vq, &buf->vb, NULL);
if (rc < 0) {
- printk(KERN_INFO "videobuf_iolock failed\n");
+ pr_info("videobuf_iolock failed\n");
goto fail;
}
}
@@ -730,7 +733,7 @@ buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
AU0828_MAX_ISO_BUFS, dev->max_pkt_size,
au0828_isoc_copy);
if (rc < 0) {
- printk(KERN_INFO "au0828_init_isoc failed\n");
+ pr_info("au0828_init_isoc failed\n");
goto fail;
}
}
@@ -801,7 +804,7 @@ static int au0828_analog_stream_enable(struct au0828_dev *d)
/* set au0828 interface0 to AS5 here again */
ret = usb_set_interface(d->usbdev, 0, 5);
if (ret < 0) {
- printk(KERN_INFO "Au0828 can't set alt setting to 5!\n");
+ pr_info("Au0828 can't set alt setting to 5!\n");
return -EBUSY;
}
}
@@ -1090,7 +1093,7 @@ static int au0828_v4l2_close(struct file *filp)
USB bandwidth */
ret = usb_set_interface(dev->usbdev, 0, 0);
if (ret < 0)
- printk(KERN_INFO "Au0828 can't set alternate to 0!\n");
+ pr_info("Au0828 can't set alternate to 0!\n");
}
mutex_unlock(&dev->lock);
@@ -1344,7 +1347,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
return rc;
if (videobuf_queue_is_busy(&fh->vb_vidq)) {
- printk(KERN_INFO "%s queue busy\n", __func__);
+ pr_info("%s queue busy\n", __func__);
rc = -EBUSY;
goto out;
}
@@ -1868,6 +1871,69 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
return rc;
}
+void au0828_v4l2_suspend(struct au0828_dev *dev)
+{
+ struct urb *urb;
+ int i;
+
+ pr_info("stopping V4L2\n");
+
+ if (dev->stream_state == STREAM_ON) {
+ pr_info("stopping V4L2 active URBs\n");
+ au0828_analog_stream_disable(dev);
+ /* stop urbs */
+ for (i = 0; i < dev->isoc_ctl.num_bufs; i++) {
+ urb = dev->isoc_ctl.urb[i];
+ if (urb) {
+ if (!irqs_disabled())
+ usb_kill_urb(urb);
+ else
+ usb_unlink_urb(urb);
+ }
+ }
+ }
+
+ if (dev->vid_timeout_running)
+ del_timer_sync(&dev->vid_timeout);
+ if (dev->vbi_timeout_running)
+ del_timer_sync(&dev->vbi_timeout);
+}
+
+void au0828_v4l2_resume(struct au0828_dev *dev)
+{
+ int i, rc;
+
+ pr_info("restarting V4L2\n");
+
+ if (dev->stream_state == STREAM_ON) {
+ au0828_stream_interrupt(dev);
+ au0828_init_tuner(dev);
+ }
+
+ if (dev->vid_timeout_running)
+ mod_timer(&dev->vid_timeout, jiffies + (HZ / 10));
+ if (dev->vbi_timeout_running)
+ mod_timer(&dev->vbi_timeout, jiffies + (HZ / 10));
+
+ /* If we were doing ac97 instead of i2s, it would go here...*/
+ au0828_i2s_init(dev);
+
+ au0828_analog_stream_enable(dev);
+
+ if (!(dev->stream_state == STREAM_ON)) {
+ au0828_analog_stream_reset(dev);
+ /* submit urbs */
+ for (i = 0; i < dev->isoc_ctl.num_bufs; i++) {
+ rc = usb_submit_urb(dev->isoc_ctl.urb[i], GFP_ATOMIC);
+ if (rc) {
+ au0828_isocdbg("submit of urb %i failed (error=%i)\n",
+ i, rc);
+ au0828_uninit_isoc(dev);
+ }
+ }
+ }
+}
+
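Note: the V4L2 suspend path above stops active URBs with usb_kill_urb() only when interrupts are enabled; usb_kill_urb() may sleep, so in atomic context the code falls back to the asynchronous usb_unlink_urb(). A condensed version of that guard (array handling is illustrative):

    #include <linux/irqflags.h>
    #include <linux/usb.h>

    static void demo_stop_urbs(struct urb **urbs, int count)
    {
    	int i;

    	for (i = 0; i < count; i++) {
    		if (!urbs[i])
    			continue;
    		if (!irqs_disabled())
    			usb_kill_urb(urbs[i]);		/* synchronous, may sleep */
    		else
    			usb_unlink_urb(urbs[i]);	/* asynchronous, atomic-safe */
    	}
    }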
static struct v4l2_file_operations au0828_v4l_fops = {
.owner = THIS_MODULE,
.open = au0828_v4l2_open,
@@ -1939,7 +2005,7 @@ int au0828_analog_register(struct au0828_dev *dev,
retval = usb_set_interface(dev->usbdev,
interface->cur_altsetting->desc.bInterfaceNumber, 5);
if (retval != 0) {
- printk(KERN_INFO "Failure setting usb interface0 to as5\n");
+ pr_info("Failure setting usb interface0 to as5\n");
return retval;
}
@@ -1963,7 +2029,7 @@ int au0828_analog_register(struct au0828_dev *dev,
}
}
if (!(dev->isoc_in_endpointaddr)) {
- printk(KERN_INFO "Could not locate isoc endpoint\n");
+ pr_info("Could not locate isoc endpoint\n");
kfree(dev);
return -ENODEV;
}
diff --git a/drivers/media/usb/au0828/au0828.h b/drivers/media/usb/au0828/au0828.h
index 96bec05d7dac..36815a369c68 100644
--- a/drivers/media/usb/au0828/au0828.h
+++ b/drivers/media/usb/au0828/au0828.h
@@ -19,6 +19,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/usb.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
@@ -42,7 +44,6 @@
#include "au0828-reg.h"
#include "au0828-cards.h"
-#define DRIVER_NAME "au0828"
#define URB_COUNT 16
#define URB_BUFSIZE (0xe522)
@@ -89,6 +90,7 @@ struct au0828_board {
unsigned char tuner_addr;
unsigned char i2c_clk_divider;
unsigned char has_ir_i2c:1;
+ unsigned char has_analog:1;
struct au0828_input input[AU0828_MAX_INPUT];
};
@@ -266,8 +268,8 @@ struct au0828_dev {
char *transfer_buffer[AU0828_MAX_ISO_BUFS];/* transfer buffers for isoc
transfer */
- /* USB / URB Related */
- int urb_streaming;
+ /* DVB USB / URB Related */
+ bool urb_streaming, need_urb_start;
struct urb *urbs[URB_COUNT];
/* Preallocated transfer digital transfer buffers */
@@ -311,22 +313,38 @@ int au0828_analog_register(struct au0828_dev *dev,
struct usb_interface *interface);
int au0828_analog_stream_disable(struct au0828_dev *d);
void au0828_analog_unregister(struct au0828_dev *dev);
+#ifdef CONFIG_VIDEO_AU0828_V4L2
+void au0828_v4l2_suspend(struct au0828_dev *dev);
+void au0828_v4l2_resume(struct au0828_dev *dev);
+#else
+static inline void au0828_v4l2_suspend(struct au0828_dev *dev) { };
+static inline void au0828_v4l2_resume(struct au0828_dev *dev) { };
+#endif
/* ----------------------------------------------------------- */
/* au0828-dvb.c */
extern int au0828_dvb_register(struct au0828_dev *dev);
extern void au0828_dvb_unregister(struct au0828_dev *dev);
+void au0828_dvb_suspend(struct au0828_dev *dev);
+void au0828_dvb_resume(struct au0828_dev *dev);
/* au0828-vbi.c */
extern struct videobuf_queue_ops au0828_vbi_qops;
#define dprintk(level, fmt, arg...)\
do { if (au0828_debug & level)\
- printk(KERN_DEBUG DRIVER_NAME "/0: " fmt, ## arg);\
+ printk(KERN_DEBUG pr_fmt(fmt), ## arg);\
} while (0)
/* au0828-input.c */
-int au0828_rc_register(struct au0828_dev *dev);
-void au0828_rc_unregister(struct au0828_dev *dev);
-int au0828_rc_suspend(struct au0828_dev *dev);
-int au0828_rc_resume(struct au0828_dev *dev);
+#ifdef CONFIG_VIDEO_AU0828_RC
+extern int au0828_rc_register(struct au0828_dev *dev);
+extern void au0828_rc_unregister(struct au0828_dev *dev);
+extern int au0828_rc_suspend(struct au0828_dev *dev);
+extern int au0828_rc_resume(struct au0828_dev *dev);
+#else
+static inline int au0828_rc_register(struct au0828_dev *dev) { return 0; }
+static inline void au0828_rc_unregister(struct au0828_dev *dev) { }
+static inline int au0828_rc_suspend(struct au0828_dev *dev) { return 0; }
+static inline int au0828_rc_resume(struct au0828_dev *dev) { return 0; }
+#endif
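Note: the header changes above move the CONFIG_VIDEO_AU0828_RC (and new CONFIG_VIDEO_AU0828_V4L2) checks out of the .c files by providing static inline stubs, so the callers in au0828-core.c need no #ifdef blocks of their own. The pattern in isolation, with an illustrative config symbol:

    struct demo_dev;

    #ifdef CONFIG_DEMO_FEATURE
    int demo_feature_register(struct demo_dev *dev);
    void demo_feature_unregister(struct demo_dev *dev);
    #else
    /* feature compiled out: stubs keep every call site free of #ifdefs */
    static inline int demo_feature_register(struct demo_dev *dev)
    {
    	return 0;
    }
    static inline void demo_feature_unregister(struct demo_dev *dev)
    {
    }
    #endif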
diff --git a/drivers/media/usb/cx231xx/cx231xx-avcore.c b/drivers/media/usb/cx231xx/cx231xx-avcore.c
index a428c10e1a16..40a69879fc0a 100644
--- a/drivers/media/usb/cx231xx/cx231xx-avcore.c
+++ b/drivers/media/usb/cx231xx/cx231xx-avcore.c
@@ -1595,7 +1595,7 @@ void cx231xx_set_DIF_bandpass(struct cx231xx *dev, u32 if_freq,
if_freq = 16000000;
}
- cx231xx_info("Enter IF=%zd\n",
+ cx231xx_info("Enter IF=%zu\n",
ARRAY_SIZE(Dif_set_array));
for (i = 0; i < ARRAY_SIZE(Dif_set_array); i++) {
if (Dif_set_array[i].if_freq == if_freq) {
@@ -2223,7 +2223,7 @@ int cx231xx_set_power_mode(struct cx231xx *dev, enum AV_MODE mode)
if (status < 0)
return status;
- tmp = le32_to_cpu(*((u32 *) value));
+ tmp = le32_to_cpu(*((__le32 *) value));
switch (mode) {
case POLARIS_AVMODE_ENXTERNAL_AV:
@@ -2444,7 +2444,7 @@ int cx231xx_power_suspend(struct cx231xx *dev)
if (status > 0)
return status;
- tmp = le32_to_cpu(*((u32 *) value));
+ tmp = le32_to_cpu(*((__le32 *) value));
tmp &= (~PWR_MODE_MASK);
value[0] = (u8) tmp;
@@ -2472,7 +2472,7 @@ int cx231xx_start_stream(struct cx231xx *dev, u32 ep_mask)
if (status < 0)
return status;
- tmp = le32_to_cpu(*((u32 *) value));
+ tmp = le32_to_cpu(*((__le32 *) value));
tmp |= ep_mask;
value[0] = (u8) tmp;
value[1] = (u8) (tmp >> 8);
@@ -2497,7 +2497,7 @@ int cx231xx_stop_stream(struct cx231xx *dev, u32 ep_mask)
if (status < 0)
return status;
- tmp = le32_to_cpu(*((u32 *) value));
+ tmp = le32_to_cpu(*((__le32 *) value));
tmp &= (~ep_mask);
value[0] = (u8) tmp;
value[1] = (u8) (tmp >> 8);
@@ -2644,7 +2644,7 @@ static int cx231xx_set_gpio_bit(struct cx231xx *dev, u32 gpio_bit, u32 gpio_val)
{
int status = 0;
- gpio_val = cpu_to_le32(gpio_val);
+ gpio_val = (__force u32)cpu_to_le32(gpio_val);
status = cx231xx_send_gpio_cmd(dev, gpio_bit, (u8 *)&gpio_val, 4, 0, 0);
return status;
@@ -2652,7 +2652,7 @@ static int cx231xx_set_gpio_bit(struct cx231xx *dev, u32 gpio_bit, u32 gpio_val)
static int cx231xx_get_gpio_bit(struct cx231xx *dev, u32 gpio_bit, u32 *gpio_val)
{
- u32 tmp;
+ __le32 tmp;
int status = 0;
status = cx231xx_send_gpio_cmd(dev, gpio_bit, (u8 *)&tmp, 4, 0, 1);
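Note: the cx231xx conversions cast the raw byte buffer to __le32 * (or declare the temporary as __le32) before le32_to_cpu(), which records the wire endianness for sparse without changing behaviour. When the buffer offset is not guaranteed to be aligned, get_unaligned_le32() is the usual alternative. Illustrative helpers:

    #include <linux/types.h>
    #include <asm/unaligned.h>

    static u32 demo_read_le32(const u8 *buf)
    {
    	/* aligned case: cast to the LE type so sparse checks the conversion */
    	return le32_to_cpu(*(const __le32 *)buf);
    }

    static u32 demo_read_le32_unaligned(const u8 *buf)
    {
    	/* safe for arbitrary byte offsets */
    	return get_unaligned_le32(buf);
    }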
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index 8039b769f258..791f00c6276b 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -705,7 +705,7 @@ struct cx231xx_board cx231xx_boards[] = {
},
},
[CX231XX_BOARD_HAUPPAUGE_930C_HD_1113xx] = {
- .name = "Hauppauge WinTV 930C-HD (1113xx) / PCTV QuatroStick 521e",
+ .name = "Hauppauge WinTV 930C-HD (1113xx) / HVR-900H (111xxx) / PCTV QuatroStick 521e",
.tuner_type = TUNER_NXP_TDA18271,
.tuner_addr = 0x60,
.tuner_gpio = RDE250_XCV_TUNER,
@@ -744,7 +744,7 @@ struct cx231xx_board cx231xx_boards[] = {
} },
},
[CX231XX_BOARD_HAUPPAUGE_930C_HD_1114xx] = {
- .name = "Hauppauge WinTV 930C-HD (1114xx) / PCTV QuatroStick 522e",
+ .name = "Hauppauge WinTV 930C-HD (1114xx) / HVR-901H (1114xx) / PCTV QuatroStick 522e",
.tuner_type = TUNER_ABSENT,
.tuner_addr = 0x60,
.tuner_gpio = RDE250_XCV_TUNER,
@@ -815,6 +815,12 @@ struct usb_device_id cx231xx_id_table[] = {
.driver_info = CX231XX_BOARD_HAUPPAUGE_930C_HD_1113xx},
{USB_DEVICE(0x2040, 0xb131),
.driver_info = CX231XX_BOARD_HAUPPAUGE_930C_HD_1114xx},
+ /* Hauppauge WinTV-HVR-900-H */
+ {USB_DEVICE(0x2040, 0xb138),
+ .driver_info = CX231XX_BOARD_HAUPPAUGE_930C_HD_1113xx},
+ /* Hauppauge WinTV-HVR-901-H */
+ {USB_DEVICE(0x2040, 0xb139),
+ .driver_info = CX231XX_BOARD_HAUPPAUGE_930C_HD_1114xx},
{USB_DEVICE(0x2040, 0xb140),
.driver_info = CX231XX_BOARD_HAUPPAUGE_EXETER},
{USB_DEVICE(0x2040, 0xc200),
diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c
index 513194aa6561..180103e48036 100644
--- a/drivers/media/usb/cx231xx/cx231xx-core.c
+++ b/drivers/media/usb/cx231xx/cx231xx-core.c
@@ -1491,7 +1491,7 @@ int cx231xx_mode_register(struct cx231xx *dev, u16 address, u32 mode)
if (status < 0)
return status;
- tmp = le32_to_cpu(*((u32 *) value));
+ tmp = le32_to_cpu(*((__le32 *) value));
tmp |= mode;
value[0] = (u8) tmp;
diff --git a/drivers/media/usb/cx231xx/cx231xx-dvb.c b/drivers/media/usb/cx231xx/cx231xx-dvb.c
index 1fa79741d199..6c7b5e250eed 100644
--- a/drivers/media/usb/cx231xx/cx231xx-dvb.c
+++ b/drivers/media/usb/cx231xx/cx231xx-dvb.c
@@ -403,8 +403,6 @@ static int attach_xc5000(u8 addr, struct cx231xx *dev)
int cx231xx_set_analog_freq(struct cx231xx *dev, u32 freq)
{
- int status = 0;
-
if ((dev->dvb != NULL) && (dev->dvb->frontend != NULL)) {
struct dvb_tuner_ops *dops = &dev->dvb->frontend->ops.tuner_ops;
@@ -423,7 +421,7 @@ int cx231xx_set_analog_freq(struct cx231xx *dev, u32 freq)
}
- return status;
+ return 0;
}
int cx231xx_reset_analog_tuner(struct cx231xx *dev)
@@ -740,7 +738,7 @@ static int dvb_init(struct cx231xx *dev)
goto out_free;
}
- dev->dvb->frontend->ops.i2c_gate_ctrl = 0;
+ dev->dvb->frontend->ops.i2c_gate_ctrl = NULL;
/* define general-purpose callback pointer */
dvb->frontend->callback = cx231xx_tuner_callback;
@@ -773,7 +771,7 @@ static int dvb_init(struct cx231xx *dev)
goto out_free;
}
- dev->dvb->frontend->ops.i2c_gate_ctrl = 0;
+ dev->dvb->frontend->ops.i2c_gate_ctrl = NULL;
/* define general-purpose callback pointer */
dvb->frontend->callback = cx231xx_tuner_callback;
diff --git a/drivers/media/usb/dvb-usb-v2/Kconfig b/drivers/media/usb/dvb-usb-v2/Kconfig
index 66645b02c854..5b34323ad207 100644
--- a/drivers/media/usb/dvb-usb-v2/Kconfig
+++ b/drivers/media/usb/dvb-usb-v2/Kconfig
@@ -141,3 +141,10 @@ config DVB_USB_RTL28XXU
help
Say Y here to support the Realtek RTL28xxU DVB USB receiver.
+config DVB_USB_DVBSKY
+ tristate "DVBSky USB support"
+ depends on DVB_USB_V2
+ select DVB_M88DS3103 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_M88TS2022 if MEDIA_SUBDRV_AUTOSELECT
+ help
+ Say Y here to support the USB receivers from DVBSky.
diff --git a/drivers/media/usb/dvb-usb-v2/Makefile b/drivers/media/usb/dvb-usb-v2/Makefile
index bc38f03394cd..f10d4df0eae5 100644
--- a/drivers/media/usb/dvb-usb-v2/Makefile
+++ b/drivers/media/usb/dvb-usb-v2/Makefile
@@ -37,6 +37,9 @@ obj-$(CONFIG_DVB_USB_MXL111SF) += mxl111sf-tuner.o
dvb-usb-rtl28xxu-objs := rtl28xxu.o
obj-$(CONFIG_DVB_USB_RTL28XXU) += dvb-usb-rtl28xxu.o
+dvb-usb-dvbsky-objs := dvbsky.o
+obj-$(CONFIG_DVB_USB_DVBSKY) += dvb-usb-dvbsky.o
+
ccflags-y += -I$(srctree)/drivers/media/dvb-core
ccflags-y += -I$(srctree)/drivers/media/dvb-frontends
ccflags-y += -I$(srctree)/drivers/media/tuners
diff --git a/drivers/media/usb/dvb-usb-v2/af9015.c b/drivers/media/usb/dvb-usb-v2/af9015.c
index 5ca738ab44e0..16c0b7d4f8e7 100644
--- a/drivers/media/usb/dvb-usb-v2/af9015.c
+++ b/drivers/media/usb/dvb-usb-v2/af9015.c
@@ -419,7 +419,7 @@ static int af9015_eeprom_hash(struct dvb_usb_device *d)
/* calculate checksum */
for (i = 0; i < AF9015_EEPROM_SIZE / sizeof(u32); i++) {
state->eeprom_sum *= GOLDEN_RATIO_PRIME_32;
- state->eeprom_sum += le32_to_cpu(((u32 *)buf)[i]);
+ state->eeprom_sum += le32_to_cpu(((__le32 *)buf)[i]);
}
for (i = 0; i < AF9015_EEPROM_SIZE; i += 16)
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index c82beac0e0cb..00758c83eec7 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -193,6 +193,92 @@ static int af9035_wr_reg_mask(struct dvb_usb_device *d, u32 reg, u8 val,
return af9035_wr_regs(d, reg, &val, 1);
}
+static int af9035_add_i2c_dev(struct dvb_usb_device *d, char *type, u8 addr,
+ void *platform_data, struct i2c_adapter *adapter)
+{
+ int ret, num;
+ struct state *state = d_to_priv(d);
+ struct i2c_client *client;
+ struct i2c_board_info board_info = {
+ .addr = addr,
+ .platform_data = platform_data,
+ };
+
+ strlcpy(board_info.type, type, I2C_NAME_SIZE);
+
+ /* find first free client */
+ for (num = 0; num < AF9035_I2C_CLIENT_MAX; num++) {
+ if (state->i2c_client[num] == NULL)
+ break;
+ }
+
+ dev_dbg(&d->udev->dev, "%s: num=%d\n", __func__, num);
+
+ if (num == AF9035_I2C_CLIENT_MAX) {
+ dev_err(&d->udev->dev, "%s: I2C client out of index\n",
+ KBUILD_MODNAME);
+ ret = -ENODEV;
+ goto err;
+ }
+
+ request_module(board_info.type);
+
+ /* register I2C device */
+ client = i2c_new_device(adapter, &board_info);
+ if (client == NULL || client->dev.driver == NULL) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ /* increase I2C driver usage count */
+ if (!try_module_get(client->dev.driver->owner)) {
+ i2c_unregister_device(client);
+ ret = -ENODEV;
+ goto err;
+ }
+
+ state->i2c_client[num] = client;
+ return 0;
+err:
+ dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
+ return ret;
+}
+
+static void af9035_del_i2c_dev(struct dvb_usb_device *d)
+{
+ int num;
+ struct state *state = d_to_priv(d);
+ struct i2c_client *client;
+
+ /* find last used client */
+ num = AF9035_I2C_CLIENT_MAX;
+ while (num--) {
+ if (state->i2c_client[num] != NULL)
+ break;
+ }
+
+ dev_dbg(&d->udev->dev, "%s: num=%d\n", __func__, num);
+
+ if (num == -1) {
+ dev_err(&d->udev->dev, "%s: I2C client out of index\n",
+ KBUILD_MODNAME);
+ goto err;
+ }
+
+ client = state->i2c_client[num];
+
+ /* decrease I2C driver usage count */
+ module_put(client->dev.driver->owner);
+
+ /* unregister I2C device */
+ i2c_unregister_device(client);
+
+ state->i2c_client[num] = NULL;
+ return;
+err:
+ dev_dbg(&d->udev->dev, "%s: failed\n", __func__);
+}
+
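Note: af9035_add_i2c_dev()/af9035_del_i2c_dev() above switch the demod and tuner over to regular I2C client binding instead of dvb_attach(). The core of that lifecycle - request the driver module, create the client, pin the driver so it cannot unload while bound - looks roughly as follows (names hypothetical, error handling trimmed):

    #include <linux/i2c.h>
    #include <linux/kmod.h>
    #include <linux/module.h>
    #include <linux/string.h>

    static struct i2c_client *demo_bind_i2c_dev(struct i2c_adapter *adap,
    					    const char *type, u8 addr,
    					    void *pdata)
    {
    	struct i2c_client *client;
    	struct i2c_board_info info = {
    		.addr = addr,
    		.platform_data = pdata,
    	};

    	strlcpy(info.type, type, I2C_NAME_SIZE);
    	request_module(info.type);		/* make sure the driver is loaded */

    	client = i2c_new_device(adap, &info);
    	if (!client || !client->dev.driver)
    		return NULL;			/* no driver bound */

    	if (!try_module_get(client->dev.driver->owner)) {
    		i2c_unregister_device(client);	/* driver is going away, back off */
    		return NULL;
    	}
    	return client;
    }

    static void demo_unbind_i2c_dev(struct i2c_client *client)
    {
    	module_put(client->dev.driver->owner);
    	i2c_unregister_device(client);
    }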
static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
struct i2c_msg msg[], int num)
{
@@ -204,7 +290,7 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
return -EAGAIN;
/*
- * I2C sub header is 5 bytes long. Meaning of those bytes are:
+ * AF9035 I2C sub header is 5 bytes long. Meaning of those bytes are:
* 0: data len
* 1: I2C addr << 1
* 2: reg addr len
@@ -218,110 +304,156 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
* NOTE: As a firmware knows tuner type there is very small possibility
* there could be some tuner I2C hacks done by firmware and this may
* lead problems if firmware expects those bytes are used.
+ *
+ * TODO: Here is few hacks. AF9035 chip integrates AF9033 demodulator.
+ * IT9135 chip integrates AF9033 demodulator and RF tuner. For dual
+ * tuner devices, there is also external AF9033 demodulator connected
+ * via external I2C bus. All AF9033 demod I2C traffic, both single and
+ * dual tuner configuration, is covered by firmware - actual USB IO
+ * looks just like a memory access.
+ * In case of IT913x chip, there is own tuner driver. It is implemented
+ * currently as a I2C driver, even tuner IP block is likely build
+ * directly into the demodulator memory space and there is no own I2C
+ * bus. I2C subsystem does not allow register multiple devices to same
+ * bus, having same slave address. Due to that we reuse demod address,
+ * shifted by one bit, on that case.
+ *
+ * For IT930x we use a different command and the sub header is
+ * different as well:
+ * 0: data len
+ * 1: I2C bus (0x03 seems to be only value used)
+ * 2: I2C addr << 1
*/
- if (num == 2 && !(msg[0].flags & I2C_M_RD) &&
- (msg[1].flags & I2C_M_RD)) {
+#define AF9035_IS_I2C_XFER_WRITE_READ(_msg, _num) \
+ (_num == 2 && !(_msg[0].flags & I2C_M_RD) && (_msg[1].flags & I2C_M_RD))
+#define AF9035_IS_I2C_XFER_WRITE(_msg, _num) \
+ (_num == 1 && !(_msg[0].flags & I2C_M_RD))
+#define AF9035_IS_I2C_XFER_READ(_msg, _num) \
+ (_num == 1 && (_msg[0].flags & I2C_M_RD))
+
+ if (AF9035_IS_I2C_XFER_WRITE_READ(msg, num)) {
if (msg[0].len > 40 || msg[1].len > 40) {
/* TODO: correct limits > 40 */
ret = -EOPNOTSUPP;
- } else if ((msg[0].addr == state->af9033_config[0].i2c_addr) ||
- (msg[0].addr == state->af9033_config[1].i2c_addr)) {
+ } else if ((msg[0].addr == state->af9033_i2c_addr[0]) ||
+ (msg[0].addr == state->af9033_i2c_addr[1]) ||
+ (state->chip_type == 0x9135)) {
/* demod access via firmware interface */
u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
msg[0].buf[2];
- if (msg[0].addr == state->af9033_config[1].i2c_addr)
+ if (msg[0].addr == state->af9033_i2c_addr[1] ||
+ msg[0].addr == (state->af9033_i2c_addr[1] >> 1))
reg |= 0x100000;
ret = af9035_rd_regs(d, reg, &msg[1].buf[0],
msg[1].len);
} else {
- /* I2C */
+ /* I2C write + read */
u8 buf[MAX_XFER_SIZE];
struct usb_req req = { CMD_I2C_RD, 0, 5 + msg[0].len,
buf, msg[1].len, msg[1].buf };
- if (5 + msg[0].len > sizeof(buf)) {
- dev_warn(&d->udev->dev,
- "%s: i2c xfer: len=%d is too big!\n",
- KBUILD_MODNAME, msg[0].len);
- ret = -EOPNOTSUPP;
- goto unlock;
+ if (state->chip_type == 0x9306) {
+ req.cmd = CMD_GENERIC_I2C_RD;
+ req.wlen = 3 + msg[0].len;
}
req.mbox |= ((msg[0].addr & 0x80) >> 3);
+
buf[0] = msg[1].len;
- buf[1] = msg[0].addr << 1;
- buf[2] = 0x00; /* reg addr len */
- buf[3] = 0x00; /* reg addr MSB */
- buf[4] = 0x00; /* reg addr LSB */
- memcpy(&buf[5], msg[0].buf, msg[0].len);
+ if (state->chip_type == 0x9306) {
+ buf[1] = 0x03; /* I2C bus */
+ buf[2] = msg[0].addr << 1;
+ memcpy(&buf[3], msg[0].buf, msg[0].len);
+ } else {
+ buf[1] = msg[0].addr << 1;
+ buf[2] = 0x00; /* reg addr len */
+ buf[3] = 0x00; /* reg addr MSB */
+ buf[4] = 0x00; /* reg addr LSB */
+ memcpy(&buf[5], msg[0].buf, msg[0].len);
+ }
ret = af9035_ctrl_msg(d, &req);
}
- } else if (num == 1 && !(msg[0].flags & I2C_M_RD)) {
+ } else if (AF9035_IS_I2C_XFER_WRITE(msg, num)) {
if (msg[0].len > 40) {
/* TODO: correct limits > 40 */
ret = -EOPNOTSUPP;
- } else if ((msg[0].addr == state->af9033_config[0].i2c_addr) ||
- (msg[0].addr == state->af9033_config[1].i2c_addr)) {
+ } else if ((msg[0].addr == state->af9033_i2c_addr[0]) ||
+ (msg[0].addr == state->af9033_i2c_addr[1]) ||
+ (state->chip_type == 0x9135)) {
/* demod access via firmware interface */
u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
msg[0].buf[2];
- if (msg[0].addr == state->af9033_config[1].i2c_addr)
+ if (msg[0].addr == state->af9033_i2c_addr[1] ||
+ msg[0].addr == (state->af9033_i2c_addr[1] >> 1))
reg |= 0x100000;
ret = af9035_wr_regs(d, reg, &msg[0].buf[3],
msg[0].len - 3);
} else {
- /* I2C */
+ /* I2C write */
u8 buf[MAX_XFER_SIZE];
struct usb_req req = { CMD_I2C_WR, 0, 5 + msg[0].len,
buf, 0, NULL };
- if (5 + msg[0].len > sizeof(buf)) {
- dev_warn(&d->udev->dev,
- "%s: i2c xfer: len=%d is too big!\n",
- KBUILD_MODNAME, msg[0].len);
- ret = -EOPNOTSUPP;
- goto unlock;
+ if (state->chip_type == 0x9306) {
+ req.cmd = CMD_GENERIC_I2C_WR;
+ req.wlen = 3 + msg[0].len;
}
+
req.mbox |= ((msg[0].addr & 0x80) >> 3);
buf[0] = msg[0].len;
- buf[1] = msg[0].addr << 1;
- buf[2] = 0x00; /* reg addr len */
- buf[3] = 0x00; /* reg addr MSB */
- buf[4] = 0x00; /* reg addr LSB */
- memcpy(&buf[5], msg[0].buf, msg[0].len);
+ if (state->chip_type == 0x9306) {
+ buf[1] = 0x03; /* I2C bus */
+ buf[2] = msg[0].addr << 1;
+ memcpy(&buf[3], msg[0].buf, msg[0].len);
+ } else {
+ buf[1] = msg[0].addr << 1;
+ buf[2] = 0x00; /* reg addr len */
+ buf[3] = 0x00; /* reg addr MSB */
+ buf[4] = 0x00; /* reg addr LSB */
+ memcpy(&buf[5], msg[0].buf, msg[0].len);
+ }
ret = af9035_ctrl_msg(d, &req);
}
- } else if (num == 1 && (msg[0].flags & I2C_M_RD)) {
+ } else if (AF9035_IS_I2C_XFER_READ(msg, num)) {
if (msg[0].len > 40) {
/* TODO: correct limits > 40 */
ret = -EOPNOTSUPP;
} else {
- /* I2C */
+ /* I2C read */
u8 buf[5];
struct usb_req req = { CMD_I2C_RD, 0, sizeof(buf),
- buf, msg[0].len, msg[0].buf };
+ buf, msg[0].len, msg[0].buf };
+
+ if (state->chip_type == 0x9306) {
+ req.cmd = CMD_GENERIC_I2C_RD;
+ req.wlen = 3;
+ }
req.mbox |= ((msg[0].addr & 0x80) >> 3);
buf[0] = msg[0].len;
- buf[1] = msg[0].addr << 1;
- buf[2] = 0x00; /* reg addr len */
- buf[3] = 0x00; /* reg addr MSB */
- buf[4] = 0x00; /* reg addr LSB */
+ if (state->chip_type == 0x9306) {
+ buf[1] = 0x03; /* I2C bus */
+ buf[2] = msg[0].addr << 1;
+ } else {
+ buf[1] = msg[0].addr << 1;
+ buf[2] = 0x00; /* reg addr len */
+ buf[3] = 0x00; /* reg addr MSB */
+ buf[4] = 0x00; /* reg addr LSB */
+ }
ret = af9035_ctrl_msg(d, &req);
}
} else {
/*
* We support only three kind of I2C transactions:
- * 1) 1 x read + 1 x write (repeated start)
+ * 1) 1 x write + 1 x read (repeated start)
* 2) 1 x write
* 3) 1 x read
*/
ret = -EOPNOTSUPP;
}
-unlock:
mutex_unlock(&d->i2c_mutex);
if (ret < 0)
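Note: the comment block in the hunk above describes two firmware I2C sub-header layouts - 5 bytes for AF9035/IT913x (addr, reg-addr length and a dummy register address) and 3 bytes for IT930x (bus selector plus addr). A small helper that only builds the write-buffer prefix makes the byte layout explicit; it is an illustrative reduction of the branches above, not driver code:

    #include <linux/string.h>
    #include <linux/types.h>

    /* chip9306 selects the 3-byte IT930x layout, otherwise the 5-byte one;
     * rdlen is non-zero for write+read transfers, len is the write payload */
    static int demo_fill_i2c_hdr(u8 *buf, bool chip9306, u8 addr,
    			     const u8 *data, u8 len, u8 rdlen)
    {
    	buf[0] = rdlen ? rdlen : len;	/* expected payload length */
    	if (chip9306) {
    		buf[1] = 0x03;		/* I2C bus selector */
    		buf[2] = addr << 1;
    		if (len)
    			memcpy(&buf[3], data, len);
    		return 3 + len;		/* wlen for the control request */
    	}
    	buf[1] = addr << 1;
    	buf[2] = 0x00;			/* reg addr len */
    	buf[3] = 0x00;			/* reg addr MSB */
    	buf[4] = 0x00;			/* reg addr LSB */
    	if (len)
    		memcpy(&buf[5], data, len);
    	return 5 + len;
    }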
@@ -371,6 +503,9 @@ static int af9035_identify_state(struct dvb_usb_device *d, const char **name)
else
*name = AF9035_FIRMWARE_IT9135_V1;
state->eeprom_addr = EEPROM_BASE_IT9135;
+ } else if (state->chip_type == 0x9306) {
+ *name = AF9035_FIRMWARE_IT9303;
+ state->eeprom_addr = EEPROM_BASE_IT9135;
} else {
*name = AF9035_FIRMWARE_AF9035;
state->eeprom_addr = EEPROM_BASE_AF9035;
@@ -536,6 +671,7 @@ static int af9035_download_firmware(struct dvb_usb_device *d,
u8 tmp;
struct usb_req req = { 0, 0, 0, NULL, 0, NULL };
struct usb_req req_fw_ver = { CMD_FW_QUERYINFO, 0, 1, wbuf, 4, rbuf };
+
dev_dbg(&d->udev->dev, "%s:\n", __func__);
/*
@@ -579,7 +715,8 @@ static int af9035_download_firmware(struct dvb_usb_device *d,
if (!tmp)
tmp = 0x3a;
- if (state->chip_type == 0x9135) {
+ if ((state->chip_type == 0x9135) ||
+ (state->chip_type == 0x9306)) {
ret = af9035_wr_reg(d, 0x004bfb, tmp);
if (ret < 0)
goto err;
@@ -640,23 +777,26 @@ static int af9035_read_config(struct dvb_usb_device *d)
u16 tmp16, addr;
/* demod I2C "address" */
- state->af9033_config[0].i2c_addr = 0x38;
- state->af9033_config[1].i2c_addr = 0x3a;
+ state->af9033_i2c_addr[0] = 0x38;
+ state->af9033_i2c_addr[1] = 0x3a;
state->af9033_config[0].adc_multiplier = AF9033_ADC_MULTIPLIER_2X;
state->af9033_config[1].adc_multiplier = AF9033_ADC_MULTIPLIER_2X;
state->af9033_config[0].ts_mode = AF9033_TS_MODE_USB;
state->af9033_config[1].ts_mode = AF9033_TS_MODE_SERIAL;
- /* eeprom memory mapped location */
if (state->chip_type == 0x9135) {
+ /* feed clock for integrated RF tuner */
+ state->af9033_config[0].dyn0_clk = true;
+ state->af9033_config[1].dyn0_clk = true;
+
if (state->chip_version == 0x02) {
state->af9033_config[0].tuner = AF9033_TUNER_IT9135_60;
state->af9033_config[1].tuner = AF9033_TUNER_IT9135_60;
- tmp16 = 0x00461d;
+ tmp16 = 0x00461d; /* eeprom memory mapped location */
} else {
state->af9033_config[0].tuner = AF9033_TUNER_IT9135_38;
state->af9033_config[1].tuner = AF9033_TUNER_IT9135_38;
- tmp16 = 0x00461b;
+ tmp16 = 0x00461b; /* eeprom memory mapped location */
}
/* check if eeprom exists */
@@ -668,8 +808,16 @@ static int af9035_read_config(struct dvb_usb_device *d)
dev_dbg(&d->udev->dev, "%s: no eeprom\n", __func__);
goto skip_eeprom;
}
+ } else if (state->chip_type == 0x9306) {
+ /*
+ * IT930x is an USB bridge, only single demod-single tuner
+ * configurations seen so far.
+ */
+ return 0;
}
+
+
/* check if there is dual tuners */
ret = af9035_rd_reg(d, state->eeprom_addr + EEPROM_TS_MODE, &tmp);
if (ret < 0)
@@ -690,7 +838,7 @@ static int af9035_read_config(struct dvb_usb_device *d)
goto err;
if (tmp)
- state->af9033_config[1].i2c_addr = tmp;
+ state->af9033_i2c_addr[1] = tmp;
dev_dbg(&d->udev->dev, "%s: 2nd demod I2C addr=%02x\n",
__func__, tmp);
@@ -799,25 +947,6 @@ static int af9035_read_config(struct dvb_usb_device *d)
addr += 0x10; /* shift for the 2nd tuner params */
}
- /*
- * These AVerMedia devices has a bad EEPROM content :-(
- * Override some wrong values here.
- */
- if (le16_to_cpu(d->udev->descriptor.idVendor) == USB_VID_AVERMEDIA) {
- switch (le16_to_cpu(d->udev->descriptor.idProduct)) {
- case USB_PID_AVERMEDIA_A835B_1835:
- case USB_PID_AVERMEDIA_A835B_2835:
- case USB_PID_AVERMEDIA_A835B_3835:
- dev_info(&d->udev->dev,
- "%s: overriding tuner from %02x to %02x\n",
- KBUILD_MODNAME, state->af9033_config[0].tuner,
- AF9033_TUNER_IT9135_60);
-
- state->af9033_config[0].tuner = AF9033_TUNER_IT9135_60;
- break;
- }
- }
-
skip_eeprom:
/* get demod clock */
ret = af9035_rd_reg(d, 0x00d800, &tmp);
@@ -990,6 +1119,7 @@ static int af9035_frontend_callback(void *adapter_priv, int component,
static int af9035_get_adapter_count(struct dvb_usb_device *d)
{
struct state *state = d_to_priv(d);
+
return state->dual_mode + 1;
}
@@ -998,7 +1128,8 @@ static int af9035_frontend_attach(struct dvb_usb_adapter *adap)
struct state *state = adap_to_priv(adap);
struct dvb_usb_device *d = adap_to_d(adap);
int ret;
- dev_dbg(&d->udev->dev, "%s:\n", __func__);
+
+ dev_dbg(&d->udev->dev, "%s: adap->id=%d\n", __func__, adap->id);
if (!state->af9033_config[adap->id].tuner) {
/* unsupported tuner */
@@ -1006,9 +1137,13 @@ static int af9035_frontend_attach(struct dvb_usb_adapter *adap)
goto err;
}
- /* attach demodulator */
- adap->fe[0] = dvb_attach(af9033_attach, &state->af9033_config[adap->id],
- &d->i2c_adap, &state->ops);
+ state->af9033_config[adap->id].fe = &adap->fe[0];
+ state->af9033_config[adap->id].ops = &state->ops;
+ ret = af9035_add_i2c_dev(d, "af9033", state->af9033_i2c_addr[adap->id],
+ &state->af9033_config[adap->id], &d->i2c_adap);
+ if (ret)
+ goto err;
+
if (adap->fe[0] == NULL) {
ret = -ENODEV;
goto err;
@@ -1026,6 +1161,78 @@ err:
return ret;
}
+static int it930x_frontend_attach(struct dvb_usb_adapter *adap)
+{
+ struct state *state = adap_to_priv(adap);
+ struct dvb_usb_device *d = adap_to_d(adap);
+ int ret;
+ struct si2168_config si2168_config;
+ struct i2c_adapter *adapter;
+
+ dev_dbg(&d->udev->dev, "adap->id=%d\n", adap->id);
+
+ si2168_config.i2c_adapter = &adapter;
+ si2168_config.fe = &adap->fe[0];
+ si2168_config.ts_mode = SI2168_TS_SERIAL;
+
+ state->af9033_config[adap->id].fe = &adap->fe[0];
+ state->af9033_config[adap->id].ops = &state->ops;
+ ret = af9035_add_i2c_dev(d, "si2168", 0x67, &si2168_config,
+ &d->i2c_adap);
+ if (ret)
+ goto err;
+
+ if (adap->fe[0] == NULL) {
+ ret = -ENODEV;
+ goto err;
+ }
+ state->i2c_adapter_demod = adapter;
+
+ return 0;
+
+err:
+ dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int af9035_frontend_detach(struct dvb_usb_adapter *adap)
+{
+ struct state *state = adap_to_priv(adap);
+ struct dvb_usb_device *d = adap_to_d(adap);
+ int demod2;
+
+ dev_dbg(&d->udev->dev, "%s: adap->id=%d\n", __func__, adap->id);
+
+ /*
+ * For dual tuner devices we have to resolve 2nd demod client, as there
+ * is two different kind of tuner drivers; one is using I2C binding
+ * and the other is using DVB attach/detach binding.
+ */
+ switch (state->af9033_config[adap->id].tuner) {
+ case AF9033_TUNER_IT9135_38:
+ case AF9033_TUNER_IT9135_51:
+ case AF9033_TUNER_IT9135_52:
+ case AF9033_TUNER_IT9135_60:
+ case AF9033_TUNER_IT9135_61:
+ case AF9033_TUNER_IT9135_62:
+ demod2 = 2;
+ break;
+ default:
+ demod2 = 1;
+ }
+
+ if (adap->id == 1) {
+ if (state->i2c_client[demod2])
+ af9035_del_i2c_dev(d);
+ } else if (adap->id == 0) {
+ if (state->i2c_client[0])
+ af9035_del_i2c_dev(d);
+ }
+
+ return 0;
+}
+
static struct tua9001_config af9035_tua9001_config = {
.i2c_addr = 0x60,
};
@@ -1084,7 +1291,8 @@ static int af9035_tuner_attach(struct dvb_usb_adapter *adap)
struct dvb_frontend *fe;
struct i2c_msg msg[1];
u8 tuner_addr;
- dev_dbg(&d->udev->dev, "%s:\n", __func__);
+
+ dev_dbg(&d->udev->dev, "%s: adap->id=%d\n", __func__, adap->id);
/*
* XXX: Hack used in that function: we abuse unused I2C address bit [7]
@@ -1243,14 +1451,53 @@ static int af9035_tuner_attach(struct dvb_usb_adapter *adap)
case AF9033_TUNER_IT9135_38:
case AF9033_TUNER_IT9135_51:
case AF9033_TUNER_IT9135_52:
+ {
+ struct it913x_config it913x_config = {
+ .fe = adap->fe[0],
+ .chip_ver = 1,
+ };
+
+ if (state->dual_mode) {
+ if (adap->id == 0)
+ it913x_config.role = IT913X_ROLE_DUAL_MASTER;
+ else
+ it913x_config.role = IT913X_ROLE_DUAL_SLAVE;
+ }
+
+ ret = af9035_add_i2c_dev(d, "it913x",
+ state->af9033_i2c_addr[adap->id] >> 1,
+ &it913x_config, &d->i2c_adap);
+ if (ret)
+ goto err;
+
+ fe = adap->fe[0];
+ break;
+ }
case AF9033_TUNER_IT9135_60:
case AF9033_TUNER_IT9135_61:
case AF9033_TUNER_IT9135_62:
- /* attach tuner */
- fe = dvb_attach(it913x_attach, adap->fe[0], &d->i2c_adap,
- state->af9033_config[adap->id].i2c_addr,
- state->af9033_config[0].tuner);
+ {
+ struct it913x_config it913x_config = {
+ .fe = adap->fe[0],
+ .chip_ver = 2,
+ };
+
+ if (state->dual_mode) {
+ if (adap->id == 0)
+ it913x_config.role = IT913X_ROLE_DUAL_MASTER;
+ else
+ it913x_config.role = IT913X_ROLE_DUAL_SLAVE;
+ }
+
+ ret = af9035_add_i2c_dev(d, "it913x",
+ state->af9033_i2c_addr[adap->id] >> 1,
+ &it913x_config, &d->i2c_adap);
+ if (ret)
+ goto err;
+
+ fe = adap->fe[0];
break;
+ }
default:
fe = NULL;
}
@@ -1268,6 +1515,119 @@ err:
return ret;
}
+static int it930x_tuner_attach(struct dvb_usb_adapter *adap)
+{
+ struct state *state = adap_to_priv(adap);
+ struct dvb_usb_device *d = adap_to_d(adap);
+ int ret;
+ struct si2157_config si2157_config;
+
+ dev_dbg(&d->udev->dev, "%s: adap->id=%d\n", __func__, adap->id);
+
+ /* I2C master bus 2 clock speed 300k */
+ ret = af9035_wr_reg(d, 0x00f6a7, 0x07);
+ if (ret < 0)
+ goto err;
+
+ /* I2C master bus 1,3 clock speed 300k */
+ ret = af9035_wr_reg(d, 0x00f103, 0x07);
+ if (ret < 0)
+ goto err;
+
+ /* set gpio11 low */
+ ret = af9035_wr_reg_mask(d, 0xd8d4, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg_mask(d, 0xd8d5, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg_mask(d, 0xd8d3, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+
+ /* Tuner enable using gpiot2_en, gpiot2_on and gpiot2_o (reset) */
+ ret = af9035_wr_reg_mask(d, 0xd8b8, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg_mask(d, 0xd8b9, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg_mask(d, 0xd8b7, 0x00, 0x01);
+ if (ret < 0)
+ goto err;
+
+ msleep(200);
+
+ ret = af9035_wr_reg_mask(d, 0xd8b7, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+
+ memset(&si2157_config, 0, sizeof(si2157_config));
+ si2157_config.fe = adap->fe[0];
+ ret = af9035_add_i2c_dev(d, "si2157", 0x63,
+ &si2157_config, state->i2c_adapter_demod);
+
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+
+static int it930x_tuner_detach(struct dvb_usb_adapter *adap)
+{
+ struct state *state = adap_to_priv(adap);
+ struct dvb_usb_device *d = adap_to_d(adap);
+
+ dev_dbg(&d->udev->dev, "adap->id=%d\n", adap->id);
+
+ if (adap->id == 1) {
+ if (state->i2c_client[3])
+ af9035_del_i2c_dev(d);
+ } else if (adap->id == 0) {
+ if (state->i2c_client[1])
+ af9035_del_i2c_dev(d);
+ }
+
+ return 0;
+}
+
+
+static int af9035_tuner_detach(struct dvb_usb_adapter *adap)
+{
+ struct state *state = adap_to_priv(adap);
+ struct dvb_usb_device *d = adap_to_d(adap);
+
+ dev_dbg(&d->udev->dev, "%s: adap->id=%d\n", __func__, adap->id);
+
+ switch (state->af9033_config[adap->id].tuner) {
+ case AF9033_TUNER_IT9135_38:
+ case AF9033_TUNER_IT9135_51:
+ case AF9033_TUNER_IT9135_52:
+ case AF9033_TUNER_IT9135_60:
+ case AF9033_TUNER_IT9135_61:
+ case AF9033_TUNER_IT9135_62:
+ if (adap->id == 1) {
+ if (state->i2c_client[3])
+ af9035_del_i2c_dev(d);
+ } else if (adap->id == 0) {
+ if (state->i2c_client[1])
+ af9035_del_i2c_dev(d);
+ }
+ }
+
+ return 0;
+}
+
static int af9035_init(struct dvb_usb_device *d)
{
struct state *state = d_to_priv(d);
@@ -1315,6 +1675,89 @@ err:
return ret;
}
+static int it930x_init(struct dvb_usb_device *d)
+{
+ struct state *state = d_to_priv(d);
+ int ret, i;
+ u16 frame_size = (d->udev->speed == USB_SPEED_FULL ? 5 : 816) * 188 / 4;
+ u8 packet_size = (d->udev->speed == USB_SPEED_FULL ? 64 : 512) / 4;
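+ /*
+ * At USB high speed this gives frame_size = 816 * 188 / 4 = 38352
+ * (0x95d0) and packet_size = 512 / 4 = 128 (0x80); at full speed
+ * frame_size = 5 * 188 / 4 = 235 and packet_size = 64 / 4 = 16.
+ * The 16-bit frame size is written as two bytes (0xdd88/0xdd89 and
+ * 0xdd8a/0xdd8b) in the table below.
+ */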
+ struct reg_val_mask tab[] = {
+ { 0x00da1a, 0x00, 0x01 }, /* ignore_sync_byte */
+ { 0x00f41f, 0x04, 0x04 }, /* dvbt_inten */
+ { 0x00da10, 0x00, 0x01 }, /* mpeg_full_speed */
+ { 0x00f41a, 0x01, 0x01 }, /* dvbt_en */
+ { 0x00da1d, 0x01, 0x01 }, /* mp2_sw_rst, reset EP4 */
+ { 0x00dd11, 0x00, 0x20 }, /* ep4_tx_en, disable EP4 */
+ { 0x00dd13, 0x00, 0x20 }, /* ep4_tx_nak, disable EP4 NAK */
+ { 0x00dd11, 0x20, 0x20 }, /* ep4_tx_en, enable EP4 */
+ { 0x00dd11, 0x00, 0x40 }, /* ep5_tx_en, disable EP5 */
+ { 0x00dd13, 0x00, 0x40 }, /* ep5_tx_nak, disable EP5 NAK */
+ { 0x00dd11, state->dual_mode << 6, 0x40 }, /* enable EP5 */
+ { 0x00dd88, (frame_size >> 0) & 0xff, 0xff},
+ { 0x00dd89, (frame_size >> 8) & 0xff, 0xff},
+ { 0x00dd0c, packet_size, 0xff},
+ { 0x00dd8a, (frame_size >> 0) & 0xff, 0xff},
+ { 0x00dd8b, (frame_size >> 8) & 0xff, 0xff},
+ { 0x00dd0d, packet_size, 0xff },
+ { 0x00da1d, 0x00, 0x01 }, /* mp2_sw_rst, disable */
+ { 0x00d833, 0x01, 0xff }, /* slew rate ctrl: slew rate boosts */
+ { 0x00d830, 0x00, 0xff }, /* Bit 0 of output driving control */
+ { 0x00d831, 0x01, 0xff }, /* Bit 1 of output driving control */
+ { 0x00d832, 0x00, 0xff }, /* Bit 2 of output driving control */
+
+ /* suspend gpio1 for TS-C */
+ { 0x00d8b0, 0x01, 0xff }, /* gpio1 */
+ { 0x00d8b1, 0x01, 0xff }, /* gpio1 */
+ { 0x00d8af, 0x00, 0xff }, /* gpio1 */
+
+ /* suspend gpio7 for TS-D */
+ { 0x00d8c4, 0x01, 0xff }, /* gpio7 */
+ { 0x00d8c5, 0x01, 0xff }, /* gpio7 */
+ { 0x00d8c3, 0x00, 0xff }, /* gpio7 */
+
+ /* suspend gpio13 for TS-B */
+ { 0x00d8dc, 0x01, 0xff }, /* gpio13 */
+ { 0x00d8dd, 0x01, 0xff }, /* gpio13 */
+ { 0x00d8db, 0x00, 0xff }, /* gpio13 */
+
+ /* suspend gpio14 for TS-E */
+ { 0x00d8e4, 0x01, 0xff }, /* gpio14 */
+ { 0x00d8e5, 0x01, 0xff }, /* gpio14 */
+ { 0x00d8e3, 0x00, 0xff }, /* gpio14 */
+
+ /* suspend gpio15 for TS-A */
+ { 0x00d8e8, 0x01, 0xff }, /* gpio15 */
+ { 0x00d8e9, 0x01, 0xff }, /* gpio15 */
+ { 0x00d8e7, 0x00, 0xff }, /* gpio15 */
+
+ { 0x00da58, 0x00, 0x01 }, /* ts_in_src, serial */
+ { 0x00da73, 0x01, 0xff }, /* ts0_aggre_mode */
+ { 0x00da78, 0x47, 0xff }, /* ts0_sync_byte */
+ { 0x00da4c, 0x01, 0xff }, /* ts0_en */
+ { 0x00da5a, 0x1f, 0xff }, /* ts_fail_ignore */
+ };
+
+ dev_dbg(&d->udev->dev,
+ "%s: USB speed=%d frame_size=%04x packet_size=%02x\n",
+ __func__, d->udev->speed, frame_size, packet_size);
+
+ /* init endpoints */
+ for (i = 0; i < ARRAY_SIZE(tab); i++) {
+ ret = af9035_wr_reg_mask(d, tab[i].reg,
+ tab[i].val, tab[i].mask);
+
+ if (ret < 0)
+ goto err;
+ }
+
+ return 0;
+err:
+ dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+
#if IS_ENABLED(CONFIG_RC_CORE)
static int af9035_rc_query(struct dvb_usb_device *d)
{
@@ -1409,6 +1852,7 @@ static int af9035_get_stream_config(struct dvb_frontend *fe, u8 *ts_type,
struct usb_data_stream_properties *stream)
{
struct dvb_usb_device *d = fe_to_d(fe);
+
dev_dbg(&d->udev->dev, "%s: adap=%d\n", __func__, fe_to_adap(fe)->id);
if (d->udev->speed == USB_SPEED_FULL)
@@ -1486,7 +1930,9 @@ static const struct dvb_usb_device_properties af9035_props = {
.i2c_algo = &af9035_i2c_algo,
.read_config = af9035_read_config,
.frontend_attach = af9035_frontend_attach,
+ .frontend_detach = af9035_frontend_detach,
.tuner_attach = af9035_tuner_attach,
+ .tuner_detach = af9035_tuner_detach,
.init = af9035_init,
.get_rc_config = af9035_get_rc_config,
.get_stream_config = af9035_get_stream_config,
@@ -1515,6 +1961,37 @@ static const struct dvb_usb_device_properties af9035_props = {
},
};
+static const struct dvb_usb_device_properties it930x_props = {
+ .driver_name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ .adapter_nr = adapter_nr,
+ .size_of_priv = sizeof(struct state),
+
+ .generic_bulk_ctrl_endpoint = 0x02,
+ .generic_bulk_ctrl_endpoint_response = 0x81,
+
+ .identify_state = af9035_identify_state,
+ .download_firmware = af9035_download_firmware,
+
+ .i2c_algo = &af9035_i2c_algo,
+ .read_config = af9035_read_config,
+ .frontend_attach = it930x_frontend_attach,
+ .frontend_detach = af9035_frontend_detach,
+ .tuner_attach = it930x_tuner_attach,
+ .tuner_detach = it930x_tuner_detach,
+ .init = it930x_init,
+ .get_stream_config = af9035_get_stream_config,
+
+ .get_adapter_count = af9035_get_adapter_count,
+ .adapter = {
+ {
+ .stream = DVB_USB_STREAM_BULK(0x84, 4, 816 * 188),
+ }, {
+ .stream = DVB_USB_STREAM_BULK(0x85, 4, 816 * 188),
+ },
+ },
+};
+
static const struct usb_device_id af9035_id_table[] = {
/* AF9035 devices */
{ DVB_USB_DEVICE(USB_VID_AFATECH, USB_PID_AFATECH_AF9035_9035,
@@ -1568,17 +2045,21 @@ static const struct usb_device_id af9035_id_table[] = {
{ DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_CTVDIGDUAL_V2,
&af9035_props, "Digital Dual TV Receiver CTVDIGDUAL_V2",
RC_MAP_IT913X_V1) },
+ /* IT930x devices */
+ { DVB_USB_DEVICE(USB_VID_ITETECH, USB_PID_ITETECH_IT9303,
+ &it930x_props, "ITE 9303 Generic", NULL) },
/* XXX: that same ID [0ccd:0099] is used by af9015 driver too */
{ DVB_USB_DEVICE(USB_VID_TERRATEC, 0x0099,
- &af9035_props, "TerraTec Cinergy T Stick Dual RC (rev. 2)", NULL) },
+ &af9035_props, "TerraTec Cinergy T Stick Dual RC (rev. 2)",
+ NULL) },
{ DVB_USB_DEVICE(USB_VID_LEADTEK, 0x6a05,
&af9035_props, "Leadtek WinFast DTV Dongle Dual", NULL) },
{ DVB_USB_DEVICE(USB_VID_HAUPPAUGE, 0xf900,
&af9035_props, "Hauppauge WinTV-MiniStick 2", NULL) },
{ DVB_USB_DEVICE(USB_VID_PCTV, USB_PID_PCTV_78E,
- &af9035_props, "PCTV 78e", RC_MAP_IT913X_V1) },
+ &af9035_props, "PCTV AndroiDTV (78e)", RC_MAP_IT913X_V1) },
{ DVB_USB_DEVICE(USB_VID_PCTV, USB_PID_PCTV_79E,
- &af9035_props, "PCTV 79e", RC_MAP_IT913X_V2) },
+ &af9035_props, "PCTV microStick (79e)", RC_MAP_IT913X_V2) },
{ }
};
MODULE_DEVICE_TABLE(usb, af9035_id_table);
@@ -1603,3 +2084,4 @@ MODULE_LICENSE("GPL");
MODULE_FIRMWARE(AF9035_FIRMWARE_AF9035);
MODULE_FIRMWARE(AF9035_FIRMWARE_IT9135_V1);
MODULE_FIRMWARE(AF9035_FIRMWARE_IT9135_V2);
+MODULE_FIRMWARE(AF9035_FIRMWARE_IT9303);
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.h b/drivers/media/usb/dvb-usb-v2/af9035.h
index c21902fdd4c4..416a97f05ec8 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.h
+++ b/drivers/media/usb/dvb-usb-v2/af9035.h
@@ -30,7 +30,9 @@
#include "mxl5007t.h"
#include "tda18218.h"
#include "fc2580.h"
-#include "tuner_it913x.h"
+#include "it913x.h"
+#include "si2168.h"
+#include "si2157.h"
struct reg_val {
u32 reg;
@@ -61,9 +63,12 @@ struct state {
u16 chip_type;
u8 dual_mode:1;
u16 eeprom_addr;
+ u8 af9033_i2c_addr[2];
struct af9033_config af9033_config[2];
-
struct af9033_ops ops;
+ #define AF9035_I2C_CLIENT_MAX 4
+ struct i2c_client *i2c_client[AF9035_I2C_CLIENT_MAX];
+ struct i2c_adapter *i2c_adapter_demod;
};
static const u32 clock_lut_af9035[] = {
@@ -97,6 +102,7 @@ static const u32 clock_lut_it9135[] = {
#define AF9035_FIRMWARE_AF9035 "dvb-usb-af9035-02.fw"
#define AF9035_FIRMWARE_IT9135_V1 "dvb-usb-it9135-01.fw"
#define AF9035_FIRMWARE_IT9135_V2 "dvb-usb-it9135-02.fw"
+#define AF9035_FIRMWARE_IT9303 "dvb-usb-it9303-01.fw"
/*
* eeprom is memory mapped as read only. Writing that memory mapped address
@@ -138,5 +144,7 @@ static const u32 clock_lut_it9135[] = {
#define CMD_FW_DL_BEGIN 0x24
#define CMD_FW_DL_END 0x25
#define CMD_FW_SCATTER_WR 0x29
+#define CMD_GENERIC_I2C_RD 0x2a
+#define CMD_GENERIC_I2C_WR 0x2b
#endif
diff --git a/drivers/media/usb/dvb-usb-v2/anysee.c b/drivers/media/usb/dvb-usb-v2/anysee.c
index e4a2382196f0..d3c5f230e97a 100644
--- a/drivers/media/usb/dvb-usb-v2/anysee.c
+++ b/drivers/media/usb/dvb-usb-v2/anysee.c
@@ -332,7 +332,6 @@ static struct tda10023_config anysee_tda10023_tda18212_config = {
};
static struct tda18212_config anysee_tda18212_config = {
- .i2c_address = (0xc0 >> 1),
.if_dvbt_6 = 4150,
.if_dvbt_7 = 4150,
.if_dvbt_8 = 4150,
@@ -340,7 +339,6 @@ static struct tda18212_config anysee_tda18212_config = {
};
static struct tda18212_config anysee_tda18212_config2 = {
- .i2c_address = 0x60 /* (0xc0 >> 1) */,
.if_dvbt_6 = 3550,
.if_dvbt_7 = 3700,
.if_dvbt_8 = 4150,
@@ -632,6 +630,92 @@ error:
return ret;
}
+static int anysee_add_i2c_dev(struct dvb_usb_device *d, char *type, u8 addr,
+ void *platform_data)
+{
+ int ret, num;
+ struct anysee_state *state = d_to_priv(d);
+ struct i2c_client *client;
+ struct i2c_adapter *adapter = &d->i2c_adap;
+ struct i2c_board_info board_info = {
+ .addr = addr,
+ .platform_data = platform_data,
+ };
+
+ strlcpy(board_info.type, type, I2C_NAME_SIZE);
+
+ /* find first free client */
+ for (num = 0; num < ANYSEE_I2C_CLIENT_MAX; num++) {
+ if (state->i2c_client[num] == NULL)
+ break;
+ }
+
+ dev_dbg(&d->udev->dev, "%s: num=%d\n", __func__, num);
+
+ if (num == ANYSEE_I2C_CLIENT_MAX) {
+ dev_err(&d->udev->dev, "%s: I2C client out of index\n",
+ KBUILD_MODNAME);
+ ret = -ENODEV;
+ goto err;
+ }
+
+ request_module(board_info.type);
+
+ /* register I2C device */
+ client = i2c_new_device(adapter, &board_info);
+ if (client == NULL || client->dev.driver == NULL) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ /* increase I2C driver usage count */
+ if (!try_module_get(client->dev.driver->owner)) {
+ i2c_unregister_device(client);
+ ret = -ENODEV;
+ goto err;
+ }
+
+ state->i2c_client[num] = client;
+ return 0;
+err:
+ dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
+ return ret;
+}
+
+static void anysee_del_i2c_dev(struct dvb_usb_device *d)
+{
+ int num;
+ struct anysee_state *state = d_to_priv(d);
+ struct i2c_client *client;
+
+ /* find last used client */
+ num = ANYSEE_I2C_CLIENT_MAX;
+ while (num--) {
+ if (state->i2c_client[num] != NULL)
+ break;
+ }
+
+ dev_dbg(&d->udev->dev, "%s: num=%d\n", __func__, num);
+
+ if (num == -1) {
+ dev_err(&d->udev->dev, "%s: I2C client out of index\n",
+ KBUILD_MODNAME);
+ goto err;
+ }
+
+ client = state->i2c_client[num];
+
+ /* decrease I2C driver usage count */
+ module_put(client->dev.driver->owner);
+
+ /* unregister I2C device */
+ i2c_unregister_device(client);
+
+ state->i2c_client[num] = NULL;
+ return;
+err:
+ dev_dbg(&d->udev->dev, "%s: failed\n", __func__);
+}
+
static int anysee_frontend_attach(struct dvb_usb_adapter *adap)
{
struct anysee_state *state = adap_to_priv(adap);
@@ -640,12 +724,12 @@ static int anysee_frontend_attach(struct dvb_usb_adapter *adap)
u8 tmp;
struct i2c_msg msg[2] = {
{
- .addr = anysee_tda18212_config.i2c_address,
+ .addr = 0x60,
.flags = 0,
.len = 1,
.buf = "\x00",
}, {
- .addr = anysee_tda18212_config.i2c_address,
+ .addr = 0x60,
.flags = I2C_M_RD,
.len = 1,
.buf = &tmp,
@@ -723,9 +807,11 @@ static int anysee_frontend_attach(struct dvb_usb_adapter *adap)
/* probe TDA18212 */
tmp = 0;
ret = i2c_transfer(&d->i2c_adap, msg, 2);
- if (ret == 2 && tmp == 0xc7)
+ if (ret == 2 && tmp == 0xc7) {
dev_dbg(&d->udev->dev, "%s: TDA18212 found\n",
__func__);
+ state->has_tda18212 = true;
+ }
else
tmp = 0;
@@ -939,46 +1025,63 @@ static int anysee_tuner_attach(struct dvb_usb_adapter *adap)
* fails attach old simple PLL. */
/* attach tuner */
- fe = dvb_attach(tda18212_attach, adap->fe[0], &d->i2c_adap,
- &anysee_tda18212_config);
+ if (state->has_tda18212) {
+ struct tda18212_config tda18212_config =
+ anysee_tda18212_config;
- if (fe && adap->fe[1]) {
- /* attach tuner for 2nd FE */
- fe = dvb_attach(tda18212_attach, adap->fe[1],
- &d->i2c_adap, &anysee_tda18212_config);
- break;
- } else if (fe) {
- break;
- }
-
- /* attach tuner */
- fe = dvb_attach(dvb_pll_attach, adap->fe[0], (0xc0 >> 1),
- &d->i2c_adap, DVB_PLL_SAMSUNG_DTOS403IH102A);
+ tda18212_config.fe = adap->fe[0];
+ ret = anysee_add_i2c_dev(d, "tda18212", 0x60,
+ &tda18212_config);
+ if (ret)
+ goto err;
+
+ /* copy tuner ops for 2nd FE as tuner is shared */
+ if (adap->fe[1]) {
+ adap->fe[1]->tuner_priv =
+ adap->fe[0]->tuner_priv;
+ memcpy(&adap->fe[1]->ops.tuner_ops,
+ &adap->fe[0]->ops.tuner_ops,
+ sizeof(struct dvb_tuner_ops));
+ }
- if (fe && adap->fe[1]) {
- /* attach tuner for 2nd FE */
- fe = dvb_attach(dvb_pll_attach, adap->fe[1],
+ return 0;
+ } else {
+ /* attach tuner */
+ fe = dvb_attach(dvb_pll_attach, adap->fe[0],
(0xc0 >> 1), &d->i2c_adap,
DVB_PLL_SAMSUNG_DTOS403IH102A);
+
+ if (fe && adap->fe[1]) {
+ /* attach tuner for 2nd FE */
+ fe = dvb_attach(dvb_pll_attach, adap->fe[1],
+ (0xc0 >> 1), &d->i2c_adap,
+ DVB_PLL_SAMSUNG_DTOS403IH102A);
+ }
}
break;
case ANYSEE_HW_508TC: /* 18 */
case ANYSEE_HW_508PTC: /* 21 */
+ {
/* E7 TC */
/* E7 PTC */
+ struct tda18212_config tda18212_config = anysee_tda18212_config;
- /* attach tuner */
- fe = dvb_attach(tda18212_attach, adap->fe[0], &d->i2c_adap,
- &anysee_tda18212_config);
-
- if (fe) {
- /* attach tuner for 2nd FE */
- fe = dvb_attach(tda18212_attach, adap->fe[1],
- &d->i2c_adap, &anysee_tda18212_config);
+ tda18212_config.fe = adap->fe[0];
+ ret = anysee_add_i2c_dev(d, "tda18212", 0x60, &tda18212_config);
+ if (ret)
+ goto err;
+
+ /* copy tuner ops for 2nd FE as tuner is shared */
+ if (adap->fe[1]) {
+ adap->fe[1]->tuner_priv = adap->fe[0]->tuner_priv;
+ memcpy(&adap->fe[1]->ops.tuner_ops,
+ &adap->fe[0]->ops.tuner_ops,
+ sizeof(struct dvb_tuner_ops));
}
- break;
+ return 0;
+ }
case ANYSEE_HW_508S2: /* 19 */
case ANYSEE_HW_508PS2: /* 22 */
/* E7 S2 */
@@ -997,13 +1100,18 @@ static int anysee_tuner_attach(struct dvb_usb_adapter *adap)
break;
case ANYSEE_HW_508T2C: /* 20 */
+ {
/* E7 T2C */
+ struct tda18212_config tda18212_config =
+ anysee_tda18212_config2;
- /* attach tuner */
- fe = dvb_attach(tda18212_attach, adap->fe[0], &d->i2c_adap,
- &anysee_tda18212_config2);
+ tda18212_config.fe = adap->fe[0];
+ ret = anysee_add_i2c_dev(d, "tda18212", 0x60, &tda18212_config);
+ if (ret)
+ goto err;
- break;
+ return 0;
+ }
default:
fe = NULL;
}
@@ -1012,7 +1120,7 @@ static int anysee_tuner_attach(struct dvb_usb_adapter *adap)
ret = 0;
else
ret = -ENODEV;
-
+err:
return ret;
}
@@ -1270,6 +1378,11 @@ static int anysee_init(struct dvb_usb_device *d)
static void anysee_exit(struct dvb_usb_device *d)
{
+ struct anysee_state *state = d_to_priv(d);
+
+ if (state->i2c_client[0])
+ anysee_del_i2c_dev(d);
+
return anysee_ci_release(d);
}
diff --git a/drivers/media/usb/dvb-usb-v2/anysee.h b/drivers/media/usb/dvb-usb-v2/anysee.h
index 8f426d9fc6e1..3ca2bca4ebaf 100644
--- a/drivers/media/usb/dvb-usb-v2/anysee.h
+++ b/drivers/media/usb/dvb-usb-v2/anysee.h
@@ -55,8 +55,11 @@ struct anysee_state {
u8 buf[64];
u8 seq;
u8 hw; /* PCB ID */
+ #define ANYSEE_I2C_CLIENT_MAX 1
+ struct i2c_client *i2c_client[ANYSEE_I2C_CLIENT_MAX];
u8 fe_id:1; /* frontend ID */
u8 has_ci:1;
+ u8 has_tda18212:1;
u8 ci_attached:1;
struct dvb_ca_en50221 ci;
unsigned long ci_cam_ready; /* jiffies */
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb.h b/drivers/media/usb/dvb-usb-v2/dvb_usb.h
index 124b4baa7e97..14e111e13e54 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb.h
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb.h
@@ -214,6 +214,7 @@ struct dvb_usb_adapter_properties {
* @read_config: called to resolve device configuration
* @read_mac_address: called to resolve adapter mac-address
* @frontend_attach: called to attach the possible frontends
+ * @frontend_detach: called to detach the possible frontends
* @tuner_attach: called to attach the possible tuners
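+ * @tuner_detach: called to detach the possible tuners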
* @frontend_ctrl: called to power on/off active frontend
* @streaming_ctrl: called to start/stop the usb streaming of adapter
@@ -254,7 +255,9 @@ struct dvb_usb_device_properties {
int (*read_config) (struct dvb_usb_device *d);
int (*read_mac_address) (struct dvb_usb_adapter *, u8 []);
int (*frontend_attach) (struct dvb_usb_adapter *);
+ int (*frontend_detach)(struct dvb_usb_adapter *);
int (*tuner_attach) (struct dvb_usb_adapter *);
+ int (*tuner_detach)(struct dvb_usb_adapter *);
int (*frontend_ctrl) (struct dvb_frontend *, int);
int (*streaming_ctrl) (struct dvb_frontend *, int);
int (*init) (struct dvb_usb_device *);
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
index 2e90310be2af..1950f37df835 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
@@ -21,7 +21,7 @@
#include "dvb_usb_common.h"
-int dvb_usbv2_disable_rc_polling;
+static int dvb_usbv2_disable_rc_polling;
module_param_named(disable_rc_polling, dvb_usbv2_disable_rc_polling, int, 0644);
MODULE_PARM_DESC(disable_rc_polling,
"disable remote control polling (default: 0)");
@@ -664,9 +664,10 @@ err:
static int dvb_usbv2_adapter_frontend_exit(struct dvb_usb_adapter *adap)
{
- int i;
- dev_dbg(&adap_to_d(adap)->udev->dev, "%s: adap=%d\n", __func__,
- adap->id);
+ int ret, i;
+ struct dvb_usb_device *d = adap_to_d(adap);
+
+ dev_dbg(&d->udev->dev, "%s: adap=%d\n", __func__, adap->id);
for (i = MAX_NO_OF_FE_PER_ADAP - 1; i >= 0; i--) {
if (adap->fe[i]) {
@@ -675,6 +676,23 @@ static int dvb_usbv2_adapter_frontend_exit(struct dvb_usb_adapter *adap)
}
}
+ if (d->props->tuner_detach) {
+ ret = d->props->tuner_detach(adap);
+ if (ret < 0) {
+ dev_dbg(&d->udev->dev, "%s: tuner_detach() failed=%d\n",
+ __func__, ret);
+ }
+ }
+
+ if (d->props->frontend_detach) {
+ ret = d->props->frontend_detach(adap);
+ if (ret < 0) {
+ dev_dbg(&d->udev->dev,
+ "%s: frontend_detach() failed=%d\n",
+ __func__, ret);
+ }
+ }
+
return 0;
}
@@ -762,9 +780,9 @@ static int dvb_usbv2_adapter_exit(struct dvb_usb_device *d)
for (i = MAX_NO_OF_ADAPTER_PER_DEVICE - 1; i >= 0; i--) {
if (d->adapter[i].props) {
- dvb_usbv2_adapter_frontend_exit(&d->adapter[i]);
dvb_usbv2_adapter_dvb_exit(&d->adapter[i]);
dvb_usbv2_adapter_stream_exit(&d->adapter[i]);
+ dvb_usbv2_adapter_frontend_exit(&d->adapter[i]);
}
}
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_urb.c b/drivers/media/usb/dvb-usb-v2/dvb_usb_urb.c
index 33ff97e708e3..22bdce15ecf3 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb_urb.c
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_urb.c
@@ -26,7 +26,7 @@ static int dvb_usb_v2_generic_io(struct dvb_usb_device *d,
{
int ret, actual_length;
- if (!d || !wbuf || !wlen || !d->props->generic_bulk_ctrl_endpoint ||
+ if (!wbuf || !wlen || !d->props->generic_bulk_ctrl_endpoint ||
!d->props->generic_bulk_ctrl_endpoint_response) {
dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, -EINVAL);
return -EINVAL;
diff --git a/drivers/media/usb/dvb-usb-v2/dvbsky.c b/drivers/media/usb/dvb-usb-v2/dvbsky.c
new file mode 100644
index 000000000000..34688c89df11
--- /dev/null
+++ b/drivers/media/usb/dvb-usb-v2/dvbsky.c
@@ -0,0 +1,460 @@
+/*
+ * Driver for DVBSky USB2.0 receiver
+ *
+ * Copyright (C) 2013 Max nibble <nibble.max@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "dvb_usb.h"
+#include "m88ds3103.h"
+#include "m88ts2022.h"
+
+#define DVBSKY_MSG_DELAY 0/*2000*/
+#define DVBSKY_BUF_LEN 64
+
+DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
+
+struct dvbsky_state {
+ struct mutex stream_mutex;
+ u8 ibuf[DVBSKY_BUF_LEN];
+ u8 obuf[DVBSKY_BUF_LEN];
+ u8 last_lock;
+ struct i2c_client *i2c_client_tuner;
+
+ /* fe hook functions */
+ int (*fe_set_voltage)(struct dvb_frontend *fe,
+ fe_sec_voltage_t voltage);
+ int (*fe_read_status)(struct dvb_frontend *fe,
+ fe_status_t *status);
+};
+
+static int dvbsky_usb_generic_rw(struct dvb_usb_device *d,
+ u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
+{
+ int ret;
+ struct dvbsky_state *state = d_to_priv(d);
+
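+ /* stage the transfer through the driver-owned buffers, serialized by
+ * usb_mutex, rather than using the caller's buffers directly */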
+ mutex_lock(&d->usb_mutex);
+ if (wlen != 0)
+ memcpy(state->obuf, wbuf, wlen);
+
+ ret = dvb_usbv2_generic_rw_locked(d, state->obuf, wlen,
+ state->ibuf, rlen);
+
+ if (!ret && (rlen != 0))
+ memcpy(rbuf, state->ibuf, rlen);
+
+ mutex_unlock(&d->usb_mutex);
+ return ret;
+}
+
+static int dvbsky_stream_ctrl(struct dvb_usb_device *d, u8 onoff)
+{
+ struct dvbsky_state *state = d_to_priv(d);
+ int ret;
+ u8 obuf_pre[3] = { 0x37, 0, 0 };
+ u8 obuf_post[3] = { 0x36, 3, 0 };
+
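+ /* the 0x37 command is sent unconditionally; the 0x36 command only
+ * when the stream is being enabled */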
+ mutex_lock(&state->stream_mutex);
+ ret = dvbsky_usb_generic_rw(d, obuf_pre, 3, NULL, 0);
+ if (!ret && onoff) {
+ msleep(20);
+ ret = dvbsky_usb_generic_rw(d, obuf_post, 3, NULL, 0);
+ }
+ mutex_unlock(&state->stream_mutex);
+ return ret;
+}
+
+static int dvbsky_streaming_ctrl(struct dvb_frontend *fe, int onoff)
+{
+ struct dvb_usb_device *d = fe_to_d(fe);
+
+ return dvbsky_stream_ctrl(d, (onoff == 0) ? 0 : 1);
+}
+
+/* GPIO */
+static int dvbsky_gpio_ctrl(struct dvb_usb_device *d, u8 gport, u8 value)
+{
+ int ret;
+ u8 obuf[3], ibuf[2];
+
+ obuf[0] = 0x0e;
+ obuf[1] = gport;
+ obuf[2] = value;
+ ret = dvbsky_usb_generic_rw(d, obuf, 3, ibuf, 1);
+ if (ret)
+ dev_err(&d->udev->dev, "%s: %s() failed=%d\n",
+ KBUILD_MODNAME, __func__, ret);
+ return ret;
+}
+
+/* I2C */
+static int dvbsky_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ int num)
+{
+ struct dvb_usb_device *d = i2c_get_adapdata(adap);
+ int ret = 0;
+ u8 ibuf[64], obuf[64];
+
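+ /*
+ * Command framing used below: 0x08 = I2C write (addr, len, data...),
+ * 0x09 = I2C read or write-then-read (wlen, rlen, addr, then any
+ * write data). The first byte of the response is skipped; read
+ * payload starts at ibuf[1].
+ */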
+ if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
+ return -EAGAIN;
+
+ if (num > 2) {
+ dev_err(&d->udev->dev,
+ "dvbsky_usb: too many i2c messages[%d] than 2.", num);
+ ret = -EOPNOTSUPP;
+ goto i2c_error;
+ }
+
+ if (num == 1) {
+ if (msg[0].len > 60) {
+ dev_err(&d->udev->dev,
+ "dvbsky_usb: too many i2c bytes[%d] than 60.",
+ msg[0].len);
+ ret = -EOPNOTSUPP;
+ goto i2c_error;
+ }
+ if (msg[0].flags & I2C_M_RD) {
+ /* single read */
+ obuf[0] = 0x09;
+ obuf[1] = 0;
+ obuf[2] = msg[0].len;
+ obuf[3] = msg[0].addr;
+ ret = dvbsky_usb_generic_rw(d, obuf, 4,
+ ibuf, msg[0].len + 1);
+ if (ret)
+ dev_err(&d->udev->dev, "%s: %s() failed=%d\n",
+ KBUILD_MODNAME, __func__, ret);
+ if (!ret)
+ memcpy(msg[0].buf, &ibuf[1], msg[0].len);
+ } else {
+ /* write */
+ obuf[0] = 0x08;
+ obuf[1] = msg[0].addr;
+ obuf[2] = msg[0].len;
+ memcpy(&obuf[3], msg[0].buf, msg[0].len);
+ ret = dvbsky_usb_generic_rw(d, obuf,
+ msg[0].len + 3, ibuf, 1);
+ if (ret)
+ dev_err(&d->udev->dev, "%s: %s() failed=%d\n",
+ KBUILD_MODNAME, __func__, ret);
+ }
+ } else {
+ if ((msg[0].len > 60) || (msg[1].len > 60)) {
+ dev_err(&d->udev->dev,
+ "dvbsky_usb: too many i2c bytes[w-%d][r-%d] than 60.",
+ msg[0].len, msg[1].len);
+ ret = -EOPNOTSUPP;
+ goto i2c_error;
+ }
+ /* write then read */
+ obuf[0] = 0x09;
+ obuf[1] = msg[0].len;
+ obuf[2] = msg[1].len;
+ obuf[3] = msg[0].addr;
+ memcpy(&obuf[4], msg[0].buf, msg[0].len);
+ ret = dvbsky_usb_generic_rw(d, obuf,
+ msg[0].len + 4, ibuf, msg[1].len + 1);
+ if (ret)
+ dev_err(&d->udev->dev, "%s: %s() failed=%d\n",
+ KBUILD_MODNAME, __func__, ret);
+
+ if (!ret)
+ memcpy(msg[1].buf, &ibuf[1], msg[1].len);
+ }
+i2c_error:
+ mutex_unlock(&d->i2c_mutex);
+ return (ret) ? ret : num;
+}
+
+static u32 dvbsky_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C;
+}
+
+static struct i2c_algorithm dvbsky_i2c_algo = {
+ .master_xfer = dvbsky_i2c_xfer,
+ .functionality = dvbsky_i2c_func,
+};
+
+#if IS_ENABLED(CONFIG_RC_CORE)
+static int dvbsky_rc_query(struct dvb_usb_device *d)
+{
+ u32 code = 0xffff, scancode;
+ u8 rc5_command, rc5_system;
+ u8 obuf[2], ibuf[2], toggle;
+ int ret;
+
+ obuf[0] = 0x10;
+ ret = dvbsky_usb_generic_rw(d, obuf, 1, ibuf, 2);
+ if (ret)
+ dev_err(&d->udev->dev, "%s: %s() failed=%d\n",
+ KBUILD_MODNAME, __func__, ret);
+ if (ret == 0)
+ code = (ibuf[0] << 8) | ibuf[1];
+ if (code != 0xffff) {
+ dev_dbg(&d->udev->dev, "rc code: %x\n", code);
+ rc5_command = code & 0x3F;
+ rc5_system = (code & 0x7C0) >> 6;
+ toggle = (code & 0x800) ? 1 : 0;
+ scancode = rc5_system << 8 | rc5_command;
+ rc_keydown(d->rc_dev, RC_TYPE_RC5, scancode, toggle);
+ }
+ return 0;
+}
+
+static int dvbsky_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
+{
+ rc->allowed_protos = RC_BIT_RC5;
+ rc->query = dvbsky_rc_query;
+ rc->interval = 300;
+ return 0;
+}
+#else
+ #define dvbsky_get_rc_config NULL
+#endif
+
+static int dvbsky_usb_set_voltage(struct dvb_frontend *fe,
+ fe_sec_voltage_t voltage)
+{
+ struct dvb_usb_device *d = fe_to_d(fe);
+ struct dvbsky_state *state = d_to_priv(d);
+ u8 value;
+
+ if (voltage == SEC_VOLTAGE_OFF)
+ value = 0;
+ else
+ value = 1;
+ dvbsky_gpio_ctrl(d, 0x80, value);
+
+ return state->fe_set_voltage(fe, voltage);
+}
+
+static int dvbsky_read_mac_addr(struct dvb_usb_adapter *adap, u8 mac[6])
+{
+ struct dvb_usb_device *d = adap_to_d(adap);
+ u8 obuf[] = { 0x1e, 0x00 };
+ u8 ibuf[6] = { 0 };
+ struct i2c_msg msg[] = {
+ {
+ .addr = 0x51,
+ .flags = 0,
+ .buf = obuf,
+ .len = 2,
+ }, {
+ .addr = 0x51,
+ .flags = I2C_M_RD,
+ .buf = ibuf,
+ .len = 6,
+ }
+ };
+
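+ /* the 6-byte MAC is read from offset 0x1e of the device at I2C
+ * address 0x51, presumably the configuration EEPROM */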
+ if (i2c_transfer(&d->i2c_adap, msg, 2) == 2)
+ memcpy(mac, ibuf, 6);
+
+ dev_info(&d->udev->dev, "dvbsky_usb MAC address=%pM\n", mac);
+
+ return 0;
+}
+
+static int dvbsky_usb_read_status(struct dvb_frontend *fe, fe_status_t *status)
+{
+ struct dvb_usb_device *d = fe_to_d(fe);
+ struct dvbsky_state *state = d_to_priv(d);
+ int ret;
+
+ ret = state->fe_read_status(fe, status);
+
+ /* the slave FIFO needs a resync when the signal goes from unlock to lock */
+ if ((*status & FE_HAS_LOCK) && (!state->last_lock))
+ dvbsky_stream_ctrl(d, 1);
+
+ state->last_lock = (*status & FE_HAS_LOCK) ? 1 : 0;
+ return ret;
+}
+
+static const struct m88ds3103_config dvbsky_s960_m88ds3103_config = {
+ .i2c_addr = 0x68,
+ .clock = 27000000,
+ .i2c_wr_max = 33,
+ .clock_out = 0,
+ .ts_mode = M88DS3103_TS_CI,
+ .ts_clk = 16000,
+ .ts_clk_pol = 0,
+ .agc = 0x99,
+ .lnb_hv_pol = 1,
+ .lnb_en_pol = 1,
+};
+
+static int dvbsky_s960_attach(struct dvb_usb_adapter *adap)
+{
+ struct dvbsky_state *state = adap_to_priv(adap);
+ struct dvb_usb_device *d = adap_to_d(adap);
+ int ret = 0;
+ /* demod I2C adapter */
+ struct i2c_adapter *i2c_adapter;
+ struct i2c_client *client;
+ struct i2c_board_info info;
+ struct m88ts2022_config m88ts2022_config = {
+ .clock = 27000000,
+ };
+ memset(&info, 0, sizeof(struct i2c_board_info));
+
+ /* attach demod */
+ adap->fe[0] = dvb_attach(m88ds3103_attach,
+ &dvbsky_s960_m88ds3103_config,
+ &d->i2c_adap,
+ &i2c_adapter);
+ if (!adap->fe[0]) {
+ dev_err(&d->udev->dev, "dvbsky_s960_attach fail.\n");
+ ret = -ENODEV;
+ goto fail_attach;
+ }
+
+ /* attach tuner */
+ m88ts2022_config.fe = adap->fe[0];
+ strlcpy(info.type, "m88ts2022", I2C_NAME_SIZE);
+ info.addr = 0x60;
+ info.platform_data = &m88ts2022_config;
+ request_module("m88ts2022");
+ client = i2c_new_device(i2c_adapter, &info);
+ if (client == NULL || client->dev.driver == NULL) {
+ dvb_frontend_detach(adap->fe[0]);
+ ret = -ENODEV;
+ goto fail_attach;
+ }
+
+ if (!try_module_get(client->dev.driver->owner)) {
+ i2c_unregister_device(client);
+ dvb_frontend_detach(adap->fe[0]);
+ ret = -ENODEV;
+ goto fail_attach;
+ }
+
+ /* delegate signal strength measurement to tuner */
+ adap->fe[0]->ops.read_signal_strength =
+ adap->fe[0]->ops.tuner_ops.get_rf_strength;
+
+ /* hook fe: need to resync the slave FIFO when the signal locks. */
+ state->fe_read_status = adap->fe[0]->ops.read_status;
+ adap->fe[0]->ops.read_status = dvbsky_usb_read_status;
+
+ /* hook fe: LNB off/on is controlled by the Cypress USB chip. */
+ state->fe_set_voltage = adap->fe[0]->ops.set_voltage;
+ adap->fe[0]->ops.set_voltage = dvbsky_usb_set_voltage;
+
+ state->i2c_client_tuner = client;
+
+fail_attach:
+ return ret;
+}
+
+static int dvbsky_identify_state(struct dvb_usb_device *d, const char **name)
+{
+ dvbsky_gpio_ctrl(d, 0x04, 1);
+ msleep(20);
+ dvbsky_gpio_ctrl(d, 0x83, 0);
+ dvbsky_gpio_ctrl(d, 0xc0, 1);
+ msleep(100);
+ dvbsky_gpio_ctrl(d, 0x83, 1);
+ dvbsky_gpio_ctrl(d, 0xc0, 0);
+ msleep(50);
+
+ return WARM;
+}
+
+static int dvbsky_init(struct dvb_usb_device *d)
+{
+ struct dvbsky_state *state = d_to_priv(d);
+
+ /* use default interface */
+ /*
+ ret = usb_set_interface(d->udev, 0, 0);
+ if (ret)
+ return ret;
+ */
+ mutex_init(&state->stream_mutex);
+
+ state->last_lock = 0;
+
+ return 0;
+}
+
+static void dvbsky_exit(struct dvb_usb_device *d)
+{
+ struct dvbsky_state *state = d_to_priv(d);
+ struct i2c_client *client;
+
+ client = state->i2c_client_tuner;
+ /* remove I2C tuner */
+ if (client) {
+ module_put(client->dev.driver->owner);
+ i2c_unregister_device(client);
+ }
+}
+
+/* DVB USB Driver stuff */
+static struct dvb_usb_device_properties dvbsky_s960_props = {
+ .driver_name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ .adapter_nr = adapter_nr,
+ .size_of_priv = sizeof(struct dvbsky_state),
+
+ .generic_bulk_ctrl_endpoint = 0x01,
+ .generic_bulk_ctrl_endpoint_response = 0x81,
+ .generic_bulk_ctrl_delay = DVBSKY_MSG_DELAY,
+
+ .i2c_algo = &dvbsky_i2c_algo,
+ .frontend_attach = dvbsky_s960_attach,
+ .init = dvbsky_init,
+ .get_rc_config = dvbsky_get_rc_config,
+ .streaming_ctrl = dvbsky_streaming_ctrl,
+ .identify_state = dvbsky_identify_state,
+ .exit = dvbsky_exit,
+ .read_mac_address = dvbsky_read_mac_addr,
+
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .stream = DVB_USB_STREAM_BULK(0x82, 8, 4096),
+ }
+ }
+};
+
+static const struct usb_device_id dvbsky_id_table[] = {
+ { DVB_USB_DEVICE(0x0572, 0x6831,
+ &dvbsky_s960_props, "DVBSky S960/S860", RC_MAP_DVBSKY) },
+ { }
+};
+MODULE_DEVICE_TABLE(usb, dvbsky_id_table);
+
+static struct usb_driver dvbsky_usb_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = dvbsky_id_table,
+ .probe = dvb_usbv2_probe,
+ .disconnect = dvb_usbv2_disconnect,
+ .suspend = dvb_usbv2_suspend,
+ .resume = dvb_usbv2_resume,
+ .reset_resume = dvb_usbv2_reset_resume,
+ .no_dynamic_id = 1,
+ .soft_unbind = 1,
+};
+
+module_usb_driver(dvbsky_usb_driver);
+
+MODULE_AUTHOR("Max nibble <nibble.max@gmail.com>");
+MODULE_DESCRIPTION("Driver for DVBSky USB");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
index e332af731187..9f2c5459b73a 100644
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
@@ -1252,7 +1252,7 @@ static int lme2510_get_stream_config(struct dvb_frontend *fe, u8 *ts_type,
/* Turn PID filter on the fly by module option */
if (pid_filter == 2) {
- adap->pid_filtering = 1;
+ adap->pid_filtering = true;
adap->max_feed_count = 15;
}
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
index b8a707e57b99..c3447eaf1104 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
@@ -31,11 +31,11 @@ module_param_named(debug, dvb_usb_mxl111sf_debug, int, 0644);
MODULE_PARM_DESC(debug, "set debugging level "
"(1=info, 2=xfer, 4=i2c, 8=reg, 16=adv (or-able)).");
-int dvb_usb_mxl111sf_isoc;
+static int dvb_usb_mxl111sf_isoc;
module_param_named(isoc, dvb_usb_mxl111sf_isoc, int, 0644);
MODULE_PARM_DESC(isoc, "enable usb isoc xfer (0=bulk, 1=isoc).");
-int dvb_usb_mxl111sf_spi;
+static int dvb_usb_mxl111sf_spi;
module_param_named(spi, dvb_usb_mxl111sf_spi, int, 0644);
MODULE_PARM_DESC(spi, "use spi rather than tp for data xfer (0=tp, 1=spi).");
@@ -43,7 +43,7 @@ MODULE_PARM_DESC(spi, "use spi rather than tp for data xfer (0=tp, 1=spi).");
#define ANT_PATH_EXTERNAL 1
#define ANT_PATH_INTERNAL 2
-int dvb_usb_mxl111sf_rfswitch =
+static int dvb_usb_mxl111sf_rfswitch =
#if 0
ANT_PATH_AUTO;
#else
@@ -887,7 +887,7 @@ static u32 mxl111sf_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-struct i2c_algorithm mxl111sf_i2c_algo = {
+static struct i2c_algorithm mxl111sf_i2c_algo = {
.master_xfer = mxl111sf_i2c_xfer,
.functionality = mxl111sf_i2c_func,
#ifdef NEED_ALGO_CONTROL
diff --git a/drivers/media/usb/dvb-usb/Kconfig b/drivers/media/usb/dvb-usb/Kconfig
index 10aef2188fbe..41d3eb922a00 100644
--- a/drivers/media/usb/dvb-usb/Kconfig
+++ b/drivers/media/usb/dvb-usb/Kconfig
@@ -130,7 +130,7 @@ config DVB_USB_CXUSB
Medion MD95700 hybrid USB2.0 device.
DViCO FusionHDTV (Bluebird) USB2.0 devices
- TechnoTrend TVStick CT2-4400
+ TechnoTrend TVStick CT2-4400 and CT2-4650 CI devices
config DVB_USB_M920X
tristate "Uli m920x DVB-T USB2.0 support"
diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c
index af176b6ce738..3f4361e48a32 100644
--- a/drivers/media/usb/dvb-usb/af9005.c
+++ b/drivers/media/usb/dvb-usb/af9005.c
@@ -30,7 +30,7 @@ MODULE_PARM_DESC(debug,
"set debugging level (1=info,xfer=2,rc=4,reg=8,i2c=16,fw=32 (or-able))."
DVB_USB_DEBUG_STATUS);
/* enable obnoxious led */
-bool dvb_usb_af9005_led = 1;
+bool dvb_usb_af9005_led = true;
module_param_named(led, dvb_usb_af9005_led, bool, 0644);
MODULE_PARM_DESC(led, "enable led (default: 1).");
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index 16bc579d1404..356abb369c20 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -44,6 +44,7 @@
#include "atbm8830.h"
#include "si2168.h"
#include "si2157.h"
+#include "sp2.h"
/* Max transfer size done by I2C transfer functions */
#define MAX_XFER_SIZE 80
@@ -175,7 +176,7 @@ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
for (i = 0; i < num; i++) {
- if (d->udev->descriptor.idVendor == USB_VID_MEDION)
+ if (le16_to_cpu(d->udev->descriptor.idVendor) == USB_VID_MEDION)
switch (msg[i].addr) {
case 0x63:
cxusb_gpio_tuner(d, 0);
@@ -672,6 +673,70 @@ static struct rc_map_table rc_map_d680_dmb_table[] = {
{ 0x0025, KEY_POWER },
};
+static int cxusb_tt_ct2_4400_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
+{
+ u8 wbuf[2];
+ u8 rbuf[6];
+ int ret;
+ struct i2c_msg msg[] = {
+ {
+ .addr = 0x51,
+ .flags = 0,
+ .buf = wbuf,
+ .len = 2,
+ }, {
+ .addr = 0x51,
+ .flags = I2C_M_RD,
+ .buf = rbuf,
+ .len = 6,
+ }
+ };
+
+ wbuf[0] = 0x1e;
+ wbuf[1] = 0x00;
+ ret = cxusb_i2c_xfer(&d->i2c_adap, msg, 2);
+
+ if (ret == 2) {
+ memcpy(mac, rbuf, 6);
+ return 0;
+ } else {
+ if (ret < 0)
+ return ret;
+ return -EIO;
+ }
+}
+
+static int cxusb_tt_ct2_4650_ci_ctrl(void *priv, u8 read, int addr,
+ u8 data, int *mem)
+{
+ struct dvb_usb_device *d = priv;
+ u8 wbuf[3];
+ u8 rbuf[2];
+ int ret;
+
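+ /* the 16-bit CI address is sent big-endian; for writes the data byte
+ * follows, for reads the data comes back in rbuf[1] */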
+ wbuf[0] = (addr >> 8) & 0xff;
+ wbuf[1] = addr & 0xff;
+
+ if (read) {
+ ret = cxusb_ctrl_msg(d, CMD_SP2_CI_READ, wbuf, 2, rbuf, 2);
+ } else {
+ wbuf[2] = data;
+ ret = cxusb_ctrl_msg(d, CMD_SP2_CI_WRITE, wbuf, 3, rbuf, 1);
+ }
+
+ if (ret)
+ goto err;
+
+ if (read)
+ *mem = rbuf[1];
+
+ return 0;
+err:
+ deb_info("%s: ci usb write returned %d\n", __func__, ret);
+ return ret;
+
+}
+
static int cxusb_dee1601_demod_init(struct dvb_frontend* fe)
{
static u8 clock_config [] = { CLOCK_CTL, 0x38, 0x28 };
@@ -1350,9 +1415,12 @@ static int cxusb_tt_ct2_4400_attach(struct dvb_usb_adapter *adap)
struct i2c_adapter *adapter;
struct i2c_client *client_demod;
struct i2c_client *client_tuner;
+ struct i2c_client *client_ci;
struct i2c_board_info info;
struct si2168_config si2168_config;
struct si2157_config si2157_config;
+ struct sp2_config sp2_config;
+ u8 o[2], i;
/* reset the tuner */
if (cxusb_tt_ct2_4400_gpio_tuner(d, 0) < 0) {
@@ -1369,6 +1437,7 @@ static int cxusb_tt_ct2_4400_attach(struct dvb_usb_adapter *adap)
/* attach frontend */
si2168_config.i2c_adapter = &adapter;
si2168_config.fe = &adap->fe_adap[0].fe;
+ si2168_config.ts_mode = SI2168_TS_PARALLEL;
memset(&info, 0, sizeof(struct i2c_board_info));
strlcpy(info.type, "si2168", I2C_NAME_SIZE);
info.addr = 0x64;
@@ -1408,6 +1477,48 @@ static int cxusb_tt_ct2_4400_attach(struct dvb_usb_adapter *adap)
st->i2c_client_tuner = client_tuner;
+ /* initialize CI */
+ if (le16_to_cpu(d->udev->descriptor.idProduct) ==
+ USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI) {
+
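+ /* pulse GPIO 0xc0 high then low (presumably the CI reset line)
+ * before registering the sp2 CI controller */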
+ memcpy(o, "\xc0\x01", 2);
+ cxusb_ctrl_msg(d, CMD_GPIO_WRITE, o, 2, &i, 1);
+ msleep(100);
+
+ memcpy(o, "\xc0\x00", 2);
+ cxusb_ctrl_msg(d, CMD_GPIO_WRITE, o, 2, &i, 1);
+ msleep(100);
+
+ memset(&sp2_config, 0, sizeof(sp2_config));
+ sp2_config.dvb_adap = &adap->dvb_adap;
+ sp2_config.priv = d;
+ sp2_config.ci_control = cxusb_tt_ct2_4650_ci_ctrl;
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "sp2", I2C_NAME_SIZE);
+ info.addr = 0x40;
+ info.platform_data = &sp2_config;
+ request_module(info.type);
+ client_ci = i2c_new_device(&d->i2c_adap, &info);
+ if (client_ci == NULL || client_ci->dev.driver == NULL) {
+ module_put(client_tuner->dev.driver->owner);
+ i2c_unregister_device(client_tuner);
+ module_put(client_demod->dev.driver->owner);
+ i2c_unregister_device(client_demod);
+ return -ENODEV;
+ }
+ if (!try_module_get(client_ci->dev.driver->owner)) {
+ i2c_unregister_device(client_ci);
+ module_put(client_tuner->dev.driver->owner);
+ i2c_unregister_device(client_tuner);
+ module_put(client_demod->dev.driver->owner);
+ i2c_unregister_device(client_demod);
+ return -ENODEV;
+ }
+
+ st->i2c_client_ci = client_ci;
+
+ }
+
return 0;
}
@@ -1537,6 +1648,13 @@ static void cxusb_disconnect(struct usb_interface *intf)
struct cxusb_state *st = d->priv;
struct i2c_client *client;
+ /* remove I2C client for CI */
+ client = st->i2c_client_ci;
+ if (client) {
+ module_put(client->dev.driver->owner);
+ i2c_unregister_device(client);
+ }
+
/* remove I2C client for tuner */
client = st->i2c_client_tuner;
if (client) {
@@ -1576,6 +1694,7 @@ static struct usb_device_id cxusb_table [] = {
{ USB_DEVICE(USB_VID_CONEXANT, USB_PID_CONEXANT_D680_DMB) },
{ USB_DEVICE(USB_VID_CONEXANT, USB_PID_MYGICA_D689) },
{ USB_DEVICE(USB_VID_TECHNOTREND, USB_PID_TECHNOTREND_TVSTICK_CT2_4400) },
+ { USB_DEVICE(USB_VID_TECHNOTREND, USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI) },
{} /* Terminating entry */
};
MODULE_DEVICE_TABLE (usb, cxusb_table);
@@ -2230,6 +2349,8 @@ static struct dvb_usb_device_properties cxusb_tt_ct2_4400_properties = {
.size_of_priv = sizeof(struct cxusb_state),
.num_adapters = 1,
+ .read_mac_address = cxusb_tt_ct2_4400_read_mac_address,
+
.adapter = {
{
.num_frontends = 1,
@@ -2265,13 +2386,18 @@ static struct dvb_usb_device_properties cxusb_tt_ct2_4400_properties = {
.rc_interval = 150,
},
- .num_device_descs = 1,
+ .num_device_descs = 2,
.devices = {
{
"TechnoTrend TVStick CT2-4400",
{ NULL },
{ &cxusb_table[20], NULL },
},
+ {
+ "TechnoTrend TT-connect CT2-4650 CI",
+ { NULL },
+ { &cxusb_table[21], NULL },
+ },
}
};
diff --git a/drivers/media/usb/dvb-usb/cxusb.h b/drivers/media/usb/dvb-usb/cxusb.h
index 527ff7905e15..29f3e2ea2476 100644
--- a/drivers/media/usb/dvb-usb/cxusb.h
+++ b/drivers/media/usb/dvb-usb/cxusb.h
@@ -28,10 +28,14 @@
#define CMD_ANALOG 0x50
#define CMD_DIGITAL 0x51
+#define CMD_SP2_CI_WRITE 0x70
+#define CMD_SP2_CI_READ 0x71
+
struct cxusb_state {
u8 gpio_write_state[3];
struct i2c_client *i2c_client_demod;
struct i2c_client *i2c_client_tuner;
+ struct i2c_client *i2c_client_ci;
};
#endif
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
index ce47d3f1c850..e1757b8f5f5d 100644
--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
@@ -220,12 +220,21 @@ static struct dibx000_agc_config stk7700d_7000p_mt2266_agc_config[2] = {
};
static struct dibx000_bandwidth_config stk7700d_mt2266_pll_config = {
- 60000, 30000,
- 1, 8, 3, 1, 0,
- 0, 0, 1, 1, 2,
- (3 << 14) | (1 << 12) | (524 << 0),
- 0,
- 20452225,
+ .internal = 60000,
+ .sampling = 30000,
+ .pll_prediv = 1,
+ .pll_ratio = 8,
+ .pll_range = 3,
+ .pll_reset = 1,
+ .pll_bypass = 0,
+ .enable_refdiv = 0,
+ .bypclk_div = 0,
+ .IO_CLK_en_core = 1,
+ .ADClkSrc = 1,
+ .modulo = 2,
+ .sad_cfg = (3 << 14) | (1 << 12) | (524 << 0),
+ .ifreq = 0,
+ .timf = 20452225,
};
static struct dib7000p_config stk7700d_dib7000p_mt2266_config[] = {
@@ -342,57 +351,57 @@ static int stk7700d_tuner_attach(struct dvb_usb_adapter *adap)
/* STK7700-PH: Digital/Analog Hybrid Tuner, e.h. Cinergy HT USB HE */
static struct dibx000_agc_config xc3028_agc_config = {
- BAND_VHF | BAND_UHF, /* band_caps */
-
+ .band_caps = BAND_VHF | BAND_UHF,
/* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=0,
* P_agc_inv_pwm1=0, P_agc_inv_pwm2=0, P_agc_inh_dc_rv_est=0,
* P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=2, P_agc_write=0 */
- (0 << 15) | (0 << 14) | (0 << 11) | (0 << 10) | (0 << 9) | (0 << 8) |
- (3 << 5) | (0 << 4) | (2 << 1) | (0 << 0), /* setup */
-
- 712, /* inv_gain */
- 21, /* time_stabiliz */
-
- 0, /* alpha_level */
- 118, /* thlock */
-
- 0, /* wbd_inv */
- 2867, /* wbd_ref */
- 0, /* wbd_sel */
- 2, /* wbd_alpha */
-
- 0, /* agc1_max */
- 0, /* agc1_min */
- 39718, /* agc2_max */
- 9930, /* agc2_min */
- 0, /* agc1_pt1 */
- 0, /* agc1_pt2 */
- 0, /* agc1_pt3 */
- 0, /* agc1_slope1 */
- 0, /* agc1_slope2 */
- 0, /* agc2_pt1 */
- 128, /* agc2_pt2 */
- 29, /* agc2_slope1 */
- 29, /* agc2_slope2 */
-
- 17, /* alpha_mant */
- 27, /* alpha_exp */
- 23, /* beta_mant */
- 51, /* beta_exp */
-
- 1, /* perform_agc_softsplit */
+ .setup = (0 << 15) | (0 << 14) | (0 << 11) | (0 << 10) | (0 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (2 << 1) | (0 << 0),
+ .inv_gain = 712,
+ .time_stabiliz = 21,
+ .alpha_level = 0,
+ .thlock = 118,
+ .wbd_inv = 0,
+ .wbd_ref = 2867,
+ .wbd_sel = 0,
+ .wbd_alpha = 2,
+ .agc1_max = 0,
+ .agc1_min = 0,
+ .agc2_max = 39718,
+ .agc2_min = 9930,
+ .agc1_pt1 = 0,
+ .agc1_pt2 = 0,
+ .agc1_pt3 = 0,
+ .agc1_slope1 = 0,
+ .agc1_slope2 = 0,
+ .agc2_pt1 = 0,
+ .agc2_pt2 = 128,
+ .agc2_slope1 = 29,
+ .agc2_slope2 = 29,
+ .alpha_mant = 17,
+ .alpha_exp = 27,
+ .beta_mant = 23,
+ .beta_exp = 51,
+ .perform_agc_softsplit = 1,
};
/* PLL Configuration for COFDM BW_MHz = 8.00 with external clock = 30.00 */
static struct dibx000_bandwidth_config xc3028_bw_config = {
- 60000, 30000, /* internal, sampling */
- 1, 8, 3, 1, 0, /* pll_cfg: prediv, ratio, range, reset, bypass */
- 0, 0, 1, 1, 0, /* misc: refdiv, bypclk_div, IO_CLK_en_core, ADClkSrc,
- modulo */
- (3 << 14) | (1 << 12) | (524 << 0), /* sad_cfg: refsel, sel, freq_15k */
- (1 << 25) | 5816102, /* ifreq = 5.200000 MHz */
- 20452225, /* timf */
- 30000000, /* xtal_hz */
+ .internal = 60000,
+ .sampling = 30000,
+ .pll_prediv = 1,
+ .pll_ratio = 8,
+ .pll_range = 3,
+ .pll_reset = 1,
+ .pll_bypass = 0,
+ .enable_refdiv = 0,
+ .bypclk_div = 0,
+ .IO_CLK_en_core = 1,
+ .ADClkSrc = 1,
+ .modulo = 0,
+ .sad_cfg = (3 << 14) | (1 << 12) | (524 << 0), /* sad_cfg: refsel, sel, freq_15k */
+ .ifreq = (1 << 25) | 5816102, /* ifreq = 5.200000 MHz */
+ .timf = 20452225,
+ .xtal_hz = 30000000,
};
static struct dib7000p_config stk7700ph_dib7700_xc3028_config = {
@@ -614,59 +623,55 @@ static struct dibx000_agc_config stk7700p_7000m_mt2060_agc_config = {
};
static struct dibx000_agc_config stk7700p_7000p_mt2060_agc_config = {
- BAND_UHF | BAND_VHF,
-
+ .band_caps = BAND_UHF | BAND_VHF,
/* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=5, P_agc_inv_pwm1=0, P_agc_inv_pwm2=0,
* P_agc_inh_dc_rv_est=0, P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=2, P_agc_write=0 */
- (0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8)
- | (3 << 5) | (0 << 4) | (2 << 1) | (0 << 0),
-
- 712,
- 41,
-
- 0,
- 118,
-
- 0,
- 4095,
- 0,
- 0,
-
- 42598,
- 16384,
- 42598,
- 0,
-
- 0,
- 137,
- 255,
-
- 0,
- 255,
-
- 0,
- 0,
-
- 0,
- 41,
-
- 15,
- 25,
-
- 28,
- 48,
-
- 0,
+ .setup = (0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (2 << 1) | (0 << 0),
+ .inv_gain = 712,
+ .time_stabiliz = 41,
+ .alpha_level = 0,
+ .thlock = 118,
+ .wbd_inv = 0,
+ .wbd_ref = 4095,
+ .wbd_sel = 0,
+ .wbd_alpha = 0,
+ .agc1_max = 42598,
+ .agc1_min = 16384,
+ .agc2_max = 42598,
+ .agc2_min = 0,
+ .agc1_pt1 = 0,
+ .agc1_pt2 = 137,
+ .agc1_pt3 = 255,
+ .agc1_slope1 = 0,
+ .agc1_slope2 = 255,
+ .agc2_pt1 = 0,
+ .agc2_pt2 = 0,
+ .agc2_slope1 = 0,
+ .agc2_slope2 = 41,
+ .alpha_mant = 15,
+ .alpha_exp = 25,
+ .beta_mant = 28,
+ .beta_exp = 48,
+ .perform_agc_softsplit = 0,
};
static struct dibx000_bandwidth_config stk7700p_pll_config = {
- 60000, 30000,
- 1, 8, 3, 1, 0,
- 0, 0, 1, 1, 0,
- (3 << 14) | (1 << 12) | (524 << 0),
- 60258167,
- 20452225,
- 30000000,
+ .internal = 60000,
+ .sampling = 30000,
+ .pll_prediv = 1,
+ .pll_ratio = 8,
+ .pll_range = 3,
+ .pll_reset = 1,
+ .pll_bypass = 0,
+ .enable_refdiv = 0,
+ .bypclk_div = 0,
+ .IO_CLK_en_core = 1,
+ .ADClkSrc = 1,
+ .modulo = 0,
+ .sad_cfg = (3 << 14) | (1 << 12) | (524 << 0),
+ .ifreq = 60258167,
+ .timf = 20452225,
+ .xtal_hz = 30000000,
};
static struct dib7000m_config stk7700p_dib7000m_config = {
@@ -758,45 +763,36 @@ static int stk7700p_tuner_attach(struct dvb_usb_adapter *adap)
/* DIB7070 generic */
static struct dibx000_agc_config dib7070_agc_config = {
- BAND_UHF | BAND_VHF | BAND_LBAND | BAND_SBAND,
+ .band_caps = BAND_UHF | BAND_VHF | BAND_LBAND | BAND_SBAND,
/* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=5, P_agc_inv_pwm1=0, P_agc_inv_pwm2=0,
* P_agc_inh_dc_rv_est=0, P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=5, P_agc_write=0 */
- (0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8)
- | (3 << 5) | (0 << 4) | (5 << 1) | (0 << 0),
-
- 600,
- 10,
-
- 0,
- 118,
-
- 0,
- 3530,
- 1,
- 5,
-
- 65535,
- 0,
-
- 65535,
- 0,
-
- 0,
- 40,
- 183,
- 206,
- 255,
- 72,
- 152,
- 88,
- 90,
-
- 17,
- 27,
- 23,
- 51,
-
- 0,
+ .setup = (0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (5 << 1) | (0 << 0),
+ .inv_gain = 600,
+ .time_stabiliz = 10,
+ .alpha_level = 0,
+ .thlock = 118,
+ .wbd_inv = 0,
+ .wbd_ref = 3530,
+ .wbd_sel = 1,
+ .wbd_alpha = 5,
+ .agc1_max = 65535,
+ .agc1_min = 0,
+ .agc2_max = 65535,
+ .agc2_min = 0,
+ .agc1_pt1 = 0,
+ .agc1_pt2 = 40,
+ .agc1_pt3 = 183,
+ .agc1_slope1 = 206,
+ .agc1_slope2 = 255,
+ .agc2_pt1 = 72,
+ .agc2_pt2 = 152,
+ .agc2_slope1 = 88,
+ .agc2_slope2 = 90,
+ .alpha_mant = 17,
+ .alpha_exp = 27,
+ .beta_mant = 23,
+ .beta_exp = 51,
+ .perform_agc_softsplit = 0,
};
static int dib7070_tuner_reset(struct dvb_frontend *fe, int onoff)
@@ -952,13 +948,22 @@ static int stk70x0p_pid_filter_ctrl(struct dvb_usb_adapter *adapter, int onoff)
}
static struct dibx000_bandwidth_config dib7070_bw_config_12_mhz = {
- 60000, 15000,
- 1, 20, 3, 1, 0,
- 0, 0, 1, 1, 2,
- (3 << 14) | (1 << 12) | (524 << 0),
- (0 << 25) | 0,
- 20452225,
- 12000000,
+ .internal = 60000,
+ .sampling = 15000,
+ .pll_prediv = 1,
+ .pll_ratio = 20,
+ .pll_range = 3,
+ .pll_reset = 1,
+ .pll_bypass = 0,
+ .enable_refdiv = 0,
+ .bypclk_div = 0,
+ .IO_CLK_en_core = 1,
+ .ADClkSrc = 1,
+ .modulo = 2,
+ .sad_cfg = (3 << 14) | (1 << 12) | (524 << 0),
+ .ifreq = (0 << 25) | 0,
+ .timf = 20452225,
+ .xtal_hz = 12000000,
};
static struct dib7000p_config dib7070p_dib7000p_config = {
@@ -1169,14 +1174,22 @@ static struct dibx000_agc_config dib807x_agc_config[2] = {
};
static struct dibx000_bandwidth_config dib807x_bw_config_12_mhz = {
- 60000, 15000, /* internal, sampling*/
- 1, 20, 3, 1, 0, /* pll_cfg: prediv, ratio, range, reset, bypass*/
- 0, 0, 1, 1, 2, /* misc: refdiv, bypclk_div, IO_CLK_en_core,
- ADClkSrc, modulo */
- (3 << 14) | (1 << 12) | (599 << 0), /* sad_cfg: refsel, sel, freq_15k*/
- (0 << 25) | 0, /* ifreq = 0.000000 MHz*/
- 18179755, /* timf*/
- 12000000, /* xtal_hz*/
+ .internal = 60000,
+ .sampling = 15000,
+ .pll_prediv = 1,
+ .pll_ratio = 20,
+ .pll_range = 3,
+ .pll_reset = 1,
+ .pll_bypass = 0,
+ .enable_refdiv = 0,
+ .bypclk_div = 0,
+ .IO_CLK_en_core = 1,
+ .ADClkSrc = 1,
+ .modulo = 2,
+ .sad_cfg = (3 << 14) | (1 << 12) | (599 << 0), /* sad_cfg: refsel, sel, freq_15k*/
+ .ifreq = (0 << 25) | 0, /* ifreq = 0.000000 MHz*/
+ .timf = 18179755,
+ .xtal_hz = 12000000,
};
static struct dib8000_config dib807x_dib8000_config[2] = {
@@ -1921,13 +1934,22 @@ static struct dibx000_agc_config dib8096p_agc_config[2] = {
};
static struct dibx000_bandwidth_config dib8096p_clock_config_12_mhz = {
- 108000, 13500,
- 1, 9, 1, 0, 0,
- 0, 0, 0, 0, 2,
- (3 << 14) | (1 << 12) | (524 << 0),
- (0 << 25) | 0,
- 20199729,
- 12000000,
+ .internal = 108000,
+ .sampling = 13500,
+ .pll_prediv = 1,
+ .pll_ratio = 9,
+ .pll_range = 1,
+ .pll_reset = 0,
+ .pll_bypass = 0,
+ .enable_refdiv = 0,
+ .bypclk_div = 0,
+ .IO_CLK_en_core = 0,
+ .ADClkSrc = 0,
+ .modulo = 2,
+ .sad_cfg = (3 << 14) | (1 << 12) | (524 << 0),
+ .ifreq = (0 << 25) | 0,
+ .timf = 20199729,
+ .xtal_hz = 12000000,
};
static struct dib8000_config tfe8096p_dib8000_config = {
@@ -2724,13 +2746,22 @@ static struct dibx000_agc_config dib7090_agc_config[2] = {
};
static struct dibx000_bandwidth_config dib7090_clock_config_12_mhz = {
- 60000, 15000,
- 1, 5, 0, 0, 0,
- 0, 0, 1, 1, 2,
- (3 << 14) | (1 << 12) | (524 << 0),
- (0 << 25) | 0,
- 20452225,
- 15000000,
+ .internal = 60000,
+ .sampling = 15000,
+ .pll_prediv = 1,
+ .pll_ratio = 5,
+ .pll_range = 0,
+ .pll_reset = 0,
+ .pll_bypass = 0,
+ .enable_refdiv = 0,
+ .bypclk_div = 0,
+ .IO_CLK_en_core = 1,
+ .ADClkSrc = 1,
+ .modulo = 2,
+ .sad_cfg = (3 << 14) | (1 << 12) | (524 << 0),
+ .ifreq = (0 << 25) | 0,
+ .timf = 20452225,
+ .xtal_hz = 15000000,
};
static struct dib7000p_config nim7090_dib7000p_config = {
@@ -3498,14 +3529,22 @@ static struct dibx000_agc_config stk7700p_7000p_xc4000_agc_config = {
};
static struct dibx000_bandwidth_config stk7700p_xc4000_pll_config = {
- 60000, 30000, /* internal, sampling */
- 1, 8, 3, 1, 0, /* pll_cfg: prediv, ratio, range, reset, bypass */
- 0, 0, 1, 1, 0, /* misc: refdiv, bypclk_div, IO_CLK_en_core, */
- /* ADClkSrc, modulo */
- (3 << 14) | (1 << 12) | 524, /* sad_cfg: refsel, sel, freq_15k */
- 39370534, /* ifreq */
- 20452225, /* timf */
- 30000000 /* xtal */
+ .internal = 60000,
+ .sampling = 30000,
+ .pll_prediv = 1,
+ .pll_ratio = 8,
+ .pll_range = 3,
+ .pll_reset = 1,
+ .pll_bypass = 0,
+ .enable_refdiv = 0,
+ .bypclk_div = 0,
+ .IO_CLK_en_core = 1,
+ .ADClkSrc = 1,
+ .modulo = 0,
+ .sad_cfg = (3 << 14) | (1 << 12) | 524, /* sad_cfg: refsel, sel, freq_15k */
+ .ifreq = 39370534,
+ .timf = 20452225,
+ .xtal_hz = 30000000
};
/* FIXME: none of these inputs are validated yet */
diff --git a/drivers/media/usb/dvb-usb/dibusb-common.c b/drivers/media/usb/dvb-usb/dibusb-common.c
index 6d68af0c49c8..ef3a8f75f82e 100644
--- a/drivers/media/usb/dvb-usb/dibusb-common.c
+++ b/drivers/media/usb/dvb-usb/dibusb-common.c
@@ -258,8 +258,8 @@ static struct dib3000mc_config mod3000p_dib3000p_config = {
int dibusb_dib3000mc_frontend_attach(struct dvb_usb_adapter *adap)
{
- if (adap->dev->udev->descriptor.idVendor == USB_VID_LITEON &&
- adap->dev->udev->descriptor.idProduct ==
+ if (le16_to_cpu(adap->dev->udev->descriptor.idVendor) == USB_VID_LITEON &&
+ le16_to_cpu(adap->dev->udev->descriptor.idProduct) ==
USB_PID_LITEON_DVB_T_WARM) {
msleep(1000);
}
@@ -297,8 +297,8 @@ int dibusb_dib3000mc_tuner_attach(struct dvb_usb_adapter *adap)
struct i2c_adapter *tun_i2c;
// First IF calibration for Liteon Sticks
- if (adap->dev->udev->descriptor.idVendor == USB_VID_LITEON &&
- adap->dev->udev->descriptor.idProduct == USB_PID_LITEON_DVB_T_WARM) {
+ if (le16_to_cpu(adap->dev->udev->descriptor.idVendor) == USB_VID_LITEON &&
+ le16_to_cpu(adap->dev->udev->descriptor.idProduct) == USB_PID_LITEON_DVB_T_WARM) {
dibusb_read_eeprom_byte(adap->dev,0x7E,&a);
dibusb_read_eeprom_byte(adap->dev,0x7F,&b);
@@ -310,8 +310,8 @@ int dibusb_dib3000mc_tuner_attach(struct dvb_usb_adapter *adap)
else
warn("LITE-ON DVB-T: Strange IF1 calibration :%2X %2X\n", a, b);
- } else if (adap->dev->udev->descriptor.idVendor == USB_VID_DIBCOM &&
- adap->dev->udev->descriptor.idProduct == USB_PID_DIBCOM_MOD3001_WARM) {
+ } else if (le16_to_cpu(adap->dev->udev->descriptor.idVendor) == USB_VID_DIBCOM &&
+ le16_to_cpu(adap->dev->udev->descriptor.idProduct) == USB_PID_DIBCOM_MOD3001_WARM) {
u8 desc;
dibusb_read_eeprom_byte(adap->dev, 7, &desc);
if (desc == 2) {
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index 2add8c507ec9..1a3df10d6bad 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -667,7 +667,7 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
obuf[1] = (msg[j].addr << 1);
memcpy(obuf + 2, msg[j].buf, msg[j].len);
dw210x_op_rw(d->udev,
- udev->descriptor.idProduct ==
+ le16_to_cpu(udev->descriptor.idProduct) ==
0x7500 ? 0x92 : 0x90, 0, 0,
obuf, msg[j].len + 2,
DW210X_WRITE_MSG);
@@ -1598,7 +1598,7 @@ static int dw2102_load_firmware(struct usb_device *dev,
u8 reset16[] = {0, 0, 0, 0, 0, 0, 0};
const struct firmware *fw;
- switch (dev->descriptor.idProduct) {
+ switch (le16_to_cpu(dev->descriptor.idProduct)) {
case 0x2101:
ret = request_firmware(&fw, DW2101_FIRMWARE, &dev->dev);
if (ret != 0) {
@@ -1641,7 +1641,7 @@ static int dw2102_load_firmware(struct usb_device *dev,
ret = -EINVAL;
}
/* init registers */
- switch (dev->descriptor.idProduct) {
+ switch (le16_to_cpu(dev->descriptor.idProduct)) {
case USB_PID_TEVII_S650:
dw2104_properties.rc.core.rc_codes = RC_MAP_TEVII_NEC;
case USB_PID_DW2104:
@@ -1901,14 +1901,14 @@ static struct dvb_usb_device_properties s6x0_properties = {
}
};
-struct dvb_usb_device_properties *p1100;
+static struct dvb_usb_device_properties *p1100;
static struct dvb_usb_device_description d1100 = {
"Prof 1100 USB ",
{&dw2102_table[PROF_1100], NULL},
{NULL},
};
-struct dvb_usb_device_properties *s660;
+static struct dvb_usb_device_properties *s660;
static struct dvb_usb_device_description d660 = {
"TeVii S660 USB",
{&dw2102_table[TEVII_S660], NULL},
@@ -1927,14 +1927,14 @@ static struct dvb_usb_device_description d480_2 = {
{NULL},
};
-struct dvb_usb_device_properties *p7500;
+static struct dvb_usb_device_properties *p7500;
static struct dvb_usb_device_description d7500 = {
"Prof 7500 USB DVB-S2",
{&dw2102_table[PROF_7500], NULL},
{NULL},
};
-struct dvb_usb_device_properties *s421;
+static struct dvb_usb_device_properties *s421;
static struct dvb_usb_device_description d421 = {
"TeVii S421 PCI",
{&dw2102_table[TEVII_S421], NULL},
diff --git a/drivers/media/usb/dvb-usb/opera1.c b/drivers/media/usb/dvb-usb/opera1.c
index 16ba90acf539..14a2119912ba 100644
--- a/drivers/media/usb/dvb-usb/opera1.c
+++ b/drivers/media/usb/dvb-usb/opera1.c
@@ -554,8 +554,8 @@ static int opera1_probe(struct usb_interface *intf,
{
struct usb_device *udev = interface_to_usbdev(intf);
- if (udev->descriptor.idProduct == USB_PID_OPERA1_WARM &&
- udev->descriptor.idVendor == USB_VID_OPERA1 &&
+ if (le16_to_cpu(udev->descriptor.idProduct) == USB_PID_OPERA1_WARM &&
+ le16_to_cpu(udev->descriptor.idVendor) == USB_VID_OPERA1 &&
opera1_xilinx_load_firmware(udev, "dvb-usb-opera1-fpga-01.fw") != 0
) {
return -EINVAL;
diff --git a/drivers/media/usb/dvb-usb/pctv452e.c b/drivers/media/usb/dvb-usb/pctv452e.c
index bdfe8963591c..d17618fe8f5c 100644
--- a/drivers/media/usb/dvb-usb/pctv452e.c
+++ b/drivers/media/usb/dvb-usb/pctv452e.c
@@ -883,7 +883,7 @@ static int pctv452e_frontend_attach(struct dvb_usb_adapter *a)
if (!a->fe_adap[0].fe)
return -ENODEV;
if ((dvb_attach(lnbp22_attach, a->fe_adap[0].fe,
- &a->dev->i2c_adap)) == 0)
+ &a->dev->i2c_adap)) == NULL)
err("Cannot attach lnbp22\n");
id = a->dev->desc->warm_ids[0];
@@ -900,7 +900,7 @@ static int pctv452e_tuner_attach(struct dvb_usb_adapter *a)
if (!a->fe_adap[0].fe)
return -ENODEV;
if (dvb_attach(stb6100_attach, a->fe_adap[0].fe, &stb6100_config,
- &a->dev->i2c_adap) == 0) {
+ &a->dev->i2c_adap) == NULL) {
err("%s failed\n", __func__);
return -ENODEV;
}
@@ -965,7 +965,7 @@ static struct dvb_usb_device_properties pctv452e_properties = {
.cold_ids = { NULL, NULL }, /* this is a warm only device */
.warm_ids = { &pctv452e_usb_table[0], NULL }
},
- { 0 },
+ { NULL },
}
};
@@ -1023,7 +1023,7 @@ static struct dvb_usb_device_properties tt_connect_s2_3600_properties = {
.cold_ids = { NULL, NULL },
.warm_ids = { &pctv452e_usb_table[2], NULL }
},
- { 0 },
+ { NULL },
}
};
diff --git a/drivers/media/usb/em28xx/em28xx-audio.c b/drivers/media/usb/em28xx/em28xx-audio.c
index e881ef7b6445..957c7ae30efe 100644
--- a/drivers/media/usb/em28xx/em28xx-audio.c
+++ b/drivers/media/usb/em28xx/em28xx-audio.c
@@ -268,7 +268,7 @@ static int snd_em28xx_capture_open(struct snd_pcm_substream *substream)
nonblock = !!(substream->f_flags & O_NONBLOCK);
if (nonblock) {
if (!mutex_trylock(&dev->lock))
- return -EAGAIN;
+ return -EAGAIN;
} else
mutex_lock(&dev->lock);
@@ -893,7 +893,7 @@ static int em28xx_audio_init(struct em28xx *dev)
static int devnr;
int err;
- if (!dev->has_alsa_audio) {
+ if (dev->usb_audio_type != EM28XX_USB_AUDIO_VENDOR) {
/* This device does not support the extension (in this case
the device is expecting the snd-usb-audio module or
doesn't have analog audio support at all) */
@@ -975,7 +975,7 @@ static int em28xx_audio_fini(struct em28xx *dev)
if (dev == NULL)
return 0;
- if (!dev->has_alsa_audio) {
+ if (dev->usb_audio_type != EM28XX_USB_AUDIO_VENDOR) {
/* This device does not support the extension (in this case
the device is expecting the snd-usb-audio module or
doesn't have analog audio support at all) */
@@ -1003,7 +1003,7 @@ static int em28xx_audio_suspend(struct em28xx *dev)
if (dev == NULL)
return 0;
- if (!dev->has_alsa_audio)
+ if (dev->usb_audio_type != EM28XX_USB_AUDIO_VENDOR)
return 0;
em28xx_info("Suspending audio extension");
@@ -1017,7 +1017,7 @@ static int em28xx_audio_resume(struct em28xx *dev)
if (dev == NULL)
return 0;
- if (!dev->has_alsa_audio)
+ if (dev->usb_audio_type != EM28XX_USB_AUDIO_VENDOR)
return 0;
em28xx_info("Resuming audio extension");
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index 9da812b8a786..71fa51e7984e 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -2246,7 +2246,7 @@ struct em28xx_board em28xx_boards[] = {
};
EXPORT_SYMBOL_GPL(em28xx_boards);
-const unsigned int em28xx_bcount = ARRAY_SIZE(em28xx_boards);
+static const unsigned int em28xx_bcount = ARRAY_SIZE(em28xx_boards);
/* table of devices that work with this driver */
struct usb_device_id em28xx_id_table[] = {
@@ -2931,9 +2931,9 @@ static void request_module_async(struct work_struct *work)
#if defined(CONFIG_MODULES) && defined(MODULE)
if (dev->has_video)
request_module("em28xx-v4l");
- if (dev->has_audio_class)
+ if (dev->usb_audio_type == EM28XX_USB_AUDIO_CLASS)
request_module("snd-usb-audio");
- else if (dev->has_alsa_audio)
+ else if (dev->usb_audio_type == EM28XX_USB_AUDIO_VENDOR)
request_module("em28xx-alsa");
if (dev->board.has_dvb)
request_module("em28xx-dvb");
@@ -3098,16 +3098,6 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
}
}
- if (dev->chip_id == CHIP_ID_EM2870 ||
- dev->chip_id == CHIP_ID_EM2874 ||
- dev->chip_id == CHIP_ID_EM28174 ||
- dev->chip_id == CHIP_ID_EM28178) {
- /* Digital only device - don't load any alsa module */
- dev->audio_mode.has_audio = false;
- dev->has_audio_class = false;
- dev->has_alsa_audio = false;
- }
-
if (chip_name != default_chip_name)
printk(KERN_INFO DRIVER_NAME
": chip ID is %s\n", chip_name);
@@ -3190,7 +3180,7 @@ static int em28xx_usb_probe(struct usb_interface *interface,
struct usb_device *udev;
struct em28xx *dev = NULL;
int retval;
- bool has_audio = false, has_video = false, has_dvb = false;
+ bool has_vendor_audio = false, has_video = false, has_dvb = false;
int i, nr, try_bulk;
const int ifnum = interface->altsetting[0].desc.bInterfaceNumber;
char *speed;
@@ -3272,7 +3262,7 @@ static int em28xx_usb_probe(struct usb_interface *interface,
break;
case 0x83:
if (usb_endpoint_xfer_isoc(e)) {
- has_audio = true;
+ has_vendor_audio = true;
} else {
printk(KERN_INFO DRIVER_NAME
": error: skipping audio endpoint 0x83, because it uses bulk transfers !\n");
@@ -3328,7 +3318,7 @@ static int em28xx_usb_probe(struct usb_interface *interface,
}
}
- if (!(has_audio || has_video || has_dvb)) {
+ if (!(has_vendor_audio || has_video || has_dvb)) {
retval = -ENODEV;
goto err_free;
}
@@ -3375,26 +3365,27 @@ static int em28xx_usb_probe(struct usb_interface *interface,
dev->devno = nr;
dev->model = id->driver_info;
dev->alt = -1;
- dev->is_audio_only = has_audio && !(has_video || has_dvb);
- dev->has_alsa_audio = has_audio;
- dev->audio_mode.has_audio = has_audio;
+ dev->is_audio_only = has_vendor_audio && !(has_video || has_dvb);
dev->has_video = has_video;
dev->ifnum = ifnum;
- /* Checks if audio is provided by some interface */
+ if (has_vendor_audio) {
+ printk(KERN_INFO DRIVER_NAME ": Audio interface %i found %s\n",
+ ifnum, "(Vendor Class)");
+ dev->usb_audio_type = EM28XX_USB_AUDIO_VENDOR;
+ }
+ /* Checks if audio is provided by a USB Audio Class interface */
for (i = 0; i < udev->config->desc.bNumInterfaces; i++) {
struct usb_interface *uif = udev->config->interface[i];
if (uif->altsetting[0].desc.bInterfaceClass == USB_CLASS_AUDIO) {
- dev->has_audio_class = 1;
+ if (has_vendor_audio)
+ em28xx_err("em28xx: device seems to have vendor AND usb audio class interfaces !\n"
+ "\t\tThe vendor interface will be ignored. Please contact the developers <linux-media@vger.kernel.org>\n");
+ dev->usb_audio_type = EM28XX_USB_AUDIO_CLASS;
break;
}
}
- if (has_audio)
- printk(KERN_INFO DRIVER_NAME
- ": Audio interface %i found %s\n",
- ifnum,
- dev->has_audio_class ? "(USB Audio Class)" : "(Vendor Class)");
if (has_video)
printk(KERN_INFO DRIVER_NAME
": Video interface %i found:%s%s\n",
diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
index 523d7e92bf47..b5e52fe7957a 100644
--- a/drivers/media/usb/em28xx/em28xx-core.c
+++ b/drivers/media/usb/em28xx/em28xx-core.c
@@ -279,7 +279,7 @@ int em28xx_read_ac97(struct em28xx *dev, u8 reg)
{
int ret;
u8 addr = (reg & 0x7f) | 0x80;
- u16 val;
+ __le16 val;
ret = em28xx_is_ac97_ready(dev);
if (ret < 0)
@@ -433,7 +433,7 @@ int em28xx_audio_analog_set(struct em28xx *dev)
int ret, i;
u8 xclk;
- if (!dev->audio_mode.has_audio)
+ if (dev->int_audio_type == EM28XX_INT_AUDIO_NONE)
return 0;
/* It is assumed that all devices use master volume for output.
@@ -505,37 +505,48 @@ int em28xx_audio_setup(struct em28xx *dev)
{
int vid1, vid2, feat, cfg;
u32 vid;
+ u8 i2s_samplerates;
- if (!dev->audio_mode.has_audio)
+ if (dev->chip_id == CHIP_ID_EM2870 ||
+ dev->chip_id == CHIP_ID_EM2874 ||
+ dev->chip_id == CHIP_ID_EM28174 ||
+ dev->chip_id == CHIP_ID_EM28178) {
+ /* Digital only device - don't load any alsa module */
+ dev->int_audio_type = EM28XX_INT_AUDIO_NONE;
+ dev->usb_audio_type = EM28XX_USB_AUDIO_NONE;
return 0;
+ }
/* See how this device is configured */
cfg = em28xx_read_reg(dev, EM28XX_R00_CHIPCFG);
em28xx_info("Config register raw data: 0x%02x\n", cfg);
- if (cfg < 0) {
- /* Register read error? */
- cfg = EM28XX_CHIPCFG_AC97; /* Be conservative */
+ if (cfg < 0) { /* Register read error */
+ /* Be conservative */
+ dev->int_audio_type = EM28XX_INT_AUDIO_AC97;
} else if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) == 0x00) {
/* The device doesn't have vendor audio at all */
- dev->has_alsa_audio = false;
- dev->audio_mode.has_audio = false;
+ dev->int_audio_type = EM28XX_INT_AUDIO_NONE;
+ dev->usb_audio_type = EM28XX_USB_AUDIO_NONE;
return 0;
} else if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) != EM28XX_CHIPCFG_AC97) {
+ dev->int_audio_type = EM28XX_INT_AUDIO_I2S;
if (dev->chip_id < CHIP_ID_EM2860 &&
(cfg & EM28XX_CHIPCFG_AUDIOMASK) ==
EM2820_CHIPCFG_I2S_1_SAMPRATE)
- dev->audio_mode.i2s_samplerates = 1;
+ i2s_samplerates = 1;
else if (dev->chip_id >= CHIP_ID_EM2860 &&
(cfg & EM28XX_CHIPCFG_AUDIOMASK) ==
EM2860_CHIPCFG_I2S_5_SAMPRATES)
- dev->audio_mode.i2s_samplerates = 5;
+ i2s_samplerates = 5;
else
- dev->audio_mode.i2s_samplerates = 3;
+ i2s_samplerates = 3;
em28xx_info("I2S Audio (%d sample rate(s))\n",
- dev->audio_mode.i2s_samplerates);
+ i2s_samplerates);
/* Skip the code that does AC97 vendor detection */
dev->audio_mode.ac97 = EM28XX_NO_AC97;
goto init_audio;
+ } else {
+ dev->int_audio_type = EM28XX_INT_AUDIO_AC97;
}
dev->audio_mode.ac97 = EM28XX_AC97_OTHER;
@@ -549,8 +560,9 @@ int em28xx_audio_setup(struct em28xx *dev)
*/
em28xx_warn("AC97 chip type couldn't be determined\n");
dev->audio_mode.ac97 = EM28XX_NO_AC97;
- dev->has_alsa_audio = false;
- dev->audio_mode.has_audio = false;
+ if (dev->usb_audio_type == EM28XX_USB_AUDIO_VENDOR)
+ dev->usb_audio_type = EM28XX_USB_AUDIO_NONE;
+ dev->int_audio_type = EM28XX_INT_AUDIO_NONE;
goto init_audio;
}
@@ -559,15 +571,12 @@ int em28xx_audio_setup(struct em28xx *dev)
goto init_audio;
vid = vid1 << 16 | vid2;
-
- dev->audio_mode.ac97_vendor_id = vid;
em28xx_warn("AC97 vendor ID = 0x%08x\n", vid);
feat = em28xx_read_ac97(dev, AC97_RESET);
if (feat < 0)
goto init_audio;
- dev->audio_mode.ac97_feat = feat;
em28xx_warn("AC97 features = 0x%04x\n", feat);
/* Try to identify what audio processor we have */
@@ -586,8 +595,8 @@ init_audio:
em28xx_info("Empia 202 AC97 audio processor detected\n");
break;
case EM28XX_AC97_SIGMATEL:
- em28xx_info("Sigmatel audio processor detected(stac 97%02x)\n",
- dev->audio_mode.ac97_vendor_id & 0xff);
+ em28xx_info("Sigmatel audio processor detected (stac 97%02x)\n",
+ vid & 0xff);
break;
case EM28XX_AC97_OTHER:
em28xx_warn("Unknown AC97 audio processor detected!\n");
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index 3a3e243edf89..9682c52d67d1 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -373,7 +373,6 @@ static struct tda18271_config kworld_ub435q_v2_config = {
};
static struct tda18212_config kworld_ub435q_v3_config = {
- .i2c_address = 0x60,
.if_atsc_vsb = 3600,
.if_atsc_qam = 3600,
};
@@ -856,7 +855,9 @@ static const struct m88ds3103_config pctv_461e_m88ds3103_config = {
.clock = 27000000,
.i2c_wr_max = 33,
.clock_out = 0,
- .ts_mode = M88DS3103_TS_PARALLEL_16,
+ .ts_mode = M88DS3103_TS_PARALLEL,
+ .ts_clk = 16000,
+ .ts_clk_pol = 1,
.agc = 0x99,
};
@@ -1435,6 +1436,15 @@ static int em28xx_dvb_init(struct em28xx *dev)
}
break;
case EM2874_BOARD_KWORLD_UB435Q_V3:
+ {
+ struct i2c_client *client;
+ struct i2c_adapter *adapter = &dev->i2c_adap[dev->def_i2c_bus];
+ struct i2c_board_info board_info = {
+ .type = "tda18212",
+ .addr = 0x60,
+ .platform_data = &kworld_ub435q_v3_config,
+ };
+
dvb->fe[0] = dvb_attach(lgdt3305_attach,
&em2874_lgdt3305_nogate_dev,
&dev->i2c_adap[dev->def_i2c_bus]);
@@ -1443,14 +1453,26 @@ static int em28xx_dvb_init(struct em28xx *dev)
goto out_free;
}
- /* Attach the demodulator. */
- if (!dvb_attach(tda18212_attach, dvb->fe[0],
- &dev->i2c_adap[dev->def_i2c_bus],
- &kworld_ub435q_v3_config)) {
- result = -EINVAL;
+ /* attach tuner */
+ kworld_ub435q_v3_config.fe = dvb->fe[0];
+ request_module("tda18212");
+ client = i2c_new_device(adapter, &board_info);
+ if (client == NULL || client->dev.driver == NULL) {
+ dvb_frontend_detach(dvb->fe[0]);
+ result = -ENODEV;
goto out_free;
}
+
+ if (!try_module_get(client->dev.driver->owner)) {
+ i2c_unregister_device(client);
+ dvb_frontend_detach(dvb->fe[0]);
+ result = -ENODEV;
+ goto out_free;
+ }
+
+ dvb->i2c_client_tuner = client;
break;
+ }
case EM2874_BOARD_PCTV_HD_MINI_80E:
dvb->fe[0] = dvb_attach(drx39xxj_attach, &dev->i2c_adap[dev->def_i2c_bus]);
if (dvb->fe[0] != NULL) {
@@ -1533,6 +1555,7 @@ static int em28xx_dvb_init(struct em28xx *dev)
/* attach demod */
si2168_config.i2c_adapter = &adapter;
si2168_config.fe = &dvb->fe[0];
+ si2168_config.ts_mode = SI2168_TS_PARALLEL;
memset(&info, 0, sizeof(struct i2c_board_info));
strlcpy(info.type, "si2168", I2C_NAME_SIZE);
info.addr = 0x64;
diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c
index ed843bd221ea..581f6dad4ca9 100644
--- a/drivers/media/usb/em28xx/em28xx-input.c
+++ b/drivers/media/usb/em28xx/em28xx-input.c
@@ -71,8 +71,7 @@ struct em28xx_IR {
unsigned int last_readcount;
u64 rc_type;
- /* i2c slave address of external device (if used) */
- u16 i2c_dev_addr;
+ struct i2c_client *i2c_client;
int (*get_key_i2c)(struct i2c_client *ir, enum rc_type *protocol, u32 *scancode);
int (*get_key)(struct em28xx_IR *, struct em28xx_ir_poll_result *);
@@ -294,16 +293,11 @@ static int em2874_polling_getkey(struct em28xx_IR *ir,
static int em28xx_i2c_ir_handle_key(struct em28xx_IR *ir)
{
- struct em28xx *dev = ir->dev;
static u32 scancode;
enum rc_type protocol;
int rc;
- struct i2c_client client;
-
- client.adapter = &ir->dev->i2c_adap[dev->def_i2c_bus];
- client.addr = ir->i2c_dev_addr;
- rc = ir->get_key_i2c(&client, &protocol, &scancode);
+ rc = ir->get_key_i2c(ir->i2c_client, &protocol, &scancode);
if (rc < 0) {
dprintk("ir->get_key_i2c() failed: %d\n", rc);
return rc;
@@ -361,7 +355,7 @@ static void em28xx_ir_work(struct work_struct *work)
{
struct em28xx_IR *ir = container_of(work, struct em28xx_IR, work.work);
- if (ir->i2c_dev_addr) /* external i2c device */
+ if (ir->i2c_client) /* external i2c device */
em28xx_i2c_ir_handle_key(ir);
else /* internal device */
em28xx_ir_handle_key(ir);
@@ -609,17 +603,17 @@ static int em28xx_register_snapshot_button(struct em28xx *dev)
static void em28xx_init_buttons(struct em28xx *dev)
{
u8 i = 0, j = 0;
- bool addr_new = 0;
+ bool addr_new = false;
dev->button_polling_interval = EM28XX_BUTTONS_DEBOUNCED_QUERY_INTERVAL;
while (dev->board.buttons[i].role >= 0 &&
dev->board.buttons[i].role < EM28XX_NUM_BUTTON_ROLES) {
struct em28xx_button *button = &dev->board.buttons[i];
/* Check if polling address is already on the list */
- addr_new = 1;
+ addr_new = true;
for (j = 0; j < dev->num_button_polling_addresses; j++) {
if (button->reg_r == dev->button_polling_addresses[j]) {
- addr_new = 0;
+ addr_new = false;
break;
}
}
@@ -756,7 +750,13 @@ static int em28xx_ir_init(struct em28xx *dev)
goto error;
}
- ir->i2c_dev_addr = i2c_rc_dev_addr;
+ ir->i2c_client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
+ if (!ir->i2c_client)
+ goto error;
+ ir->i2c_client->adapter = &ir->dev->i2c_adap[dev->def_i2c_bus];
+ ir->i2c_client->addr = i2c_rc_dev_addr;
+ ir->i2c_client->flags = 0;
+ /* NOTE: all other fields of i2c_client are unused */
} else { /* internal device */
switch (dev->chip_id) {
case CHIP_ID_EM2860:
@@ -815,6 +815,7 @@ static int em28xx_ir_init(struct em28xx *dev)
return 0;
error:
+ kfree(ir->i2c_client);
dev->ir = NULL;
rc_free_device(rc);
kfree(ir);
@@ -841,6 +842,8 @@ static int em28xx_ir_fini(struct em28xx *dev)
if (ir->rc)
rc_unregister_device(ir->rc);
+ kfree(ir->i2c_client);
+
/* done */
kfree(ir);
dev->ir = NULL;
diff --git a/drivers/media/usb/em28xx/em28xx-vbi.c b/drivers/media/usb/em28xx/em28xx-vbi.c
index 6d7f657f6f55..34ee1e03a732 100644
--- a/drivers/media/usb/em28xx/em28xx-vbi.c
+++ b/drivers/media/usb/em28xx/em28xx-vbi.c
@@ -29,17 +29,6 @@
#include "em28xx.h"
#include "em28xx-v4l.h"
-static unsigned int vbibufs = 5;
-module_param(vbibufs, int, 0644);
-MODULE_PARM_DESC(vbibufs, "number of vbi buffers, range 2-32");
-
-static unsigned int vbi_debug;
-module_param(vbi_debug, int, 0644);
-MODULE_PARM_DESC(vbi_debug, "enable debug messages [vbi]");
-
-#define dprintk(level, fmt, arg...) if (vbi_debug >= level) \
- printk(KERN_DEBUG "%s: " fmt, dev->core->name , ## arg)
-
/* ------------------------------------------------------------------ */
static int vbi_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index 29abc379551e..03d5ece0319c 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -435,7 +435,10 @@ static inline void finish_buffer(struct em28xx *dev,
em28xx_isocdbg("[%p/%d] wakeup\n", buf, buf->top_field);
buf->vb.v4l2_buf.sequence = dev->v4l2->field_count++;
- buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
+ if (dev->v4l2->progressive)
+ buf->vb.v4l2_buf.field = V4L2_FIELD_NONE;
+ else
+ buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
@@ -478,7 +481,7 @@ static void em28xx_copy_video(struct em28xx *dev,
lencopy = lencopy > remain ? remain : lencopy;
if ((char *)startwrite + lencopy > (char *)buf->vb_buf + buf->length) {
- em28xx_isocdbg("Overflow of %zi bytes past buffer end (1)\n",
+ em28xx_isocdbg("Overflow of %zu bytes past buffer end (1)\n",
((char *)startwrite + lencopy) -
((char *)buf->vb_buf + buf->length));
remain = (char *)buf->vb_buf + buf->length -
@@ -504,7 +507,7 @@ static void em28xx_copy_video(struct em28xx *dev,
if ((char *)startwrite + lencopy > (char *)buf->vb_buf +
buf->length) {
- em28xx_isocdbg("Overflow of %zi bytes past buffer end"
+ em28xx_isocdbg("Overflow of %zu bytes past buffer end"
"(2)\n",
((char *)startwrite + lencopy) -
((char *)buf->vb_buf + buf->length));
@@ -718,7 +721,7 @@ static inline void process_frame_data_em25xx(struct em28xx *dev,
struct em28xx_buffer *buf = dev->usb_ctl.vid_buf;
struct em28xx_dmaqueue *dmaq = &dev->vidq;
struct em28xx_v4l2 *v4l2 = dev->v4l2;
- bool frame_end = 0;
+ bool frame_end = false;
/* Check for header */
/* NOTE: at least with bulk transfers, only the first packet
@@ -994,13 +997,16 @@ static void em28xx_stop_streaming(struct vb2_queue *vq)
}
spin_lock_irqsave(&dev->slock, flags);
+ if (dev->usb_ctl.vid_buf != NULL) {
+ vb2_buffer_done(&dev->usb_ctl.vid_buf->vb, VB2_BUF_STATE_ERROR);
+ dev->usb_ctl.vid_buf = NULL;
+ }
while (!list_empty(&vidq->active)) {
struct em28xx_buffer *buf;
buf = list_entry(vidq->active.next, struct em28xx_buffer, list);
list_del(&buf->list);
vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
}
- dev->usb_ctl.vid_buf = NULL;
spin_unlock_irqrestore(&dev->slock, flags);
}
@@ -1021,13 +1027,16 @@ void em28xx_stop_vbi_streaming(struct vb2_queue *vq)
}
spin_lock_irqsave(&dev->slock, flags);
+ if (dev->usb_ctl.vbi_buf != NULL) {
+ vb2_buffer_done(&dev->usb_ctl.vbi_buf->vb, VB2_BUF_STATE_ERROR);
+ dev->usb_ctl.vbi_buf = NULL;
+ }
while (!list_empty(&vbiq->active)) {
struct em28xx_buffer *buf;
buf = list_entry(vbiq->active.next, struct em28xx_buffer, list);
list_del(&buf->list);
vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
}
- dev->usb_ctl.vbi_buf = NULL;
spin_unlock_irqrestore(&dev->slock, flags);
}
@@ -1711,7 +1720,7 @@ static int vidioc_querycap(struct file *file, void *priv,
else
cap->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_VBI_CAPTURE;
- if (dev->audio_mode.has_audio)
+ if (dev->int_audio_type != EM28XX_INT_AUDIO_NONE)
cap->device_caps |= V4L2_CAP_AUDIO;
if (dev->tuner_type != TUNER_ABSENT)
@@ -2296,7 +2305,7 @@ static int em28xx_v4l2_init(struct em28xx *dev)
v4l2->v4l2_dev.ctrl_handler = hdl;
if (dev->board.is_webcam)
- v4l2->progressive = 1;
+ v4l2->progressive = true;
/*
* Default format, used for tvp5150 or saa711x output formats
@@ -2502,7 +2511,7 @@ static int em28xx_v4l2_init(struct em28xx *dev)
v4l2_disable_ioctl(v4l2->vdev, VIDIOC_G_FREQUENCY);
v4l2_disable_ioctl(v4l2->vdev, VIDIOC_S_FREQUENCY);
}
- if (!dev->audio_mode.has_audio) {
+ if (dev->int_audio_type == EM28XX_INT_AUDIO_NONE) {
v4l2_disable_ioctl(v4l2->vdev, VIDIOC_G_AUDIO);
v4l2_disable_ioctl(v4l2->vdev, VIDIOC_S_AUDIO);
}
@@ -2532,7 +2541,7 @@ static int em28xx_v4l2_init(struct em28xx *dev)
v4l2_disable_ioctl(v4l2->vbi_dev, VIDIOC_G_FREQUENCY);
v4l2_disable_ioctl(v4l2->vbi_dev, VIDIOC_S_FREQUENCY);
}
- if (!dev->audio_mode.has_audio) {
+ if (dev->int_audio_type == EM28XX_INT_AUDIO_NONE) {
v4l2_disable_ioctl(v4l2->vbi_dev, VIDIOC_G_AUDIO);
v4l2_disable_ioctl(v4l2->vbi_dev, VIDIOC_S_AUDIO);
}
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index 4360338e7b31..a21a7463b557 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -309,13 +309,18 @@ enum em28xx_ac97_mode {
struct em28xx_audio_mode {
enum em28xx_ac97_mode ac97;
+};
- u16 ac97_feat;
- u32 ac97_vendor_id;
-
- unsigned int has_audio:1;
+enum em28xx_int_audio_type {
+ EM28XX_INT_AUDIO_NONE = 0,
+ EM28XX_INT_AUDIO_AC97,
+ EM28XX_INT_AUDIO_I2S,
+};
- u8 i2s_samplerates;
+enum em28xx_usb_audio_type {
+ EM28XX_USB_AUDIO_NONE = 0,
+ EM28XX_USB_AUDIO_CLASS,
+ EM28XX_USB_AUDIO_VENDOR,
};
/* em28xx has two audio inputs: tuner and line in.
@@ -608,9 +613,9 @@ struct em28xx {
unsigned int is_em25xx:1; /* em25xx/em276x/7x/8x family bridge */
unsigned char disconnected:1; /* device has been disconnected */
unsigned int has_video:1;
- unsigned int has_audio_class:1;
- unsigned int has_alsa_audio:1;
unsigned int is_audio_only:1;
+ enum em28xx_int_audio_type int_audio_type;
+ enum em28xx_usb_audio_type usb_audio_type;
struct em28xx_board board;
diff --git a/drivers/media/usb/go7007/go7007-usb.c b/drivers/media/usb/go7007/go7007-usb.c
index ece27ece8115..3f986e1178ce 100644
--- a/drivers/media/usb/go7007/go7007-usb.c
+++ b/drivers/media/usb/go7007/go7007-usb.c
@@ -696,7 +696,7 @@ static int go7007_usb_ezusb_write_interrupt(struct go7007 *go,
sizeof(status_reg), timeout);
if (r < 0)
break;
- status_reg = le16_to_cpu(*((u16 *)go->usb_buf));
+ status_reg = le16_to_cpu(*((__le16 *)go->usb_buf));
if (!(status_reg & 0x0010))
break;
msleep(10);
@@ -751,7 +751,7 @@ static int go7007_usb_onboard_write_interrupt(struct go7007 *go,
static void go7007_usb_readinterrupt_complete(struct urb *urb)
{
struct go7007 *go = (struct go7007 *)urb->context;
- u16 *regs = (u16 *)urb->transfer_buffer;
+ __le16 *regs = (__le16 *)urb->transfer_buffer;
int status = urb->status;
if (status) {
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index e8cf23c91cef..43d65057a5fe 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -876,9 +876,8 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev)
ep_tb[0].alt = gspca_dev->alt;
alt_idx = 1;
} else {
-
- /* else, compute the minimum bandwidth
- * and build the endpoint table */
+ /* else, compute the minimum bandwidth
+ * and build the endpoint table */
alt_idx = build_isoc_ep_tb(gspca_dev, intf, ep_tb);
if (alt_idx <= 0) {
pr_err("no transfer endpoint found\n");
diff --git a/drivers/media/usb/gspca/gspca.h b/drivers/media/usb/gspca/gspca.h
index f06253cd7469..d39adf90303b 100644
--- a/drivers/media/usb/gspca/gspca.h
+++ b/drivers/media/usb/gspca/gspca.h
@@ -235,6 +235,6 @@ int gspca_resume(struct usb_interface *intf);
int gspca_expo_autogain(struct gspca_dev *gspca_dev, int avg_lum,
int desired_avg_lum, int deadzone, int gain_knee, int exposure_knee);
int gspca_coarse_grained_expo_autogain(struct gspca_dev *gspca_dev,
- int avg_lum, int desired_avg_lum, int deadzone);
+ int avg_lum, int desired_avg_lum, int deadzone);
#endif /* GSPCAV2_H */
diff --git a/drivers/media/usb/gspca/kinect.c b/drivers/media/usb/gspca/kinect.c
index 45bc1f51c5d8..3cb30a37d6ac 100644
--- a/drivers/media/usb/gspca/kinect.c
+++ b/drivers/media/usb/gspca/kinect.c
@@ -51,9 +51,9 @@ struct pkt_hdr {
struct cam_hdr {
uint8_t magic[2];
- uint16_t len;
- uint16_t cmd;
- uint16_t tag;
+ __le16 len;
+ __le16 cmd;
+ __le16 tag;
};
/* specific webcam descriptor */
@@ -188,9 +188,9 @@ static int send_cmd(struct gspca_dev *gspca_dev, uint16_t cmd, void *cmdbuf,
rhdr->tag, chdr->tag);
return -1;
}
- if (cpu_to_le16(rhdr->len) != (actual_len/2)) {
+ if (le16_to_cpu(rhdr->len) != (actual_len/2)) {
pr_err("send_cmd: Bad len %04x != %04x\n",
- cpu_to_le16(rhdr->len), (int)(actual_len/2));
+ le16_to_cpu(rhdr->len), (int)(actual_len/2));
return -1;
}
@@ -211,7 +211,7 @@ static int write_register(struct gspca_dev *gspca_dev, uint16_t reg,
uint16_t data)
{
uint16_t reply[2];
- uint16_t cmd[2];
+ __le16 cmd[2];
int res;
cmd[0] = cpu_to_le16(reg);
diff --git a/drivers/media/usb/gspca/sn9c20x.c b/drivers/media/usb/gspca/sn9c20x.c
index 41a9a892f79c..d0ee899584a9 100644
--- a/drivers/media/usb/gspca/sn9c20x.c
+++ b/drivers/media/usb/gspca/sn9c20x.c
@@ -1297,7 +1297,7 @@ static void set_cmatrix(struct gspca_dev *gspca_dev,
s32 hue_coord, hue_index = 180 + hue;
u8 cmatrix[21];
- memset(cmatrix, 0, sizeof cmatrix);
+ memset(cmatrix, 0, sizeof(cmatrix));
cmatrix[2] = (contrast * 0x25 / 0x100) + 0x26;
cmatrix[0] = 0x13 + (cmatrix[2] - 0x26) * 0x13 / 0x25;
cmatrix[4] = 0x07 + (cmatrix[2] - 0x26) * 0x07 / 0x25;
@@ -1787,8 +1787,9 @@ static int sd_init(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
int i;
u8 value;
- u8 i2c_init[9] =
- {0x80, sd->i2c_addr, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03};
+ u8 i2c_init[9] = {
+ 0x80, sd->i2c_addr, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03
+ };
for (i = 0; i < ARRAY_SIZE(bridge_init); i++) {
value = bridge_init[i][1];
@@ -2242,8 +2243,9 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
{
struct sd *sd = (struct sd *) gspca_dev;
int avg_lum, is_jpeg;
- static const u8 frame_header[] =
- {0xff, 0xff, 0x00, 0xc4, 0xc4, 0x96};
+ static const u8 frame_header[] = {
+ 0xff, 0xff, 0x00, 0xc4, 0xc4, 0x96
+ };
is_jpeg = (sd->fmt & 0x03) == 0;
if (len >= 64 && memcmp(data, frame_header, 6) == 0) {
diff --git a/drivers/media/usb/hackrf/Kconfig b/drivers/media/usb/hackrf/Kconfig
new file mode 100644
index 000000000000..937e6f5c1e8e
--- /dev/null
+++ b/drivers/media/usb/hackrf/Kconfig
@@ -0,0 +1,10 @@
+config USB_HACKRF
+ tristate "HackRF"
+ depends on VIDEO_V4L2
+ select VIDEOBUF2_VMALLOC
+ ---help---
+	  This is a video4linux2 driver for the HackRF SDR device.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hackrf
+
diff --git a/drivers/media/usb/hackrf/Makefile b/drivers/media/usb/hackrf/Makefile
new file mode 100644
index 000000000000..73064a24cd4e
--- /dev/null
+++ b/drivers/media/usb/hackrf/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_USB_HACKRF) += hackrf.o
diff --git a/drivers/media/usb/hackrf/hackrf.c b/drivers/media/usb/hackrf/hackrf.c
new file mode 100644
index 000000000000..328b5ba47a0a
--- /dev/null
+++ b/drivers/media/usb/hackrf/hackrf.c
@@ -0,0 +1,1142 @@
+/*
+ * HackRF driver
+ *
+ * Copyright (C) 2014 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/videobuf2-vmalloc.h>
+
+/* HackRF USB API commands (from HackRF Library) */
+enum {
+ CMD_SET_TRANSCEIVER_MODE = 0x01,
+ CMD_SAMPLE_RATE_SET = 0x06,
+ CMD_BASEBAND_FILTER_BANDWIDTH_SET = 0x07,
+ CMD_BOARD_ID_READ = 0x0e,
+ CMD_VERSION_STRING_READ = 0x0f,
+ CMD_SET_FREQ = 0x10,
+ CMD_SET_LNA_GAIN = 0x13,
+ CMD_SET_VGA_GAIN = 0x14,
+};
+
+/*
+ * bEndpointAddress 0x81 EP 1 IN
+ * Transfer Type Bulk
+ * wMaxPacketSize 0x0200 1x 512 bytes
+ */
+#define MAX_BULK_BUFS (6)
+#define BULK_BUFFER_SIZE (128 * 512)
+
+static const struct v4l2_frequency_band bands_adc[] = {
+ {
+ .tuner = 0,
+ .type = V4L2_TUNER_ADC,
+ .index = 0,
+ .capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS,
+ .rangelow = 200000,
+ .rangehigh = 24000000,
+ },
+};
+
+static const struct v4l2_frequency_band bands_rf[] = {
+ {
+ .tuner = 1,
+ .type = V4L2_TUNER_RF,
+ .index = 0,
+ .capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS,
+ .rangelow = 1,
+ .rangehigh = 4294967294LL, /* max u32, hw goes over 7GHz */
+ },
+};
+
+/* stream formats */
+struct hackrf_format {
+ char *name;
+ u32 pixelformat;
+ u32 buffersize;
+};
+
+/* format descriptions for capture and preview */
+static struct hackrf_format formats[] = {
+ {
+ .name = "Complex S8",
+ .pixelformat = V4L2_SDR_FMT_CS8,
+ .buffersize = BULK_BUFFER_SIZE,
+ },
+};
+
+static const unsigned int NUM_FORMATS = ARRAY_SIZE(formats);
+
+/* intermediate buffers with raw data from the USB device */
+struct hackrf_frame_buf {
+ struct vb2_buffer vb; /* common v4l buffer stuff -- must be first */
+ struct list_head list;
+};
+
+struct hackrf_dev {
+#define POWER_ON (1 << 1)
+#define URB_BUF (1 << 2)
+#define USB_STATE_URB_BUF (1 << 3)
+ unsigned long flags;
+
+ struct device *dev;
+ struct usb_device *udev;
+ struct video_device vdev;
+ struct v4l2_device v4l2_dev;
+
+ /* videobuf2 queue and queued buffers list */
+ struct vb2_queue vb_queue;
+ struct list_head queued_bufs;
+ spinlock_t queued_bufs_lock; /* Protects queued_bufs */
+ unsigned sequence; /* Buffer sequence counter */
+ unsigned int vb_full; /* vb is full and packets dropped */
+
+ /* Note if taking both locks v4l2_lock must always be locked first! */
+ struct mutex v4l2_lock; /* Protects everything else */
+ struct mutex vb_queue_lock; /* Protects vb_queue */
+
+ struct urb *urb_list[MAX_BULK_BUFS];
+ int buf_num;
+ unsigned long buf_size;
+ u8 *buf_list[MAX_BULK_BUFS];
+ dma_addr_t dma_addr[MAX_BULK_BUFS];
+ int urbs_initialized;
+ int urbs_submitted;
+
+ /* USB control message buffer */
+ #define BUF_SIZE 24
+ u8 buf[BUF_SIZE];
+
+ /* Current configuration */
+ unsigned int f_adc;
+ unsigned int f_rf;
+ u32 pixelformat;
+ u32 buffersize;
+
+ /* Controls */
+ struct v4l2_ctrl_handler hdl;
+ struct v4l2_ctrl *bandwidth_auto;
+ struct v4l2_ctrl *bandwidth;
+ struct v4l2_ctrl *lna_gain;
+ struct v4l2_ctrl *if_gain;
+
+ /* Sample rate calc */
+ unsigned long jiffies_next;
+ unsigned int sample;
+ unsigned int sample_measured;
+};
+
+#define hackrf_dbg_usb_control_msg(_dev, _r, _t, _v, _i, _b, _l) { \
+ char *_direction; \
+ if (_t & USB_DIR_IN) \
+ _direction = "<<<"; \
+ else \
+ _direction = ">>>"; \
+ dev_dbg(_dev, "%02x %02x %02x %02x %02x %02x %02x %02x %s %*ph\n", \
+ _t, _r, _v & 0xff, _v >> 8, _i & 0xff, \
+ _i >> 8, _l & 0xff, _l >> 8, _direction, _l, _b); \
+}
+
+/* execute firmware command */
+static int hackrf_ctrl_msg(struct hackrf_dev *dev, u8 request, u16 value,
+ u16 index, u8 *data, u16 size)
+{
+ int ret;
+ unsigned int pipe;
+ u8 requesttype;
+
+ switch (request) {
+ case CMD_SET_TRANSCEIVER_MODE:
+ case CMD_SET_FREQ:
+ case CMD_SAMPLE_RATE_SET:
+ case CMD_BASEBAND_FILTER_BANDWIDTH_SET:
+ pipe = usb_sndctrlpipe(dev->udev, 0);
+ requesttype = (USB_TYPE_VENDOR | USB_DIR_OUT);
+ break;
+ case CMD_BOARD_ID_READ:
+ case CMD_VERSION_STRING_READ:
+ case CMD_SET_LNA_GAIN:
+ case CMD_SET_VGA_GAIN:
+ pipe = usb_rcvctrlpipe(dev->udev, 0);
+ requesttype = (USB_TYPE_VENDOR | USB_DIR_IN);
+ break;
+ default:
+ dev_err(dev->dev, "Unknown command %02x\n", request);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* write request */
+ if (!(requesttype & USB_DIR_IN))
+ memcpy(dev->buf, data, size);
+
+ ret = usb_control_msg(dev->udev, pipe, request, requesttype, value,
+ index, dev->buf, size, 1000);
+ hackrf_dbg_usb_control_msg(dev->dev, request, requesttype, value,
+ index, dev->buf, size);
+ if (ret < 0) {
+ dev_err(dev->dev, "usb_control_msg() failed %d request %02x\n",
+ ret, request);
+ goto err;
+ }
+
+ /* read request */
+ if (requesttype & USB_DIR_IN)
+ memcpy(data, dev->buf, size);
+
+ return 0;
+err:
+ return ret;
+}
+
+/* Private functions */
+static struct hackrf_frame_buf *hackrf_get_next_fill_buf(struct hackrf_dev *dev)
+{
+ unsigned long flags;
+ struct hackrf_frame_buf *buf = NULL;
+
+ spin_lock_irqsave(&dev->queued_bufs_lock, flags);
+ if (list_empty(&dev->queued_bufs))
+ goto leave;
+
+ buf = list_entry(dev->queued_bufs.next, struct hackrf_frame_buf, list);
+ list_del(&buf->list);
+leave:
+ spin_unlock_irqrestore(&dev->queued_bufs_lock, flags);
+ return buf;
+}
+
+static unsigned int hackrf_convert_stream(struct hackrf_dev *dev,
+ void *dst, void *src, unsigned int src_len)
+{
+ memcpy(dst, src, src_len);
+
+	/* calculate sample rate and output it in 10-second intervals */
+ if (unlikely(time_is_before_jiffies(dev->jiffies_next))) {
+ #define MSECS 10000UL
+ unsigned int msecs = jiffies_to_msecs(jiffies -
+ dev->jiffies_next + msecs_to_jiffies(MSECS));
+ unsigned int samples = dev->sample - dev->sample_measured;
+
+ dev->jiffies_next = jiffies + msecs_to_jiffies(MSECS);
+ dev->sample_measured = dev->sample;
+ dev_dbg(dev->dev, "slen=%u samples=%u msecs=%u sample rate=%lu\n",
+ src_len, samples, msecs,
+ samples * 1000UL / msecs);
+ }
+
+ /* total number of samples */
+ dev->sample += src_len / 2;
+
+ return src_len;
+}
+
+/*
+ * This gets called for the bulk stream pipe. This is done in interrupt
+ * time, so it has to be fast, not crash, and not stall. Neat.
+ */
+static void hackrf_urb_complete(struct urb *urb)
+{
+ struct hackrf_dev *dev = urb->context;
+ struct hackrf_frame_buf *fbuf;
+
+ dev_dbg_ratelimited(dev->dev, "status=%d length=%d/%d errors=%d\n",
+ urb->status, urb->actual_length,
+ urb->transfer_buffer_length, urb->error_count);
+
+ switch (urb->status) {
+ case 0: /* success */
+ case -ETIMEDOUT: /* NAK */
+ break;
+ case -ECONNRESET: /* kill */
+ case -ENOENT:
+ case -ESHUTDOWN:
+ return;
+ default: /* error */
+ dev_err_ratelimited(dev->dev, "URB failed %d\n", urb->status);
+ break;
+ }
+
+ if (likely(urb->actual_length > 0)) {
+ void *ptr;
+ unsigned int len;
+ /* get free framebuffer */
+ fbuf = hackrf_get_next_fill_buf(dev);
+ if (unlikely(fbuf == NULL)) {
+ dev->vb_full++;
+ dev_notice_ratelimited(dev->dev,
+ "videobuf is full, %d packets dropped\n",
+ dev->vb_full);
+ goto skip;
+ }
+
+ /* fill framebuffer */
+ ptr = vb2_plane_vaddr(&fbuf->vb, 0);
+ len = hackrf_convert_stream(dev, ptr, urb->transfer_buffer,
+ urb->actual_length);
+ vb2_set_plane_payload(&fbuf->vb, 0, len);
+ v4l2_get_timestamp(&fbuf->vb.v4l2_buf.timestamp);
+ fbuf->vb.v4l2_buf.sequence = dev->sequence++;
+ vb2_buffer_done(&fbuf->vb, VB2_BUF_STATE_DONE);
+ }
+skip:
+ usb_submit_urb(urb, GFP_ATOMIC);
+}
+
+static int hackrf_kill_urbs(struct hackrf_dev *dev)
+{
+ int i;
+
+ for (i = dev->urbs_submitted - 1; i >= 0; i--) {
+ dev_dbg(dev->dev, "kill urb=%d\n", i);
+ /* stop the URB */
+ usb_kill_urb(dev->urb_list[i]);
+ }
+ dev->urbs_submitted = 0;
+
+ return 0;
+}
+
+static int hackrf_submit_urbs(struct hackrf_dev *dev)
+{
+ int i, ret;
+
+ for (i = 0; i < dev->urbs_initialized; i++) {
+ dev_dbg(dev->dev, "submit urb=%d\n", i);
+ ret = usb_submit_urb(dev->urb_list[i], GFP_ATOMIC);
+ if (ret) {
+ dev_err(dev->dev, "Could not submit URB no. %d - get them all back\n",
+ i);
+ hackrf_kill_urbs(dev);
+ return ret;
+ }
+ dev->urbs_submitted++;
+ }
+
+ return 0;
+}
+
+static int hackrf_free_stream_bufs(struct hackrf_dev *dev)
+{
+ if (dev->flags & USB_STATE_URB_BUF) {
+ while (dev->buf_num) {
+ dev->buf_num--;
+ dev_dbg(dev->dev, "free buf=%d\n", dev->buf_num);
+ usb_free_coherent(dev->udev, dev->buf_size,
+ dev->buf_list[dev->buf_num],
+ dev->dma_addr[dev->buf_num]);
+ }
+ }
+ dev->flags &= ~USB_STATE_URB_BUF;
+
+ return 0;
+}
+
+static int hackrf_alloc_stream_bufs(struct hackrf_dev *dev)
+{
+ dev->buf_num = 0;
+ dev->buf_size = BULK_BUFFER_SIZE;
+
+ dev_dbg(dev->dev, "all in all I will use %u bytes for streaming\n",
+ MAX_BULK_BUFS * BULK_BUFFER_SIZE);
+
+ for (dev->buf_num = 0; dev->buf_num < MAX_BULK_BUFS; dev->buf_num++) {
+ dev->buf_list[dev->buf_num] = usb_alloc_coherent(dev->udev,
+ BULK_BUFFER_SIZE, GFP_ATOMIC,
+ &dev->dma_addr[dev->buf_num]);
+ if (!dev->buf_list[dev->buf_num]) {
+ dev_dbg(dev->dev, "alloc buf=%d failed\n",
+ dev->buf_num);
+ hackrf_free_stream_bufs(dev);
+ return -ENOMEM;
+ }
+
+ dev_dbg(dev->dev, "alloc buf=%d %p (dma %llu)\n", dev->buf_num,
+ dev->buf_list[dev->buf_num],
+ (long long)dev->dma_addr[dev->buf_num]);
+ dev->flags |= USB_STATE_URB_BUF;
+ }
+
+ return 0;
+}
+
+static int hackrf_free_urbs(struct hackrf_dev *dev)
+{
+ int i;
+
+ hackrf_kill_urbs(dev);
+
+ for (i = dev->urbs_initialized - 1; i >= 0; i--) {
+ if (dev->urb_list[i]) {
+ dev_dbg(dev->dev, "free urb=%d\n", i);
+ /* free the URBs */
+ usb_free_urb(dev->urb_list[i]);
+ }
+ }
+ dev->urbs_initialized = 0;
+
+ return 0;
+}
+
+static int hackrf_alloc_urbs(struct hackrf_dev *dev)
+{
+ int i, j;
+
+ /* allocate the URBs */
+ for (i = 0; i < MAX_BULK_BUFS; i++) {
+ dev_dbg(dev->dev, "alloc urb=%d\n", i);
+ dev->urb_list[i] = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!dev->urb_list[i]) {
+ dev_dbg(dev->dev, "failed\n");
+ for (j = 0; j < i; j++)
+ usb_free_urb(dev->urb_list[j]);
+ return -ENOMEM;
+ }
+ usb_fill_bulk_urb(dev->urb_list[i],
+ dev->udev,
+ usb_rcvbulkpipe(dev->udev, 0x81),
+ dev->buf_list[i],
+ BULK_BUFFER_SIZE,
+ hackrf_urb_complete, dev);
+
+ dev->urb_list[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
+ dev->urb_list[i]->transfer_dma = dev->dma_addr[i];
+ dev->urbs_initialized++;
+ }
+
+ return 0;
+}
+
+/* Must be called with vb_queue_lock held */
+static void hackrf_cleanup_queued_bufs(struct hackrf_dev *dev)
+{
+ unsigned long flags;
+
+ dev_dbg(dev->dev, "\n");
+
+ spin_lock_irqsave(&dev->queued_bufs_lock, flags);
+ while (!list_empty(&dev->queued_bufs)) {
+ struct hackrf_frame_buf *buf;
+
+ buf = list_entry(dev->queued_bufs.next,
+ struct hackrf_frame_buf, list);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+ spin_unlock_irqrestore(&dev->queued_bufs_lock, flags);
+}
+
+/* The user yanked out the cable... */
+static void hackrf_disconnect(struct usb_interface *intf)
+{
+ struct v4l2_device *v = usb_get_intfdata(intf);
+ struct hackrf_dev *dev = container_of(v, struct hackrf_dev, v4l2_dev);
+
+ dev_dbg(dev->dev, "\n");
+
+ mutex_lock(&dev->vb_queue_lock);
+ mutex_lock(&dev->v4l2_lock);
+ /* No need to keep the urbs around after disconnection */
+ dev->udev = NULL;
+ v4l2_device_disconnect(&dev->v4l2_dev);
+ video_unregister_device(&dev->vdev);
+ mutex_unlock(&dev->v4l2_lock);
+ mutex_unlock(&dev->vb_queue_lock);
+
+ v4l2_device_put(&dev->v4l2_dev);
+}
+
+/* Videobuf2 operations */
+static int hackrf_queue_setup(struct vb2_queue *vq,
+ const struct v4l2_format *fmt, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct hackrf_dev *dev = vb2_get_drv_priv(vq);
+
+ dev_dbg(dev->dev, "nbuffers=%d\n", *nbuffers);
+
+ /* Need at least 8 buffers */
+ if (vq->num_buffers + *nbuffers < 8)
+ *nbuffers = 8 - vq->num_buffers;
+ *nplanes = 1;
+ sizes[0] = PAGE_ALIGN(dev->buffersize);
+
+ dev_dbg(dev->dev, "nbuffers=%d sizes[0]=%d\n", *nbuffers, sizes[0]);
+ return 0;
+}
+
+static void hackrf_buf_queue(struct vb2_buffer *vb)
+{
+ struct hackrf_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct hackrf_frame_buf *buf =
+ container_of(vb, struct hackrf_frame_buf, vb);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->queued_bufs_lock, flags);
+ list_add_tail(&buf->list, &dev->queued_bufs);
+ spin_unlock_irqrestore(&dev->queued_bufs_lock, flags);
+}
+
+static int hackrf_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct hackrf_dev *dev = vb2_get_drv_priv(vq);
+ int ret;
+
+ dev_dbg(dev->dev, "\n");
+
+ if (!dev->udev)
+ return -ENODEV;
+
+ mutex_lock(&dev->v4l2_lock);
+
+ dev->sequence = 0;
+
+ set_bit(POWER_ON, &dev->flags);
+
+ ret = hackrf_alloc_stream_bufs(dev);
+ if (ret)
+ goto err;
+
+ ret = hackrf_alloc_urbs(dev);
+ if (ret)
+ goto err;
+
+ ret = hackrf_submit_urbs(dev);
+ if (ret)
+ goto err;
+
+ /* start hardware streaming */
+ ret = hackrf_ctrl_msg(dev, CMD_SET_TRANSCEIVER_MODE, 1, 0, NULL, 0);
+ if (ret)
+ goto err;
+
+ goto exit_mutex_unlock;
+err:
+ hackrf_kill_urbs(dev);
+ hackrf_free_urbs(dev);
+ hackrf_free_stream_bufs(dev);
+ clear_bit(POWER_ON, &dev->flags);
+
+ /* return all queued buffers to vb2 */
+ {
+ struct hackrf_frame_buf *buf, *tmp;
+
+ list_for_each_entry_safe(buf, tmp, &dev->queued_bufs, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+ }
+ }
+
+exit_mutex_unlock:
+ mutex_unlock(&dev->v4l2_lock);
+
+ return ret;
+}
+
+static void hackrf_stop_streaming(struct vb2_queue *vq)
+{
+ struct hackrf_dev *dev = vb2_get_drv_priv(vq);
+
+ dev_dbg(dev->dev, "\n");
+
+ mutex_lock(&dev->v4l2_lock);
+
+ /* stop hardware streaming */
+ hackrf_ctrl_msg(dev, CMD_SET_TRANSCEIVER_MODE, 0, 0, NULL, 0);
+
+ hackrf_kill_urbs(dev);
+ hackrf_free_urbs(dev);
+ hackrf_free_stream_bufs(dev);
+
+ hackrf_cleanup_queued_bufs(dev);
+
+ clear_bit(POWER_ON, &dev->flags);
+
+ mutex_unlock(&dev->v4l2_lock);
+}
+
+static struct vb2_ops hackrf_vb2_ops = {
+ .queue_setup = hackrf_queue_setup,
+ .buf_queue = hackrf_buf_queue,
+ .start_streaming = hackrf_start_streaming,
+ .stop_streaming = hackrf_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int hackrf_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct hackrf_dev *dev = video_drvdata(file);
+
+ dev_dbg(dev->dev, "\n");
+
+ strlcpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
+ strlcpy(cap->card, dev->vdev.name, sizeof(cap->card));
+ usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
+ cap->device_caps = V4L2_CAP_SDR_CAPTURE | V4L2_CAP_STREAMING |
+ V4L2_CAP_READWRITE | V4L2_CAP_TUNER;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+ return 0;
+}
+
+static int hackrf_s_fmt_sdr_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct hackrf_dev *dev = video_drvdata(file);
+ struct vb2_queue *q = &dev->vb_queue;
+ int i;
+
+ dev_dbg(dev->dev, "pixelformat fourcc %4.4s\n",
+ (char *)&f->fmt.sdr.pixelformat);
+
+ if (vb2_is_busy(q))
+ return -EBUSY;
+
+ memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
+ for (i = 0; i < NUM_FORMATS; i++) {
+ if (f->fmt.sdr.pixelformat == formats[i].pixelformat) {
+ dev->pixelformat = formats[i].pixelformat;
+ dev->buffersize = formats[i].buffersize;
+ f->fmt.sdr.buffersize = formats[i].buffersize;
+ return 0;
+ }
+ }
+
+ dev->pixelformat = formats[0].pixelformat;
+ dev->buffersize = formats[0].buffersize;
+ f->fmt.sdr.pixelformat = formats[0].pixelformat;
+ f->fmt.sdr.buffersize = formats[0].buffersize;
+
+ return 0;
+}
+
+static int hackrf_g_fmt_sdr_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct hackrf_dev *dev = video_drvdata(file);
+
+ dev_dbg(dev->dev, "pixelformat fourcc %4.4s\n",
+ (char *)&dev->pixelformat);
+
+ memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
+ f->fmt.sdr.pixelformat = dev->pixelformat;
+ f->fmt.sdr.buffersize = dev->buffersize;
+
+ return 0;
+}
+
+static int hackrf_try_fmt_sdr_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct hackrf_dev *dev = video_drvdata(file);
+ int i;
+
+ dev_dbg(dev->dev, "pixelformat fourcc %4.4s\n",
+ (char *)&f->fmt.sdr.pixelformat);
+
+ memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
+ for (i = 0; i < NUM_FORMATS; i++) {
+ if (formats[i].pixelformat == f->fmt.sdr.pixelformat) {
+ f->fmt.sdr.buffersize = formats[i].buffersize;
+ return 0;
+ }
+ }
+
+ f->fmt.sdr.pixelformat = formats[0].pixelformat;
+ f->fmt.sdr.buffersize = formats[0].buffersize;
+
+ return 0;
+}
+
+static int hackrf_enum_fmt_sdr_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct hackrf_dev *dev = video_drvdata(file);
+
+ dev_dbg(dev->dev, "index=%d\n", f->index);
+
+ if (f->index >= NUM_FORMATS)
+ return -EINVAL;
+
+ strlcpy(f->description, formats[f->index].name, sizeof(f->description));
+ f->pixelformat = formats[f->index].pixelformat;
+
+ return 0;
+}
+
+static int hackrf_s_tuner(struct file *file, void *priv,
+ const struct v4l2_tuner *v)
+{
+ struct hackrf_dev *dev = video_drvdata(file);
+ int ret;
+
+ dev_dbg(dev->dev, "index=%d\n", v->index);
+
+ if (v->index == 0)
+ ret = 0;
+ else if (v->index == 1)
+ ret = 0;
+ else
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static int hackrf_g_tuner(struct file *file, void *priv, struct v4l2_tuner *v)
+{
+ struct hackrf_dev *dev = video_drvdata(file);
+ int ret;
+
+ dev_dbg(dev->dev, "index=%d\n", v->index);
+
+ if (v->index == 0) {
+ strlcpy(v->name, "HackRF ADC", sizeof(v->name));
+ v->type = V4L2_TUNER_ADC;
+ v->capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS;
+ v->rangelow = bands_adc[0].rangelow;
+ v->rangehigh = bands_adc[0].rangehigh;
+ ret = 0;
+ } else if (v->index == 1) {
+ strlcpy(v->name, "HackRF RF", sizeof(v->name));
+ v->type = V4L2_TUNER_RF;
+ v->capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS;
+ v->rangelow = bands_rf[0].rangelow;
+ v->rangehigh = bands_rf[0].rangehigh;
+ ret = 0;
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int hackrf_s_frequency(struct file *file, void *priv,
+ const struct v4l2_frequency *f)
+{
+ struct hackrf_dev *dev = video_drvdata(file);
+ int ret;
+ unsigned int upper, lower;
+ u8 buf[8];
+
+ dev_dbg(dev->dev, "tuner=%d type=%d frequency=%u\n",
+ f->tuner, f->type, f->frequency);
+
+ if (f->tuner == 0) {
+ dev->f_adc = clamp_t(unsigned int, f->frequency,
+ bands_adc[0].rangelow, bands_adc[0].rangehigh);
+ dev_dbg(dev->dev, "ADC frequency=%u Hz\n", dev->f_adc);
+ upper = dev->f_adc;
+ lower = 1;
+ buf[0] = (upper >> 0) & 0xff;
+ buf[1] = (upper >> 8) & 0xff;
+ buf[2] = (upper >> 16) & 0xff;
+ buf[3] = (upper >> 24) & 0xff;
+ buf[4] = (lower >> 0) & 0xff;
+ buf[5] = (lower >> 8) & 0xff;
+ buf[6] = (lower >> 16) & 0xff;
+ buf[7] = (lower >> 24) & 0xff;
+ ret = hackrf_ctrl_msg(dev, CMD_SAMPLE_RATE_SET, 0, 0, buf, 8);
+ } else if (f->tuner == 1) {
+ dev->f_rf = clamp_t(unsigned int, f->frequency,
+ bands_rf[0].rangelow, bands_rf[0].rangehigh);
+ dev_dbg(dev->dev, "RF frequency=%u Hz\n", dev->f_rf);
+ upper = dev->f_rf / 1000000;
+ lower = dev->f_rf % 1000000;
+ buf[0] = (upper >> 0) & 0xff;
+ buf[1] = (upper >> 8) & 0xff;
+ buf[2] = (upper >> 16) & 0xff;
+ buf[3] = (upper >> 24) & 0xff;
+ buf[4] = (lower >> 0) & 0xff;
+ buf[5] = (lower >> 8) & 0xff;
+ buf[6] = (lower >> 16) & 0xff;
+ buf[7] = (lower >> 24) & 0xff;
+ ret = hackrf_ctrl_msg(dev, CMD_SET_FREQ, 0, 0, buf, 8);
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int hackrf_g_frequency(struct file *file, void *priv,
+ struct v4l2_frequency *f)
+{
+ struct hackrf_dev *dev = video_drvdata(file);
+ int ret;
+
+ dev_dbg(dev->dev, "tuner=%d type=%d\n", f->tuner, f->type);
+
+ if (f->tuner == 0) {
+ f->type = V4L2_TUNER_ADC;
+ f->frequency = dev->f_adc;
+ ret = 0;
+ } else if (f->tuner == 1) {
+ f->type = V4L2_TUNER_RF;
+ f->frequency = dev->f_rf;
+ ret = 0;
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int hackrf_enum_freq_bands(struct file *file, void *priv,
+ struct v4l2_frequency_band *band)
+{
+ struct hackrf_dev *dev = video_drvdata(file);
+ int ret;
+
+ dev_dbg(dev->dev, "tuner=%d type=%d index=%d\n",
+ band->tuner, band->type, band->index);
+
+ if (band->tuner == 0) {
+ if (band->index >= ARRAY_SIZE(bands_adc)) {
+ ret = -EINVAL;
+ } else {
+ *band = bands_adc[band->index];
+ ret = 0;
+ }
+ } else if (band->tuner == 1) {
+ if (band->index >= ARRAY_SIZE(bands_rf)) {
+ ret = -EINVAL;
+ } else {
+ *band = bands_rf[band->index];
+ ret = 0;
+ }
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct v4l2_ioctl_ops hackrf_ioctl_ops = {
+ .vidioc_querycap = hackrf_querycap,
+
+ .vidioc_s_fmt_sdr_cap = hackrf_s_fmt_sdr_cap,
+ .vidioc_g_fmt_sdr_cap = hackrf_g_fmt_sdr_cap,
+ .vidioc_enum_fmt_sdr_cap = hackrf_enum_fmt_sdr_cap,
+ .vidioc_try_fmt_sdr_cap = hackrf_try_fmt_sdr_cap,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_s_tuner = hackrf_s_tuner,
+ .vidioc_g_tuner = hackrf_g_tuner,
+
+ .vidioc_s_frequency = hackrf_s_frequency,
+ .vidioc_g_frequency = hackrf_g_frequency,
+ .vidioc_enum_freq_bands = hackrf_enum_freq_bands,
+
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+};
+
+static const struct v4l2_file_operations hackrf_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+ .unlocked_ioctl = video_ioctl2,
+};
+
+static struct video_device hackrf_template = {
+ .name = "HackRF One",
+ .release = video_device_release_empty,
+ .fops = &hackrf_fops,
+ .ioctl_ops = &hackrf_ioctl_ops,
+};
+
+static void hackrf_video_release(struct v4l2_device *v)
+{
+ struct hackrf_dev *dev = container_of(v, struct hackrf_dev, v4l2_dev);
+
+ v4l2_ctrl_handler_free(&dev->hdl);
+ v4l2_device_unregister(&dev->v4l2_dev);
+ kfree(dev);
+}
+
+static int hackrf_set_bandwidth(struct hackrf_dev *dev)
+{
+ int ret, i;
+ u16 u16tmp, u16tmp2;
+ unsigned int bandwidth;
+
+ static const struct {
+ u32 freq;
+ } bandwidth_lut[] = {
+ { 1750000}, /* 1.75 MHz */
+ { 2500000}, /* 2.5 MHz */
+ { 3500000}, /* 3.5 MHz */
+ { 5000000}, /* 5 MHz */
+ { 5500000}, /* 5.5 MHz */
+ { 6000000}, /* 6 MHz */
+ { 7000000}, /* 7 MHz */
+ { 8000000}, /* 8 MHz */
+ { 9000000}, /* 9 MHz */
+ {10000000}, /* 10 MHz */
+ {12000000}, /* 12 MHz */
+ {14000000}, /* 14 MHz */
+ {15000000}, /* 15 MHz */
+ {20000000}, /* 20 MHz */
+ {24000000}, /* 24 MHz */
+ {28000000}, /* 28 MHz */
+ };
+
+ dev_dbg(dev->dev, "bandwidth auto=%d->%d val=%d->%d f_adc=%u\n",
+ dev->bandwidth_auto->cur.val,
+ dev->bandwidth_auto->val, dev->bandwidth->cur.val,
+ dev->bandwidth->val, dev->f_adc);
+
+ if (dev->bandwidth_auto->val == true)
+ bandwidth = dev->f_adc;
+ else
+ bandwidth = dev->bandwidth->val;
+
+ for (i = 0; i < ARRAY_SIZE(bandwidth_lut); i++) {
+ if (bandwidth <= bandwidth_lut[i].freq) {
+ bandwidth = bandwidth_lut[i].freq;
+ break;
+ }
+ }
+
+ dev->bandwidth->val = bandwidth;
+ dev->bandwidth->cur.val = bandwidth;
+
+ dev_dbg(dev->dev, "bandwidth selected=%d\n", bandwidth_lut[i].freq);
+
+ u16tmp = 0;
+ u16tmp |= ((bandwidth >> 0) & 0xff) << 0;
+ u16tmp |= ((bandwidth >> 8) & 0xff) << 8;
+ u16tmp2 = 0;
+ u16tmp2 |= ((bandwidth >> 16) & 0xff) << 0;
+ u16tmp2 |= ((bandwidth >> 24) & 0xff) << 8;
+
+ ret = hackrf_ctrl_msg(dev, CMD_BASEBAND_FILTER_BANDWIDTH_SET,
+ u16tmp, u16tmp2, NULL, 0);
+ if (ret)
+ dev_dbg(dev->dev, "failed=%d\n", ret);
+
+ return ret;
+}
+
+static int hackrf_set_lna_gain(struct hackrf_dev *dev)
+{
+ int ret;
+ u8 u8tmp;
+
+ dev_dbg(dev->dev, "lna val=%d->%d\n",
+ dev->lna_gain->cur.val, dev->lna_gain->val);
+
+ ret = hackrf_ctrl_msg(dev, CMD_SET_LNA_GAIN, 0, dev->lna_gain->val,
+ &u8tmp, 1);
+ if (ret)
+ dev_dbg(dev->dev, "failed=%d\n", ret);
+
+ return ret;
+}
+
+static int hackrf_set_if_gain(struct hackrf_dev *dev)
+{
+ int ret;
+ u8 u8tmp;
+
+ dev_dbg(dev->dev, "val=%d->%d\n",
+ dev->if_gain->cur.val, dev->if_gain->val);
+
+ ret = hackrf_ctrl_msg(dev, CMD_SET_VGA_GAIN, 0, dev->if_gain->val,
+ &u8tmp, 1);
+ if (ret)
+ dev_dbg(dev->dev, "failed=%d\n", ret);
+
+ return ret;
+}
+
+static int hackrf_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct hackrf_dev *dev = container_of(ctrl->handler,
+ struct hackrf_dev, hdl);
+ int ret;
+
+ switch (ctrl->id) {
+ case V4L2_CID_RF_TUNER_BANDWIDTH_AUTO:
+ case V4L2_CID_RF_TUNER_BANDWIDTH:
+ ret = hackrf_set_bandwidth(dev);
+ break;
+ case V4L2_CID_RF_TUNER_LNA_GAIN:
+ ret = hackrf_set_lna_gain(dev);
+ break;
+ case V4L2_CID_RF_TUNER_IF_GAIN:
+ ret = hackrf_set_if_gain(dev);
+ break;
+ default:
+ dev_dbg(dev->dev, "unknown ctrl: id=%d name=%s\n",
+ ctrl->id, ctrl->name);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops hackrf_ctrl_ops = {
+ .s_ctrl = hackrf_s_ctrl,
+};
+
+static int hackrf_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct hackrf_dev *dev;
+ int ret;
+ u8 u8tmp, buf[BUF_SIZE];
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (dev == NULL)
+ return -ENOMEM;
+
+ mutex_init(&dev->v4l2_lock);
+ mutex_init(&dev->vb_queue_lock);
+ spin_lock_init(&dev->queued_bufs_lock);
+ INIT_LIST_HEAD(&dev->queued_bufs);
+ dev->dev = &intf->dev;
+ dev->udev = interface_to_usbdev(intf);
+ dev->f_adc = bands_adc[0].rangelow;
+ dev->f_rf = bands_rf[0].rangelow;
+ dev->pixelformat = formats[0].pixelformat;
+ dev->buffersize = formats[0].buffersize;
+
+ /* Detect device */
+ ret = hackrf_ctrl_msg(dev, CMD_BOARD_ID_READ, 0, 0, &u8tmp, 1);
+ if (ret == 0)
+ ret = hackrf_ctrl_msg(dev, CMD_VERSION_STRING_READ, 0, 0,
+ buf, BUF_SIZE);
+ if (ret) {
+ dev_err(dev->dev, "Could not detect board\n");
+ goto err_free_mem;
+ }
+
+ buf[BUF_SIZE - 1] = '\0';
+
+ dev_info(dev->dev, "Board ID: %02x\n", u8tmp);
+ dev_info(dev->dev, "Firmware version: %s\n", buf);
+
+ /* Init videobuf2 queue structure */
+ dev->vb_queue.type = V4L2_BUF_TYPE_SDR_CAPTURE;
+ dev->vb_queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
+ dev->vb_queue.drv_priv = dev;
+ dev->vb_queue.buf_struct_size = sizeof(struct hackrf_frame_buf);
+ dev->vb_queue.ops = &hackrf_vb2_ops;
+ dev->vb_queue.mem_ops = &vb2_vmalloc_memops;
+ dev->vb_queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ ret = vb2_queue_init(&dev->vb_queue);
+ if (ret) {
+ dev_err(dev->dev, "Could not initialize vb2 queue\n");
+ goto err_free_mem;
+ }
+
+ /* Init video_device structure */
+ dev->vdev = hackrf_template;
+ dev->vdev.queue = &dev->vb_queue;
+ dev->vdev.queue->lock = &dev->vb_queue_lock;
+ video_set_drvdata(&dev->vdev, dev);
+
+ /* Register the v4l2_device structure */
+ dev->v4l2_dev.release = hackrf_video_release;
+ ret = v4l2_device_register(&intf->dev, &dev->v4l2_dev);
+ if (ret) {
+ dev_err(dev->dev, "Failed to register v4l2-device (%d)\n", ret);
+ goto err_free_mem;
+ }
+
+ /* Register controls */
+ v4l2_ctrl_handler_init(&dev->hdl, 4);
+ dev->bandwidth_auto = v4l2_ctrl_new_std(&dev->hdl, &hackrf_ctrl_ops,
+ V4L2_CID_RF_TUNER_BANDWIDTH_AUTO, 0, 1, 1, 1);
+ dev->bandwidth = v4l2_ctrl_new_std(&dev->hdl, &hackrf_ctrl_ops,
+ V4L2_CID_RF_TUNER_BANDWIDTH,
+ 1750000, 28000000, 50000, 1750000);
+ v4l2_ctrl_auto_cluster(2, &dev->bandwidth_auto, 0, false);
+ dev->lna_gain = v4l2_ctrl_new_std(&dev->hdl, &hackrf_ctrl_ops,
+ V4L2_CID_RF_TUNER_LNA_GAIN, 0, 40, 8, 0);
+ dev->if_gain = v4l2_ctrl_new_std(&dev->hdl, &hackrf_ctrl_ops,
+ V4L2_CID_RF_TUNER_IF_GAIN, 0, 62, 2, 0);
+ if (dev->hdl.error) {
+ ret = dev->hdl.error;
+ dev_err(dev->dev, "Could not initialize controls\n");
+ goto err_free_controls;
+ }
+
+ v4l2_ctrl_handler_setup(&dev->hdl);
+
+ dev->v4l2_dev.ctrl_handler = &dev->hdl;
+ dev->vdev.v4l2_dev = &dev->v4l2_dev;
+ dev->vdev.lock = &dev->v4l2_lock;
+
+ ret = video_register_device(&dev->vdev, VFL_TYPE_SDR, -1);
+ if (ret) {
+ dev_err(dev->dev, "Failed to register as video device (%d)\n",
+ ret);
+ goto err_unregister_v4l2_dev;
+ }
+ dev_info(dev->dev, "Registered as %s\n",
+ video_device_node_name(&dev->vdev));
+ dev_notice(dev->dev, "SDR API is still slightly experimental and functionality changes may follow\n");
+ return 0;
+
+err_free_controls:
+ v4l2_ctrl_handler_free(&dev->hdl);
+err_unregister_v4l2_dev:
+ v4l2_device_unregister(&dev->v4l2_dev);
+err_free_mem:
+ kfree(dev);
+ return ret;
+}
+
+/* USB device ID list */
+static struct usb_device_id hackrf_id_table[] = {
+ { USB_DEVICE(0x1d50, 0x6089) }, /* HackRF One */
+ { }
+};
+MODULE_DEVICE_TABLE(usb, hackrf_id_table);
+
+/* USB subsystem interface */
+static struct usb_driver hackrf_driver = {
+ .name = KBUILD_MODNAME,
+ .probe = hackrf_probe,
+ .disconnect = hackrf_disconnect,
+ .id_table = hackrf_id_table,
+};
+
+module_usb_driver(hackrf_driver);
+
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("HackRF");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/usb/hdpvr/hdpvr-control.c b/drivers/media/usb/hdpvr/hdpvr-control.c
index 6053661dc04b..6e86032ea5db 100644
--- a/drivers/media/usb/hdpvr/hdpvr-control.c
+++ b/drivers/media/usb/hdpvr/hdpvr-control.c
@@ -59,13 +59,10 @@ int get_video_info(struct hdpvr_device *dev, struct hdpvr_video_info *vidinf)
1000);
#ifdef HDPVR_DEBUG
- if (hdpvr_debug & MSG_INFO) {
- char print_buf[15];
- hex_dump_to_buffer(dev->usbc_buf, 5, 16, 1, print_buf,
- sizeof(print_buf), 0);
+ if (hdpvr_debug & MSG_INFO)
v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev,
- "get video info returned: %d, %s\n", ret, print_buf);
- }
+ "get video info returned: %d, %5ph\n", ret,
+ dev->usbc_buf);
#endif
mutex_unlock(&dev->usbc_mutex);
@@ -82,9 +79,6 @@ int get_video_info(struct hdpvr_device *dev, struct hdpvr_video_info *vidinf)
int get_input_lines_info(struct hdpvr_device *dev)
{
-#ifdef HDPVR_DEBUG
- char print_buf[9];
-#endif
int ret, lines;
mutex_lock(&dev->usbc_mutex);
@@ -96,13 +90,10 @@ int get_input_lines_info(struct hdpvr_device *dev)
1000);
#ifdef HDPVR_DEBUG
- if (hdpvr_debug & MSG_INFO) {
- hex_dump_to_buffer(dev->usbc_buf, 3, 16, 1, print_buf,
- sizeof(print_buf), 0);
+ if (hdpvr_debug & MSG_INFO)
v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev,
- "get input lines info returned: %d, %s\n", ret,
- print_buf);
- }
+ "get input lines info returned: %d, %3ph\n", ret,
+ dev->usbc_buf);
#else
(void)ret; /* suppress compiler warning */
#endif
diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c
index c5638964c3f2..42b4cdf28cfd 100644
--- a/drivers/media/usb/hdpvr/hdpvr-core.c
+++ b/drivers/media/usb/hdpvr/hdpvr-core.c
@@ -124,14 +124,6 @@ static int device_authorization(struct hdpvr_device *dev)
int ret, retval = -ENOMEM;
char request_type = 0x38, rcv_request = 0x81;
char *response;
-#ifdef HDPVR_DEBUG
- size_t buf_size = 46;
- char *print_buf = kzalloc(5*buf_size+1, GFP_KERNEL);
- if (!print_buf) {
- v4l2_err(&dev->v4l2_dev, "Out of memory\n");
- return retval;
- }
-#endif
mutex_lock(&dev->usbc_mutex);
ret = usb_control_msg(dev->udev,
@@ -147,11 +139,9 @@ static int device_authorization(struct hdpvr_device *dev)
}
#ifdef HDPVR_DEBUG
else {
- hex_dump_to_buffer(dev->usbc_buf, 46, 16, 1, print_buf,
- 5*buf_size+1, 0);
v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev,
- "Status request returned, len %d: %s\n",
- ret, print_buf);
+ "Status request returned, len %d: %46ph\n",
+ ret, dev->usbc_buf);
}
#endif
@@ -189,15 +179,13 @@ static int device_authorization(struct hdpvr_device *dev)
response = dev->usbc_buf+38;
#ifdef HDPVR_DEBUG
- hex_dump_to_buffer(response, 8, 16, 1, print_buf, 5*buf_size+1, 0);
- v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, "challenge: %s\n",
- print_buf);
+ v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, "challenge: %8ph\n",
+ response);
#endif
challenge(response);
#ifdef HDPVR_DEBUG
- hex_dump_to_buffer(response, 8, 16, 1, print_buf, 5*buf_size+1, 0);
- v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, " response: %s\n",
- print_buf);
+ v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, " response: %8ph\n",
+ response);
#endif
msleep(100);
@@ -213,9 +201,6 @@ static int device_authorization(struct hdpvr_device *dev)
retval = ret != 8;
unlock:
mutex_unlock(&dev->usbc_mutex);
-#ifdef HDPVR_DEBUG
- kfree(print_buf);
-#endif
return retval;
}
diff --git a/drivers/media/usb/msi2500/msi2500.c b/drivers/media/usb/msi2500/msi2500.c
index 26b133414032..efc761c78f72 100644
--- a/drivers/media/usb/msi2500/msi2500.c
+++ b/drivers/media/usb/msi2500/msi2500.c
@@ -120,6 +120,7 @@ struct msi2500_frame_buf {
};
struct msi2500_state {
+ struct device *dev;
struct video_device vdev;
struct v4l2_device v4l2_dev;
struct v4l2_subdev *v4l2_subdev;
@@ -153,14 +154,13 @@ struct msi2500_state {
u32 next_sample; /* for tracking lost packets */
u32 sample; /* for sample rate calc */
unsigned long jiffies_next;
- unsigned int sample_ctrl_bit[4];
};
/* Private functions */
static struct msi2500_frame_buf *msi2500_get_next_fill_buf(
struct msi2500_state *s)
{
- unsigned long flags = 0;
+ unsigned long flags;
struct msi2500_frame_buf *buf = NULL;
spin_lock_irqsave(&s->queued_bufs_lock, flags);
@@ -269,7 +269,7 @@ static int msi2500_convert_stream(struct msi2500_state *s, u8 *dst, u8 *src,
sample[i] = src[3] << 24 | src[2] << 16 | src[1] << 8 |
src[0] << 0;
if (i == 0 && s->next_sample != sample[0]) {
- dev_dbg_ratelimited(&s->udev->dev,
+ dev_dbg_ratelimited(s->dev,
"%d samples lost, %d %08x:%08x\n",
sample[0] - s->next_sample,
src_len, s->next_sample, sample[0]);
@@ -279,7 +279,7 @@ static int msi2500_convert_stream(struct msi2500_state *s, u8 *dst, u8 *src,
* Dump all unknown 'garbage' data - maybe we will discover
* someday if there is something rational...
*/
- dev_dbg_ratelimited(&s->udev->dev, "%*ph\n", 12, &src[4]);
+ dev_dbg_ratelimited(s->dev, "%*ph\n", 12, &src[4]);
src += 16; /* skip header */
@@ -322,8 +322,7 @@ static int msi2500_convert_stream(struct msi2500_state *s, u8 *dst, u8 *src,
}
case MSI2500_PIX_FMT_SDR_MSI2500_384: /* 384 x IQ samples */
/* Dump unknown 'garbage' data */
- dev_dbg_ratelimited(&s->udev->dev,
- "%*ph\n", 24, &src[1000]);
+ dev_dbg_ratelimited(s->dev, "%*ph\n", 24, &src[1000]);
memcpy(dst, src, 984);
src += 984 + 24;
dst += 984;
@@ -365,8 +364,7 @@ static int msi2500_convert_stream(struct msi2500_state *s, u8 *dst, u8 *src,
s->jiffies_next = jiffies + msecs_to_jiffies(MSECS);
s->sample = s->next_sample;
- dev_dbg(&s->udev->dev,
- "size=%u samples=%u msecs=%u sample rate=%lu\n",
+ dev_dbg(s->dev, "size=%u samples=%u msecs=%u sample rate=%lu\n",
src_len, samples, msecs,
samples * 1000UL / msecs);
}
@@ -387,19 +385,16 @@ static void msi2500_isoc_handler(struct urb *urb)
if (unlikely(urb->status == -ENOENT || urb->status == -ECONNRESET ||
urb->status == -ESHUTDOWN)) {
- dev_dbg(&s->udev->dev, "URB (%p) unlinked %ssynchronuously\n",
+ dev_dbg(s->dev, "URB (%p) unlinked %ssynchronuously\n",
urb, urb->status == -ENOENT ? "" : "a");
return;
}
if (unlikely(urb->status != 0)) {
- dev_dbg(&s->udev->dev,
- "msi2500_isoc_handler() called with status %d\n",
- urb->status);
+ dev_dbg(s->dev, "called with status %d\n", urb->status);
/* Give up after a number of contiguous errors */
if (++s->isoc_errors > MAX_ISOC_ERRORS)
- dev_dbg(&s->udev->dev,
- "Too many ISOC errors, bailing out\n");
+ dev_dbg(s->dev, "Too many ISOC errors, bailing out\n");
goto handler_end;
} else {
/* Reset ISOC error counter. We did get here, after all. */
@@ -413,7 +408,7 @@ static void msi2500_isoc_handler(struct urb *urb)
/* Check frame error */
fstatus = urb->iso_frame_desc[i].status;
if (unlikely(fstatus)) {
- dev_dbg_ratelimited(&s->udev->dev,
+ dev_dbg_ratelimited(s->dev,
"frame=%d/%d has error %d skipping\n",
i, urb->number_of_packets, fstatus);
continue;
@@ -430,7 +425,7 @@ static void msi2500_isoc_handler(struct urb *urb)
fbuf = msi2500_get_next_fill_buf(s);
if (unlikely(fbuf == NULL)) {
s->vb_full++;
- dev_dbg_ratelimited(&s->udev->dev,
+ dev_dbg_ratelimited(s->dev,
"videobuf is full, %d packets dropped\n",
s->vb_full);
continue;
@@ -446,22 +441,19 @@ static void msi2500_isoc_handler(struct urb *urb)
handler_end:
i = usb_submit_urb(urb, GFP_ATOMIC);
if (unlikely(i != 0))
- dev_dbg(&s->udev->dev,
- "Error (%d) re-submitting urb in msi2500_isoc_handler\n",
- i);
+ dev_dbg(s->dev, "Error (%d) re-submitting urb\n", i);
}
static void msi2500_iso_stop(struct msi2500_state *s)
{
int i;
- dev_dbg(&s->udev->dev, "%s:\n", __func__);
+ dev_dbg(s->dev, "\n");
/* Unlinking ISOC buffers one by one */
for (i = 0; i < MAX_ISO_BUFS; i++) {
if (s->urbs[i]) {
- dev_dbg(&s->udev->dev, "Unlinking URB %p\n",
- s->urbs[i]);
+ dev_dbg(s->dev, "Unlinking URB %p\n", s->urbs[i]);
usb_kill_urb(s->urbs[i]);
}
}
@@ -471,12 +463,12 @@ static void msi2500_iso_free(struct msi2500_state *s)
{
int i;
- dev_dbg(&s->udev->dev, "%s:\n", __func__);
+ dev_dbg(s->dev, "\n");
/* Freeing ISOC buffers one by one */
for (i = 0; i < MAX_ISO_BUFS; i++) {
if (s->urbs[i]) {
- dev_dbg(&s->udev->dev, "Freeing URB\n");
+ dev_dbg(s->dev, "Freeing URB\n");
if (s->urbs[i]->transfer_buffer) {
usb_free_coherent(s->udev,
s->urbs[i]->transfer_buffer_length,
@@ -492,7 +484,7 @@ static void msi2500_iso_free(struct msi2500_state *s)
/* Both v4l2_lock and vb_queue_lock should be locked when calling this */
static void msi2500_isoc_cleanup(struct msi2500_state *s)
{
- dev_dbg(&s->udev->dev, "%s:\n", __func__);
+ dev_dbg(s->dev, "\n");
msi2500_iso_stop(s);
msi2500_iso_free(s);
@@ -501,14 +493,12 @@ static void msi2500_isoc_cleanup(struct msi2500_state *s)
/* Both v4l2_lock and vb_queue_lock should be locked when calling this */
static int msi2500_isoc_init(struct msi2500_state *s)
{
- struct usb_device *udev;
struct urb *urb;
int i, j, ret;
- dev_dbg(&s->udev->dev, "%s:\n", __func__);
+ dev_dbg(s->dev, "\n");
s->isoc_errors = 0;
- udev = s->udev;
ret = usb_set_interface(s->udev, 0, 1);
if (ret)
@@ -518,23 +508,22 @@ static int msi2500_isoc_init(struct msi2500_state *s)
for (i = 0; i < MAX_ISO_BUFS; i++) {
urb = usb_alloc_urb(ISO_FRAMES_PER_DESC, GFP_KERNEL);
if (urb == NULL) {
- dev_err(&s->udev->dev,
- "Failed to allocate urb %d\n", i);
+ dev_err(s->dev, "Failed to allocate urb %d\n", i);
msi2500_isoc_cleanup(s);
return -ENOMEM;
}
s->urbs[i] = urb;
- dev_dbg(&s->udev->dev, "Allocated URB at 0x%p\n", urb);
+ dev_dbg(s->dev, "Allocated URB at 0x%p\n", urb);
urb->interval = 1;
- urb->dev = udev;
- urb->pipe = usb_rcvisocpipe(udev, 0x81);
+ urb->dev = s->udev;
+ urb->pipe = usb_rcvisocpipe(s->udev, 0x81);
urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
- urb->transfer_buffer = usb_alloc_coherent(udev, ISO_BUFFER_SIZE,
+ urb->transfer_buffer = usb_alloc_coherent(s->udev,
+ ISO_BUFFER_SIZE,
GFP_KERNEL, &urb->transfer_dma);
if (urb->transfer_buffer == NULL) {
- dev_err(&s->udev->dev,
- "Failed to allocate urb buffer %d\n",
+ dev_err(s->dev, "Failed to allocate urb buffer %d\n",
i);
msi2500_isoc_cleanup(s);
return -ENOMEM;
@@ -554,13 +543,12 @@ static int msi2500_isoc_init(struct msi2500_state *s)
for (i = 0; i < MAX_ISO_BUFS; i++) {
ret = usb_submit_urb(s->urbs[i], GFP_KERNEL);
if (ret) {
- dev_err(&s->udev->dev,
- "isoc_init() submit_urb %d failed with error %d\n",
+ dev_err(s->dev, "usb_submit_urb %d failed with error %d\n",
i, ret);
msi2500_isoc_cleanup(s);
return ret;
}
- dev_dbg(&s->udev->dev, "URB 0x%p submitted.\n", s->urbs[i]);
+ dev_dbg(s->dev, "URB 0x%p submitted.\n", s->urbs[i]);
}
/* All is done... */
@@ -570,9 +558,9 @@ static int msi2500_isoc_init(struct msi2500_state *s)
/* Must be called with vb_queue_lock held */
static void msi2500_cleanup_queued_bufs(struct msi2500_state *s)
{
- unsigned long flags = 0;
+ unsigned long flags;
- dev_dbg(&s->udev->dev, "%s:\n", __func__);
+ dev_dbg(s->dev, "\n");
spin_lock_irqsave(&s->queued_bufs_lock, flags);
while (!list_empty(&s->queued_bufs)) {
@@ -593,7 +581,7 @@ static void msi2500_disconnect(struct usb_interface *intf)
struct msi2500_state *s =
container_of(v, struct msi2500_state, v4l2_dev);
- dev_dbg(&s->udev->dev, "%s:\n", __func__);
+ dev_dbg(s->dev, "\n");
mutex_lock(&s->vb_queue_lock);
mutex_lock(&s->v4l2_lock);
@@ -613,7 +601,7 @@ static int msi2500_querycap(struct file *file, void *fh,
{
struct msi2500_state *s = video_drvdata(file);
- dev_dbg(&s->udev->dev, "%s:\n", __func__);
+ dev_dbg(s->dev, "\n");
strlcpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
strlcpy(cap->card, s->vdev.name, sizeof(cap->card));
@@ -631,14 +619,13 @@ static int msi2500_queue_setup(struct vb2_queue *vq,
{
struct msi2500_state *s = vb2_get_drv_priv(vq);
- dev_dbg(&s->udev->dev, "%s: *nbuffers=%d\n", __func__, *nbuffers);
+ dev_dbg(s->dev, "nbuffers=%d\n", *nbuffers);
/* Absolute min and max number of buffers available for mmap() */
*nbuffers = clamp_t(unsigned int, *nbuffers, 8, 32);
*nplanes = 1;
sizes[0] = PAGE_ALIGN(s->buffersize);
- dev_dbg(&s->udev->dev, "%s: nbuffers=%d sizes[0]=%d\n",
- __func__, *nbuffers, sizes[0]);
+ dev_dbg(s->dev, "nbuffers=%d sizes[0]=%d\n", *nbuffers, sizes[0]);
return 0;
}
@@ -647,7 +634,7 @@ static void msi2500_buf_queue(struct vb2_buffer *vb)
struct msi2500_state *s = vb2_get_drv_priv(vb->vb2_queue);
struct msi2500_frame_buf *buf =
container_of(vb, struct msi2500_frame_buf, vb);
- unsigned long flags = 0;
+ unsigned long flags;
/* Check the device has not disconnected between prep and queuing */
if (unlikely(!s->udev)) {
@@ -665,16 +652,15 @@ static void msi2500_buf_queue(struct vb2_buffer *vb)
#define CMD_STOP_STREAMING 0x45
#define CMD_READ_UNKNOW 0x48
-#define msi2500_dbg_usb_control_msg(_udev, _r, _t, _v, _i, _b, _l) { \
+#define msi2500_dbg_usb_control_msg(_dev, _r, _t, _v, _i, _b, _l) { \
char *_direction; \
if (_t & USB_DIR_IN) \
_direction = "<<<"; \
else \
_direction = ">>>"; \
- dev_dbg(&_udev->dev, "%s: %02x %02x %02x %02x %02x %02x %02x %02x " \
- "%s %*ph\n", __func__, _t, _r, _v & 0xff, _v >> 8, \
- _i & 0xff, _i >> 8, _l & 0xff, _l >> 8, _direction, \
- _l, _b); \
+ dev_dbg(_dev, "%02x %02x %02x %02x %02x %02x %02x %02x %s %*ph\n", \
+ _t, _r, _v & 0xff, _v >> 8, _i & 0xff, _i >> 8, \
+ _l & 0xff, _l >> 8, _direction, _l, _b); \
}
static int msi2500_ctrl_msg(struct msi2500_state *s, u8 cmd, u32 data)
@@ -685,18 +671,16 @@ static int msi2500_ctrl_msg(struct msi2500_state *s, u8 cmd, u32 data)
u16 value = (data >> 0) & 0xffff;
u16 index = (data >> 16) & 0xffff;
- msi2500_dbg_usb_control_msg(s->udev,
+ msi2500_dbg_usb_control_msg(s->dev,
request, requesttype, value, index, NULL, 0);
-
ret = usb_control_msg(s->udev, usb_sndctrlpipe(s->udev, 0),
request, requesttype, value, index, NULL, 0, 2000);
-
if (ret)
- dev_err(&s->udev->dev, "%s: failed %d, cmd %02x, data %04x\n",
- __func__, ret, cmd, data);
+ dev_err(s->dev, "failed %d, cmd %02x, data %04x\n",
+ ret, cmd, data);
return ret;
-};
+}
#define F_REF 24000000
#define DIV_R_IN 2
@@ -785,8 +769,7 @@ static int msi2500_set_usb_adc(struct msi2500_state *s)
for (div_r_out = 4; div_r_out < 16; div_r_out += 2) {
f_vco = f_sr * div_r_out * 12;
- dev_dbg(&s->udev->dev, "%s: div_r_out=%d f_vco=%d\n",
- __func__, div_r_out, f_vco);
+ dev_dbg(s->dev, "div_r_out=%d f_vco=%d\n", div_r_out, f_vco);
if (f_vco >= 202000000)
break;
}
@@ -800,10 +783,8 @@ static int msi2500_set_usb_adc(struct msi2500_state *s)
reg3 |= ((fract >> 20) & 0x000001) << 15; /* [20] */
reg4 |= ((fract >> 0) & 0x0fffff) << 8; /* [19:0] */
- dev_dbg(&s->udev->dev,
- "%s: f_sr=%d f_vco=%d div_n=%d div_m=%d div_r_out=%d reg3=%08x reg4=%08x\n",
- __func__, f_sr, f_vco, div_n, div_m, div_r_out, reg3,
- reg4);
+ dev_dbg(s->dev, "f_sr=%d f_vco=%d div_n=%d div_m=%d div_r_out=%d reg3=%08x reg4=%08x\n",
+ f_sr, f_vco, div_n, div_m, div_r_out, reg3, reg4);
ret = msi2500_ctrl_msg(s, CMD_WREG, 0x00608008);
if (ret)
@@ -838,14 +819,14 @@ static int msi2500_set_usb_adc(struct msi2500_state *s)
goto err;
err:
return ret;
-};
+}
static int msi2500_start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct msi2500_state *s = vb2_get_drv_priv(vq);
int ret;
- dev_dbg(&s->udev->dev, "%s:\n", __func__);
+ dev_dbg(s->dev, "\n");
if (!s->udev)
return -ENODEV;
@@ -873,7 +854,7 @@ static void msi2500_stop_streaming(struct vb2_queue *vq)
{
struct msi2500_state *s = vb2_get_drv_priv(vq);
- dev_dbg(&s->udev->dev, "%s:\n", __func__);
+ dev_dbg(s->dev, "\n");
mutex_lock(&s->v4l2_lock);
@@ -909,7 +890,7 @@ static int msi2500_enum_fmt_sdr_cap(struct file *file, void *priv,
{
struct msi2500_state *s = video_drvdata(file);
- dev_dbg(&s->udev->dev, "%s: index=%d\n", __func__, f->index);
+ dev_dbg(s->dev, "index=%d\n", f->index);
if (f->index >= s->num_formats)
return -EINVAL;
@@ -925,7 +906,7 @@ static int msi2500_g_fmt_sdr_cap(struct file *file, void *priv,
{
struct msi2500_state *s = video_drvdata(file);
- dev_dbg(&s->udev->dev, "%s: pixelformat fourcc %4.4s\n", __func__,
+ dev_dbg(s->dev, "pixelformat fourcc %4.4s\n",
(char *)&s->pixelformat);
f->fmt.sdr.pixelformat = s->pixelformat;
@@ -942,7 +923,7 @@ static int msi2500_s_fmt_sdr_cap(struct file *file, void *priv,
struct vb2_queue *q = &s->vb_queue;
int i;
- dev_dbg(&s->udev->dev, "%s: pixelformat fourcc %4.4s\n", __func__,
+ dev_dbg(s->dev, "pixelformat fourcc %4.4s\n",
(char *)&f->fmt.sdr.pixelformat);
if (vb2_is_busy(q))
@@ -972,7 +953,7 @@ static int msi2500_try_fmt_sdr_cap(struct file *file, void *priv,
struct msi2500_state *s = video_drvdata(file);
int i;
- dev_dbg(&s->udev->dev, "%s: pixelformat fourcc %4.4s\n", __func__,
+ dev_dbg(s->dev, "pixelformat fourcc %4.4s\n",
(char *)&f->fmt.sdr.pixelformat);
memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
@@ -995,7 +976,7 @@ static int msi2500_s_tuner(struct file *file, void *priv,
struct msi2500_state *s = video_drvdata(file);
int ret;
- dev_dbg(&s->udev->dev, "%s: index=%d\n", __func__, v->index);
+ dev_dbg(s->dev, "index=%d\n", v->index);
if (v->index == 0)
ret = 0;
@@ -1012,7 +993,7 @@ static int msi2500_g_tuner(struct file *file, void *priv, struct v4l2_tuner *v)
struct msi2500_state *s = video_drvdata(file);
int ret;
- dev_dbg(&s->udev->dev, "%s: index=%d\n", __func__, v->index);
+ dev_dbg(s->dev, "index=%d\n", v->index);
if (v->index == 0) {
strlcpy(v->name, "Mirics MSi2500", sizeof(v->name));
@@ -1036,8 +1017,7 @@ static int msi2500_g_frequency(struct file *file, void *priv,
struct msi2500_state *s = video_drvdata(file);
int ret = 0;
- dev_dbg(&s->udev->dev, "%s: tuner=%d type=%d\n",
- __func__, f->tuner, f->type);
+ dev_dbg(s->dev, "tuner=%d type=%d\n", f->tuner, f->type);
if (f->tuner == 0) {
f->frequency = s->f_adc;
@@ -1058,15 +1038,14 @@ static int msi2500_s_frequency(struct file *file, void *priv,
struct msi2500_state *s = video_drvdata(file);
int ret;
- dev_dbg(&s->udev->dev, "%s: tuner=%d type=%d frequency=%u\n",
- __func__, f->tuner, f->type, f->frequency);
+ dev_dbg(s->dev, "tuner=%d type=%d frequency=%u\n",
+ f->tuner, f->type, f->frequency);
if (f->tuner == 0) {
s->f_adc = clamp_t(unsigned int, f->frequency,
bands[0].rangelow,
bands[0].rangehigh);
- dev_dbg(&s->udev->dev, "%s: ADC frequency=%u Hz\n",
- __func__, s->f_adc);
+ dev_dbg(s->dev, "ADC frequency=%u Hz\n", s->f_adc);
ret = msi2500_set_usb_adc(s);
} else if (f->tuner == 1) {
ret = v4l2_subdev_call(s->v4l2_subdev, tuner, s_frequency, f);
@@ -1083,8 +1062,8 @@ static int msi2500_enum_freq_bands(struct file *file, void *priv,
struct msi2500_state *s = video_drvdata(file);
int ret;
- dev_dbg(&s->udev->dev, "%s: tuner=%d type=%d index=%d\n",
- __func__, band->tuner, band->type, band->index);
+ dev_dbg(s->dev, "tuner=%d type=%d index=%d\n",
+ band->tuner, band->type, band->index);
if (band->tuner == 0) {
if (band->index >= ARRAY_SIZE(bands)) {
@@ -1169,8 +1148,7 @@ static int msi2500_transfer_one_message(struct spi_master *master,
u32 data;
list_for_each_entry(t, &m->transfers, transfer_list) {
- dev_dbg(&s->udev->dev, "%s: msg=%*ph\n",
- __func__, t->len, t->tx_buf);
+ dev_dbg(s->dev, "msg=%*ph\n", t->len, t->tx_buf);
data = 0x09; /* reg 9 is SPI adapter */
data |= ((u8 *)t->tx_buf)[0] << 8;
data |= ((u8 *)t->tx_buf)[1] << 16;
@@ -1186,8 +1164,7 @@ static int msi2500_transfer_one_message(struct spi_master *master,
static int msi2500_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
- struct usb_device *udev = interface_to_usbdev(intf);
- struct msi2500_state *s = NULL;
+ struct msi2500_state *s;
struct v4l2_subdev *sd;
struct spi_master *master;
int ret;
@@ -1200,7 +1177,7 @@ static int msi2500_probe(struct usb_interface *intf,
s = kzalloc(sizeof(struct msi2500_state), GFP_KERNEL);
if (s == NULL) {
- pr_err("Could not allocate memory for msi2500_state\n");
+ dev_err(&intf->dev, "Could not allocate memory for state\n");
return -ENOMEM;
}
@@ -1208,12 +1185,13 @@ static int msi2500_probe(struct usb_interface *intf,
mutex_init(&s->vb_queue_lock);
spin_lock_init(&s->queued_bufs_lock);
INIT_LIST_HEAD(&s->queued_bufs);
- s->udev = udev;
+ s->dev = &intf->dev;
+ s->udev = interface_to_usbdev(intf);
s->f_adc = bands[0].rangelow;
s->pixelformat = formats[0].pixelformat;
s->buffersize = formats[0].buffersize;
s->num_formats = NUM_FORMATS;
- if (msi2500_emulated_fmt == false)
+ if (!msi2500_emulated_fmt)
s->num_formats -= 2;
/* Init videobuf2 queue structure */
@@ -1226,7 +1204,7 @@ static int msi2500_probe(struct usb_interface *intf,
s->vb_queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
ret = vb2_queue_init(&s->vb_queue);
if (ret) {
- dev_err(&s->udev->dev, "Could not initialize vb2 queue\n");
+ dev_err(s->dev, "Could not initialize vb2 queue\n");
goto err_free_mem;
}
@@ -1240,13 +1218,12 @@ static int msi2500_probe(struct usb_interface *intf,
s->v4l2_dev.release = msi2500_video_release;
ret = v4l2_device_register(&intf->dev, &s->v4l2_dev);
if (ret) {
- dev_err(&s->udev->dev,
- "Failed to register v4l2-device (%d)\n", ret);
+ dev_err(s->dev, "Failed to register v4l2-device (%d)\n", ret);
goto err_free_mem;
}
/* SPI master adapter */
- master = spi_alloc_master(&s->udev->dev, 0);
+ master = spi_alloc_master(s->dev, 0);
if (master == NULL) {
ret = -ENOMEM;
goto err_unregister_v4l2_dev;
@@ -1267,7 +1244,7 @@ static int msi2500_probe(struct usb_interface *intf,
sd = v4l2_spi_new_subdev(&s->v4l2_dev, master, &board_info);
s->v4l2_subdev = sd;
if (sd == NULL) {
- dev_err(&s->udev->dev, "cannot get v4l2 subdevice\n");
+ dev_err(s->dev, "cannot get v4l2 subdevice\n");
ret = -ENODEV;
goto err_unregister_master;
}
@@ -1276,7 +1253,7 @@ static int msi2500_probe(struct usb_interface *intf,
v4l2_ctrl_handler_init(&s->hdl, 0);
if (s->hdl.error) {
ret = s->hdl.error;
- dev_err(&s->udev->dev, "Could not initialize controls\n");
+ dev_err(s->dev, "Could not initialize controls\n");
goto err_free_controls;
}
@@ -1289,16 +1266,13 @@ static int msi2500_probe(struct usb_interface *intf,
ret = video_register_device(&s->vdev, VFL_TYPE_SDR, -1);
if (ret) {
- dev_err(&s->udev->dev,
- "Failed to register as video device (%d)\n",
+ dev_err(s->dev, "Failed to register as video device (%d)\n",
ret);
goto err_unregister_v4l2_dev;
}
- dev_info(&s->udev->dev, "Registered as %s\n",
+ dev_info(s->dev, "Registered as %s\n",
video_device_node_name(&s->vdev));
- dev_notice(&s->udev->dev,
- "%s: SDR API is still slightly experimental and functionality changes may follow\n",
- KBUILD_MODNAME);
+ dev_notice(s->dev, "SDR API is still slightly experimental and functionality changes may follow\n");
return 0;
diff --git a/drivers/media/usb/pwc/pwc-v4l.c b/drivers/media/usb/pwc/pwc-v4l.c
index aa7449eaca08..3d987984602f 100644
--- a/drivers/media/usb/pwc/pwc-v4l.c
+++ b/drivers/media/usb/pwc/pwc-v4l.c
@@ -52,7 +52,7 @@ enum { custom_autocontour, custom_contour, custom_noise_reduction,
custom_awb_speed, custom_awb_delay,
custom_save_user, custom_restore_user, custom_restore_factory };
-const char * const pwc_auto_whitebal_qmenu[] = {
+static const char * const pwc_auto_whitebal_qmenu[] = {
"Indoor (Incandescant Lighting) Mode",
"Outdoor (Sunlight) Mode",
"Indoor (Fluorescent Lighting) Mode",
diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
index 2c901861034a..ccc00099b261 100644
--- a/drivers/media/usb/s2255/s2255drv.c
+++ b/drivers/media/usb/s2255/s2255drv.c
@@ -2245,7 +2245,7 @@ static int s2255_probe(struct usb_interface *interface,
}
atomic_set(&dev->num_channels, 0);
- dev->pid = le16_to_cpu(id->idProduct);
+ dev->pid = id->idProduct;
dev->fw_data = kzalloc(sizeof(struct s2255_fw), GFP_KERNEL);
if (!dev->fw_data)
goto errorFWDATA1;
diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c
index 1836a416d806..94e10b10b66e 100644
--- a/drivers/media/usb/siano/smsusb.c
+++ b/drivers/media/usb/siano/smsusb.c
@@ -277,14 +277,14 @@ static int smsusb1_load_firmware(struct usb_device *udev, int id, int board_id)
rc = usb_bulk_msg(udev, usb_sndbulkpipe(udev, 2),
fw_buffer, fw->size, &dummy, 1000);
- sms_info("sent %zd(%d) bytes, rc %d", fw->size, dummy, rc);
+ sms_info("sent %zu(%d) bytes, rc %d", fw->size, dummy, rc);
kfree(fw_buffer);
} else {
sms_err("failed to allocate firmware buffer");
rc = -ENOMEM;
}
- sms_info("read FW %s, size=%zd", fw_filename, fw->size);
+ sms_info("read FW %s, size=%zu", fw_filename, fw->size);
release_firmware(fw);
@@ -655,6 +655,8 @@ static const struct usb_device_id smsusb_id_table[] = {
.driver_info = SMS1XXX_BOARD_ONDA_MDTV_DATA_CARD },
{ USB_DEVICE(0x3275, 0x0080),
.driver_info = SMS1XXX_BOARD_SIANO_RIO },
+ { USB_DEVICE(0x2013, 0x0257),
+ .driver_info = SMS1XXX_BOARD_PCTV_77E },
{ } /* Terminating entry */
};
diff --git a/drivers/media/usb/ttusb-dec/ttusbdecfe.c b/drivers/media/usb/ttusb-dec/ttusbdecfe.c
index 5c45c9d0712d..9c29552aedec 100644
--- a/drivers/media/usb/ttusb-dec/ttusbdecfe.c
+++ b/drivers/media/usb/ttusb-dec/ttusbdecfe.c
@@ -156,6 +156,9 @@ static int ttusbdecfe_dvbs_diseqc_send_master_cmd(struct dvb_frontend* fe, struc
0x00, 0x00, 0x00, 0x00,
0x00, 0x00 };
+ if (cmd->msg_len > sizeof(b) - 4)
+ return -EINVAL;
+
memcpy(&b[4], cmd->msg, cmd->msg_len);
state->config->send_command(fe, 0x72,
diff --git a/drivers/media/usb/usbtv/Kconfig b/drivers/media/usb/usbtv/Kconfig
index 7c5b86006ee6..b833c5b9094e 100644
--- a/drivers/media/usb/usbtv/Kconfig
+++ b/drivers/media/usb/usbtv/Kconfig
@@ -1,6 +1,7 @@
config VIDEO_USBTV
tristate "USBTV007 video capture support"
- depends on VIDEO_V4L2
+ depends on VIDEO_V4L2 && SND
+ select SND_PCM
select VIDEOBUF2_VMALLOC
---help---
diff --git a/drivers/media/usb/usbtv/Makefile b/drivers/media/usb/usbtv/Makefile
index 775316a88ea6..f555cf8a3dd2 100644
--- a/drivers/media/usb/usbtv/Makefile
+++ b/drivers/media/usb/usbtv/Makefile
@@ -1,4 +1,5 @@
usbtv-y := usbtv-core.o \
- usbtv-video.o
+ usbtv-video.o \
+ usbtv-audio.o
obj-$(CONFIG_VIDEO_USBTV) += usbtv.o
diff --git a/drivers/media/usb/usbtv/usbtv-audio.c b/drivers/media/usb/usbtv/usbtv-audio.c
new file mode 100644
index 000000000000..78c12d22dfbb
--- /dev/null
+++ b/drivers/media/usb/usbtv/usbtv-audio.c
@@ -0,0 +1,385 @@
+/*
+ * Fushicai USBTV007 Audio-Video Grabber Driver
+ *
+ * Product web site:
+ * http://www.fushicai.com/products_detail/&productId=d05449ee-b690-42f9-a661-aa7353894bed.html
+ *
+ * Copyright (c) 2013 Federico Simoncelli
+ * All rights reserved.
+ * No physical hardware was harmed running Windows during the
+ * reverse-engineering activity
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL").
+ */
+
+#include <sound/core.h>
+#include <sound/initval.h>
+#include <sound/ac97_codec.h>
+#include <sound/pcm_params.h>
+
+#include "usbtv.h"
+
+static struct snd_pcm_hardware snd_usbtv_digital_hw = {
+ .info = SNDRV_PCM_INFO_BATCH |
+ SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_MMAP_VALID,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .rates = SNDRV_PCM_RATE_48000,
+ .rate_min = 48000,
+ .rate_max = 48000,
+ .channels_min = 2,
+ .channels_max = 2,
+ .period_bytes_min = 11059,
+ .period_bytes_max = 13516,
+ .periods_min = 2,
+ .periods_max = 98,
+ .buffer_bytes_max = 62720 * 8, /* value in usbaudio.c */
+};
+
+static int snd_usbtv_pcm_open(struct snd_pcm_substream *substream)
+{
+ struct usbtv *chip = snd_pcm_substream_chip(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ chip->snd_substream = substream;
+ runtime->hw = snd_usbtv_digital_hw;
+
+ return 0;
+}
+
+static int snd_usbtv_pcm_close(struct snd_pcm_substream *substream)
+{
+ struct usbtv *chip = snd_pcm_substream_chip(substream);
+
+ if (atomic_read(&chip->snd_stream)) {
+ atomic_set(&chip->snd_stream, 0);
+ schedule_work(&chip->snd_trigger);
+ }
+
+ return 0;
+}
+
+static int snd_usbtv_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
+{
+ int rv;
+ struct usbtv *chip = snd_pcm_substream_chip(substream);
+
+ rv = snd_pcm_lib_malloc_pages(substream,
+ params_buffer_bytes(hw_params));
+
+ if (rv < 0) {
+ dev_warn(chip->dev, "pcm audio buffer allocation failure %i\n",
+ rv);
+ return rv;
+ }
+
+ return 0;
+}
+
+static int snd_usbtv_hw_free(struct snd_pcm_substream *substream)
+{
+ snd_pcm_lib_free_pages(substream);
+ return 0;
+}
+
+static int snd_usbtv_prepare(struct snd_pcm_substream *substream)
+{
+ struct usbtv *chip = snd_pcm_substream_chip(substream);
+
+ chip->snd_buffer_pos = 0;
+ chip->snd_period_pos = 0;
+
+ return 0;
+}
+
+static void usbtv_audio_urb_received(struct urb *urb)
+{
+ struct usbtv *chip = urb->context;
+ struct snd_pcm_substream *substream = chip->snd_substream;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ size_t i, frame_bytes, chunk_length, buffer_pos, period_pos;
+ int period_elapsed;
+ void *urb_current;
+
+ switch (urb->status) {
+ case 0:
+ case -ETIMEDOUT:
+ break;
+ case -ENOENT:
+ case -EPROTO:
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ return;
+ default:
+ dev_warn(chip->dev, "unknown audio urb status %i\n",
+ urb->status);
+ }
+
+ if (!atomic_read(&chip->snd_stream))
+ return;
+
+ frame_bytes = runtime->frame_bits >> 3;
+ chunk_length = USBTV_CHUNK / frame_bytes;
+
+ buffer_pos = chip->snd_buffer_pos;
+ period_pos = chip->snd_period_pos;
+ period_elapsed = 0;
+
+ for (i = 0; i < urb->actual_length; i += USBTV_CHUNK_SIZE) {
+ urb_current = urb->transfer_buffer + i + USBTV_AUDIO_HDRSIZE;
+
+ if (buffer_pos + chunk_length >= runtime->buffer_size) {
+ size_t cnt = (runtime->buffer_size - buffer_pos) *
+ frame_bytes;
+ memcpy(runtime->dma_area + buffer_pos * frame_bytes,
+ urb_current, cnt);
+ memcpy(runtime->dma_area, urb_current + cnt,
+ chunk_length * frame_bytes - cnt);
+ } else {
+ memcpy(runtime->dma_area + buffer_pos * frame_bytes,
+ urb_current, chunk_length * frame_bytes);
+ }
+
+ buffer_pos += chunk_length;
+ period_pos += chunk_length;
+
+ if (buffer_pos >= runtime->buffer_size)
+ buffer_pos -= runtime->buffer_size;
+
+ if (period_pos >= runtime->period_size) {
+ period_pos -= runtime->period_size;
+ period_elapsed = 1;
+ }
+ }
+
+ snd_pcm_stream_lock(substream);
+
+ chip->snd_buffer_pos = buffer_pos;
+ chip->snd_period_pos = period_pos;
+
+ snd_pcm_stream_unlock(substream);
+
+ if (period_elapsed)
+ snd_pcm_period_elapsed(substream);
+
+ usb_submit_urb(urb, GFP_ATOMIC);
+}
+
+static int usbtv_audio_start(struct usbtv *chip)
+{
+ unsigned int pipe;
+ static const u16 setup[][2] = {
+ /* These seem to enable the device. */
+ { USBTV_BASE + 0x0008, 0x0001 },
+ { USBTV_BASE + 0x01d0, 0x00ff },
+ { USBTV_BASE + 0x01d9, 0x0002 },
+
+ { USBTV_BASE + 0x01da, 0x0013 },
+ { USBTV_BASE + 0x01db, 0x0012 },
+ { USBTV_BASE + 0x01e9, 0x0002 },
+ { USBTV_BASE + 0x01ec, 0x006c },
+ { USBTV_BASE + 0x0294, 0x0020 },
+ { USBTV_BASE + 0x0255, 0x00cf },
+ { USBTV_BASE + 0x0256, 0x0020 },
+ { USBTV_BASE + 0x01eb, 0x0030 },
+ { USBTV_BASE + 0x027d, 0x00a6 },
+ { USBTV_BASE + 0x0280, 0x0011 },
+ { USBTV_BASE + 0x0281, 0x0040 },
+ { USBTV_BASE + 0x0282, 0x0011 },
+ { USBTV_BASE + 0x0283, 0x0040 },
+ { 0xf891, 0x0010 },
+
+ /* this sets the input from composite */
+ { USBTV_BASE + 0x0284, 0x00aa },
+ };
+
+ chip->snd_bulk_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (chip->snd_bulk_urb == NULL)
+ goto err_alloc_urb;
+
+ pipe = usb_rcvbulkpipe(chip->udev, USBTV_AUDIO_ENDP);
+
+ chip->snd_bulk_urb->transfer_buffer = kzalloc(
+ USBTV_AUDIO_URBSIZE, GFP_KERNEL);
+ if (chip->snd_bulk_urb->transfer_buffer == NULL)
+ goto err_transfer_buffer;
+
+ usb_fill_bulk_urb(chip->snd_bulk_urb, chip->udev, pipe,
+ chip->snd_bulk_urb->transfer_buffer, USBTV_AUDIO_URBSIZE,
+ usbtv_audio_urb_received, chip);
+
+ /* starting the stream */
+ usbtv_set_regs(chip, setup, ARRAY_SIZE(setup));
+
+ usb_clear_halt(chip->udev, pipe);
+ usb_submit_urb(chip->snd_bulk_urb, GFP_ATOMIC);
+
+ return 0;
+
+err_transfer_buffer:
+ usb_free_urb(chip->snd_bulk_urb);
+ chip->snd_bulk_urb = NULL;
+
+err_alloc_urb:
+ return -ENOMEM;
+}
+
+static int usbtv_audio_stop(struct usbtv *chip)
+{
+ static const u16 setup[][2] = {
+ /* The original Windows driver sometimes also sends:
+ * { USBTV_BASE + 0x00a2, 0x0013 }
+ * but it seems useless and its real effects are untested at
+ * the moment.
+ */
+ { USBTV_BASE + 0x027d, 0x0000 },
+ { USBTV_BASE + 0x0280, 0x0010 },
+ { USBTV_BASE + 0x0282, 0x0010 },
+ };
+
+ if (chip->snd_bulk_urb) {
+ usb_kill_urb(chip->snd_bulk_urb);
+ kfree(chip->snd_bulk_urb->transfer_buffer);
+ usb_free_urb(chip->snd_bulk_urb);
+ chip->snd_bulk_urb = NULL;
+ }
+
+ usbtv_set_regs(chip, setup, ARRAY_SIZE(setup));
+
+ return 0;
+}
+
+void usbtv_audio_suspend(struct usbtv *usbtv)
+{
+ if (atomic_read(&usbtv->snd_stream) && usbtv->snd_bulk_urb)
+ usb_kill_urb(usbtv->snd_bulk_urb);
+}
+
+void usbtv_audio_resume(struct usbtv *usbtv)
+{
+ if (atomic_read(&usbtv->snd_stream) && usbtv->snd_bulk_urb)
+ usb_submit_urb(usbtv->snd_bulk_urb, GFP_ATOMIC);
+}
+
+static void snd_usbtv_trigger(struct work_struct *work)
+{
+ struct usbtv *chip = container_of(work, struct usbtv, snd_trigger);
+
+ if (atomic_read(&chip->snd_stream))
+ usbtv_audio_start(chip);
+ else
+ usbtv_audio_stop(chip);
+}
+
+static int snd_usbtv_card_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct usbtv *chip = snd_pcm_substream_chip(substream);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ atomic_set(&chip->snd_stream, 1);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ atomic_set(&chip->snd_stream, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ schedule_work(&chip->snd_trigger);
+
+ return 0;
+}
+
+static snd_pcm_uframes_t snd_usbtv_pointer(struct snd_pcm_substream *substream)
+{
+ struct usbtv *chip = snd_pcm_substream_chip(substream);
+
+ return chip->snd_buffer_pos;
+}
+
+static struct snd_pcm_ops snd_usbtv_pcm_ops = {
+ .open = snd_usbtv_pcm_open,
+ .close = snd_usbtv_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = snd_usbtv_hw_params,
+ .hw_free = snd_usbtv_hw_free,
+ .prepare = snd_usbtv_prepare,
+ .trigger = snd_usbtv_card_trigger,
+ .pointer = snd_usbtv_pointer,
+};
+
+int usbtv_audio_init(struct usbtv *usbtv)
+{
+ int rv;
+ struct snd_card *card;
+ struct snd_pcm *pcm;
+
+ INIT_WORK(&usbtv->snd_trigger, snd_usbtv_trigger);
+ atomic_set(&usbtv->snd_stream, 0);
+
+ rv = snd_card_new(&usbtv->udev->dev, SNDRV_DEFAULT_IDX1, "usbtv",
+ THIS_MODULE, 0, &card);
+ if (rv < 0)
+ return rv;
+
+ strlcpy(card->driver, usbtv->dev->driver->name, sizeof(card->driver));
+ strlcpy(card->shortname, "usbtv", sizeof(card->shortname));
+ snprintf(card->longname, sizeof(card->longname),
+ "USBTV Audio at bus %d device %d", usbtv->udev->bus->busnum,
+ usbtv->udev->devnum);
+
+ snd_card_set_dev(card, usbtv->dev);
+
+ usbtv->snd = card;
+
+ rv = snd_pcm_new(card, "USBTV Audio", 0, 0, 1, &pcm);
+ if (rv < 0)
+ goto err;
+
+ strlcpy(pcm->name, "USBTV Audio Input", sizeof(pcm->name));
+ pcm->info_flags = 0;
+ pcm->private_data = usbtv;
+
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_usbtv_pcm_ops);
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
+ snd_dma_continuous_data(GFP_KERNEL), USBTV_AUDIO_BUFFER,
+ USBTV_AUDIO_BUFFER);
+
+ rv = snd_card_register(card);
+ if (rv)
+ goto err;
+
+ return 0;
+
+err:
+ usbtv->snd = NULL;
+ snd_card_free(card);
+
+ return rv;
+}
+
+void usbtv_audio_free(struct usbtv *usbtv)
+{
+ if (usbtv->snd && usbtv->udev) {
+ snd_card_free(usbtv->snd);
+ usbtv->snd = NULL;
+ }
+}
diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c
index 473fab81b602..29428bef272c 100644
--- a/drivers/media/usb/usbtv/usbtv-core.c
+++ b/drivers/media/usb/usbtv/usbtv-core.c
@@ -1,5 +1,5 @@
/*
- * Fushicai USBTV007 Video Grabber Driver
+ * Fushicai USBTV007 Audio-Video Grabber Driver
*
* Product web site:
* http://www.fushicai.com/products_detail/&productId=d05449ee-b690-42f9-a661-aa7353894bed.html
@@ -84,12 +84,19 @@ static int usbtv_probe(struct usb_interface *intf,
if (ret < 0)
goto usbtv_video_fail;
+ ret = usbtv_audio_init(usbtv);
+ if (ret < 0)
+ goto usbtv_audio_fail;
+
/* for simplicity we exploit the v4l2_device reference counting */
v4l2_device_get(&usbtv->v4l2_dev);
- dev_info(dev, "Fushicai USBTV007 Video Grabber\n");
+ dev_info(dev, "Fushicai USBTV007 Audio-Video Grabber\n");
return 0;
+usbtv_audio_fail:
+ usbtv_video_free(usbtv);
+
usbtv_video_fail:
usb_set_intfdata(intf, NULL);
usb_put_dev(usbtv->udev);
@@ -101,11 +108,13 @@ usbtv_video_fail:
static void usbtv_disconnect(struct usb_interface *intf)
{
struct usbtv *usbtv = usb_get_intfdata(intf);
+
usb_set_intfdata(intf, NULL);
if (!usbtv)
return;
+ usbtv_audio_free(usbtv);
usbtv_video_free(usbtv);
usb_put_dev(usbtv->udev);
@@ -122,8 +131,8 @@ static struct usb_device_id usbtv_id_table[] = {
};
MODULE_DEVICE_TABLE(usb, usbtv_id_table);
-MODULE_AUTHOR("Lubomir Rintel");
-MODULE_DESCRIPTION("Fushicai USBTV007 Video Grabber Driver");
+MODULE_AUTHOR("Lubomir Rintel, Federico Simoncelli");
+MODULE_DESCRIPTION("Fushicai USBTV007 Audio-Video Grabber Driver");
MODULE_LICENSE("Dual BSD/GPL");
static struct usb_driver usbtv_usb_driver = {
diff --git a/drivers/media/usb/usbtv/usbtv-video.c b/drivers/media/usb/usbtv/usbtv-video.c
index 030c5854b4b3..9d3525f659f0 100644
--- a/drivers/media/usb/usbtv/usbtv-video.c
+++ b/drivers/media/usb/usbtv/usbtv-video.c
@@ -1,5 +1,5 @@
/*
- * Fushicai USBTV007 Video Grabber Driver
+ * Fushicai USBTV007 Audio-Video Grabber Driver
*
* Product web site:
* http://www.fushicai.com/products_detail/&productId=d05449ee-b690-42f9-a661-aa7353894bed.html
@@ -79,7 +79,6 @@ static int usbtv_select_input(struct usbtv *usbtv, int input)
{ USBTV_BASE + 0x011f, 0x00f2 },
{ USBTV_BASE + 0x0127, 0x0060 },
{ USBTV_BASE + 0x00ae, 0x0010 },
- { USBTV_BASE + 0x0284, 0x00aa },
{ USBTV_BASE + 0x0239, 0x0060 },
};
@@ -88,7 +87,6 @@ static int usbtv_select_input(struct usbtv *usbtv, int input)
{ USBTV_BASE + 0x011f, 0x00ff },
{ USBTV_BASE + 0x0127, 0x0060 },
{ USBTV_BASE + 0x00ae, 0x0030 },
- { USBTV_BASE + 0x0284, 0x0088 },
{ USBTV_BASE + 0x0239, 0x0060 },
};
@@ -225,7 +223,6 @@ static int usbtv_setup_capture(struct usbtv *usbtv)
{ USBTV_BASE + 0x0159, 0x0006 },
{ USBTV_BASE + 0x015d, 0x0000 },
- { USBTV_BASE + 0x0284, 0x0088 },
{ USBTV_BASE + 0x0003, 0x0004 },
{ USBTV_BASE + 0x0100, 0x00d3 },
{ USBTV_BASE + 0x0115, 0x0015 },
@@ -256,7 +253,7 @@ static int usbtv_setup_capture(struct usbtv *usbtv)
* 720 pixel lines, as the chunk is 240 words long, which is 480 pixels.
* Therefore, we break down the chunk into two halves before copying,
* so that we can interleave a line if needed. */
-static void usbtv_chunk_to_vbuf(u32 *frame, u32 *src, int chunk_no, int odd)
+static void usbtv_chunk_to_vbuf(u32 *frame, __be32 *src, int chunk_no, int odd)
{
int half;
@@ -266,6 +263,7 @@ static void usbtv_chunk_to_vbuf(u32 *frame, u32 *src, int chunk_no, int odd)
int part_index = (line * 2 + !odd) * 3 + (part_no % 3);
u32 *dst = &frame[part_index * USBTV_CHUNK/2];
+
memcpy(dst, src, USBTV_CHUNK/2 * sizeof(*src));
src += USBTV_CHUNK/2;
}
@@ -274,7 +272,7 @@ static void usbtv_chunk_to_vbuf(u32 *frame, u32 *src, int chunk_no, int odd)
/* Called for each 256-byte image chunk.
* First word identifies the chunk, followed by 240 words of image
* data and padding. */
-static void usbtv_image_chunk(struct usbtv *usbtv, u32 *chunk)
+static void usbtv_image_chunk(struct usbtv *usbtv, __be32 *chunk)
{
int frame_id, odd, chunk_no;
u32 *frame;
@@ -365,7 +363,7 @@ static void usbtv_iso_cb(struct urb *ip)
for (offset = 0; USBTV_CHUNK_SIZE * offset < size; offset++)
usbtv_image_chunk(usbtv,
- (u32 *)&data[USBTV_CHUNK_SIZE * offset]);
+ (__be32 *)&data[USBTV_CHUNK_SIZE * offset]);
}
resubmit:
@@ -410,6 +408,7 @@ static void usbtv_stop(struct usbtv *usbtv)
/* Cancel running transfers. */
for (i = 0; i < USBTV_ISOC_TRANSFERS; i++) {
struct urb *ip = usbtv->isoc_urbs[i];
+
if (ip == NULL)
continue;
usb_kill_urb(ip);
@@ -434,6 +433,8 @@ static int usbtv_start(struct usbtv *usbtv)
int i;
int ret;
+ usbtv_audio_suspend(usbtv);
+
ret = usb_set_interface(usbtv->udev, 0, 0);
if (ret < 0)
return ret;
@@ -446,6 +447,8 @@ static int usbtv_start(struct usbtv *usbtv)
if (ret < 0)
return ret;
+ usbtv_audio_resume(usbtv);
+
for (i = 0; i < USBTV_ISOC_TRANSFERS; i++) {
struct urb *ip;
@@ -559,6 +562,7 @@ static int usbtv_g_input(struct file *file, void *priv, unsigned int *i)
static int usbtv_s_input(struct file *file, void *priv, unsigned int i)
{
struct usbtv *usbtv = video_drvdata(file);
+
return usbtv_select_input(usbtv, i);
}
diff --git a/drivers/media/usb/usbtv/usbtv.h b/drivers/media/usb/usbtv/usbtv.h
index cb1d388cc647..968119581fab 100644
--- a/drivers/media/usb/usbtv/usbtv.h
+++ b/drivers/media/usb/usbtv/usbtv.h
@@ -1,5 +1,5 @@
/*
- * Fushicai USBTV007 Video Grabber Driver
+ * Fushicai USBTV007 Audio-Video Grabber Driver
*
* Copyright (c) 2013 Lubomir Rintel
* All rights reserved.
@@ -28,6 +28,7 @@
/* Hardware. */
#define USBTV_VIDEO_ENDP 0x81
+#define USBTV_AUDIO_ENDP 0x83
#define USBTV_BASE 0xc000
#define USBTV_REQUEST_REG 12
@@ -39,6 +40,10 @@
#define USBTV_CHUNK_SIZE 256
#define USBTV_CHUNK 240
+#define USBTV_AUDIO_URBSIZE 20480
+#define USBTV_AUDIO_HDRSIZE 4
+#define USBTV_AUDIO_BUFFER 65536
+
/* Chunk header. */
#define USBTV_MAGIC_OK(chunk) ((be32_to_cpu(chunk[0]) & 0xff000000) \
== 0x88000000)
@@ -91,9 +96,23 @@ struct usbtv {
int iso_size;
unsigned int sequence;
struct urb *isoc_urbs[USBTV_ISOC_TRANSFERS];
+
+ /* audio */
+ struct snd_card *snd;
+ struct snd_pcm_substream *snd_substream;
+ atomic_t snd_stream;
+ struct work_struct snd_trigger;
+ struct urb *snd_bulk_urb;
+ size_t snd_buffer_pos;
+ size_t snd_period_pos;
};
int usbtv_set_regs(struct usbtv *usbtv, const u16 regs[][2], int size);
int usbtv_video_init(struct usbtv *usbtv);
void usbtv_video_free(struct usbtv *usbtv);
+
+int usbtv_audio_init(struct usbtv *usbtv);
+void usbtv_audio_free(struct usbtv *usbtv);
+void usbtv_audio_suspend(struct usbtv *usbtv);
+void usbtv_audio_resume(struct usbtv *usbtv);
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index 0eb82106d2ff..3e59b288b8a8 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -309,9 +309,8 @@ static struct uvc_control_info uvc_ctrls[] = {
.selector = UVC_CT_PANTILT_RELATIVE_CONTROL,
.index = 12,
.size = 4,
- .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_MIN
- | UVC_CTRL_FLAG_GET_MAX | UVC_CTRL_FLAG_GET_RES
- | UVC_CTRL_FLAG_GET_DEF
+ .flags = UVC_CTRL_FLAG_SET_CUR
+ | UVC_CTRL_FLAG_GET_RANGE
| UVC_CTRL_FLAG_AUTO_UPDATE,
},
{
@@ -391,6 +390,35 @@ static void uvc_ctrl_set_zoom(struct uvc_control_mapping *mapping,
data[2] = min((int)abs(value), 0xff);
}
+static __s32 uvc_ctrl_get_rel_speed(struct uvc_control_mapping *mapping,
+ __u8 query, const __u8 *data)
+{
+ unsigned int first = mapping->offset / 8;
+ __s8 rel = (__s8)data[first];
+
+ switch (query) {
+ case UVC_GET_CUR:
+ return (rel == 0) ? 0 : (rel > 0 ? data[first+1]
+ : -data[first+1]);
+ case UVC_GET_MIN:
+ return -data[first+1];
+ case UVC_GET_MAX:
+ case UVC_GET_RES:
+ case UVC_GET_DEF:
+ default:
+ return data[first+1];
+ }
+}
+
+static void uvc_ctrl_set_rel_speed(struct uvc_control_mapping *mapping,
+ __s32 value, __u8 *data)
+{
+ unsigned int first = mapping->offset / 8;
+
+ data[first] = value == 0 ? 0 : (value > 0) ? 1 : 0xff;
+ data[first+1] = min_t(int, abs(value), 0xff);
+}
+
static struct uvc_control_mapping uvc_ctrl_mappings[] = {
{
.id = V4L2_CID_BRIGHTNESS,
@@ -677,6 +705,30 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.data_type = UVC_CTRL_DATA_TYPE_SIGNED,
},
{
+ .id = V4L2_CID_PAN_SPEED,
+ .name = "Pan (Speed)",
+ .entity = UVC_GUID_UVC_CAMERA,
+ .selector = UVC_CT_PANTILT_RELATIVE_CONTROL,
+ .size = 16,
+ .offset = 0,
+ .v4l2_type = V4L2_CTRL_TYPE_INTEGER,
+ .data_type = UVC_CTRL_DATA_TYPE_SIGNED,
+ .get = uvc_ctrl_get_rel_speed,
+ .set = uvc_ctrl_set_rel_speed,
+ },
+ {
+ .id = V4L2_CID_TILT_SPEED,
+ .name = "Tilt (Speed)",
+ .entity = UVC_GUID_UVC_CAMERA,
+ .selector = UVC_CT_PANTILT_RELATIVE_CONTROL,
+ .size = 16,
+ .offset = 16,
+ .v4l2_type = V4L2_CTRL_TYPE_INTEGER,
+ .data_type = UVC_CTRL_DATA_TYPE_SIGNED,
+ .get = uvc_ctrl_get_rel_speed,
+ .set = uvc_ctrl_set_rel_speed,
+ },
+ {
.id = V4L2_CID_PRIVACY,
.name = "Privacy",
.entity = UVC_GUID_UVC_CAMERA,
@@ -1795,7 +1847,7 @@ done:
* - Handle restore order (Auto-Exposure Mode should be restored before
* Exposure Time).
*/
-int uvc_ctrl_resume_device(struct uvc_device *dev)
+int uvc_ctrl_restore_values(struct uvc_device *dev)
{
struct uvc_control *ctrl;
struct uvc_entity *entity;
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index f8135f4e3b52..7c8322d4fc63 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -2000,7 +2000,7 @@ static int __uvc_resume(struct usb_interface *intf, int reset)
int ret = 0;
if (reset) {
- ret = uvc_ctrl_resume_device(dev);
+ ret = uvc_ctrl_restore_values(dev);
if (ret < 0)
return ret;
}
@@ -2175,6 +2175,15 @@ static struct usb_device_id uvc_ids[] = {
.bInterfaceClass = USB_CLASS_VENDOR_SPEC,
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0 },
+ /* Logitech HD Pro Webcam C920 */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x046d,
+ .idProduct = 0x082d,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_QUIRK_RESTORE_CTRLS_ON_INIT },
/* Chicony CNF7129 (Asus EEE 100HE) */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
@@ -2229,6 +2238,15 @@ static struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = UVC_QUIRK_PROBE_DEF },
+ /* Dell XPS M1330 (OmniVision OV7670 webcam) */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x05a9,
+ .idProduct = 0x7670,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_QUIRK_PROBE_DEF },
/* Apple Built-In iSight */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index 378ae02e593b..60a8e2c3631e 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -318,6 +318,7 @@ static int uvc_v4l2_set_format(struct uvc_streaming *stream,
stream->ctrl = probe;
stream->cur_format = format;
stream->cur_frame = frame;
+ stream->frame_size = fmt->fmt.pix.sizeimage;
done:
mutex_unlock(&stream->mutex);
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index 9144a2f3ed82..9ace520bb079 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -1143,7 +1143,7 @@ static int uvc_video_encode_data(struct uvc_streaming *stream,
static void uvc_video_validate_buffer(const struct uvc_streaming *stream,
struct uvc_buffer *buf)
{
- if (buf->length != buf->bytesused &&
+ if (stream->frame_size != buf->bytesused &&
!(stream->cur_format->flags & UVC_FMT_FLAG_COMPRESSED))
buf->error = 1;
}
@@ -1463,7 +1463,7 @@ static unsigned int uvc_endpoint_max_bpi(struct usb_device *dev,
switch (dev->speed) {
case USB_SPEED_SUPER:
- return ep->ss_ep_comp.wBytesPerInterval;
+ return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);
case USB_SPEED_HIGH:
psize = usb_endpoint_maxp(&ep->desc);
return (psize & 0x07ff) * (1 + ((psize >> 11) & 3));
@@ -1678,6 +1678,12 @@ static int uvc_init_video(struct uvc_streaming *stream, gfp_t gfp_flags)
}
}
+ /* The Logitech C920 temporarily forgets that it should not be adjusting
+ * Exposure Absolute during init, so restore controls to stored values.
+ */
+ if (stream->dev->quirks & UVC_QUIRK_RESTORE_CTRLS_ON_INIT)
+ uvc_ctrl_restore_values(stream->dev);
+
return 0;
}
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index b1f69a6d4068..6f676c29ec09 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -147,6 +147,7 @@
#define UVC_QUIRK_FIX_BANDWIDTH 0x00000080
#define UVC_QUIRK_PROBE_DEF 0x00000100
#define UVC_QUIRK_RESTRICT_FRAME_RATE 0x00000200
+#define UVC_QUIRK_RESTORE_CTRLS_ON_INIT 0x00000400
/* Format flags */
#define UVC_FMT_FLAG_COMPRESSED 0x00000001
@@ -456,6 +457,8 @@ struct uvc_streaming {
struct uvc_format *def_format;
struct uvc_format *cur_format;
struct uvc_frame *cur_frame;
+ size_t frame_size;
+
/* Protect access to ctrl, cur_format, cur_frame and hardware video
* probe control.
*/
@@ -688,7 +691,7 @@ extern int uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
const struct uvc_control_mapping *mapping);
extern int uvc_ctrl_init_device(struct uvc_device *dev);
extern void uvc_ctrl_cleanup_device(struct uvc_device *dev);
-extern int uvc_ctrl_resume_device(struct uvc_device *dev);
+extern int uvc_ctrl_restore_values(struct uvc_device *dev);
extern int uvc_ctrl_begin(struct uvc_video_chain *chain);
extern int __uvc_ctrl_commit(struct uvc_fh *handle, int rollback,
diff --git a/drivers/media/v4l2-core/tuner-core.c b/drivers/media/v4l2-core/tuner-core.c
index 06c18ba16fa0..559f8372e2eb 100644
--- a/drivers/media/v4l2-core/tuner-core.c
+++ b/drivers/media/v4l2-core/tuner-core.c
@@ -601,7 +601,7 @@ static int tuner_probe(struct i2c_client *client,
t->name = "(tuner unset)";
t->type = UNSET;
t->audmode = V4L2_TUNER_MODE_STEREO;
- t->standby = 1;
+ t->standby = true;
t->radio_freq = 87.5 * 16000; /* Initial freq range */
t->tv_freq = 400 * 16; /* Sets freq to VHF High - needed for some PLL's to properly start */
@@ -1260,7 +1260,9 @@ static int tuner_suspend(struct device *dev)
tuner_dbg("suspend\n");
- if (!t->standby && analog_ops->standby)
+ if (t->fe.ops.tuner_ops.suspend)
+ t->fe.ops.tuner_ops.suspend(&t->fe);
+ else if (!t->standby && analog_ops->standby)
analog_ops->standby(&t->fe);
return 0;
@@ -1273,7 +1275,9 @@ static int tuner_resume(struct device *dev)
tuner_dbg("resume\n");
- if (!t->standby)
+ if (t->fe.ops.tuner_ops.resume)
+ t->fe.ops.tuner_ops.resume(&t->fe);
+ else if (!t->standby)
if (set_mode(t, t->mode) == 0)
set_freq(t, 0);
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index ccaa38f65cf1..2e9d81f4c1a5 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -435,16 +435,13 @@ static unsigned int clamp_align(unsigned int x, unsigned int min,
/* Bits that must be zero to be aligned */
unsigned int mask = ~((1 << align) - 1);
+ /* Clamp to aligned min and max */
+ x = clamp(x, (min + ~mask) & mask, max & mask);
+
/* Round to nearest aligned value */
if (align)
x = (x + (1 << (align - 1))) & mask;
- /* Clamp to aligned value of min and max */
- if (x < min)
- x = (min + ~mask) & mask;
- else if (x > max)
- x = max & mask;
-
return x;
}
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index cca6c2f76b3a..e502a5fb2994 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -328,7 +328,7 @@ struct v4l2_buffer32 {
__u32 reserved;
};
-static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
+static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
enum v4l2_memory memory)
{
void __user *up_pln;
@@ -357,7 +357,7 @@ static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
return 0;
}
-static int put_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
+static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
enum v4l2_memory memory)
{
if (copy_in_user(up32, up, 2 * sizeof(__u32)) ||
@@ -427,7 +427,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
* by passing a very big num_planes value */
uplane = compat_alloc_user_space(num_planes *
sizeof(struct v4l2_plane));
- kp->m.planes = uplane;
+ kp->m.planes = (__force struct v4l2_plane *)uplane;
while (--num_planes >= 0) {
ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
@@ -498,7 +498,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
if (num_planes == 0)
return 0;
- uplane = kp->m.planes;
+ uplane = (__force struct v4l2_plane __user *)kp->m.planes;
if (get_user(p, &up->m.planes))
return -EFAULT;
uplane32 = compat_ptr(p);
@@ -562,7 +562,7 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame
get_user(kp->flags, &up->flags) ||
copy_from_user(&kp->fmt, &up->fmt, sizeof(up->fmt)))
return -EFAULT;
- kp->base = compat_ptr(tmp);
+ kp->base = (__force void *)compat_ptr(tmp);
return 0;
}
@@ -667,11 +667,15 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
n * sizeof(struct v4l2_ext_control32)))
return -EFAULT;
kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
- kp->controls = kcontrols;
+ kp->controls = (__force struct v4l2_ext_control *)kcontrols;
while (--n >= 0) {
+ u32 id;
+
if (copy_in_user(kcontrols, ucontrols, sizeof(*ucontrols)))
return -EFAULT;
- if (ctrl_is_pointer(kcontrols->id)) {
+ if (get_user(id, &kcontrols->id))
+ return -EFAULT;
+ if (ctrl_is_pointer(id)) {
void __user *s;
if (get_user(p, &ucontrols->string))
@@ -689,7 +693,8 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up)
{
struct v4l2_ext_control32 __user *ucontrols;
- struct v4l2_ext_control __user *kcontrols = kp->controls;
+ struct v4l2_ext_control __user *kcontrols =
+ (__force struct v4l2_ext_control __user *)kp->controls;
int n = kp->count;
compat_caddr_t p;
@@ -711,11 +716,14 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
while (--n >= 0) {
unsigned size = sizeof(*ucontrols);
+ u32 id;
+ if (get_user(id, &kcontrols->id))
+ return -EFAULT;
/* Do not modify the pointer when copying a pointer control.
The contents of the pointer was changed, not the pointer
itself. */
- if (ctrl_is_pointer(kcontrols->id))
+ if (ctrl_is_pointer(id))
size -= sizeof(ucontrols->value64);
if (copy_in_user(ucontrols, kcontrols, size))
return -EFAULT;
@@ -770,7 +778,7 @@ static int get_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
get_user(tmp, &up->edid) ||
copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
return -EFAULT;
- kp->edid = compat_ptr(tmp);
+ kp->edid = (__force u8 *)compat_ptr(tmp);
return 0;
}
@@ -783,7 +791,7 @@ static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
put_user(kp->start_block, &up->start_block) ||
put_user(kp->blocks, &up->blocks) ||
put_user(tmp, &up->edid) ||
- copy_to_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
+ copy_to_user(up->reserved, kp->reserved, sizeof(up->reserved)))
return -EFAULT;
return 0;
}
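The get_user(id, &kcontrols->id) hunks above exist because kcontrols points at memory obtained from compat_alloc_user_space(), which is a __user pointer; any field the kernel wants to branch on has to be copied in first rather than dereferenced directly. A minimal sketch of that pattern, with illustrative struct and function names that are not part of the patch:
/*
 * Minimal sketch of the pattern used above: a pointer handed out by
 * compat_alloc_user_space() is a __user pointer, so a field must be
 * fetched with get_user() before the kernel can look at it. The struct
 * and function names here are illustrative only.
 */
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

struct demo_ctrl {
	u32 id;
	u32 value;
};

static int demo_fetch_id(struct demo_ctrl __user *uctrl, u32 *id_out)
{
	u32 id;

	if (get_user(id, &uctrl->id))	/* copy 4 bytes from user space */
		return -EFAULT;

	*id_out = id;			/* now safe to use in kernel logic */
	return 0;
}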
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index f030d6a9e044..86012140923f 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -796,6 +796,8 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_AUTO_FOCUS_STOP: return "Auto Focus, Stop";
case V4L2_CID_AUTO_FOCUS_STATUS: return "Auto Focus, Status";
case V4L2_CID_AUTO_FOCUS_RANGE: return "Auto Focus, Range";
+ case V4L2_CID_PAN_SPEED: return "Pan, Speed";
+ case V4L2_CID_TILT_SPEED: return "Tilt, Speed";
/* FM Radio Modulator controls */
/* Keep the order of the 'case's the same as in v4l2-controls.h! */
@@ -859,6 +861,10 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_VBLANK: return "Vertical Blanking";
case V4L2_CID_HBLANK: return "Horizontal Blanking";
case V4L2_CID_ANALOGUE_GAIN: return "Analogue Gain";
+ case V4L2_CID_TEST_PATTERN_RED: return "Red Pixel Value";
+ case V4L2_CID_TEST_PATTERN_GREENR: return "Green (Red) Pixel Value";
+ case V4L2_CID_TEST_PATTERN_BLUE: return "Blue Pixel Value";
+ case V4L2_CID_TEST_PATTERN_GREENB: return "Green (Blue) Pixel Value";
/* Image processing controls */
/* Keep the order of the 'case's the same as in v4l2-controls.h! */
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
index ce1c9f5d9dee..b1d8dbb39665 100644
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -164,7 +164,8 @@ bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t,
bt->width > cap->max_width ||
bt->pixelclock < cap->min_pixelclock ||
bt->pixelclock > cap->max_pixelclock ||
- (cap->standards && !(bt->standards & cap->standards)) ||
+ (cap->standards && bt->standards &&
+ !(bt->standards & cap->standards)) ||
(bt->interlaced && !(caps & V4L2_DV_BT_CAP_INTERLACED)) ||
(!bt->interlaced && !(caps & V4L2_DV_BT_CAP_PROGRESSIVE)))
return false;
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index d15e16737eef..9ccb19a435ef 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -562,7 +562,7 @@ static void v4l_print_ext_controls(const void *arg, bool write_only)
pr_cont("class=0x%x, count=%d, error_idx=%d",
p->ctrl_class, p->count, p->error_idx);
for (i = 0; i < p->count; i++) {
- if (p->controls[i].size)
+ if (!p->controls[i].size)
pr_cont(", id/val=0x%x/0x%x",
p->controls[i].id, p->controls[i].value);
else
@@ -1153,9 +1153,9 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
switch (p->type) {
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: {
- struct v4l2_clip *clips = p->fmt.win.clips;
+ struct v4l2_clip __user *clips = p->fmt.win.clips;
u32 clipcount = p->fmt.win.clipcount;
- void *bitmap = p->fmt.win.bitmap;
+ void __user *bitmap = p->fmt.win.bitmap;
memset(&p->fmt, 0, sizeof(p->fmt));
p->fmt.win.clips = clips;
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index b4d235c13fbf..543631c3557a 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -501,11 +501,20 @@ int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd,
struct v4l2_subdev_format *source_fmt,
struct v4l2_subdev_format *sink_fmt)
{
+ /* The width, height and code must match. */
if (source_fmt->format.width != sink_fmt->format.width
|| source_fmt->format.height != sink_fmt->format.height
|| source_fmt->format.code != sink_fmt->format.code)
return -EINVAL;
+ /* The field order must match, or the sink field order must be NONE
+ * to support interlaced hardware connected to bridges that support
+ * progressive formats only.
+ */
+ if (source_fmt->format.field != sink_fmt->format.field &&
+ sink_fmt->format.field != V4L2_FIELD_NONE)
+ return -EINVAL;
+
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate_default);
diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c
index fb5ee5dd8fe9..b91a266d0b7e 100644
--- a/drivers/media/v4l2-core/videobuf-core.c
+++ b/drivers/media/v4l2-core/videobuf-core.c
@@ -441,11 +441,6 @@ int videobuf_reqbufs(struct videobuf_queue *q,
unsigned int size, count;
int retval;
- if (req->count < 1) {
- dprintk(1, "reqbufs: count invalid (%d)\n", req->count);
- return -EINVAL;
- }
-
if (req->memory != V4L2_MEMORY_MMAP &&
req->memory != V4L2_MEMORY_USERPTR &&
req->memory != V4L2_MEMORY_OVERLAY) {
@@ -471,6 +466,12 @@ int videobuf_reqbufs(struct videobuf_queue *q,
goto done;
}
+ if (req->count == 0) {
+ dprintk(1, "reqbufs: count invalid (%d)\n", req->count);
+ retval = __videobuf_free(q);
+ goto done;
+ }
+
count = req->count;
if (count > VIDEO_MAX_FRAME)
count = VIDEO_MAX_FRAME;
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index 3c8cc023a5a5..3ff15f1c9d70 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -253,9 +253,11 @@ int videobuf_dma_init_kernel(struct videobuf_dmabuf *dma, int direction,
return 0;
out_free_pages:
while (i > 0) {
- void *addr = page_address(dma->vaddr_pages[i]);
- dma_free_coherent(dma->dev, PAGE_SIZE, addr, dma->dma_addr[i]);
+ void *addr;
+
i--;
+ addr = page_address(dma->vaddr_pages[i]);
+ dma_free_coherent(dma->dev, PAGE_SIZE, addr, dma->dma_addr[i]);
}
kfree(dma->dma_addr);
dma->dma_addr = NULL;
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 25d3ae2188cb..f2e43de3dd87 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -36,7 +36,7 @@ module_param(debug, int, 0644);
#define dprintk(level, fmt, arg...) \
do { \
if (debug >= level) \
- pr_debug("vb2: %s: " fmt, __func__, ## arg); \
+ pr_info("vb2: %s: " fmt, __func__, ## arg); \
} while (0)
#ifdef CONFIG_VIDEO_ADV_DEBUG
@@ -882,7 +882,9 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
* We already have buffers allocated, so first check if they
* are not in use and can be freed.
*/
+ mutex_lock(&q->mmap_lock);
if (q->memory == V4L2_MEMORY_MMAP && __buffers_in_use(q)) {
+ mutex_unlock(&q->mmap_lock);
dprintk(1, "memory in use, cannot free\n");
return -EBUSY;
}
@@ -894,6 +896,7 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
*/
__vb2_queue_cancel(q);
ret = __vb2_queue_free(q, q->num_buffers);
+ mutex_unlock(&q->mmap_lock);
if (ret)
return ret;
@@ -955,6 +958,7 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
*/
}
+ mutex_lock(&q->mmap_lock);
q->num_buffers = allocated_buffers;
if (ret < 0) {
@@ -963,8 +967,10 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
* from q->num_buffers.
*/
__vb2_queue_free(q, allocated_buffers);
+ mutex_unlock(&q->mmap_lock);
return ret;
}
+ mutex_unlock(&q->mmap_lock);
/*
* Return the number of successfully allocated buffers
@@ -1063,6 +1069,7 @@ static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create
*/
}
+ mutex_lock(&q->mmap_lock);
q->num_buffers += allocated_buffers;
if (ret < 0) {
@@ -1071,8 +1078,10 @@ static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create
* from q->num_buffers.
*/
__vb2_queue_free(q, allocated_buffers);
+ mutex_unlock(&q->mmap_lock);
return -ENOMEM;
}
+ mutex_unlock(&q->mmap_lock);
/*
* Return the number of successfully allocated buffers
@@ -1581,7 +1590,6 @@ static void __enqueue_in_driver(struct vb2_buffer *vb)
static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
struct vb2_queue *q = vb->vb2_queue;
- struct rw_semaphore *mmap_sem;
int ret;
ret = __verify_length(vb, b);
@@ -1618,26 +1626,9 @@ static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
ret = __qbuf_mmap(vb, b);
break;
case V4L2_MEMORY_USERPTR:
- /*
- * In case of user pointer buffers vb2 allocators need to get
- * direct access to userspace pages. This requires getting
- * the mmap semaphore for read access in the current process
- * structure. The same semaphore is taken before calling mmap
- * operation, while both qbuf/prepare_buf and mmap are called
- * by the driver or v4l2 core with the driver's lock held.
- * To avoid an AB-BA deadlock (mmap_sem then driver's lock in
- * mmap and driver's lock then mmap_sem in qbuf/prepare_buf),
- * the videobuf2 core releases the driver's lock, takes
- * mmap_sem and then takes the driver's lock again.
- */
- mmap_sem = &current->mm->mmap_sem;
- call_void_qop(q, wait_prepare, q);
- down_read(mmap_sem);
- call_void_qop(q, wait_finish, q);
-
+ down_read(&current->mm->mmap_sem);
ret = __qbuf_userptr(vb, b);
-
- up_read(mmap_sem);
+ up_read(&current->mm->mmap_sem);
break;
case V4L2_MEMORY_DMABUF:
ret = __qbuf_dmabuf(vb, b);
@@ -2504,7 +2495,9 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
return -EINVAL;
}
+ mutex_lock(&q->mmap_lock);
ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
+ mutex_unlock(&q->mmap_lock);
if (ret)
return ret;
@@ -2523,6 +2516,7 @@ unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
unsigned long off = pgoff << PAGE_SHIFT;
struct vb2_buffer *vb;
unsigned int buffer, plane;
+ void *vaddr;
int ret;
if (q->memory != V4L2_MEMORY_MMAP) {
@@ -2539,7 +2533,8 @@ unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
vb = q->bufs[buffer];
- return (unsigned long)vb2_plane_vaddr(vb, plane);
+ vaddr = vb2_plane_vaddr(vb, plane);
+ return vaddr ? (unsigned long)vaddr : -EINVAL;
}
EXPORT_SYMBOL_GPL(vb2_get_unmapped_area);
#endif
@@ -2686,6 +2681,7 @@ int vb2_queue_init(struct vb2_queue *q)
INIT_LIST_HEAD(&q->queued_list);
INIT_LIST_HEAD(&q->done_list);
spin_lock_init(&q->done_lock);
+ mutex_init(&q->mmap_lock);
init_waitqueue_head(&q->done_wq);
if (q->buf_struct_size == 0)
@@ -2707,7 +2703,9 @@ void vb2_queue_release(struct vb2_queue *q)
{
__vb2_cleanup_fileio(q);
__vb2_queue_cancel(q);
+ mutex_lock(&q->mmap_lock);
__vb2_queue_free(q, q->num_buffers);
+ mutex_unlock(&q->mmap_lock);
}
EXPORT_SYMBOL_GPL(vb2_queue_release);
@@ -2985,6 +2983,12 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
buf->queued = 0;
buf->size = read ? vb2_get_plane_payload(q->bufs[index], 0)
: vb2_plane_size(q->bufs[index], 0);
+ /* Compensate for data_offset on read in the multiplanar case. */
+ if (is_multiplanar && read &&
+ fileio->b.m.planes[0].data_offset < buf->size) {
+ buf->pos = fileio->b.m.planes[0].data_offset;
+ buf->size -= buf->pos;
+ }
} else {
buf = &fileio->bufs[index];
}
@@ -3372,15 +3376,8 @@ EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf);
int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
struct video_device *vdev = video_devdata(file);
- struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
- int err;
- if (lock && mutex_lock_interruptible(lock))
- return -ERESTARTSYS;
- err = vb2_mmap(vdev->queue, vma);
- if (lock)
- mutex_unlock(lock);
- return err;
+ return vb2_mmap(vdev->queue, vma);
}
EXPORT_SYMBOL_GPL(vb2_fop_mmap);
@@ -3499,15 +3496,8 @@ unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct video_device *vdev = video_devdata(file);
- struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
- int ret;
- if (lock && mutex_lock_interruptible(lock))
- return -ERESTARTSYS;
- ret = vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
- if (lock)
- mutex_unlock(lock);
- return ret;
+ return vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area);
#endif
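The vb2 hunks above drop the coarse queue/device lock around vb2_mmap() and vb2_get_unmapped_area() and introduce q->mmap_lock instead, taking it around every path that frees or reallocates the buffer array (__reqbufs(), __create_bufs(), vb2_queue_release()). The rule being enforced is that a mapping must never observe the buffer table mid-teardown. A small userspace analogy of that rule, purely illustrative and not kernel code:
/*
 * Userspace analogy of the race the new q->mmap_lock closes: one thread
 * "maps" entries of a buffer table while another frees the table. Both
 * sides must take the same lock.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static int *buffers;		/* stands in for q->bufs[] */
static int num_buffers;

static void *map_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&table_lock);	/* vb2_mmap() side */
	if (num_buffers > 0)
		printf("mapping buffer 0 -> %d\n", buffers[0]);
	pthread_mutex_unlock(&table_lock);
	return NULL;
}

static void free_buffers(void)
{
	pthread_mutex_lock(&table_lock);	/* __vb2_queue_free() side */
	free(buffers);
	buffers = NULL;
	num_buffers = 0;
	pthread_mutex_unlock(&table_lock);
}

int main(void)
{
	pthread_t t;

	num_buffers = 4;
	buffers = calloc(num_buffers, sizeof(*buffers));

	pthread_create(&t, NULL, map_thread, NULL);
	free_buffers();
	pthread_join(t, NULL);
	return 0;
}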
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index de5abf244746..cf66ef1ffaf3 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -454,6 +454,21 @@ config MFD_MAX8998
additional drivers must be enabled in order to use the functionality
of the device.
+config MFD_MENF21BMC
+ tristate "MEN 14F021P00 Board Management Controller Support"
+ depends on I2C
+ select MFD_CORE
+ help
+ Say yes here to add support for the MEN 14F021P00 BMC
+ which is a Board Management Controller connected to the I2C bus.
+ The device supports multiple sub-devices like LED, HWMON and WDT.
+ This driver provides common support for accessing the devices;
+ additional drivers must be enabled in order to use the
+ functionality of the BMC device.
+
+ This driver can also be built as a module. If so the module
+ will be called menf21bmc.
+
config EZX_PCAP
bool "Motorola EZXPCAP Support"
depends on SPI_MASTER
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index f00148782d9b..d58068aa8aa9 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -169,6 +169,7 @@ obj-$(CONFIG_MFD_AS3711) += as3711.o
obj-$(CONFIG_MFD_AS3722) += as3722.o
obj-$(CONFIG_MFD_STW481X) += stw481x.o
obj-$(CONFIG_MFD_IPAQ_MICRO) += ipaq-micro.o
+obj-$(CONFIG_MFD_MENF21BMC) += menf21bmc.o
intel-soc-pmic-objs := intel_soc_pmic_core.o intel_soc_pmic_crc.o
obj-$(CONFIG_INTEL_SOC_PMIC) += intel-soc-pmic.o
diff --git a/drivers/mfd/menf21bmc.c b/drivers/mfd/menf21bmc.c
new file mode 100644
index 000000000000..1c274345820c
--- /dev/null
+++ b/drivers/mfd/menf21bmc.c
@@ -0,0 +1,132 @@
+/*
+ * MEN 14F021P00 Board Management Controller (BMC) MFD Core Driver.
+ *
+ * Copyright (C) 2014 MEN Mikro Elektronik Nuernberg GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/mfd/core.h>
+
+#define BMC_CMD_WDT_EXIT_PROD 0x18
+#define BMC_CMD_WDT_PROD_STAT 0x19
+#define BMC_CMD_REV_MAJOR 0x80
+#define BMC_CMD_REV_MINOR 0x81
+#define BMC_CMD_REV_MAIN 0x82
+
+static struct mfd_cell menf21bmc_cell[] = {
+ { .name = "menf21bmc_wdt", },
+ { .name = "menf21bmc_led", },
+ { .name = "menf21bmc_hwmon", }
+};
+
+static int menf21bmc_wdt_exit_prod_mode(struct i2c_client *client)
+{
+ int val, ret;
+
+ val = i2c_smbus_read_byte_data(client, BMC_CMD_WDT_PROD_STAT);
+ if (val < 0)
+ return val;
+
+ /*
+ * Production mode should not be active after delivery of the board.
+ * To be sure, check it, inform the user and exit production mode
+ * if it is active.
+ */
+ if (val == 0x00) {
+ dev_info(&client->dev,
+ "BMC in production mode. Exit production mode\n");
+
+ ret = i2c_smbus_write_byte(client, BMC_CMD_WDT_EXIT_PROD);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+menf21bmc_probe(struct i2c_client *client, const struct i2c_device_id *ids)
+{
+ int rev_major, rev_minor, rev_main;
+ int ret;
+
+ ret = i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA |
+ I2C_FUNC_SMBUS_BYTE);
+ if (!ret)
+ return -ENODEV;
+
+ rev_major = i2c_smbus_read_word_data(client, BMC_CMD_REV_MAJOR);
+ if (rev_major < 0) {
+ dev_err(&client->dev, "failed to get BMC major revision\n");
+ return rev_major;
+ }
+
+ rev_minor = i2c_smbus_read_word_data(client, BMC_CMD_REV_MINOR);
+ if (rev_minor < 0) {
+ dev_err(&client->dev, "failed to get BMC minor revision\n");
+ return rev_minor;
+ }
+
+ rev_main = i2c_smbus_read_word_data(client, BMC_CMD_REV_MAIN);
+ if (rev_main < 0) {
+ dev_err(&client->dev, "failed to get BMC main revision\n");
+ return rev_main;
+ }
+
+ dev_info(&client->dev, "FW Revision: %02d.%02d.%02d\n",
+ rev_major, rev_minor, rev_main);
+
+ /*
+ * We have to exit the Production Mode of the BMC to activate the
+ * Watchdog functionality and the BIOS life sign monitoring.
+ */
+ ret = menf21bmc_wdt_exit_prod_mode(client);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to leave production mode\n");
+ return ret;
+ }
+
+ ret = mfd_add_devices(&client->dev, 0, menf21bmc_cell,
+ ARRAY_SIZE(menf21bmc_cell), NULL, 0, NULL);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to add BMC sub-devices\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int menf21bmc_remove(struct i2c_client *client)
+{
+ mfd_remove_devices(&client->dev);
+ return 0;
+}
+
+static const struct i2c_device_id menf21bmc_id_table[] = {
+ { "menf21bmc" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, menf21bmc_id_table);
+
+static struct i2c_driver menf21bmc_driver = {
+ .driver.name = "menf21bmc",
+ .id_table = menf21bmc_id_table,
+ .probe = menf21bmc_probe,
+ .remove = menf21bmc_remove,
+};
+
+module_i2c_driver(menf21bmc_driver);
+
+MODULE_DESCRIPTION("MEN 14F021P00 BMC mfd core driver");
+MODULE_AUTHOR("Andreas Werner <andreas.werner@men.de>");
+MODULE_LICENSE("GPL v2");
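mfd_add_devices() above registers the three cells by name only; each function is expected to bind as a platform driver matching the cell name and to reach the BMC through the parent I2C client. A hedged sketch of what such a sub-driver could look like — only the "menf21bmc_wdt" cell name and the 0x80 major-revision command come from this patch, the rest is illustrative:
/*
 * Sketch of a menf21bmc sub-driver: binds to the "menf21bmc_wdt" cell
 * registered above and talks to the BMC through the parent I2C client.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>

static int demo_wdt_probe(struct platform_device *pdev)
{
	/* The MFD core created us as a child of the I2C client's device. */
	struct i2c_client *bmc = to_i2c_client(pdev->dev.parent);
	int rev;

	rev = i2c_smbus_read_word_data(bmc, 0x80);	/* BMC_CMD_REV_MAJOR */
	if (rev < 0)
		return rev;

	dev_info(&pdev->dev, "BMC major revision %d\n", rev);
	return 0;
}

static struct platform_driver demo_wdt_driver = {
	.driver		= { .name = "menf21bmc_wdt" },
	.probe		= demo_wdt_probe,
};
module_platform_driver(demo_wdt_driver);

MODULE_LICENSE("GPL v2");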
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index b841180c7c74..bbeb4516facf 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -527,4 +527,5 @@ source "drivers/misc/vmw_vmci/Kconfig"
source "drivers/misc/mic/Kconfig"
source "drivers/misc/genwqe/Kconfig"
source "drivers/misc/echo/Kconfig"
+source "drivers/misc/cxl/Kconfig"
endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 5497d026e651..7d5c4cd118c4 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -55,3 +55,4 @@ obj-y += mic/
obj-$(CONFIG_GENWQE) += genwqe/
obj-$(CONFIG_ECHO) += echo/
obj-$(CONFIG_VEXPRESS_SYSCFG) += vexpress-syscfg.o
+obj-$(CONFIG_CXL_BASE) += cxl/
diff --git a/drivers/misc/carma/carma-fpga-program.c b/drivers/misc/carma/carma-fpga-program.c
index 7be89832db19..7e97e53f9ff2 100644
--- a/drivers/misc/carma/carma-fpga-program.c
+++ b/drivers/misc/carma/carma-fpga-program.c
@@ -749,13 +749,8 @@ static ssize_t fpga_read(struct file *filp, char __user *buf, size_t count,
loff_t *f_pos)
{
struct fpga_dev *priv = filp->private_data;
-
- count = min_t(size_t, priv->bytes - *f_pos, count);
- if (copy_to_user(buf, priv->vb.vaddr + *f_pos, count))
- return -EFAULT;
-
- *f_pos += count;
- return count;
+ return simple_read_from_buffer(buf, count, f_pos,
+ priv->vb.vaddr, priv->bytes);
}
static loff_t fpga_llseek(struct file *filp, loff_t offset, int origin)
@@ -767,26 +762,7 @@ static loff_t fpga_llseek(struct file *filp, loff_t offset, int origin)
if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
return -EINVAL;
- switch (origin) {
- case SEEK_SET: /* seek relative to the beginning of the file */
- newpos = offset;
- break;
- case SEEK_CUR: /* seek relative to current position in the file */
- newpos = filp->f_pos + offset;
- break;
- case SEEK_END: /* seek relative to the end of the file */
- newpos = priv->fw_size - offset;
- break;
- default:
- return -EINVAL;
- }
-
- /* check for sanity */
- if (newpos > priv->fw_size)
- return -EINVAL;
-
- filp->f_pos = newpos;
- return newpos;
+ return fixed_size_llseek(filp, offset, origin, priv->fw_size);
}
static const struct file_operations fpga_fops = {
diff --git a/drivers/misc/cxl/Kconfig b/drivers/misc/cxl/Kconfig
new file mode 100644
index 000000000000..a990b39b4dfb
--- /dev/null
+++ b/drivers/misc/cxl/Kconfig
@@ -0,0 +1,25 @@
+#
+# IBM Coherent Accelerator (CXL) compatible devices
+#
+
+config CXL_BASE
+ bool
+ default n
+ select PPC_COPRO_BASE
+
+config CXL
+ tristate "Support for IBM Coherent Accelerators (CXL)"
+ depends on PPC_POWERNV && PCI_MSI
+ select CXL_BASE
+ default m
+ help
+ Select this option to enable driver support for IBM Coherent
+ Accelerators (CXL). CXL is otherwise known as Coherent Accelerator
+ Processor Interface (CAPI). CAPI allows accelerators in FPGAs to be
+ coherently attached to a CPU via an MMU. This driver enables
+ userspace programs to access these accelerators via /dev/cxl/afuM.N
+ devices.
+
+ CAPI adapters are found in POWER8 based systems.
+
+ If unsure, say N.
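The help text describes userspace access through /dev/cxl/afuM.N device nodes. A rough userspace sketch of that flow, assuming the uapi header (misc/cxl.h) and the CXL_IOCTL_START_WORK ioctl added alongside this driver; the device path, the "m" master suffix and the WED value are placeholders rather than anything defined here:
/*
 * Rough userspace sketch for the /dev/cxl/afuM.N interface described in
 * the help text. The path and the zero WED are assumptions; a real AFU
 * needs an AFU-specific work element descriptor.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <misc/cxl.h>

int main(void)
{
	struct cxl_ioctl_start_work work;
	int fd;

	fd = open("/dev/cxl/afu0.0m", O_RDWR);	/* master context, card 0, slice 0 */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&work, 0, sizeof(work));
	work.work_element_descriptor = 0;	/* AFU-specific WED goes here */

	if (ioctl(fd, CXL_IOCTL_START_WORK, &work) < 0)
		perror("CXL_IOCTL_START_WORK");

	close(fd);
	return 0;
}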
diff --git a/drivers/misc/cxl/Makefile b/drivers/misc/cxl/Makefile
new file mode 100644
index 000000000000..165e98fef2c2
--- /dev/null
+++ b/drivers/misc/cxl/Makefile
@@ -0,0 +1,3 @@
+cxl-y += main.o file.o irq.o fault.o native.o context.o sysfs.o debugfs.o pci.o
+obj-$(CONFIG_CXL) += cxl.o
+obj-$(CONFIG_CXL_BASE) += base.o
diff --git a/drivers/misc/cxl/base.c b/drivers/misc/cxl/base.c
new file mode 100644
index 000000000000..0654ad83675e
--- /dev/null
+++ b/drivers/misc/cxl/base.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2014 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/rcupdate.h>
+#include <asm/errno.h>
+#include <misc/cxl.h>
+#include "cxl.h"
+
+/* protected by rcu */
+static struct cxl_calls *cxl_calls;
+
+atomic_t cxl_use_count = ATOMIC_INIT(0);
+EXPORT_SYMBOL(cxl_use_count);
+
+#ifdef CONFIG_CXL_MODULE
+
+static inline struct cxl_calls *cxl_calls_get(void)
+{
+ struct cxl_calls *calls = NULL;
+
+ rcu_read_lock();
+ calls = rcu_dereference(cxl_calls);
+ if (calls && !try_module_get(calls->owner))
+ calls = NULL;
+ rcu_read_unlock();
+
+ return calls;
+}
+
+static inline void cxl_calls_put(struct cxl_calls *calls)
+{
+ BUG_ON(calls != cxl_calls);
+
+ /* we don't need to rcu this, as we hold a reference to the module */
+ module_put(cxl_calls->owner);
+}
+
+#else /* !defined CONFIG_CXL_MODULE */
+
+static inline struct cxl_calls *cxl_calls_get(void)
+{
+ return cxl_calls;
+}
+
+static inline void cxl_calls_put(struct cxl_calls *calls) { }
+
+#endif /* CONFIG_CXL_MODULE */
+
+void cxl_slbia(struct mm_struct *mm)
+{
+ struct cxl_calls *calls;
+
+ calls = cxl_calls_get();
+ if (!calls)
+ return;
+
+ if (cxl_ctx_in_use())
+ calls->cxl_slbia(mm);
+
+ cxl_calls_put(calls);
+}
+
+int register_cxl_calls(struct cxl_calls *calls)
+{
+ if (cxl_calls)
+ return -EBUSY;
+
+ rcu_assign_pointer(cxl_calls, calls);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(register_cxl_calls);
+
+void unregister_cxl_calls(struct cxl_calls *calls)
+{
+ BUG_ON(cxl_calls->owner != calls->owner);
+ RCU_INIT_POINTER(cxl_calls, NULL);
+ synchronize_rcu();
+}
+EXPORT_SYMBOL_GPL(unregister_cxl_calls);
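base.c is always built in (CONFIG_CXL_BASE) so that core powerpc code can call cxl_slbia() even when the main driver is a module; the module side is expected to hand in its callbacks via register_cxl_calls() at init time. A sketch of what that provider side presumably looks like — the registration API and the struct fields come from this file and cxl.h, while the callback body and names are illustrative:
/*
 * Sketch of the provider side of the cxl_calls registration above.
 */
#include <linux/module.h>
#include <linux/mm_types.h>
#include "cxl.h"

static void demo_cxl_slbia(struct mm_struct *mm)
{
	/* Invalidate AFU SLB entries for this mm; details live in the
	 * main driver, not here. */
}

static struct cxl_calls demo_calls = {
	.cxl_slbia	= demo_cxl_slbia,
	.owner		= THIS_MODULE,
};

static int __init demo_init(void)
{
	return register_cxl_calls(&demo_calls);
}

static void __exit demo_exit(void)
{
	unregister_cxl_calls(&demo_calls);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");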
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
new file mode 100644
index 000000000000..cca472109135
--- /dev/null
+++ b/drivers/misc/cxl/context.c
@@ -0,0 +1,193 @@
+/*
+ * Copyright 2014 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/bitmap.h>
+#include <linux/sched.h>
+#include <linux/pid.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/idr.h>
+#include <asm/cputable.h>
+#include <asm/current.h>
+#include <asm/copro.h>
+
+#include "cxl.h"
+
+/*
+ * Allocates space for a CXL context.
+ */
+struct cxl_context *cxl_context_alloc(void)
+{
+ return kzalloc(sizeof(struct cxl_context), GFP_KERNEL);
+}
+
+/*
+ * Initialises a CXL context.
+ */
+int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
+{
+ int i;
+
+ spin_lock_init(&ctx->sste_lock);
+ ctx->afu = afu;
+ ctx->master = master;
+ ctx->pid = NULL; /* Set in start work ioctl */
+
+ /*
+ * Allocate the segment table before we put the context in the IDR so
+ * that it can always be accessed when dereferenced from the IDR. For
+ * the same reason, the segment table is only destroyed after the
+ * context is removed from the IDR. Access to this in the ioctl path is
+ * protected by Linux filesystem semantics (can't ioctl until open is
+ * complete).
+ */
+ i = cxl_alloc_sst(ctx);
+ if (i)
+ return i;
+
+ INIT_WORK(&ctx->fault_work, cxl_handle_fault);
+
+ init_waitqueue_head(&ctx->wq);
+ spin_lock_init(&ctx->lock);
+
+ ctx->irq_bitmap = NULL;
+ ctx->pending_irq = false;
+ ctx->pending_fault = false;
+ ctx->pending_afu_err = false;
+
+ /*
+ * When we have to destroy all contexts in cxl_context_detach_all() we
+ * end up with afu_release_irqs() called from inside an
+ * idr_for_each_entry(). Hence we need to make sure that anything
+ * dereferenced from this IDR is ok before we allocate the IDR here.
+ * This clears out the IRQ ranges to ensure this.
+ */
+ for (i = 0; i < CXL_IRQ_RANGES; i++)
+ ctx->irqs.range[i] = 0;
+
+ mutex_init(&ctx->status_mutex);
+
+ ctx->status = OPENED;
+
+ /*
+ * Allocating the IDR! We had better make sure everything that is
+ * dereferenced from it is set up.
+ */
+ idr_preload(GFP_KERNEL);
+ spin_lock(&afu->contexts_lock);
+ i = idr_alloc(&ctx->afu->contexts_idr, ctx, 0,
+ ctx->afu->num_procs, GFP_NOWAIT);
+ spin_unlock(&afu->contexts_lock);
+ idr_preload_end();
+ if (i < 0)
+ return i;
+
+ ctx->pe = i;
+ ctx->elem = &ctx->afu->spa[i];
+ ctx->pe_inserted = false;
+ return 0;
+}
+
+/*
+ * Map a per-context mmio space into the given vma.
+ */
+int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
+{
+ u64 len = vma->vm_end - vma->vm_start;
+ len = min(len, ctx->psn_size);
+
+ if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ return vm_iomap_memory(vma, ctx->afu->psn_phys, ctx->afu->adapter->ps_size);
+ }
+
+ /* make sure there is a valid per process space for this AFU */
+ if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
+ pr_devel("AFU doesn't support mmio space\n");
+ return -EINVAL;
+ }
+
+ /* Can't mmap until the AFU is enabled */
+ if (!ctx->afu->enabled)
+ return -EBUSY;
+
+ pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
+ ctx->psn_phys, ctx->pe, ctx->master);
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ return vm_iomap_memory(vma, ctx->psn_phys, len);
+}
+
+/*
+ * Detach a context from the hardware. This disables interrupts and doesn't
+ * return until all outstanding interrupts for this context have completed. The
+ * hardware should no longer access *ctx after this has returned.
+ */
+static void __detach_context(struct cxl_context *ctx)
+{
+ enum cxl_context_status status;
+
+ mutex_lock(&ctx->status_mutex);
+ status = ctx->status;
+ ctx->status = CLOSED;
+ mutex_unlock(&ctx->status_mutex);
+ if (status != STARTED)
+ return;
+
+ WARN_ON(cxl_detach_process(ctx));
+ afu_release_irqs(ctx);
+ flush_work(&ctx->fault_work); /* Only needed for dedicated process */
+ wake_up_all(&ctx->wq);
+}
+
+/*
+ * Detach the given context from the AFU. This doesn't actually
+ * free the context but it should stop the context running in hardware
+ * (ie. prevent this context from generating any further interrupts
+ * so that it can be freed).
+ */
+void cxl_context_detach(struct cxl_context *ctx)
+{
+ __detach_context(ctx);
+}
+
+/*
+ * Detach all contexts on the given AFU.
+ */
+void cxl_context_detach_all(struct cxl_afu *afu)
+{
+ struct cxl_context *ctx;
+ int tmp;
+
+ rcu_read_lock();
+ idr_for_each_entry(&afu->contexts_idr, ctx, tmp)
+ /*
+ * Anything done in here needs to be set up before the IDR is
+ * created and torn down after the IDR is removed
+ */
+ __detach_context(ctx);
+ rcu_read_unlock();
+}
+
+void cxl_context_free(struct cxl_context *ctx)
+{
+ spin_lock(&ctx->afu->contexts_lock);
+ idr_remove(&ctx->afu->contexts_idr, ctx->pe);
+ spin_unlock(&ctx->afu->contexts_lock);
+ synchronize_rcu();
+
+ free_page((u64)ctx->sstp);
+ ctx->sstp = NULL;
+
+ put_pid(ctx->pid);
+ kfree(ctx);
+}
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
new file mode 100644
index 000000000000..3d2b8677ec8a
--- /dev/null
+++ b/drivers/misc/cxl/cxl.h
@@ -0,0 +1,629 @@
+/*
+ * Copyright 2014 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _CXL_H_
+#define _CXL_H_
+
+#include <linux/interrupt.h>
+#include <linux/semaphore.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include <linux/pid.h>
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <asm/cputable.h>
+#include <asm/mmu.h>
+#include <asm/reg.h>
+#include <misc/cxl.h>
+
+#include <uapi/misc/cxl.h>
+
+extern uint cxl_verbose;
+
+#define CXL_TIMEOUT 5
+
+/*
+ * Bump version each time a user API change is made, whether it is
+ * backwards compatible or not.
+ */
+#define CXL_API_VERSION 1
+#define CXL_API_VERSION_COMPATIBLE 1
+
+/*
+ * Opaque types to avoid accidentally passing registers for the wrong MMIO
+ *
+ * At the end of the day, I'm not married to using typedef here, but it might
+ * (and has!) help avoid bugs like mixing up CXL_PSL_CtxTime and
+ * CXL_PSL_CtxTime_An, or calling cxl_p1n_write instead of cxl_p1_write.
+ *
+ * I'm quite happy if these are changed back to #defines before upstreaming, it
+ * should be little more than a regexp search+replace operation in this file.
+ */
+typedef struct {
+ const int x;
+} cxl_p1_reg_t;
+typedef struct {
+ const int x;
+} cxl_p1n_reg_t;
+typedef struct {
+ const int x;
+} cxl_p2n_reg_t;
+#define cxl_reg_off(reg) \
+ (reg.x)
+
+/* Memory maps. Ref CXL Appendix A */
+
+/* PSL Privilege 1 Memory Map */
+/* Configuration and Control area */
+static const cxl_p1_reg_t CXL_PSL_CtxTime = {0x0000};
+static const cxl_p1_reg_t CXL_PSL_ErrIVTE = {0x0008};
+static const cxl_p1_reg_t CXL_PSL_KEY1 = {0x0010};
+static const cxl_p1_reg_t CXL_PSL_KEY2 = {0x0018};
+static const cxl_p1_reg_t CXL_PSL_Control = {0x0020};
+/* Downloading */
+static const cxl_p1_reg_t CXL_PSL_DLCNTL = {0x0060};
+static const cxl_p1_reg_t CXL_PSL_DLADDR = {0x0068};
+
+/* PSL Lookaside Buffer Management Area */
+static const cxl_p1_reg_t CXL_PSL_LBISEL = {0x0080};
+static const cxl_p1_reg_t CXL_PSL_SLBIE = {0x0088};
+static const cxl_p1_reg_t CXL_PSL_SLBIA = {0x0090};
+static const cxl_p1_reg_t CXL_PSL_TLBIE = {0x00A0};
+static const cxl_p1_reg_t CXL_PSL_TLBIA = {0x00A8};
+static const cxl_p1_reg_t CXL_PSL_AFUSEL = {0x00B0};
+
+/* 0x00C0:7EFF Implementation dependent area */
+static const cxl_p1_reg_t CXL_PSL_FIR1 = {0x0100};
+static const cxl_p1_reg_t CXL_PSL_FIR2 = {0x0108};
+static const cxl_p1_reg_t CXL_PSL_VERSION = {0x0118};
+static const cxl_p1_reg_t CXL_PSL_RESLCKTO = {0x0128};
+static const cxl_p1_reg_t CXL_PSL_FIR_CNTL = {0x0148};
+static const cxl_p1_reg_t CXL_PSL_DSNDCTL = {0x0150};
+static const cxl_p1_reg_t CXL_PSL_SNWRALLOC = {0x0158};
+static const cxl_p1_reg_t CXL_PSL_TRACE = {0x0170};
+/* 0x7F00:7FFF Reserved PCIe MSI-X Pending Bit Array area */
+/* 0x8000:FFFF Reserved PCIe MSI-X Table Area */
+
+/* PSL Slice Privilege 1 Memory Map */
+/* Configuration Area */
+static const cxl_p1n_reg_t CXL_PSL_SR_An = {0x00};
+static const cxl_p1n_reg_t CXL_PSL_LPID_An = {0x08};
+static const cxl_p1n_reg_t CXL_PSL_AMBAR_An = {0x10};
+static const cxl_p1n_reg_t CXL_PSL_SPOffset_An = {0x18};
+static const cxl_p1n_reg_t CXL_PSL_ID_An = {0x20};
+static const cxl_p1n_reg_t CXL_PSL_SERR_An = {0x28};
+/* Memory Management and Lookaside Buffer Management */
+static const cxl_p1n_reg_t CXL_PSL_SDR_An = {0x30};
+static const cxl_p1n_reg_t CXL_PSL_AMOR_An = {0x38};
+/* Pointer Area */
+static const cxl_p1n_reg_t CXL_HAURP_An = {0x80};
+static const cxl_p1n_reg_t CXL_PSL_SPAP_An = {0x88};
+static const cxl_p1n_reg_t CXL_PSL_LLCMD_An = {0x90};
+/* Control Area */
+static const cxl_p1n_reg_t CXL_PSL_SCNTL_An = {0xA0};
+static const cxl_p1n_reg_t CXL_PSL_CtxTime_An = {0xA8};
+static const cxl_p1n_reg_t CXL_PSL_IVTE_Offset_An = {0xB0};
+static const cxl_p1n_reg_t CXL_PSL_IVTE_Limit_An = {0xB8};
+/* 0xC0:FF Implementation Dependent Area */
+static const cxl_p1n_reg_t CXL_PSL_FIR_SLICE_An = {0xC0};
+static const cxl_p1n_reg_t CXL_AFU_DEBUG_An = {0xC8};
+static const cxl_p1n_reg_t CXL_PSL_APCALLOC_A = {0xD0};
+static const cxl_p1n_reg_t CXL_PSL_COALLOC_A = {0xD8};
+static const cxl_p1n_reg_t CXL_PSL_RXCTL_A = {0xE0};
+static const cxl_p1n_reg_t CXL_PSL_SLICE_TRACE = {0xE8};
+
+/* PSL Slice Privilege 2 Memory Map */
+/* Configuration and Control Area */
+static const cxl_p2n_reg_t CXL_PSL_PID_TID_An = {0x000};
+static const cxl_p2n_reg_t CXL_CSRP_An = {0x008};
+static const cxl_p2n_reg_t CXL_AURP0_An = {0x010};
+static const cxl_p2n_reg_t CXL_AURP1_An = {0x018};
+static const cxl_p2n_reg_t CXL_SSTP0_An = {0x020};
+static const cxl_p2n_reg_t CXL_SSTP1_An = {0x028};
+static const cxl_p2n_reg_t CXL_PSL_AMR_An = {0x030};
+/* Segment Lookaside Buffer Management */
+static const cxl_p2n_reg_t CXL_SLBIE_An = {0x040};
+static const cxl_p2n_reg_t CXL_SLBIA_An = {0x048};
+static const cxl_p2n_reg_t CXL_SLBI_Select_An = {0x050};
+/* Interrupt Registers */
+static const cxl_p2n_reg_t CXL_PSL_DSISR_An = {0x060};
+static const cxl_p2n_reg_t CXL_PSL_DAR_An = {0x068};
+static const cxl_p2n_reg_t CXL_PSL_DSR_An = {0x070};
+static const cxl_p2n_reg_t CXL_PSL_TFC_An = {0x078};
+static const cxl_p2n_reg_t CXL_PSL_PEHandle_An = {0x080};
+static const cxl_p2n_reg_t CXL_PSL_ErrStat_An = {0x088};
+/* AFU Registers */
+static const cxl_p2n_reg_t CXL_AFU_Cntl_An = {0x090};
+static const cxl_p2n_reg_t CXL_AFU_ERR_An = {0x098};
+/* Work Element Descriptor */
+static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0};
+/* 0x0C0:FFF Implementation Dependent Area */
+
+#define CXL_PSL_SPAP_Addr 0x0ffffffffffff000ULL
+#define CXL_PSL_SPAP_Size 0x0000000000000ff0ULL
+#define CXL_PSL_SPAP_Size_Shift 4
+#define CXL_PSL_SPAP_V 0x0000000000000001ULL
+
+/****** CXL_PSL_DLCNTL *****************************************************/
+#define CXL_PSL_DLCNTL_D (0x1ull << (63-28))
+#define CXL_PSL_DLCNTL_C (0x1ull << (63-29))
+#define CXL_PSL_DLCNTL_E (0x1ull << (63-30))
+#define CXL_PSL_DLCNTL_S (0x1ull << (63-31))
+#define CXL_PSL_DLCNTL_CE (CXL_PSL_DLCNTL_C | CXL_PSL_DLCNTL_E)
+#define CXL_PSL_DLCNTL_DCES (CXL_PSL_DLCNTL_D | CXL_PSL_DLCNTL_CE | CXL_PSL_DLCNTL_S)
+
+/****** CXL_PSL_SR_An ******************************************************/
+#define CXL_PSL_SR_An_SF MSR_SF /* 64bit */
+#define CXL_PSL_SR_An_TA (1ull << (63-1)) /* Tags active, GA1: 0 */
+#define CXL_PSL_SR_An_HV MSR_HV /* Hypervisor, GA1: 0 */
+#define CXL_PSL_SR_An_PR MSR_PR /* Problem state, GA1: 1 */
+#define CXL_PSL_SR_An_ISL (1ull << (63-53)) /* Ignore Segment Large Page */
+#define CXL_PSL_SR_An_TC (1ull << (63-54)) /* Page Table secondary hash */
+#define CXL_PSL_SR_An_US (1ull << (63-56)) /* User state, GA1: X */
+#define CXL_PSL_SR_An_SC (1ull << (63-58)) /* Segment Table secondary hash */
+#define CXL_PSL_SR_An_R MSR_DR /* Relocate, GA1: 1 */
+#define CXL_PSL_SR_An_MP (1ull << (63-62)) /* Master Process */
+#define CXL_PSL_SR_An_LE (1ull << (63-63)) /* Little Endian */
+
+/****** CXL_PSL_LLCMD_An ****************************************************/
+#define CXL_LLCMD_TERMINATE 0x0001000000000000ULL
+#define CXL_LLCMD_REMOVE 0x0002000000000000ULL
+#define CXL_LLCMD_SUSPEND 0x0003000000000000ULL
+#define CXL_LLCMD_RESUME 0x0004000000000000ULL
+#define CXL_LLCMD_ADD 0x0005000000000000ULL
+#define CXL_LLCMD_UPDATE 0x0006000000000000ULL
+#define CXL_LLCMD_HANDLE_MASK 0x000000000000ffffULL
+
+/****** CXL_PSL_ID_An ****************************************************/
+#define CXL_PSL_ID_An_F (1ull << (63-31))
+#define CXL_PSL_ID_An_L (1ull << (63-30))
+
+/****** CXL_PSL_SCNTL_An ****************************************************/
+#define CXL_PSL_SCNTL_An_CR (0x1ull << (63-15))
+/* Programming Modes: */
+#define CXL_PSL_SCNTL_An_PM_MASK (0xffffull << (63-31))
+#define CXL_PSL_SCNTL_An_PM_Shared (0x0000ull << (63-31))
+#define CXL_PSL_SCNTL_An_PM_OS (0x0001ull << (63-31))
+#define CXL_PSL_SCNTL_An_PM_Process (0x0002ull << (63-31))
+#define CXL_PSL_SCNTL_An_PM_AFU (0x0004ull << (63-31))
+#define CXL_PSL_SCNTL_An_PM_AFU_PBT (0x0104ull << (63-31))
+/* Purge Status (ro) */
+#define CXL_PSL_SCNTL_An_Ps_MASK (0x3ull << (63-39))
+#define CXL_PSL_SCNTL_An_Ps_Pending (0x1ull << (63-39))
+#define CXL_PSL_SCNTL_An_Ps_Complete (0x3ull << (63-39))
+/* Purge */
+#define CXL_PSL_SCNTL_An_Pc (0x1ull << (63-48))
+/* Suspend Status (ro) */
+#define CXL_PSL_SCNTL_An_Ss_MASK (0x3ull << (63-55))
+#define CXL_PSL_SCNTL_An_Ss_Pending (0x1ull << (63-55))
+#define CXL_PSL_SCNTL_An_Ss_Complete (0x3ull << (63-55))
+/* Suspend Control */
+#define CXL_PSL_SCNTL_An_Sc (0x1ull << (63-63))
+
+/* AFU Slice Enable Status (ro) */
+#define CXL_AFU_Cntl_An_ES_MASK (0x7ull << (63-2))
+#define CXL_AFU_Cntl_An_ES_Disabled (0x0ull << (63-2))
+#define CXL_AFU_Cntl_An_ES_Enabled (0x4ull << (63-2))
+/* AFU Slice Enable */
+#define CXL_AFU_Cntl_An_E (0x1ull << (63-3))
+/* AFU Slice Reset status (ro) */
+#define CXL_AFU_Cntl_An_RS_MASK (0x3ull << (63-5))
+#define CXL_AFU_Cntl_An_RS_Pending (0x1ull << (63-5))
+#define CXL_AFU_Cntl_An_RS_Complete (0x2ull << (63-5))
+/* AFU Slice Reset */
+#define CXL_AFU_Cntl_An_RA (0x1ull << (63-7))
+
+/****** CXL_SSTP0/1_An ******************************************************/
+/* These top bits are for the segment that CONTAINS the segment table */
+#define CXL_SSTP0_An_B_SHIFT SLB_VSID_SSIZE_SHIFT
+#define CXL_SSTP0_An_KS (1ull << (63-2))
+#define CXL_SSTP0_An_KP (1ull << (63-3))
+#define CXL_SSTP0_An_N (1ull << (63-4))
+#define CXL_SSTP0_An_L (1ull << (63-5))
+#define CXL_SSTP0_An_C (1ull << (63-6))
+#define CXL_SSTP0_An_TA (1ull << (63-7))
+#define CXL_SSTP0_An_LP_SHIFT (63-9) /* 2 Bits */
+/* And finally, the virtual address & size of the segment table: */
+#define CXL_SSTP0_An_SegTableSize_SHIFT (63-31) /* 12 Bits */
+#define CXL_SSTP0_An_SegTableSize_MASK \
+ (((1ull << 12) - 1) << CXL_SSTP0_An_SegTableSize_SHIFT)
+#define CXL_SSTP0_An_STVA_U_MASK ((1ull << (63-49))-1)
+#define CXL_SSTP1_An_STVA_L_MASK (~((1ull << (63-55))-1))
+#define CXL_SSTP1_An_V (1ull << (63-63))
+
+/****** CXL_PSL_SLBIE_[An] **************************************************/
+/* write: */
+#define CXL_SLBIE_C PPC_BIT(36) /* Class */
+#define CXL_SLBIE_SS PPC_BITMASK(37, 38) /* Segment Size */
+#define CXL_SLBIE_SS_SHIFT PPC_BITLSHIFT(38)
+#define CXL_SLBIE_TA PPC_BIT(38) /* Tags Active */
+/* read: */
+#define CXL_SLBIE_MAX PPC_BITMASK(24, 31)
+#define CXL_SLBIE_PENDING PPC_BITMASK(56, 63)
+
+/****** Common to all CXL_TLBIA/SLBIA_[An] **********************************/
+#define CXL_TLB_SLB_P (1ull) /* Pending (read) */
+
+/****** Common to all CXL_TLB/SLB_IA/IE_[An] registers **********************/
+#define CXL_TLB_SLB_IQ_ALL (0ull) /* Inv qualifier */
+#define CXL_TLB_SLB_IQ_LPID (1ull) /* Inv qualifier */
+#define CXL_TLB_SLB_IQ_LPIDPID (3ull) /* Inv qualifier */
+
+/****** CXL_PSL_AFUSEL ******************************************************/
+#define CXL_PSL_AFUSEL_A (1ull << (63-55)) /* Adapter wide invalidates affect all AFUs */
+
+/****** CXL_PSL_DSISR_An ****************************************************/
+#define CXL_PSL_DSISR_An_DS (1ull << (63-0)) /* Segment not found */
+#define CXL_PSL_DSISR_An_DM (1ull << (63-1)) /* PTE not found (See also: M) or protection fault */
+#define CXL_PSL_DSISR_An_ST (1ull << (63-2)) /* Segment Table PTE not found */
+#define CXL_PSL_DSISR_An_UR (1ull << (63-3)) /* AURP PTE not found */
+#define CXL_PSL_DSISR_TRANS (CXL_PSL_DSISR_An_DS | CXL_PSL_DSISR_An_DM | CXL_PSL_DSISR_An_ST | CXL_PSL_DSISR_An_UR)
+#define CXL_PSL_DSISR_An_PE (1ull << (63-4)) /* PSL Error (implementation specific) */
+#define CXL_PSL_DSISR_An_AE (1ull << (63-5)) /* AFU Error */
+#define CXL_PSL_DSISR_An_OC (1ull << (63-6)) /* OS Context Warning */
+/* NOTE: Bits 32:63 are undefined if DSISR[DS] = 1 */
+#define CXL_PSL_DSISR_An_M DSISR_NOHPTE /* PTE not found */
+#define CXL_PSL_DSISR_An_P DSISR_PROTFAULT /* Storage protection violation */
+#define CXL_PSL_DSISR_An_A (1ull << (63-37)) /* AFU lock access to write through or cache inhibited storage */
+#define CXL_PSL_DSISR_An_S DSISR_ISSTORE /* Access was afu_wr or afu_zero */
+#define CXL_PSL_DSISR_An_K DSISR_KEYFAULT /* Access not permitted by virtual page class key protection */
+
+/****** CXL_PSL_TFC_An ******************************************************/
+#define CXL_PSL_TFC_An_A (1ull << (63-28)) /* Acknowledge non-translation fault */
+#define CXL_PSL_TFC_An_C (1ull << (63-29)) /* Continue (abort transaction) */
+#define CXL_PSL_TFC_An_AE (1ull << (63-30)) /* Restart PSL with address error */
+#define CXL_PSL_TFC_An_R (1ull << (63-31)) /* Restart PSL transaction */
+
+/* cxl_process_element->software_status */
+#define CXL_PE_SOFTWARE_STATE_V (1ul << (31 - 0)) /* Valid */
+#define CXL_PE_SOFTWARE_STATE_C (1ul << (31 - 29)) /* Complete */
+#define CXL_PE_SOFTWARE_STATE_S (1ul << (31 - 30)) /* Suspend */
+#define CXL_PE_SOFTWARE_STATE_T (1ul << (31 - 31)) /* Terminate */
+
+/* SPA->sw_command_status */
+#define CXL_SPA_SW_CMD_MASK 0xffff000000000000ULL
+#define CXL_SPA_SW_CMD_TERMINATE 0x0001000000000000ULL
+#define CXL_SPA_SW_CMD_REMOVE 0x0002000000000000ULL
+#define CXL_SPA_SW_CMD_SUSPEND 0x0003000000000000ULL
+#define CXL_SPA_SW_CMD_RESUME 0x0004000000000000ULL
+#define CXL_SPA_SW_CMD_ADD 0x0005000000000000ULL
+#define CXL_SPA_SW_CMD_UPDATE 0x0006000000000000ULL
+#define CXL_SPA_SW_STATE_MASK 0x0000ffff00000000ULL
+#define CXL_SPA_SW_STATE_TERMINATED 0x0000000100000000ULL
+#define CXL_SPA_SW_STATE_REMOVED 0x0000000200000000ULL
+#define CXL_SPA_SW_STATE_SUSPENDED 0x0000000300000000ULL
+#define CXL_SPA_SW_STATE_RESUMED 0x0000000400000000ULL
+#define CXL_SPA_SW_STATE_ADDED 0x0000000500000000ULL
+#define CXL_SPA_SW_STATE_UPDATED 0x0000000600000000ULL
+#define CXL_SPA_SW_PSL_ID_MASK 0x00000000ffff0000ULL
+#define CXL_SPA_SW_LINK_MASK 0x000000000000ffffULL
+
+#define CXL_MAX_SLICES 4
+#define MAX_AFU_MMIO_REGS 3
+
+#define CXL_MODE_DEDICATED 0x1
+#define CXL_MODE_DIRECTED 0x2
+#define CXL_MODE_TIME_SLICED 0x4
+#define CXL_SUPPORTED_MODES (CXL_MODE_DEDICATED | CXL_MODE_DIRECTED)
+
+enum cxl_context_status {
+ CLOSED,
+ OPENED,
+ STARTED
+};
+
+enum prefault_modes {
+ CXL_PREFAULT_NONE,
+ CXL_PREFAULT_WED,
+ CXL_PREFAULT_ALL,
+};
+
+struct cxl_sste {
+ __be64 esid_data;
+ __be64 vsid_data;
+};
+
+#define to_cxl_adapter(d) container_of(d, struct cxl, dev)
+#define to_cxl_afu(d) container_of(d, struct cxl_afu, dev)
+
+struct cxl_afu {
+ irq_hw_number_t psl_hwirq;
+ irq_hw_number_t serr_hwirq;
+ unsigned int serr_virq;
+ void __iomem *p1n_mmio;
+ void __iomem *p2n_mmio;
+ phys_addr_t psn_phys;
+ u64 pp_offset;
+ u64 pp_size;
+ void __iomem *afu_desc_mmio;
+ struct cxl *adapter;
+ struct device dev;
+ struct cdev afu_cdev_s, afu_cdev_m, afu_cdev_d;
+ struct device *chardev_s, *chardev_m, *chardev_d;
+ struct idr contexts_idr;
+ struct dentry *debugfs;
+ spinlock_t contexts_lock;
+ struct mutex spa_mutex;
+ spinlock_t afu_cntl_lock;
+
+ /*
+ * Only the first part of the SPA is used for the process element
+ * linked list. The only other part that software needs to worry about
+ * is sw_command_status, which we store a separate pointer to.
+ * Everything else in the SPA is only used by hardware
+ */
+ struct cxl_process_element *spa;
+ __be64 *sw_command_status;
+ unsigned int spa_size;
+ int spa_order;
+ int spa_max_procs;
+ unsigned int psl_virq;
+
+ int pp_irqs;
+ int irqs_max;
+ int num_procs;
+ int max_procs_virtualised;
+ int slice;
+ int modes_supported;
+ int current_mode;
+ enum prefault_modes prefault_mode;
+ bool psa;
+ bool pp_psa;
+ bool enabled;
+};
+
+/*
+ * This is a cxl context. If the PSL is in dedicated mode, there will be one
+ * of these per AFU. If in AFU directed there can be lots of these.
+ */
+struct cxl_context {
+ struct cxl_afu *afu;
+
+ /* Problem state MMIO */
+ phys_addr_t psn_phys;
+ u64 psn_size;
+
+ spinlock_t sste_lock; /* Protects segment table entries */
+ struct cxl_sste *sstp;
+ u64 sstp0, sstp1;
+ unsigned int sst_size, sst_lru;
+
+ wait_queue_head_t wq;
+ struct pid *pid;
+ spinlock_t lock; /* Protects pending_irq_mask, pending_fault and fault_addr */
+ /* Only used in PR mode */
+ u64 process_token;
+
+ unsigned long *irq_bitmap; /* Accessed from IRQ context */
+ struct cxl_irq_ranges irqs;
+ u64 fault_addr;
+ u64 fault_dsisr;
+ u64 afu_err;
+
+ /*
+ * This status and its lock protect the start and detach context
+ * paths from racing. The lock also prevents detach from racing
+ * with itself.
+ */
+ enum cxl_context_status status;
+ struct mutex status_mutex;
+
+
+ /* XXX: Is it possible to need multiple work items at once? */
+ struct work_struct fault_work;
+ u64 dsisr;
+ u64 dar;
+
+ struct cxl_process_element *elem;
+
+ int pe; /* process element handle */
+ u32 irq_count;
+ bool pe_inserted;
+ bool master;
+ bool kernel;
+ bool pending_irq;
+ bool pending_fault;
+ bool pending_afu_err;
+};
+
+struct cxl {
+ void __iomem *p1_mmio;
+ void __iomem *p2_mmio;
+ irq_hw_number_t err_hwirq;
+ unsigned int err_virq;
+ spinlock_t afu_list_lock;
+ struct cxl_afu *afu[CXL_MAX_SLICES];
+ struct device dev;
+ struct dentry *trace;
+ struct dentry *psl_err_chk;
+ struct dentry *debugfs;
+ struct bin_attribute cxl_attr;
+ int adapter_num;
+ int user_irqs;
+ u64 afu_desc_off;
+ u64 afu_desc_size;
+ u64 ps_off;
+ u64 ps_size;
+ u16 psl_rev;
+ u16 base_image;
+ u8 vsec_status;
+ u8 caia_major;
+ u8 caia_minor;
+ u8 slices;
+ bool user_image_loaded;
+ bool perst_loads_image;
+ bool perst_select_user;
+};
+
+int cxl_alloc_one_irq(struct cxl *adapter);
+void cxl_release_one_irq(struct cxl *adapter, int hwirq);
+int cxl_alloc_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter, unsigned int num);
+void cxl_release_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter);
+int cxl_setup_irq(struct cxl *adapter, unsigned int hwirq, unsigned int virq);
+
+/* common == phyp + powernv */
+struct cxl_process_element_common {
+ __be32 tid;
+ __be32 pid;
+ __be64 csrp;
+ __be64 aurp0;
+ __be64 aurp1;
+ __be64 sstp0;
+ __be64 sstp1;
+ __be64 amr;
+ u8 reserved3[4];
+ __be64 wed;
+} __packed;
+
+/* just powernv */
+struct cxl_process_element {
+ __be64 sr;
+ __be64 SPOffset;
+ __be64 sdr;
+ __be64 haurp;
+ __be32 ctxtime;
+ __be16 ivte_offsets[4];
+ __be16 ivte_ranges[4];
+ __be32 lpid;
+ struct cxl_process_element_common common;
+ __be32 software_state;
+} __packed;
+
+static inline void __iomem *_cxl_p1_addr(struct cxl *cxl, cxl_p1_reg_t reg)
+{
+ WARN_ON(!cpu_has_feature(CPU_FTR_HVMODE));
+ return cxl->p1_mmio + cxl_reg_off(reg);
+}
+
+#define cxl_p1_write(cxl, reg, val) \
+ out_be64(_cxl_p1_addr(cxl, reg), val)
+#define cxl_p1_read(cxl, reg) \
+ in_be64(_cxl_p1_addr(cxl, reg))
+
+static inline void __iomem *_cxl_p1n_addr(struct cxl_afu *afu, cxl_p1n_reg_t reg)
+{
+ WARN_ON(!cpu_has_feature(CPU_FTR_HVMODE));
+ return afu->p1n_mmio + cxl_reg_off(reg);
+}
+
+#define cxl_p1n_write(afu, reg, val) \
+ out_be64(_cxl_p1n_addr(afu, reg), val)
+#define cxl_p1n_read(afu, reg) \
+ in_be64(_cxl_p1n_addr(afu, reg))
+
+static inline void __iomem *_cxl_p2n_addr(struct cxl_afu *afu, cxl_p2n_reg_t reg)
+{
+ return afu->p2n_mmio + cxl_reg_off(reg);
+}
+
+#define cxl_p2n_write(afu, reg, val) \
+ out_be64(_cxl_p2n_addr(afu, reg), val)
+#define cxl_p2n_read(afu, reg) \
+ in_be64(_cxl_p2n_addr(afu, reg))
+
+struct cxl_calls {
+ void (*cxl_slbia)(struct mm_struct *mm);
+ struct module *owner;
+};
+int register_cxl_calls(struct cxl_calls *calls);
+void unregister_cxl_calls(struct cxl_calls *calls);
+
+int cxl_alloc_adapter_nr(struct cxl *adapter);
+void cxl_remove_adapter_nr(struct cxl *adapter);
+
+int cxl_file_init(void);
+void cxl_file_exit(void);
+int cxl_register_adapter(struct cxl *adapter);
+int cxl_register_afu(struct cxl_afu *afu);
+int cxl_chardev_d_afu_add(struct cxl_afu *afu);
+int cxl_chardev_m_afu_add(struct cxl_afu *afu);
+int cxl_chardev_s_afu_add(struct cxl_afu *afu);
+void cxl_chardev_afu_remove(struct cxl_afu *afu);
+
+void cxl_context_detach_all(struct cxl_afu *afu);
+void cxl_context_free(struct cxl_context *ctx);
+void cxl_context_detach(struct cxl_context *ctx);
+
+int cxl_sysfs_adapter_add(struct cxl *adapter);
+void cxl_sysfs_adapter_remove(struct cxl *adapter);
+int cxl_sysfs_afu_add(struct cxl_afu *afu);
+void cxl_sysfs_afu_remove(struct cxl_afu *afu);
+int cxl_sysfs_afu_m_add(struct cxl_afu *afu);
+void cxl_sysfs_afu_m_remove(struct cxl_afu *afu);
+
+int cxl_afu_activate_mode(struct cxl_afu *afu, int mode);
+int _cxl_afu_deactivate_mode(struct cxl_afu *afu, int mode);
+int cxl_afu_deactivate_mode(struct cxl_afu *afu);
+int cxl_afu_select_best_mode(struct cxl_afu *afu);
+
+unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
+ irq_handler_t handler, void *cookie);
+void cxl_unmap_irq(unsigned int virq, void *cookie);
+int cxl_register_psl_irq(struct cxl_afu *afu);
+void cxl_release_psl_irq(struct cxl_afu *afu);
+int cxl_register_psl_err_irq(struct cxl *adapter);
+void cxl_release_psl_err_irq(struct cxl *adapter);
+int cxl_register_serr_irq(struct cxl_afu *afu);
+void cxl_release_serr_irq(struct cxl_afu *afu);
+int afu_register_irqs(struct cxl_context *ctx, u32 count);
+void afu_release_irqs(struct cxl_context *ctx);
+irqreturn_t cxl_slice_irq_err(int irq, void *data);
+
+int cxl_debugfs_init(void);
+void cxl_debugfs_exit(void);
+int cxl_debugfs_adapter_add(struct cxl *adapter);
+void cxl_debugfs_adapter_remove(struct cxl *adapter);
+int cxl_debugfs_afu_add(struct cxl_afu *afu);
+void cxl_debugfs_afu_remove(struct cxl_afu *afu);
+
+void cxl_handle_fault(struct work_struct *work);
+void cxl_prefault(struct cxl_context *ctx, u64 wed);
+
+struct cxl *get_cxl_adapter(int num);
+int cxl_alloc_sst(struct cxl_context *ctx);
+
+void init_cxl_native(void);
+
+struct cxl_context *cxl_context_alloc(void);
+int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master);
+void cxl_context_free(struct cxl_context *ctx);
+int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma);
+
+/* This matches the layout of the H_COLLECT_CA_INT_INFO retbuf */
+struct cxl_irq_info {
+ u64 dsisr;
+ u64 dar;
+ u64 dsr;
+ u32 pid;
+ u32 tid;
+ u64 afu_err;
+ u64 errstat;
+ u64 padding[3]; /* to match the expected retbuf size for plpar_hcall9 */
+};
+
+int cxl_attach_process(struct cxl_context *ctx, bool kernel, u64 wed,
+ u64 amr);
+int cxl_detach_process(struct cxl_context *ctx);
+
+int cxl_get_irq(struct cxl_context *ctx, struct cxl_irq_info *info);
+int cxl_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask);
+
+int cxl_check_error(struct cxl_afu *afu);
+int cxl_afu_slbia(struct cxl_afu *afu);
+int cxl_tlb_slb_invalidate(struct cxl *adapter);
+int cxl_afu_disable(struct cxl_afu *afu);
+int cxl_afu_reset(struct cxl_afu *afu);
+int cxl_psl_purge(struct cxl_afu *afu);
+
+void cxl_stop_trace(struct cxl *cxl);
+
+extern struct pci_driver cxl_pci_driver;
+
+#endif
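The typedef'd register tokens near the top of this header exist purely so that mixing up the three MMIO spaces becomes a compile error, as the comment above them explains. A standalone illustration of the idea with cut-down types; nothing below is real CXL code:
/*
 * Standalone illustration of the "opaque register token" idea used in
 * cxl.h: each register space gets its own struct type, so passing a P1
 * register to a P2 accessor is a compile-time type error. The names and
 * offsets are made up for the demo.
 */
#include <stdio.h>

typedef struct { const int x; } demo_p1_reg_t;
typedef struct { const int x; } demo_p2_reg_t;

static const demo_p1_reg_t DEMO_P1_CTRL = {0x20};
static const demo_p2_reg_t DEMO_P2_WED  = {0xA0};

static unsigned long p1_offset(demo_p1_reg_t reg) { return reg.x; }
static unsigned long p2_offset(demo_p2_reg_t reg) { return reg.x; }

int main(void)
{
	printf("P1 ctrl offset: %#lx\n", p1_offset(DEMO_P1_CTRL));
	printf("P2 wed offset:  %#lx\n", p2_offset(DEMO_P2_WED));

	/* p1_offset(DEMO_P2_WED); would not compile: incompatible type */
	return 0;
}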
diff --git a/drivers/misc/cxl/debugfs.c b/drivers/misc/cxl/debugfs.c
new file mode 100644
index 000000000000..825c412580bc
--- /dev/null
+++ b/drivers/misc/cxl/debugfs.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright 2014 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "cxl.h"
+
+static struct dentry *cxl_debugfs;
+
+void cxl_stop_trace(struct cxl *adapter)
+{
+ int slice;
+
+ /* Stop the trace */
+ cxl_p1_write(adapter, CXL_PSL_TRACE, 0x8000000000000017LL);
+
+ /* Stop the slice traces */
+ spin_lock(&adapter->afu_list_lock);
+ for (slice = 0; slice < adapter->slices; slice++) {
+ if (adapter->afu[slice])
+ cxl_p1n_write(adapter->afu[slice], CXL_PSL_SLICE_TRACE, 0x8000000000000000LL);
+ }
+ spin_unlock(&adapter->afu_list_lock);
+}
+
+/* Helpers to export CXL mmaped IO registers via debugfs */
+static int debugfs_io_u64_get(void *data, u64 *val)
+{
+ *val = in_be64((u64 __iomem *)data);
+ return 0;
+}
+
+static int debugfs_io_u64_set(void *data, u64 val)
+{
+ out_be64((u64 __iomem *)data, val);
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(fops_io_x64, debugfs_io_u64_get, debugfs_io_u64_set, "0x%016llx\n");
+
+static struct dentry *debugfs_create_io_x64(const char *name, umode_t mode,
+ struct dentry *parent, u64 __iomem *value)
+{
+ return debugfs_create_file(name, mode, parent, (void *)value, &fops_io_x64);
+}
+
+int cxl_debugfs_adapter_add(struct cxl *adapter)
+{
+ struct dentry *dir;
+ char buf[32];
+
+ if (!cxl_debugfs)
+ return -ENODEV;
+
+ snprintf(buf, 32, "card%i", adapter->adapter_num);
+ dir = debugfs_create_dir(buf, cxl_debugfs);
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+ adapter->debugfs = dir;
+
+ debugfs_create_io_x64("fir1", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR1));
+ debugfs_create_io_x64("fir2", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR2));
+ debugfs_create_io_x64("fir_cntl", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR_CNTL));
+ debugfs_create_io_x64("err_ivte", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_ErrIVTE));
+
+ debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_TRACE));
+
+ return 0;
+}
+
+void cxl_debugfs_adapter_remove(struct cxl *adapter)
+{
+ debugfs_remove_recursive(adapter->debugfs);
+}
+
+int cxl_debugfs_afu_add(struct cxl_afu *afu)
+{
+ struct dentry *dir;
+ char buf[32];
+
+ if (!afu->adapter->debugfs)
+ return -ENODEV;
+
+ snprintf(buf, 32, "psl%i.%i", afu->adapter->adapter_num, afu->slice);
+ dir = debugfs_create_dir(buf, afu->adapter->debugfs);
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+ afu->debugfs = dir;
+
+ debugfs_create_io_x64("fir", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_FIR_SLICE_An));
+ debugfs_create_io_x64("serr", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SERR_An));
+ debugfs_create_io_x64("afu_debug", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_AFU_DEBUG_An));
+ debugfs_create_io_x64("sr", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SR_An));
+
+ debugfs_create_io_x64("dsisr", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_PSL_DSISR_An));
+ debugfs_create_io_x64("dar", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_PSL_DAR_An));
+ debugfs_create_io_x64("sstp0", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_SSTP0_An));
+ debugfs_create_io_x64("sstp1", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_SSTP1_An));
+ debugfs_create_io_x64("err_status", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_PSL_ErrStat_An));
+
+ debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SLICE_TRACE));
+
+ return 0;
+}
+
+void cxl_debugfs_afu_remove(struct cxl_afu *afu)
+{
+ debugfs_remove_recursive(afu->debugfs);
+}
+
+int __init cxl_debugfs_init(void)
+{
+ struct dentry *ent;
+ ent = debugfs_create_dir("cxl", NULL);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+ cxl_debugfs = ent;
+
+ return 0;
+}
+
+void cxl_debugfs_exit(void)
+{
+ debugfs_remove_recursive(cxl_debugfs);
+}
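The helpers above export selected PSL registers as hex-formatted debugfs files under directories named "card%i" and "psl%i.%i". A small userspace read sketch; the full path is an assumption pieced together from those names and the usual /sys/kernel/debug mount point:
/*
 * Small userspace sketch for reading one of the registers exported
 * above (the adapter-level FIR1 register of card 0).
 */
#include <stdio.h>

int main(void)
{
	char buf[32];
	FILE *f = fopen("/sys/kernel/debug/cxl/card0/fir1", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("PSL FIR1: %s", buf);	/* file contents are "0x%016llx\n" */
	fclose(f);
	return 0;
}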
diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c
new file mode 100644
index 000000000000..69506ebd4d07
--- /dev/null
+++ b/drivers/misc/cxl/fault.c
@@ -0,0 +1,291 @@
+/*
+ * Copyright 2014 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/pid.h>
+#include <linux/mm.h>
+#include <linux/moduleparam.h>
+
+#undef MODULE_PARAM_PREFIX
+#define MODULE_PARAM_PREFIX "cxl" "."
+#include <asm/current.h>
+#include <asm/copro.h>
+#include <asm/mmu.h>
+
+#include "cxl.h"
+
+static struct cxl_sste* find_free_sste(struct cxl_sste *primary_group,
+ bool sec_hash,
+ struct cxl_sste *secondary_group,
+ unsigned int *lru)
+{
+ unsigned int i, entry;
+ struct cxl_sste *sste, *group = primary_group;
+
+ for (i = 0; i < 2; i++) {
+ for (entry = 0; entry < 8; entry++) {
+ sste = group + entry;
+ if (!(be64_to_cpu(sste->esid_data) & SLB_ESID_V))
+ return sste;
+ }
+ if (!sec_hash)
+ break;
+ group = secondary_group;
+ }
+ /* Nothing free, select an entry to cast out */
+ if (sec_hash && (*lru & 0x8))
+ sste = secondary_group + (*lru & 0x7);
+ else
+ sste = primary_group + (*lru & 0x7);
+ *lru = (*lru + 1) & 0xf;
+
+ return sste;
+}
+
+static void cxl_load_segment(struct cxl_context *ctx, struct copro_slb *slb)
+{
+ /* mask is the group index, we search primary and secondary here. */
+ unsigned int mask = (ctx->sst_size >> 7)-1; /* SSTP0[SegTableSize] */
+ bool sec_hash = 1;
+ struct cxl_sste *sste;
+ unsigned int hash;
+ unsigned long flags;
+
+
+ sec_hash = !!(cxl_p1n_read(ctx->afu, CXL_PSL_SR_An) & CXL_PSL_SR_An_SC);
+
+ if (slb->vsid & SLB_VSID_B_1T)
+ hash = (slb->esid >> SID_SHIFT_1T) & mask;
+ else /* 256M */
+ hash = (slb->esid >> SID_SHIFT) & mask;
+
+ spin_lock_irqsave(&ctx->sste_lock, flags);
+ sste = find_free_sste(ctx->sstp + (hash << 3), sec_hash,
+ ctx->sstp + ((~hash & mask) << 3), &ctx->sst_lru);
+
+ pr_devel("CXL Populating SST[%li]: %#llx %#llx\n",
+ sste - ctx->sstp, slb->vsid, slb->esid);
+
+ sste->vsid_data = cpu_to_be64(slb->vsid);
+ sste->esid_data = cpu_to_be64(slb->esid);
+ spin_unlock_irqrestore(&ctx->sste_lock, flags);
+}
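+
+/*
+ * Worked example of the lookup above (sizes follow cxl_alloc_sst(), which
+ * allocates a PAGE_SIZE table): sst_size = 4096, so mask = (4096 >> 7) - 1 =
+ * 31, i.e. 32 groups of 8 SSTEs of 16 bytes each. For a 256M segment at EA
+ * 0x10001234, hash = (esid >> 28) & 31 = 1, so the primary group starts at
+ * sstp + (1 << 3) and the secondary group at sstp + ((~1 & 31) << 3). When no
+ * entry in either group is free, find_free_sste() casts out the entry picked
+ * by the low three bits of sst_lru (bit 3 choosing primary vs secondary) and
+ * advances sst_lru modulo 16.
+ */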
+
+static int cxl_fault_segment(struct cxl_context *ctx, struct mm_struct *mm,
+ u64 ea)
+{
+ struct copro_slb slb = {0,0};
+ int rc;
+
+ if (!(rc = copro_calculate_slb(mm, ea, &slb))) {
+ cxl_load_segment(ctx, &slb);
+ }
+
+ return rc;
+}
+
+static void cxl_ack_ae(struct cxl_context *ctx)
+{
+ unsigned long flags;
+
+ cxl_ack_irq(ctx, CXL_PSL_TFC_An_AE, 0);
+
+ spin_lock_irqsave(&ctx->lock, flags);
+ ctx->pending_fault = true;
+ ctx->fault_addr = ctx->dar;
+ ctx->fault_dsisr = ctx->dsisr;
+ spin_unlock_irqrestore(&ctx->lock, flags);
+
+ wake_up_all(&ctx->wq);
+}
+
+static int cxl_handle_segment_miss(struct cxl_context *ctx,
+ struct mm_struct *mm, u64 ea)
+{
+ int rc;
+
+ pr_devel("CXL interrupt: Segment fault pe: %i ea: %#llx\n", ctx->pe, ea);
+
+ if ((rc = cxl_fault_segment(ctx, mm, ea)))
+ cxl_ack_ae(ctx);
+ else {
+
+ mb(); /* Order seg table write to TFC MMIO write */
+ cxl_ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void cxl_handle_page_fault(struct cxl_context *ctx,
+ struct mm_struct *mm, u64 dsisr, u64 dar)
+{
+ unsigned flt = 0;
+ int result;
+ unsigned long access, flags;
+
+ if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) {
+ pr_devel("copro_handle_mm_fault failed: %#x\n", result);
+ return cxl_ack_ae(ctx);
+ }
+
+ /*
+ * update_mmu_cache() will not have loaded the hash since current->trap
+ * is not a 0x400 or 0x300, so just call hash_page_mm() here.
+ */
+ access = _PAGE_PRESENT;
+ if (dsisr & CXL_PSL_DSISR_An_S)
+ access |= _PAGE_RW;
+ if ((!ctx->kernel) || !(dar & (1ULL << 63)))
+ access |= _PAGE_USER;
+ local_irq_save(flags);
+ hash_page_mm(mm, dar, access, 0x300);
+ local_irq_restore(flags);
+
+ pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe);
+ cxl_ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
+}
+
+void cxl_handle_fault(struct work_struct *fault_work)
+{
+ struct cxl_context *ctx =
+ container_of(fault_work, struct cxl_context, fault_work);
+ u64 dsisr = ctx->dsisr;
+ u64 dar = ctx->dar;
+ struct task_struct *task;
+ struct mm_struct *mm;
+
+ if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr ||
+ cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An) != dar ||
+ cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) != ctx->pe) {
+ /* Most likely explanation is harmless - a dedicated process
+ * has detached and these were cleared by the PSL purge, but
+ * warn about it just in case */
+ dev_notice(&ctx->afu->dev, "cxl_handle_fault: Translation fault regs changed\n");
+ return;
+ }
+
+ pr_devel("CXL BOTTOM HALF handling fault for afu pe: %i. "
+ "DSISR: %#llx DAR: %#llx\n", ctx->pe, dsisr, dar);
+
+ if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
+ pr_devel("cxl_handle_fault unable to get task %i\n",
+ pid_nr(ctx->pid));
+ cxl_ack_ae(ctx);
+ return;
+ }
+ if (!(mm = get_task_mm(task))) {
+ pr_devel("cxl_handle_fault unable to get mm %i\n",
+ pid_nr(ctx->pid));
+ cxl_ack_ae(ctx);
+ goto out;
+ }
+
+ if (dsisr & CXL_PSL_DSISR_An_DS)
+ cxl_handle_segment_miss(ctx, mm, dar);
+ else if (dsisr & CXL_PSL_DSISR_An_DM)
+ cxl_handle_page_fault(ctx, mm, dsisr, dar);
+ else
+ WARN(1, "cxl_handle_fault has nothing to handle\n");
+
+ mmput(mm);
+out:
+ put_task_struct(task);
+}
+
+static void cxl_prefault_one(struct cxl_context *ctx, u64 ea)
+{
+ int rc;
+ struct task_struct *task;
+ struct mm_struct *mm;
+
+ if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
+ pr_devel("cxl_prefault_one unable to get task %i\n",
+ pid_nr(ctx->pid));
+ return;
+ }
+ if (!(mm = get_task_mm(task))) {
+ pr_devel("cxl_prefault_one unable to get mm %i\n",
+ pid_nr(ctx->pid));
+ put_task_struct(task);
+ return;
+ }
+
+ rc = cxl_fault_segment(ctx, mm, ea);
+
+ mmput(mm);
+ put_task_struct(task);
+}
+
+static u64 next_segment(u64 ea, u64 vsid)
+{
+ if (vsid & SLB_VSID_B_1T)
+ ea |= (1ULL << 40) - 1;
+ else
+ ea |= (1ULL << 28) - 1;
+
+ return ea + 1;
+}
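+
+/*
+ * Worked example: for a 256M segment, next_segment(0x12345678, vsid) fills in
+ * the low 28 bits (giving 0x1fffffff) and adds one, returning 0x20000000, the
+ * start of the following segment; for a 1T segment the step is to the next
+ * 2^40 boundary.
+ */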
+
+static void cxl_prefault_vma(struct cxl_context *ctx)
+{
+ u64 ea, last_esid = 0;
+ struct copro_slb slb;
+ struct vm_area_struct *vma;
+ int rc;
+ struct task_struct *task;
+ struct mm_struct *mm;
+
+ if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
+ pr_devel("cxl_prefault_vma unable to get task %i\n",
+ pid_nr(ctx->pid));
+ return;
+ }
+ if (!(mm = get_task_mm(task))) {
+ pr_devel("cxl_prefault_vm unable to get mm %i\n",
+ pid_nr(ctx->pid));
+ goto out1;
+ }
+
+ down_read(&mm->mmap_sem);
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ for (ea = vma->vm_start; ea < vma->vm_end;
+ ea = next_segment(ea, slb.vsid)) {
+ rc = copro_calculate_slb(mm, ea, &slb);
+ if (rc)
+ continue;
+
+ if (last_esid == slb.esid)
+ continue;
+
+ cxl_load_segment(ctx, &slb);
+ last_esid = slb.esid;
+ }
+ }
+ up_read(&mm->mmap_sem);
+
+ mmput(mm);
+out1:
+ put_task_struct(task);
+}
+
+void cxl_prefault(struct cxl_context *ctx, u64 wed)
+{
+ switch (ctx->afu->prefault_mode) {
+ case CXL_PREFAULT_WED:
+ cxl_prefault_one(ctx, wed);
+ break;
+ case CXL_PREFAULT_ALL:
+ cxl_prefault_vma(ctx);
+ break;
+ default:
+ break;
+ }
+}
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
new file mode 100644
index 000000000000..378b099e7c0b
--- /dev/null
+++ b/drivers/misc/cxl/file.c
@@ -0,0 +1,518 @@
+/*
+ * Copyright 2014 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/bitmap.h>
+#include <linux/sched.h>
+#include <linux/poll.h>
+#include <linux/pid.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/cputable.h>
+#include <asm/current.h>
+#include <asm/copro.h>
+
+#include "cxl.h"
+
+#define CXL_NUM_MINORS 256 /* Total to reserve */
+#define CXL_DEV_MINORS 13 /* 1 control + 4 AFUs * 3 (dedicated/master/shared) */
+
+#define CXL_CARD_MINOR(adapter) (adapter->adapter_num * CXL_DEV_MINORS)
+#define CXL_AFU_MINOR_D(afu) (CXL_CARD_MINOR(afu->adapter) + 1 + (3 * afu->slice))
+#define CXL_AFU_MINOR_M(afu) (CXL_AFU_MINOR_D(afu) + 1)
+#define CXL_AFU_MINOR_S(afu) (CXL_AFU_MINOR_D(afu) + 2)
+#define CXL_AFU_MKDEV_D(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_D(afu))
+#define CXL_AFU_MKDEV_M(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_M(afu))
+#define CXL_AFU_MKDEV_S(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_S(afu))
+
+#define CXL_DEVT_ADAPTER(dev) (MINOR(dev) / CXL_DEV_MINORS)
+#define CXL_DEVT_AFU(dev) ((MINOR(dev) % CXL_DEV_MINORS - 1) / 3)
+
+#define CXL_DEVT_IS_CARD(dev) (MINOR(dev) % CXL_DEV_MINORS == 0)
+
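+/*
+ * Worked example of the numbering above: adapter 1, AFU slice 2 gets card
+ * minor 1 * 13 = 13, dedicated minor 13 + 1 + 3 * 2 = 20, master minor 21
+ * and shared minor 22. In reverse, minor 22 gives CXL_DEVT_ADAPTER = 22 / 13
+ * = 1 and CXL_DEVT_AFU = (22 % 13 - 1) / 3 = 2.
+ */
+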
+static dev_t cxl_dev;
+
+static struct class *cxl_class;
+
+static int __afu_open(struct inode *inode, struct file *file, bool master)
+{
+ struct cxl *adapter;
+ struct cxl_afu *afu;
+ struct cxl_context *ctx;
+ int adapter_num = CXL_DEVT_ADAPTER(inode->i_rdev);
+ int slice = CXL_DEVT_AFU(inode->i_rdev);
+ int rc = -ENODEV;
+
+ pr_devel("afu_open afu%i.%i\n", slice, adapter_num);
+
+ if (!(adapter = get_cxl_adapter(adapter_num)))
+ return -ENODEV;
+
+ if (slice > adapter->slices)
+ goto err_put_adapter;
+
+ spin_lock(&adapter->afu_list_lock);
+ if (!(afu = adapter->afu[slice])) {
+ spin_unlock(&adapter->afu_list_lock);
+ goto err_put_adapter;
+ }
+ get_device(&afu->dev);
+ spin_unlock(&adapter->afu_list_lock);
+
+ if (!afu->current_mode)
+ goto err_put_afu;
+
+ if (!(ctx = cxl_context_alloc())) {
+ rc = -ENOMEM;
+ goto err_put_afu;
+ }
+
+ if ((rc = cxl_context_init(ctx, afu, master)))
+ goto err_put_afu;
+
+ pr_devel("afu_open pe: %i\n", ctx->pe);
+ file->private_data = ctx;
+ cxl_ctx_get();
+
+ /* Our ref on the AFU will now hold the adapter */
+ put_device(&adapter->dev);
+
+ return 0;
+
+err_put_afu:
+ put_device(&afu->dev);
+err_put_adapter:
+ put_device(&adapter->dev);
+ return rc;
+}
+
+static int afu_open(struct inode *inode, struct file *file)
+{
+ return __afu_open(inode, file, false);
+}
+
+static int afu_master_open(struct inode *inode, struct file *file)
+{
+ return __afu_open(inode, file, true);
+}
+
+static int afu_release(struct inode *inode, struct file *file)
+{
+ struct cxl_context *ctx = file->private_data;
+
+ pr_devel("%s: closing cxl file descriptor. pe: %i\n",
+ __func__, ctx->pe);
+ cxl_context_detach(ctx);
+
+ put_device(&ctx->afu->dev);
+
+ /*
+ * At this point all bottom halves have finished and we should be
+ * getting no more IRQs from the hardware for this context. Once it's
+ * removed from the IDR (and RCU synchronised) it's safe to free the
+ * sstp and context.
+ */
+ cxl_context_free(ctx);
+
+ cxl_ctx_put();
+ return 0;
+}
+
+static long afu_ioctl_start_work(struct cxl_context *ctx,
+ struct cxl_ioctl_start_work __user *uwork)
+{
+ struct cxl_ioctl_start_work work;
+ u64 amr = 0;
+ int rc;
+
+ pr_devel("%s: pe: %i\n", __func__, ctx->pe);
+
+ mutex_lock(&ctx->status_mutex);
+ if (ctx->status != OPENED) {
+ rc = -EIO;
+ goto out;
+ }
+
+ if (copy_from_user(&work, uwork,
+ sizeof(struct cxl_ioctl_start_work))) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ /*
+ * if any of the reserved fields are set or any of the unused
+ * flags are set it's invalid
+ */
+ if (work.reserved1 || work.reserved2 || work.reserved3 ||
+ work.reserved4 || work.reserved5 || work.reserved6 ||
+ (work.flags & ~CXL_START_WORK_ALL)) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (!(work.flags & CXL_START_WORK_NUM_IRQS))
+ work.num_interrupts = ctx->afu->pp_irqs;
+ else if ((work.num_interrupts < ctx->afu->pp_irqs) ||
+ (work.num_interrupts > ctx->afu->irqs_max)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ if ((rc = afu_register_irqs(ctx, work.num_interrupts)))
+ goto out;
+
+ if (work.flags & CXL_START_WORK_AMR)
+ amr = work.amr & mfspr(SPRN_UAMOR);
+
+ /*
+ * We grab the PID here and not in the file open to allow for the case
+ * where a process (master, some daemon, etc) has opened the chardev on
+ * behalf of another process, so the AFU's mm gets bound to the process
+ * that performs this ioctl and not the process that opened the file.
+ */
+ ctx->pid = get_pid(get_task_pid(current, PIDTYPE_PID));
+
+ if ((rc = cxl_attach_process(ctx, false, work.work_element_descriptor,
+ amr)))
+ goto out;
+
+ ctx->status = STARTED;
+ rc = 0;
+out:
+ mutex_unlock(&ctx->status_mutex);
+ return rc;
+}
+
+static long afu_ioctl_process_element(struct cxl_context *ctx,
+ int __user *upe)
+{
+ pr_devel("%s: pe: %i\n", __func__, ctx->pe);
+
+ if (copy_to_user(upe, &ctx->pe, sizeof(__u32)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long afu_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct cxl_context *ctx = file->private_data;
+
+ if (ctx->status == CLOSED)
+ return -EIO;
+
+ pr_devel("afu_ioctl\n");
+ switch (cmd) {
+ case CXL_IOCTL_START_WORK:
+ return afu_ioctl_start_work(ctx, (struct cxl_ioctl_start_work __user *)arg);
+ case CXL_IOCTL_GET_PROCESS_ELEMENT:
+ return afu_ioctl_process_element(ctx, (__u32 __user *)arg);
+ }
+ return -EINVAL;
+}
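+
+/*
+ * Illustrative userspace sketch of the two ioctls dispatched above (not part
+ * of this patch; it assumes the cxl uapi definitions and the /dev/cxl/afuX.Y
+ * nodes created via cxl_devnode() below, and "wed" is a placeholder for an
+ * AFU-specific work element descriptor):
+ *
+ *	struct cxl_ioctl_start_work work = { 0 };
+ *	__u32 pe;
+ *	int fd = open("/dev/cxl/afu0.0s", O_RDWR);
+ *
+ *	work.work_element_descriptor = wed;
+ *	ioctl(fd, CXL_IOCTL_START_WORK, &work);
+ *	ioctl(fd, CXL_IOCTL_GET_PROCESS_ELEMENT, &pe);
+ */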
+
+static long afu_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return afu_ioctl(file, cmd, arg);
+}
+
+static int afu_mmap(struct file *file, struct vm_area_struct *vm)
+{
+ struct cxl_context *ctx = file->private_data;
+
+ /* AFU must be started before we can MMIO */
+ if (ctx->status != STARTED)
+ return -EIO;
+
+ return cxl_context_iomap(ctx, vm);
+}
+
+static unsigned int afu_poll(struct file *file, struct poll_table_struct *poll)
+{
+ struct cxl_context *ctx = file->private_data;
+ int mask = 0;
+ unsigned long flags;
+
+ poll_wait(file, &ctx->wq, poll);
+
+ pr_devel("afu_poll wait done pe: %i\n", ctx->pe);
+
+ spin_lock_irqsave(&ctx->lock, flags);
+ if (ctx->pending_irq || ctx->pending_fault ||
+ ctx->pending_afu_err)
+ mask |= POLLIN | POLLRDNORM;
+ else if (ctx->status == CLOSED)
+ /* Only error on closed when there are no further events pending */
+ mask |= POLLERR;
+ spin_unlock_irqrestore(&ctx->lock, flags);
+
+ pr_devel("afu_poll pe: %i returning %#x\n", ctx->pe, mask);
+
+ return mask;
+}
+
+static inline int ctx_event_pending(struct cxl_context *ctx)
+{
+ return (ctx->pending_irq || ctx->pending_fault ||
+ ctx->pending_afu_err || (ctx->status == CLOSED));
+}
+
+static ssize_t afu_read(struct file *file, char __user *buf, size_t count,
+ loff_t *off)
+{
+ struct cxl_context *ctx = file->private_data;
+ struct cxl_event event;
+ unsigned long flags;
+ int rc;
+ DEFINE_WAIT(wait);
+
+ if (count < CXL_READ_MIN_SIZE)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ctx->lock, flags);
+
+ for (;;) {
+ prepare_to_wait(&ctx->wq, &wait, TASK_INTERRUPTIBLE);
+ if (ctx_event_pending(ctx))
+ break;
+
+ if (file->f_flags & O_NONBLOCK) {
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ if (signal_pending(current)) {
+ rc = -ERESTARTSYS;
+ goto out;
+ }
+
+ spin_unlock_irqrestore(&ctx->lock, flags);
+ pr_devel("afu_read going to sleep...\n");
+ schedule();
+ pr_devel("afu_read woken up\n");
+ spin_lock_irqsave(&ctx->lock, flags);
+ }
+
+ finish_wait(&ctx->wq, &wait);
+
+ memset(&event, 0, sizeof(event));
+ event.header.process_element = ctx->pe;
+ event.header.size = sizeof(struct cxl_event_header);
+ if (ctx->pending_irq) {
+ pr_devel("afu_read delivering AFU interrupt\n");
+ event.header.size += sizeof(struct cxl_event_afu_interrupt);
+ event.header.type = CXL_EVENT_AFU_INTERRUPT;
+ event.irq.irq = find_first_bit(ctx->irq_bitmap, ctx->irq_count) + 1;
+ clear_bit(event.irq.irq - 1, ctx->irq_bitmap);
+ if (bitmap_empty(ctx->irq_bitmap, ctx->irq_count))
+ ctx->pending_irq = false;
+ } else if (ctx->pending_fault) {
+ pr_devel("afu_read delivering data storage fault\n");
+ event.header.size += sizeof(struct cxl_event_data_storage);
+ event.header.type = CXL_EVENT_DATA_STORAGE;
+ event.fault.addr = ctx->fault_addr;
+ event.fault.dsisr = ctx->fault_dsisr;
+ ctx->pending_fault = false;
+ } else if (ctx->pending_afu_err) {
+ pr_devel("afu_read delivering afu error\n");
+ event.header.size += sizeof(struct cxl_event_afu_error);
+ event.header.type = CXL_EVENT_AFU_ERROR;
+ event.afu_error.error = ctx->afu_err;
+ ctx->pending_afu_err = false;
+ } else if (ctx->status == CLOSED) {
+ pr_devel("afu_read fatal error\n");
+ spin_unlock_irqrestore(&ctx->lock, flags);
+ return -EIO;
+ } else
+ WARN(1, "afu_read must be buggy\n");
+
+ spin_unlock_irqrestore(&ctx->lock, flags);
+
+ if (copy_to_user(buf, &event, event.header.size))
+ return -EFAULT;
+ return event.header.size;
+
+out:
+ finish_wait(&ctx->wq, &wait);
+ spin_unlock_irqrestore(&ctx->lock, flags);
+ return rc;
+}
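+
+/*
+ * Illustrative userspace sketch of consuming the events delivered by
+ * afu_read() above (not part of this patch; struct cxl_event and the
+ * CXL_EVENT_* types come from the cxl uapi header, and the handle_*()
+ * calls are placeholders). The buffer must be at least CXL_READ_MIN_SIZE
+ * bytes; each read() returns one event of event->header.size bytes:
+ *
+ *	char buf[4096];
+ *	struct cxl_event *event = (struct cxl_event *)buf;
+ *
+ *	while (read(fd, buf, sizeof(buf)) > 0) {
+ *		switch (event->header.type) {
+ *		case CXL_EVENT_AFU_INTERRUPT:
+ *			handle_irq(event->irq.irq);
+ *			break;
+ *		case CXL_EVENT_DATA_STORAGE:
+ *			handle_fault(event->fault.addr, event->fault.dsisr);
+ *			break;
+ *		case CXL_EVENT_AFU_ERROR:
+ *			handle_error(event->afu_error.error);
+ *			break;
+ *		}
+ *	}
+ */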
+
+static const struct file_operations afu_fops = {
+ .owner = THIS_MODULE,
+ .open = afu_open,
+ .poll = afu_poll,
+ .read = afu_read,
+ .release = afu_release,
+ .unlocked_ioctl = afu_ioctl,
+ .compat_ioctl = afu_compat_ioctl,
+ .mmap = afu_mmap,
+};
+
+static const struct file_operations afu_master_fops = {
+ .owner = THIS_MODULE,
+ .open = afu_master_open,
+ .poll = afu_poll,
+ .read = afu_read,
+ .release = afu_release,
+ .unlocked_ioctl = afu_ioctl,
+ .compat_ioctl = afu_compat_ioctl,
+ .mmap = afu_mmap,
+};
+
+
+static char *cxl_devnode(struct device *dev, umode_t *mode)
+{
+ if (CXL_DEVT_IS_CARD(dev->devt)) {
+ /*
+ * These minor numbers will eventually be used to program the
+ * PSL and AFUs once we have dynamic reprogramming support
+ */
+ return NULL;
+ }
+ return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
+}
+
+extern struct class *cxl_class;
+
+static int cxl_add_chardev(struct cxl_afu *afu, dev_t devt, struct cdev *cdev,
+ struct device **chardev, char *postfix, char *desc,
+ const struct file_operations *fops)
+{
+ struct device *dev;
+ int rc;
+
+ cdev_init(cdev, fops);
+ if ((rc = cdev_add(cdev, devt, 1))) {
+ dev_err(&afu->dev, "Unable to add %s chardev: %i\n", desc, rc);
+ return rc;
+ }
+
+ dev = device_create(cxl_class, &afu->dev, devt, afu,
+ "afu%i.%i%s", afu->adapter->adapter_num, afu->slice, postfix);
+ if (IS_ERR(dev)) {
+ dev_err(&afu->dev, "Unable to create %s chardev in sysfs: %i\n", desc, rc);
+ rc = PTR_ERR(dev);
+ goto err;
+ }
+
+ *chardev = dev;
+
+ return 0;
+err:
+ cdev_del(cdev);
+ return rc;
+}
+
+int cxl_chardev_d_afu_add(struct cxl_afu *afu)
+{
+ return cxl_add_chardev(afu, CXL_AFU_MKDEV_D(afu), &afu->afu_cdev_d,
+ &afu->chardev_d, "d", "dedicated",
+ &afu_master_fops); /* Uses master fops */
+}
+
+int cxl_chardev_m_afu_add(struct cxl_afu *afu)
+{
+ return cxl_add_chardev(afu, CXL_AFU_MKDEV_M(afu), &afu->afu_cdev_m,
+ &afu->chardev_m, "m", "master",
+ &afu_master_fops);
+}
+
+int cxl_chardev_s_afu_add(struct cxl_afu *afu)
+{
+ return cxl_add_chardev(afu, CXL_AFU_MKDEV_S(afu), &afu->afu_cdev_s,
+ &afu->chardev_s, "s", "shared",
+ &afu_fops);
+}
+
+void cxl_chardev_afu_remove(struct cxl_afu *afu)
+{
+ if (afu->chardev_d) {
+ cdev_del(&afu->afu_cdev_d);
+ device_unregister(afu->chardev_d);
+ afu->chardev_d = NULL;
+ }
+ if (afu->chardev_m) {
+ cdev_del(&afu->afu_cdev_m);
+ device_unregister(afu->chardev_m);
+ afu->chardev_m = NULL;
+ }
+ if (afu->chardev_s) {
+ cdev_del(&afu->afu_cdev_s);
+ device_unregister(afu->chardev_s);
+ afu->chardev_s = NULL;
+ }
+}
+
+int cxl_register_afu(struct cxl_afu *afu)
+{
+ afu->dev.class = cxl_class;
+
+ return device_register(&afu->dev);
+}
+
+int cxl_register_adapter(struct cxl *adapter)
+{
+ adapter->dev.class = cxl_class;
+
+ /*
+ * Future: When we support dynamically reprogramming the PSL & AFU we
+ * will expose the interface to do that via a chardev:
+ * adapter->dev.devt = CXL_CARD_MKDEV(adapter);
+ */
+
+ return device_register(&adapter->dev);
+}
+
+int __init cxl_file_init(void)
+{
+ int rc;
+
+ /*
+ * If these change we really need to update API. Either change some
+ * flags or update API version number CXL_API_VERSION.
+ */
+ BUILD_BUG_ON(CXL_API_VERSION != 1);
+ BUILD_BUG_ON(sizeof(struct cxl_ioctl_start_work) != 64);
+ BUILD_BUG_ON(sizeof(struct cxl_event_header) != 8);
+ BUILD_BUG_ON(sizeof(struct cxl_event_afu_interrupt) != 8);
+ BUILD_BUG_ON(sizeof(struct cxl_event_data_storage) != 32);
+ BUILD_BUG_ON(sizeof(struct cxl_event_afu_error) != 16);
+
+ if ((rc = alloc_chrdev_region(&cxl_dev, 0, CXL_NUM_MINORS, "cxl"))) {
+ pr_err("Unable to allocate CXL major number: %i\n", rc);
+ return rc;
+ }
+
+ pr_devel("CXL device allocated, MAJOR %i\n", MAJOR(cxl_dev));
+
+ cxl_class = class_create(THIS_MODULE, "cxl");
+ if (IS_ERR(cxl_class)) {
+ pr_err("Unable to create CXL class\n");
+ rc = PTR_ERR(cxl_class);
+ goto err;
+ }
+ cxl_class->devnode = cxl_devnode;
+
+ return 0;
+
+err:
+ unregister_chrdev_region(cxl_dev, CXL_NUM_MINORS);
+ return rc;
+}
+
+void cxl_file_exit(void)
+{
+ unregister_chrdev_region(cxl_dev, CXL_NUM_MINORS);
+ class_destroy(cxl_class);
+}
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
new file mode 100644
index 000000000000..336020c8e1af
--- /dev/null
+++ b/drivers/misc/cxl/irq.c
@@ -0,0 +1,402 @@
+/*
+ * Copyright 2014 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/slab.h>
+#include <linux/pid.h>
+#include <asm/cputable.h>
+#include <misc/cxl.h>
+
+#include "cxl.h"
+
+/* XXX: This is implementation specific */
+static irqreturn_t handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr, u64 errstat)
+{
+ u64 fir1, fir2, fir_slice, serr, afu_debug;
+
+ fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1);
+ fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2);
+ fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An);
+ serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
+ afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An);
+
+ dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%.16llx\n", errstat);
+ dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%.16llx\n", fir1);
+ dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%.16llx\n", fir2);
+ dev_crit(&ctx->afu->dev, "PSL_SERR_An: 0x%.16llx\n", serr);
+ dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%.16llx\n", fir_slice);
+ dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%.16llx\n", afu_debug);
+
+ dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
+ cxl_stop_trace(ctx->afu->adapter);
+
+ return cxl_ack_irq(ctx, 0, errstat);
+}
+
+irqreturn_t cxl_slice_irq_err(int irq, void *data)
+{
+ struct cxl_afu *afu = data;
+ u64 fir_slice, errstat, serr, afu_debug;
+
+ WARN(irq, "CXL SLICE ERROR interrupt %i\n", irq);
+
+ serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
+ fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
+ errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
+ afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);
+ dev_crit(&afu->dev, "PSL_SERR_An: 0x%.16llx\n", serr);
+ dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%.16llx\n", fir_slice);
+ dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%.16llx\n", errstat);
+ dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%.16llx\n", afu_debug);
+
+ cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t cxl_irq_err(int irq, void *data)
+{
+ struct cxl *adapter = data;
+ u64 fir1, fir2, err_ivte;
+
+ WARN(1, "CXL ERROR interrupt %i\n", irq);
+
+ err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE);
+ dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%.16llx\n", err_ivte);
+
+ dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
+ cxl_stop_trace(adapter);
+
+ fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
+ fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);
+
+ dev_crit(&adapter->dev, "PSL_FIR1: 0x%.16llx\nPSL_FIR2: 0x%.16llx\n", fir1, fir2);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 dar)
+{
+ ctx->dsisr = dsisr;
+ ctx->dar = dar;
+ schedule_work(&ctx->fault_work);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t cxl_irq(int irq, void *data)
+{
+ struct cxl_context *ctx = data;
+ struct cxl_irq_info irq_info;
+ u64 dsisr, dar;
+ int result;
+
+ if ((result = cxl_get_irq(ctx, &irq_info))) {
+ WARN(1, "Unable to get CXL IRQ Info: %i\n", result);
+ return IRQ_HANDLED;
+ }
+
+ dsisr = irq_info.dsisr;
+ dar = irq_info.dar;
+
+ pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar);
+
+ if (dsisr & CXL_PSL_DSISR_An_DS) {
+ /*
+ * We don't inherently need to sleep to handle this, but we do
+ * need to get a ref to the task's mm, which we can't do from
+ * irq context without the potential for a deadlock since it
+ * takes the task_lock. An alternate option would be to keep a
+ * reference to the task's mm the entire time it has cxl open,
+ * but to do that we need to solve the issue where we hold a
+ * ref to the mm, but the mm can hold a ref to the fd after an
+ * mmap preventing anything from being cleaned up.
+ */
+ pr_devel("Scheduling segment miss handling for later pe: %i\n", ctx->pe);
+ return schedule_cxl_fault(ctx, dsisr, dar);
+ }
+
+ if (dsisr & CXL_PSL_DSISR_An_M)
+ pr_devel("CXL interrupt: PTE not found\n");
+ if (dsisr & CXL_PSL_DSISR_An_P)
+ pr_devel("CXL interrupt: Storage protection violation\n");
+ if (dsisr & CXL_PSL_DSISR_An_A)
+ pr_devel("CXL interrupt: AFU lock access to write through or cache inhibited storage\n");
+ if (dsisr & CXL_PSL_DSISR_An_S)
+ pr_devel("CXL interrupt: Access was afu_wr or afu_zero\n");
+ if (dsisr & CXL_PSL_DSISR_An_K)
+ pr_devel("CXL interrupt: Access not permitted by virtual page class key protection\n");
+
+ if (dsisr & CXL_PSL_DSISR_An_DM) {
+ /*
+ * In some cases we might be able to handle the fault
+ * immediately if hash_page would succeed, but we still need
+ * the task's mm, which as above we can't get without a lock
+ */
+ pr_devel("Scheduling page fault handling for later pe: %i\n", ctx->pe);
+ return schedule_cxl_fault(ctx, dsisr, dar);
+ }
+ if (dsisr & CXL_PSL_DSISR_An_ST)
+ WARN(1, "CXL interrupt: Segment Table PTE not found\n");
+ if (dsisr & CXL_PSL_DSISR_An_UR)
+ pr_devel("CXL interrupt: AURP PTE not found\n");
+ if (dsisr & CXL_PSL_DSISR_An_PE)
+ return handle_psl_slice_error(ctx, dsisr, irq_info.errstat);
+ if (dsisr & CXL_PSL_DSISR_An_AE) {
+ pr_devel("CXL interrupt: AFU Error %.llx\n", irq_info.afu_err);
+
+ if (ctx->pending_afu_err) {
+ /*
+ * This shouldn't happen - the PSL treats these errors
+ * as fatal and will have reset the AFU, so there's not
+ * much point buffering multiple AFU errors.
+ * OTOH if we DO ever see a storm of these come in it's
+ * probably best that we log them somewhere:
+ */
+ dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error "
+ "undelivered to pe %i: %.llx\n",
+ ctx->pe, irq_info.afu_err);
+ } else {
+ spin_lock(&ctx->lock);
+ ctx->afu_err = irq_info.afu_err;
+ ctx->pending_afu_err = 1;
+ spin_unlock(&ctx->lock);
+
+ wake_up_all(&ctx->wq);
+ }
+
+ cxl_ack_irq(ctx, CXL_PSL_TFC_An_A, 0);
+ }
+ if (dsisr & CXL_PSL_DSISR_An_OC)
+ pr_devel("CXL interrupt: OS Context Warning\n");
+
+ WARN(1, "Unhandled CXL PSL IRQ\n");
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t cxl_irq_multiplexed(int irq, void *data)
+{
+ struct cxl_afu *afu = data;
+ struct cxl_context *ctx;
+ int ph = cxl_p2n_read(afu, CXL_PSL_PEHandle_An) & 0xffff;
+ int ret;
+
+ rcu_read_lock();
+ ctx = idr_find(&afu->contexts_idr, ph);
+ if (ctx) {
+ ret = cxl_irq(irq, ctx);
+ rcu_read_unlock();
+ return ret;
+ }
+ rcu_read_unlock();
+
+ WARN(1, "Unable to demultiplex CXL PSL IRQ\n");
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t cxl_irq_afu(int irq, void *data)
+{
+ struct cxl_context *ctx = data;
+ irq_hw_number_t hwirq = irqd_to_hwirq(irq_get_irq_data(irq));
+ int irq_off, afu_irq = 1;
+ __u16 range;
+ int r;
+
+ for (r = 1; r < CXL_IRQ_RANGES; r++) {
+ irq_off = hwirq - ctx->irqs.offset[r];
+ range = ctx->irqs.range[r];
+ if (irq_off >= 0 && irq_off < range) {
+ afu_irq += irq_off;
+ break;
+ }
+ afu_irq += range;
+ }
+ if (unlikely(r >= CXL_IRQ_RANGES)) {
+ WARN(1, "Recieved AFU IRQ out of range for pe %i (virq %i hwirq %lx)\n",
+ ctx->pe, irq, hwirq);
+ return IRQ_HANDLED;
+ }
+
+ pr_devel("Received AFU interrupt %i for pe: %i (virq %i hwirq %lx)\n",
+ afu_irq, ctx->pe, irq, hwirq);
+
+ if (unlikely(!ctx->irq_bitmap)) {
+ WARN(1, "Recieved AFU IRQ for context with no IRQ bitmap\n");
+ return IRQ_HANDLED;
+ }
+ spin_lock(&ctx->lock);
+ set_bit(afu_irq - 1, ctx->irq_bitmap);
+ ctx->pending_irq = true;
+ spin_unlock(&ctx->lock);
+
+ wake_up_all(&ctx->wq);
+
+ return IRQ_HANDLED;
+}
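+
+/*
+ * Worked example of the range walk above (offsets and ranges are arbitrary):
+ * with irqs.offset[1] = 0x100, irqs.range[1] = 4, irqs.offset[2] = 0x200 and
+ * irqs.range[2] = 2, a hardware IRQ of 0x201 misses range 1 (afu_irq becomes
+ * 1 + 4 = 5) and lands at offset 1 of range 2, giving AFU interrupt number 6.
+ * Range 0 is skipped since it carries the multiplexed PSL interrupt, which is
+ * why AFU interrupts are numbered from 1.
+ */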
+
+unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
+ irq_handler_t handler, void *cookie)
+{
+ unsigned int virq;
+ int result;
+
+ /* IRQ Domain? */
+ virq = irq_create_mapping(NULL, hwirq);
+ if (!virq) {
+ dev_warn(&adapter->dev, "cxl_map_irq: irq_create_mapping failed\n");
+ return 0;
+ }
+
+ cxl_setup_irq(adapter, hwirq, virq);
+
+ pr_devel("hwirq %#lx mapped to virq %u\n", hwirq, virq);
+
+ result = request_irq(virq, handler, 0, "cxl", cookie);
+ if (result) {
+ dev_warn(&adapter->dev, "cxl_map_irq: request_irq failed: %i\n", result);
+ return 0;
+ }
+
+ return virq;
+}
+
+void cxl_unmap_irq(unsigned int virq, void *cookie)
+{
+ free_irq(virq, cookie);
+ irq_dispose_mapping(virq);
+}
+
+static int cxl_register_one_irq(struct cxl *adapter,
+ irq_handler_t handler,
+ void *cookie,
+ irq_hw_number_t *dest_hwirq,
+ unsigned int *dest_virq)
+{
+ int hwirq, virq;
+
+ if ((hwirq = cxl_alloc_one_irq(adapter)) < 0)
+ return hwirq;
+
+ if (!(virq = cxl_map_irq(adapter, hwirq, handler, cookie)))
+ goto err;
+
+ *dest_hwirq = hwirq;
+ *dest_virq = virq;
+
+ return 0;
+
+err:
+ cxl_release_one_irq(adapter, hwirq);
+ return -ENOMEM;
+}
+
+int cxl_register_psl_err_irq(struct cxl *adapter)
+{
+ int rc;
+
+ if ((rc = cxl_register_one_irq(adapter, cxl_irq_err, adapter,
+ &adapter->err_hwirq,
+ &adapter->err_virq)))
+ return rc;
+
+ cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->err_hwirq & 0xffff);
+
+ return 0;
+}
+
+void cxl_release_psl_err_irq(struct cxl *adapter)
+{
+ cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
+ cxl_unmap_irq(adapter->err_virq, adapter);
+ cxl_release_one_irq(adapter, adapter->err_hwirq);
+}
+
+int cxl_register_serr_irq(struct cxl_afu *afu)
+{
+ u64 serr;
+ int rc;
+
+ if ((rc = cxl_register_one_irq(afu->adapter, cxl_slice_irq_err, afu,
+ &afu->serr_hwirq,
+ &afu->serr_virq)))
+ return rc;
+
+ serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
+ serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff);
+ cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
+
+ return 0;
+}
+
+void cxl_release_serr_irq(struct cxl_afu *afu)
+{
+ cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
+ cxl_unmap_irq(afu->serr_virq, afu);
+ cxl_release_one_irq(afu->adapter, afu->serr_hwirq);
+}
+
+int cxl_register_psl_irq(struct cxl_afu *afu)
+{
+ return cxl_register_one_irq(afu->adapter, cxl_irq_multiplexed, afu,
+ &afu->psl_hwirq, &afu->psl_virq);
+}
+
+void cxl_release_psl_irq(struct cxl_afu *afu)
+{
+ cxl_unmap_irq(afu->psl_virq, afu);
+ cxl_release_one_irq(afu->adapter, afu->psl_hwirq);
+}
+
+int afu_register_irqs(struct cxl_context *ctx, u32 count)
+{
+ irq_hw_number_t hwirq;
+ int rc, r, i;
+
+ if ((rc = cxl_alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter, count)))
+ return rc;
+
+ /* Multiplexed PSL Interrupt */
+ ctx->irqs.offset[0] = ctx->afu->psl_hwirq;
+ ctx->irqs.range[0] = 1;
+
+ ctx->irq_count = count;
+ ctx->irq_bitmap = kcalloc(BITS_TO_LONGS(count),
+ sizeof(*ctx->irq_bitmap), GFP_KERNEL);
+ if (!ctx->irq_bitmap)
+ return -ENOMEM;
+ for (r = 1; r < CXL_IRQ_RANGES; r++) {
+ hwirq = ctx->irqs.offset[r];
+ for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
+ cxl_map_irq(ctx->afu->adapter, hwirq,
+ cxl_irq_afu, ctx);
+ }
+ }
+
+ return 0;
+}
+
+void afu_release_irqs(struct cxl_context *ctx)
+{
+ irq_hw_number_t hwirq;
+ unsigned int virq;
+ int r, i;
+
+ for (r = 1; r < CXL_IRQ_RANGES; r++) {
+ hwirq = ctx->irqs.offset[r];
+ for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
+ virq = irq_find_mapping(NULL, hwirq);
+ if (virq)
+ cxl_unmap_irq(virq, ctx);
+ }
+ }
+
+ cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
+}
diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c
new file mode 100644
index 000000000000..4cde9b661642
--- /dev/null
+++ b/drivers/misc/cxl/main.c
@@ -0,0 +1,230 @@
+/*
+ * Copyright 2014 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/idr.h>
+#include <linux/pci.h>
+#include <asm/cputable.h>
+#include <misc/cxl.h>
+
+#include "cxl.h"
+
+static DEFINE_SPINLOCK(adapter_idr_lock);
+static DEFINE_IDR(cxl_adapter_idr);
+
+uint cxl_verbose;
+module_param_named(verbose, cxl_verbose, uint, 0600);
+MODULE_PARM_DESC(verbose, "Enable verbose dmesg output");
+
+static inline void _cxl_slbia(struct cxl_context *ctx, struct mm_struct *mm)
+{
+ struct task_struct *task;
+ unsigned long flags;
+ if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
+ pr_devel("%s unable to get task %i\n",
+ __func__, pid_nr(ctx->pid));
+ return;
+ }
+
+ if (task->mm != mm)
+ goto out_put;
+
+ pr_devel("%s matched mm - card: %i afu: %i pe: %i\n", __func__,
+ ctx->afu->adapter->adapter_num, ctx->afu->slice, ctx->pe);
+
+ spin_lock_irqsave(&ctx->sste_lock, flags);
+ memset(ctx->sstp, 0, ctx->sst_size);
+ spin_unlock_irqrestore(&ctx->sste_lock, flags);
+ mb();
+ cxl_afu_slbia(ctx->afu);
+out_put:
+ put_task_struct(task);
+}
+
+static inline void cxl_slbia_core(struct mm_struct *mm)
+{
+ struct cxl *adapter;
+ struct cxl_afu *afu;
+ struct cxl_context *ctx;
+ int card, slice, id;
+
+ pr_devel("%s called\n", __func__);
+
+ spin_lock(&adapter_idr_lock);
+ idr_for_each_entry(&cxl_adapter_idr, adapter, card) {
+ /* XXX: Make this lookup faster with link from mm to ctx */
+ spin_lock(&adapter->afu_list_lock);
+ for (slice = 0; slice < adapter->slices; slice++) {
+ afu = adapter->afu[slice];
+ if (!afu->enabled)
+ continue;
+ rcu_read_lock();
+ idr_for_each_entry(&afu->contexts_idr, ctx, id)
+ _cxl_slbia(ctx, mm);
+ rcu_read_unlock();
+ }
+ spin_unlock(&adapter->afu_list_lock);
+ }
+ spin_unlock(&adapter_idr_lock);
+}
+
+static struct cxl_calls cxl_calls = {
+ .cxl_slbia = cxl_slbia_core,
+ .owner = THIS_MODULE,
+};
+
+int cxl_alloc_sst(struct cxl_context *ctx)
+{
+ unsigned long vsid;
+ u64 ea_mask, size, sstp0, sstp1;
+
+ sstp0 = 0;
+ sstp1 = 0;
+
+ ctx->sst_size = PAGE_SIZE;
+ ctx->sst_lru = 0;
+ ctx->sstp = (struct cxl_sste *)get_zeroed_page(GFP_KERNEL);
+ if (!ctx->sstp) {
+ pr_err("cxl_alloc_sst: Unable to allocate segment table\n");
+ return -ENOMEM;
+ }
+ pr_devel("SSTP allocated at 0x%p\n", ctx->sstp);
+
+ vsid = get_kernel_vsid((u64)ctx->sstp, mmu_kernel_ssize) << 12;
+
+ sstp0 |= (u64)mmu_kernel_ssize << CXL_SSTP0_An_B_SHIFT;
+ sstp0 |= (SLB_VSID_KERNEL | mmu_psize_defs[mmu_linear_psize].sllp) << 50;
+
+ size = (((u64)ctx->sst_size >> 8) - 1) << CXL_SSTP0_An_SegTableSize_SHIFT;
+ if (unlikely(size & ~CXL_SSTP0_An_SegTableSize_MASK)) {
+ WARN(1, "Impossible segment table size\n");
+ return -EINVAL;
+ }
+ sstp0 |= size;
+
+ if (mmu_kernel_ssize == MMU_SEGSIZE_256M)
+ ea_mask = 0xfffff00ULL;
+ else
+ ea_mask = 0xffffffff00ULL;
+
+ sstp0 |= vsid >> (50-14); /* Top 14 bits of VSID */
+ sstp1 |= (vsid << (64-(50-14))) & ~ea_mask;
+ sstp1 |= (u64)ctx->sstp & ea_mask;
+ sstp1 |= CXL_SSTP1_An_V;
+
+ pr_devel("Looked up %#llx: slbfee. %#llx (ssize: %x, vsid: %#lx), copied to SSTP0: %#llx, SSTP1: %#llx\n",
+ (u64)ctx->sstp, (u64)ctx->sstp & ESID_MASK, mmu_kernel_ssize, vsid, sstp0, sstp1);
+
+ /* Store the calculated sstp hardware register values for use later */
+ ctx->sstp0 = sstp0;
+ ctx->sstp1 = sstp1;
+
+ return 0;
+}
+
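+/*
+ * Worked example of the sizing above: with 4K pages sst_size = 4096, so the
+ * SegTableSize field is (4096 >> 8) - 1 = 15 and cxl_load_segment() derives
+ * its group mask as (4096 >> 7) - 1 = 31, i.e. 32 groups of 8 SSTEs. SSTP1
+ * carries the remaining VSID bits plus the table address bits selected by
+ * ea_mask; the top 14 VSID bits land in SSTP0.
+ */
+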
+/* Find a CXL adapter by its number and increase its refcount */
+struct cxl *get_cxl_adapter(int num)
+{
+ struct cxl *adapter;
+
+ spin_lock(&adapter_idr_lock);
+ if ((adapter = idr_find(&cxl_adapter_idr, num)))
+ get_device(&adapter->dev);
+ spin_unlock(&adapter_idr_lock);
+
+ return adapter;
+}
+
+int cxl_alloc_adapter_nr(struct cxl *adapter)
+{
+ int i;
+
+ idr_preload(GFP_KERNEL);
+ spin_lock(&adapter_idr_lock);
+ i = idr_alloc(&cxl_adapter_idr, adapter, 0, 0, GFP_NOWAIT);
+ spin_unlock(&adapter_idr_lock);
+ idr_preload_end();
+ if (i < 0)
+ return i;
+
+ adapter->adapter_num = i;
+
+ return 0;
+}
+
+void cxl_remove_adapter_nr(struct cxl *adapter)
+{
+ idr_remove(&cxl_adapter_idr, adapter->adapter_num);
+}
+
+int cxl_afu_select_best_mode(struct cxl_afu *afu)
+{
+ if (afu->modes_supported & CXL_MODE_DIRECTED)
+ return cxl_afu_activate_mode(afu, CXL_MODE_DIRECTED);
+
+ if (afu->modes_supported & CXL_MODE_DEDICATED)
+ return cxl_afu_activate_mode(afu, CXL_MODE_DEDICATED);
+
+ dev_warn(&afu->dev, "No supported programming modes available\n");
+ /* We don't fail this so the user can inspect sysfs */
+ return 0;
+}
+
+static int __init init_cxl(void)
+{
+ int rc = 0;
+
+ if (!cpu_has_feature(CPU_FTR_HVMODE))
+ return -EPERM;
+
+ if ((rc = cxl_file_init()))
+ return rc;
+
+ cxl_debugfs_init();
+
+ if ((rc = register_cxl_calls(&cxl_calls)))
+ goto err;
+
+ if ((rc = pci_register_driver(&cxl_pci_driver)))
+ goto err1;
+
+ return 0;
+err1:
+ unregister_cxl_calls(&cxl_calls);
+err:
+ cxl_debugfs_exit();
+ cxl_file_exit();
+
+ return rc;
+}
+
+static void exit_cxl(void)
+{
+ pci_unregister_driver(&cxl_pci_driver);
+
+ cxl_debugfs_exit();
+ cxl_file_exit();
+ unregister_cxl_calls(&cxl_calls);
+}
+
+module_init(init_cxl);
+module_exit(exit_cxl);
+
+MODULE_DESCRIPTION("IBM Coherent Accelerator");
+MODULE_AUTHOR("Ian Munsie <imunsie@au1.ibm.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
new file mode 100644
index 000000000000..623286a77114
--- /dev/null
+++ b/drivers/misc/cxl/native.c
@@ -0,0 +1,683 @@
+/*
+ * Copyright 2014 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <asm/synch.h>
+#include <misc/cxl.h>
+
+#include "cxl.h"
+
+static int afu_control(struct cxl_afu *afu, u64 command,
+ u64 result, u64 mask, bool enabled)
+{
+ u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
+ unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
+
+ spin_lock(&afu->afu_cntl_lock);
+ pr_devel("AFU command starting: %llx\n", command);
+
+ cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl | command);
+
+ AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
+ while ((AFU_Cntl & mask) != result) {
+ if (time_after_eq(jiffies, timeout)) {
+ dev_warn(&afu->dev, "WARNING: AFU control timed out!\n");
+ spin_unlock(&afu->afu_cntl_lock);
+ return -EBUSY;
+ }
+ pr_devel_ratelimited("AFU control... (0x%.16llx)\n",
+ AFU_Cntl | command);
+ cpu_relax();
+ AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
+ }
+ pr_devel("AFU command complete: %llx\n", command);
+ afu->enabled = enabled;
+ spin_unlock(&afu->afu_cntl_lock);
+
+ return 0;
+}
+
+static int afu_enable(struct cxl_afu *afu)
+{
+ pr_devel("AFU enable request\n");
+
+ return afu_control(afu, CXL_AFU_Cntl_An_E,
+ CXL_AFU_Cntl_An_ES_Enabled,
+ CXL_AFU_Cntl_An_ES_MASK, true);
+}
+
+int cxl_afu_disable(struct cxl_afu *afu)
+{
+ pr_devel("AFU disable request\n");
+
+ return afu_control(afu, 0, CXL_AFU_Cntl_An_ES_Disabled,
+ CXL_AFU_Cntl_An_ES_MASK, false);
+}
+
+/* This will disable as well as reset */
+int cxl_afu_reset(struct cxl_afu *afu)
+{
+ pr_devel("AFU reset request\n");
+
+ return afu_control(afu, CXL_AFU_Cntl_An_RA,
+ CXL_AFU_Cntl_An_RS_Complete | CXL_AFU_Cntl_An_ES_Disabled,
+ CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK,
+ false);
+}
+
+static int afu_check_and_enable(struct cxl_afu *afu)
+{
+ if (afu->enabled)
+ return 0;
+ return afu_enable(afu);
+}
+
+int cxl_psl_purge(struct cxl_afu *afu)
+{
+ u64 PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
+ u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
+ u64 dsisr, dar;
+ u64 start, end;
+ unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
+
+ pr_devel("PSL purge request\n");
+
+ if ((AFU_Cntl & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
+ WARN(1, "psl_purge request while AFU not disabled!\n");
+ cxl_afu_disable(afu);
+ }
+
+ cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
+ PSL_CNTL | CXL_PSL_SCNTL_An_Pc);
+ start = local_clock();
+ PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
+ while ((PSL_CNTL & CXL_PSL_SCNTL_An_Ps_MASK)
+ == CXL_PSL_SCNTL_An_Ps_Pending) {
+ if (time_after_eq(jiffies, timeout)) {
+ dev_warn(&afu->dev, "WARNING: PSL Purge timed out!\n");
+ return -EBUSY;
+ }
+ dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
+ pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%.16llx PSL_DSISR: 0x%.16llx\n", PSL_CNTL, dsisr);
+ if (dsisr & CXL_PSL_DSISR_TRANS) {
+ dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
+ dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%.16llx, DAR: 0x%.16llx\n", dsisr, dar);
+ cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
+ } else if (dsisr) {
+ dev_notice(&afu->dev, "PSL purge acknowledging pending non-translation fault, DSISR: 0x%.16llx\n", dsisr);
+ cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
+ } else {
+ cpu_relax();
+ }
+ PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
+ }
+ end = local_clock();
+ pr_devel("PSL purged in %lld ns\n", end - start);
+
+ cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
+ PSL_CNTL & ~CXL_PSL_SCNTL_An_Pc);
+ return 0;
+}
+
+static int spa_max_procs(int spa_size)
+{
+ /*
+ * From the CAIA:
+ * end_of_SPA_area = SPA_Base + ((n+4) * 128) + (( ((n*8) + 127) >> 7) * 128) + 255
+ * Most of that junk is really just an overly-complicated way of saying
+ * the last 256 bytes are __aligned(128), so it's really:
+ * end_of_SPA_area = end_of_PSL_queue_area + __aligned(128) 255
+ * and
+ * end_of_PSL_queue_area = SPA_Base + ((n+4) * 128) + (n*8) - 1
+ * so
+ * sizeof(SPA) = ((n+4) * 128) + (n*8) + __aligned(128) 256
+ * Ignore the alignment (which is safe in this case as long as we are
+ * careful with our rounding) and solve for n:
+ */
+ return ((spa_size / 8) - 96) / 17;
+}
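+
+/*
+ * Worked example: spa_size = 4096 gives (512 - 96) / 17 = 24 processes.
+ * Checking against the CAIA formula above, n = 24 needs
+ * ((24 + 4) * 128) + (24 * 8) + 256 = 4032 bytes, which fits in 4096,
+ * while n = 25 would need 4168 bytes and does not.
+ */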
+
+static int alloc_spa(struct cxl_afu *afu)
+{
+ u64 spap;
+
+ /* Work out how many pages to allocate */
+ afu->spa_order = 0;
+ do {
+ afu->spa_order++;
+ afu->spa_size = (1 << afu->spa_order) * PAGE_SIZE;
+ afu->spa_max_procs = spa_max_procs(afu->spa_size);
+ } while (afu->spa_max_procs < afu->num_procs);
+
+ WARN_ON(afu->spa_size > 0x100000); /* Max size supported by the hardware */
+
+ if (!(afu->spa = (struct cxl_process_element *)
+ __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->spa_order))) {
+ pr_err("cxl_alloc_spa: Unable to allocate scheduled process area\n");
+ return -ENOMEM;
+ }
+ pr_devel("spa pages: %i afu->spa_max_procs: %i afu->num_procs: %i\n",
+ 1<<afu->spa_order, afu->spa_max_procs, afu->num_procs);
+
+ afu->sw_command_status = (__be64 *)((char *)afu->spa +
+ ((afu->spa_max_procs + 3) * 128));
+
+ spap = virt_to_phys(afu->spa) & CXL_PSL_SPAP_Addr;
+ spap |= ((afu->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size;
+ spap |= CXL_PSL_SPAP_V;
+ pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n", afu->spa, afu->spa_max_procs, afu->sw_command_status, spap);
+ cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap);
+
+ return 0;
+}
+
+static void release_spa(struct cxl_afu *afu)
+{
+ free_pages((unsigned long) afu->spa, afu->spa_order);
+}
+
+int cxl_tlb_slb_invalidate(struct cxl *adapter)
+{
+ unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
+
+ pr_devel("CXL adapter wide TLBIA & SLBIA\n");
+
+ cxl_p1_write(adapter, CXL_PSL_AFUSEL, CXL_PSL_AFUSEL_A);
+
+ cxl_p1_write(adapter, CXL_PSL_TLBIA, CXL_TLB_SLB_IQ_ALL);
+ while (cxl_p1_read(adapter, CXL_PSL_TLBIA) & CXL_TLB_SLB_P) {
+ if (time_after_eq(jiffies, timeout)) {
+ dev_warn(&adapter->dev, "WARNING: CXL adapter wide TLBIA timed out!\n");
+ return -EBUSY;
+ }
+ cpu_relax();
+ }
+
+ cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_ALL);
+ while (cxl_p1_read(adapter, CXL_PSL_SLBIA) & CXL_TLB_SLB_P) {
+ if (time_after_eq(jiffies, timeout)) {
+ dev_warn(&adapter->dev, "WARNING: CXL adapter wide SLBIA timed out!\n");
+ return -EBUSY;
+ }
+ cpu_relax();
+ }
+ return 0;
+}
+
+int cxl_afu_slbia(struct cxl_afu *afu)
+{
+ unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
+
+ pr_devel("cxl_afu_slbia issuing SLBIA command\n");
+ cxl_p2n_write(afu, CXL_SLBIA_An, CXL_TLB_SLB_IQ_ALL);
+ while (cxl_p2n_read(afu, CXL_SLBIA_An) & CXL_TLB_SLB_P) {
+ if (time_after_eq(jiffies, timeout)) {
+ dev_warn(&afu->dev, "WARNING: CXL AFU SLBIA timed out!\n");
+ return -EBUSY;
+ }
+ cpu_relax();
+ }
+ return 0;
+}
+
+static int cxl_write_sstp(struct cxl_afu *afu, u64 sstp0, u64 sstp1)
+{
+ int rc;
+
+ /* 1. Disable SSTP by writing 0 to SSTP1[V] */
+ cxl_p2n_write(afu, CXL_SSTP1_An, 0);
+
+ /* 2. Invalidate all SLB entries */
+ if ((rc = cxl_afu_slbia(afu)))
+ return rc;
+
+ /* 3. Set SSTP0_An */
+ cxl_p2n_write(afu, CXL_SSTP0_An, sstp0);
+
+ /* 4. Set SSTP1_An */
+ cxl_p2n_write(afu, CXL_SSTP1_An, sstp1);
+
+ return 0;
+}
+
+/* Using the per-slice version (ie. SLBIA_An) may improve performance here */
+static void slb_invalid(struct cxl_context *ctx)
+{
+ struct cxl *adapter = ctx->afu->adapter;
+ u64 slbia;
+
+ WARN_ON(!mutex_is_locked(&ctx->afu->spa_mutex));
+
+ cxl_p1_write(adapter, CXL_PSL_LBISEL,
+ ((u64)be32_to_cpu(ctx->elem->common.pid) << 32) |
+ be32_to_cpu(ctx->elem->lpid));
+ cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_LPIDPID);
+
+ while (1) {
+ slbia = cxl_p1_read(adapter, CXL_PSL_SLBIA);
+ if (!(slbia & CXL_TLB_SLB_P))
+ break;
+ cpu_relax();
+ }
+}
+
+static int do_process_element_cmd(struct cxl_context *ctx,
+ u64 cmd, u64 pe_state)
+{
+ u64 state;
+
+ WARN_ON(!ctx->afu->enabled);
+
+ ctx->elem->software_state = cpu_to_be32(pe_state);
+ smp_wmb();
+ *(ctx->afu->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe);
+ smp_mb();
+ cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe);
+ while (1) {
+ state = be64_to_cpup(ctx->afu->sw_command_status);
+ if (state == ~0ULL) {
+ pr_err("cxl: Error adding process element to AFU\n");
+ return -1;
+ }
+ if ((state & (CXL_SPA_SW_CMD_MASK | CXL_SPA_SW_STATE_MASK | CXL_SPA_SW_LINK_MASK)) ==
+ (cmd | (cmd >> 16) | ctx->pe))
+ break;
+ /*
+ * The command won't finish in the PSL if there are
+ * outstanding DSIs. Hence we need to yield here in
+ * case there are outstanding DSIs that we need to
+ * service. Tuning possibility: we could wait for a
+ * while before scheduling.
+ */
+ schedule();
+ }
+ return 0;
+}
+
+static int add_process_element(struct cxl_context *ctx)
+{
+ int rc = 0;
+
+ mutex_lock(&ctx->afu->spa_mutex);
+ pr_devel("%s Adding pe: %i started\n", __func__, ctx->pe);
+ if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_ADD, CXL_PE_SOFTWARE_STATE_V)))
+ ctx->pe_inserted = true;
+ pr_devel("%s Adding pe: %i finished\n", __func__, ctx->pe);
+ mutex_unlock(&ctx->afu->spa_mutex);
+ return rc;
+}
+
+static int terminate_process_element(struct cxl_context *ctx)
+{
+ int rc = 0;
+
+ /* fast path terminate if it's already invalid */
+ if (!(ctx->elem->software_state & cpu_to_be32(CXL_PE_SOFTWARE_STATE_V)))
+ return rc;
+
+ mutex_lock(&ctx->afu->spa_mutex);
+ pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe);
+ rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE,
+ CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T);
+ ctx->elem->software_state = 0; /* Remove Valid bit */
+ pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe);
+ mutex_unlock(&ctx->afu->spa_mutex);
+ return rc;
+}
+
+static int remove_process_element(struct cxl_context *ctx)
+{
+ int rc = 0;
+
+ mutex_lock(&ctx->afu->spa_mutex);
+ pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe);
+ if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0)))
+ ctx->pe_inserted = false;
+ slb_invalid(ctx);
+ pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe);
+ mutex_unlock(&ctx->afu->spa_mutex);
+
+ return rc;
+}
+
+
+static void assign_psn_space(struct cxl_context *ctx)
+{
+ if (!ctx->afu->pp_size || ctx->master) {
+ ctx->psn_phys = ctx->afu->psn_phys;
+ ctx->psn_size = ctx->afu->adapter->ps_size;
+ } else {
+ ctx->psn_phys = ctx->afu->psn_phys +
+ (ctx->afu->pp_offset + ctx->afu->pp_size * ctx->pe);
+ ctx->psn_size = ctx->afu->pp_size;
+ }
+}
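+
+/*
+ * Worked example for the carve-out above (values are arbitrary): a non-master
+ * context with pe = 3 and a per-process area of 0x10000 bytes is given
+ * psn_phys + pp_offset + 0x10000 * 3 for 0x10000 bytes; master contexts, and
+ * AFUs without a per-process area, get the whole problem space instead.
+ */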
+
+static int activate_afu_directed(struct cxl_afu *afu)
+{
+ int rc;
+
+ dev_info(&afu->dev, "Activating AFU directed mode\n");
+
+ if (alloc_spa(afu))
+ return -ENOMEM;
+
+ cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_AFU);
+ cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
+ cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);
+
+ afu->current_mode = CXL_MODE_DIRECTED;
+ afu->num_procs = afu->max_procs_virtualised;
+
+ if ((rc = cxl_chardev_m_afu_add(afu)))
+ return rc;
+
+ if ((rc = cxl_sysfs_afu_m_add(afu)))
+ goto err;
+
+ if ((rc = cxl_chardev_s_afu_add(afu)))
+ goto err1;
+
+ return 0;
+err1:
+ cxl_sysfs_afu_m_remove(afu);
+err:
+ cxl_chardev_afu_remove(afu);
+ return rc;
+}
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+#define set_endian(sr) ((sr) |= CXL_PSL_SR_An_LE)
+#else
+#define set_endian(sr) ((sr) &= ~(CXL_PSL_SR_An_LE))
+#endif
+
+static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
+{
+ u64 sr;
+ int r, result;
+
+ assign_psn_space(ctx);
+
+ ctx->elem->ctxtime = 0; /* disable */
+ ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID));
+ ctx->elem->haurp = 0; /* disable */
+ ctx->elem->sdr = cpu_to_be64(mfspr(SPRN_SDR1));
+
+ sr = CXL_PSL_SR_An_SC;
+ if (ctx->master)
+ sr |= CXL_PSL_SR_An_MP;
+ if (mfspr(SPRN_LPCR) & LPCR_TC)
+ sr |= CXL_PSL_SR_An_TC;
+ /* HV=0, PR=1, R=1 for userspace
+ * For kernel contexts: this would need to change
+ */
+ sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R;
+ set_endian(sr);
+ sr &= ~(CXL_PSL_SR_An_HV);
+ if (!test_tsk_thread_flag(current, TIF_32BIT))
+ sr |= CXL_PSL_SR_An_SF;
+ ctx->elem->common.pid = cpu_to_be32(current->pid);
+ ctx->elem->common.tid = 0;
+ ctx->elem->sr = cpu_to_be64(sr);
+
+ ctx->elem->common.csrp = 0; /* disable */
+ ctx->elem->common.aurp0 = 0; /* disable */
+ ctx->elem->common.aurp1 = 0; /* disable */
+
+ cxl_prefault(ctx, wed);
+
+ ctx->elem->common.sstp0 = cpu_to_be64(ctx->sstp0);
+ ctx->elem->common.sstp1 = cpu_to_be64(ctx->sstp1);
+
+ for (r = 0; r < CXL_IRQ_RANGES; r++) {
+ ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
+ ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
+ }
+
+ ctx->elem->common.amr = cpu_to_be64(amr);
+ ctx->elem->common.wed = cpu_to_be64(wed);
+
+ /* first guy needs to enable */
+ if ((result = afu_check_and_enable(ctx->afu)))
+ return result;
+
+ add_process_element(ctx);
+
+ return 0;
+}
+
+static int deactivate_afu_directed(struct cxl_afu *afu)
+{
+ dev_info(&afu->dev, "Deactivating AFU directed mode\n");
+
+ afu->current_mode = 0;
+ afu->num_procs = 0;
+
+ cxl_sysfs_afu_m_remove(afu);
+ cxl_chardev_afu_remove(afu);
+
+ cxl_afu_reset(afu);
+ cxl_afu_disable(afu);
+ cxl_psl_purge(afu);
+
+ release_spa(afu);
+
+ return 0;
+}
+
+static int activate_dedicated_process(struct cxl_afu *afu)
+{
+ dev_info(&afu->dev, "Activating dedicated process mode\n");
+
+ cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process);
+
+ cxl_p1n_write(afu, CXL_PSL_CtxTime_An, 0); /* disable */
+ cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0); /* disable */
+ cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
+ cxl_p1n_write(afu, CXL_PSL_LPID_An, mfspr(SPRN_LPID));
+ cxl_p1n_write(afu, CXL_HAURP_An, 0); /* disable */
+ cxl_p1n_write(afu, CXL_PSL_SDR_An, mfspr(SPRN_SDR1));
+
+ cxl_p2n_write(afu, CXL_CSRP_An, 0); /* disable */
+ cxl_p2n_write(afu, CXL_AURP0_An, 0); /* disable */
+ cxl_p2n_write(afu, CXL_AURP1_An, 0); /* disable */
+
+ afu->current_mode = CXL_MODE_DEDICATED;
+ afu->num_procs = 1;
+
+ return cxl_chardev_d_afu_add(afu);
+}
+
+static int attach_dedicated(struct cxl_context *ctx, u64 wed, u64 amr)
+{
+ struct cxl_afu *afu = ctx->afu;
+ u64 sr;
+ int rc;
+
+ sr = CXL_PSL_SR_An_SC;
+ set_endian(sr);
+ if (ctx->master)
+ sr |= CXL_PSL_SR_An_MP;
+ if (mfspr(SPRN_LPCR) & LPCR_TC)
+ sr |= CXL_PSL_SR_An_TC;
+ sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R;
+ if (!test_tsk_thread_flag(current, TIF_32BIT))
+ sr |= CXL_PSL_SR_An_SF;
+ cxl_p2n_write(afu, CXL_PSL_PID_TID_An, (u64)current->pid << 32);
+ cxl_p1n_write(afu, CXL_PSL_SR_An, sr);
+
+ if ((rc = cxl_write_sstp(afu, ctx->sstp0, ctx->sstp1)))
+ return rc;
+
+ cxl_prefault(ctx, wed);
+
+ cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An,
+ (((u64)ctx->irqs.offset[0] & 0xffff) << 48) |
+ (((u64)ctx->irqs.offset[1] & 0xffff) << 32) |
+ (((u64)ctx->irqs.offset[2] & 0xffff) << 16) |
+ ((u64)ctx->irqs.offset[3] & 0xffff));
+ cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, (u64)
+ (((u64)ctx->irqs.range[0] & 0xffff) << 48) |
+ (((u64)ctx->irqs.range[1] & 0xffff) << 32) |
+ (((u64)ctx->irqs.range[2] & 0xffff) << 16) |
+ ((u64)ctx->irqs.range[3] & 0xffff));
+
+ cxl_p2n_write(afu, CXL_PSL_AMR_An, amr);
+
+ /* master only context for dedicated */
+ assign_psn_space(ctx);
+
+ if ((rc = cxl_afu_reset(afu)))
+ return rc;
+
+ cxl_p2n_write(afu, CXL_PSL_WED_An, wed);
+
+ return afu_enable(afu);
+}
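+
+/*
+ * Worked example of the IVTE packing above (values are arbitrary): with
+ * irqs.offset = {0x80, 0x100, 0, 0} and irqs.range = {1, 4, 0, 0},
+ * CXL_PSL_IVTE_Offset_An is written as 0x0080010000000000 and
+ * CXL_PSL_IVTE_Limit_An as 0x0001000400000000 - each of the four ranges
+ * takes one 16-bit field, with range 0 in the most significant position.
+ */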
+
+static int deactivate_dedicated_process(struct cxl_afu *afu)
+{
+ dev_info(&afu->dev, "Deactivating dedicated process mode\n");
+
+ afu->current_mode = 0;
+ afu->num_procs = 0;
+
+ cxl_chardev_afu_remove(afu);
+
+ return 0;
+}
+
+int _cxl_afu_deactivate_mode(struct cxl_afu *afu, int mode)
+{
+ if (mode == CXL_MODE_DIRECTED)
+ return deactivate_afu_directed(afu);
+ if (mode == CXL_MODE_DEDICATED)
+ return deactivate_dedicated_process(afu);
+ return 0;
+}
+
+int cxl_afu_deactivate_mode(struct cxl_afu *afu)
+{
+ return _cxl_afu_deactivate_mode(afu, afu->current_mode);
+}
+
+int cxl_afu_activate_mode(struct cxl_afu *afu, int mode)
+{
+ if (!mode)
+ return 0;
+ if (!(mode & afu->modes_supported))
+ return -EINVAL;
+
+ if (mode == CXL_MODE_DIRECTED)
+ return activate_afu_directed(afu);
+ if (mode == CXL_MODE_DEDICATED)
+ return activate_dedicated_process(afu);
+
+ return -EINVAL;
+}
+
+int cxl_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr)
+{
+ ctx->kernel = kernel;
+ if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
+ return attach_afu_directed(ctx, wed, amr);
+
+ if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
+ return attach_dedicated(ctx, wed, amr);
+
+ return -EINVAL;
+}
+
+static inline int detach_process_native_dedicated(struct cxl_context *ctx)
+{
+ cxl_afu_reset(ctx->afu);
+ cxl_afu_disable(ctx->afu);
+ cxl_psl_purge(ctx->afu);
+ return 0;
+}
+
+/*
+ * TODO: handle case when this is called inside a rcu_read_lock() which may
+ * happen when we unbind the driver (ie. cxl_context_detach_all()). Terminate
+ * & remove use a mutex lock and schedule(), which is not good with a lock held.
+ * May need to write do_process_element_cmd() that handles outstanding page
+ * faults synchronously.
+ */
+static inline int detach_process_native_afu_directed(struct cxl_context *ctx)
+{
+ if (!ctx->pe_inserted)
+ return 0;
+ if (terminate_process_element(ctx))
+ return -1;
+ if (remove_process_element(ctx))
+ return -1;
+
+ return 0;
+}
+
+int cxl_detach_process(struct cxl_context *ctx)
+{
+ if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
+ return detach_process_native_dedicated(ctx);
+
+ return detach_process_native_afu_directed(ctx);
+}
+
+int cxl_get_irq(struct cxl_context *ctx, struct cxl_irq_info *info)
+{
+ u64 pidtid;
+
+ info->dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An);
+ info->dar = cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An);
+ info->dsr = cxl_p2n_read(ctx->afu, CXL_PSL_DSR_An);
+ pidtid = cxl_p2n_read(ctx->afu, CXL_PSL_PID_TID_An);
+ info->pid = pidtid >> 32;
+ info->tid = pidtid & 0xffffffff;
+ info->afu_err = cxl_p2n_read(ctx->afu, CXL_AFU_ERR_An);
+ info->errstat = cxl_p2n_read(ctx->afu, CXL_PSL_ErrStat_An);
+
+ return 0;
+}
+
+static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
+{
+ u64 dsisr;
+
+ pr_devel("RECOVERING FROM PSL ERROR... (0x%.16llx)\n", errstat);
+
+ /* Clear PSL_DSISR[PE] */
+ dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
+ cxl_p2n_write(afu, CXL_PSL_DSISR_An, dsisr & ~CXL_PSL_DSISR_An_PE);
+
+ /* Write 1s to clear error status bits */
+ cxl_p2n_write(afu, CXL_PSL_ErrStat_An, errstat);
+}
+
+int cxl_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
+{
+ if (tfc)
+ cxl_p2n_write(ctx->afu, CXL_PSL_TFC_An, tfc);
+ if (psl_reset_mask)
+ recover_psl_err(ctx->afu, psl_reset_mask);
+
+ return 0;
+}
+
+int cxl_check_error(struct cxl_afu *afu)
+{
+ return (cxl_p1n_read(afu, CXL_PSL_SCNTL_An) == ~0ULL);
+}
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
new file mode 100644
index 000000000000..10c98ab7f46e
--- /dev/null
+++ b/drivers/misc/cxl/pci.c
@@ -0,0 +1,1000 @@
+/*
+ * Copyright 2014 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/pci_regs.h>
+#include <linux/pci_ids.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+#include <linux/pci.h>
+#include <linux/of.h>
+#include <linux/delay.h>
+#include <asm/opal.h>
+#include <asm/msi_bitmap.h>
+#include <asm/pci-bridge.h> /* for struct pci_controller */
+#include <asm/pnv-pci.h>
+
+#include "cxl.h"
+
+
+#define CXL_PCI_VSEC_ID 0x1280
+#define CXL_VSEC_MIN_SIZE 0x80
+
+#define CXL_READ_VSEC_LENGTH(dev, vsec, dest) \
+ { \
+ pci_read_config_word(dev, vsec + 0x6, dest); \
+ *dest >>= 4; \
+ }
+#define CXL_READ_VSEC_NAFUS(dev, vsec, dest) \
+ pci_read_config_byte(dev, vsec + 0x8, dest)
+
+#define CXL_READ_VSEC_STATUS(dev, vsec, dest) \
+ pci_read_config_byte(dev, vsec + 0x9, dest)
+#define CXL_STATUS_SECOND_PORT 0x80
+#define CXL_STATUS_MSI_X_FULL 0x40
+#define CXL_STATUS_MSI_X_SINGLE 0x20
+#define CXL_STATUS_FLASH_RW 0x08
+#define CXL_STATUS_FLASH_RO 0x04
+#define CXL_STATUS_LOADABLE_AFU 0x02
+#define CXL_STATUS_LOADABLE_PSL 0x01
+/* If we see these features we won't try to use the card */
+#define CXL_UNSUPPORTED_FEATURES \
+ (CXL_STATUS_MSI_X_FULL | CXL_STATUS_MSI_X_SINGLE)
+
+#define CXL_READ_VSEC_MODE_CONTROL(dev, vsec, dest) \
+ pci_read_config_byte(dev, vsec + 0xa, dest)
+#define CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val) \
+ pci_write_config_byte(dev, vsec + 0xa, val)
+#define CXL_VSEC_PROTOCOL_MASK 0xe0
+#define CXL_VSEC_PROTOCOL_1024TB 0x80
+#define CXL_VSEC_PROTOCOL_512TB 0x40
+#define CXL_VSEC_PROTOCOL_256TB 0x20 /* Power 8 uses this */
+#define CXL_VSEC_PROTOCOL_ENABLE 0x01
+
+#define CXL_READ_VSEC_PSL_REVISION(dev, vsec, dest) \
+ pci_read_config_word(dev, vsec + 0xc, dest)
+#define CXL_READ_VSEC_CAIA_MINOR(dev, vsec, dest) \
+ pci_read_config_byte(dev, vsec + 0xe, dest)
+#define CXL_READ_VSEC_CAIA_MAJOR(dev, vsec, dest) \
+ pci_read_config_byte(dev, vsec + 0xf, dest)
+#define CXL_READ_VSEC_BASE_IMAGE(dev, vsec, dest) \
+ pci_read_config_word(dev, vsec + 0x10, dest)
+
+#define CXL_READ_VSEC_IMAGE_STATE(dev, vsec, dest) \
+ pci_read_config_byte(dev, vsec + 0x13, dest)
+#define CXL_WRITE_VSEC_IMAGE_STATE(dev, vsec, val) \
+ pci_write_config_byte(dev, vsec + 0x13, val)
+#define CXL_VSEC_USER_IMAGE_LOADED 0x80 /* RO */
+#define CXL_VSEC_PERST_LOADS_IMAGE 0x20 /* RW */
+#define CXL_VSEC_PERST_SELECT_USER 0x10 /* RW */
+
+#define CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, dest) \
+ pci_read_config_dword(dev, vsec + 0x20, dest)
+#define CXL_READ_VSEC_AFU_DESC_SIZE(dev, vsec, dest) \
+ pci_read_config_dword(dev, vsec + 0x24, dest)
+#define CXL_READ_VSEC_PS_OFF(dev, vsec, dest) \
+ pci_read_config_dword(dev, vsec + 0x28, dest)
+#define CXL_READ_VSEC_PS_SIZE(dev, vsec, dest) \
+ pci_read_config_dword(dev, vsec + 0x2c, dest)
+
+
+/* This works a little differently from the p1/p2 register accesses to make it
+ * easier to pull out individual fields */
+#define AFUD_READ(afu, off) in_be64(afu->afu_desc_mmio + off)
+#define EXTRACT_PPC_BIT(val, bit) (!!(val & PPC_BIT(bit)))
+#define EXTRACT_PPC_BITS(val, bs, be) ((val & PPC_BITMASK(bs, be)) >> PPC_BITLSHIFT(be))
+
+#define AFUD_READ_INFO(afu) AFUD_READ(afu, 0x0)
+#define AFUD_NUM_INTS_PER_PROC(val) EXTRACT_PPC_BITS(val, 0, 15)
+#define AFUD_NUM_PROCS(val) EXTRACT_PPC_BITS(val, 16, 31)
+#define AFUD_NUM_CRS(val) EXTRACT_PPC_BITS(val, 32, 47)
+#define AFUD_MULTIMODE(val) EXTRACT_PPC_BIT(val, 48)
+#define AFUD_PUSH_BLOCK_TRANSFER(val) EXTRACT_PPC_BIT(val, 55)
+#define AFUD_DEDICATED_PROCESS(val) EXTRACT_PPC_BIT(val, 59)
+#define AFUD_AFU_DIRECTED(val) EXTRACT_PPC_BIT(val, 61)
+#define AFUD_TIME_SLICED(val) EXTRACT_PPC_BIT(val, 63)
+#define AFUD_READ_CR(afu) AFUD_READ(afu, 0x20)
+#define AFUD_CR_LEN(val) EXTRACT_PPC_BITS(val, 8, 63)
+#define AFUD_READ_CR_OFF(afu) AFUD_READ(afu, 0x28)
+#define AFUD_READ_PPPSA(afu) AFUD_READ(afu, 0x30)
+#define AFUD_PPPSA_PP(val) EXTRACT_PPC_BIT(val, 6)
+#define AFUD_PPPSA_PSA(val) EXTRACT_PPC_BIT(val, 7)
+#define AFUD_PPPSA_LEN(val) EXTRACT_PPC_BITS(val, 8, 63)
+#define AFUD_READ_PPPSA_OFF(afu) AFUD_READ(afu, 0x38)
+#define AFUD_READ_EB(afu) AFUD_READ(afu, 0x40)
+#define AFUD_EB_LEN(val) EXTRACT_PPC_BITS(val, 8, 63)
+#define AFUD_READ_EB_OFF(afu) AFUD_READ(afu, 0x48)
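
The AFUD_* accessors above use IBM big-endian bit numbering (bit 0 is the most significant bit of the 64-bit descriptor word). Below is a minimal standalone sketch of that extraction, assuming PPC_BIT()/PPC_BITMASK()/PPC_BITLSHIFT() follow the usual powerpc definitions; the reimplementations and the sample descriptor value are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Assumed to mirror the powerpc bit helpers: bit 0 == MSB of a 64-bit word */
#define PPC_BIT(bit)		(1ULL << (63 - (bit)))
#define PPC_BITLSHIFT(be)	(63 - (be))
#define PPC_BITMASK(bs, be)	((PPC_BIT(bs) - PPC_BIT(be)) + PPC_BIT(bs))

#define EXTRACT_PPC_BIT(val, bit)	(!!((val) & PPC_BIT(bit)))
#define EXTRACT_PPC_BITS(val, bs, be)	(((val) & PPC_BITMASK(bs, be)) >> PPC_BITLSHIFT(be))

int main(void)
{
	/* Hypothetical AFU descriptor info word (offset 0x0) */
	uint64_t info = 0x0004001000020014ULL;

	/* Same fields the driver pulls out with the AFUD_* macros above */
	printf("ints per process: %llu\n",
	       (unsigned long long)EXTRACT_PPC_BITS(info, 0, 15));
	printf("num processes:    %llu\n",
	       (unsigned long long)EXTRACT_PPC_BITS(info, 16, 31));
	printf("dedicated mode:   %d\n", (int)EXTRACT_PPC_BIT(info, 59));
	printf("AFU directed:     %d\n", (int)EXTRACT_PPC_BIT(info, 61));
	return 0;
}
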
+
+static DEFINE_PCI_DEVICE_TABLE(cxl_pci_tbl) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0477), },
+ { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x044b), },
+ { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x04cf), },
+ { PCI_DEVICE_CLASS(0x120000, ~0), },
+
+ { }
+};
+MODULE_DEVICE_TABLE(pci, cxl_pci_tbl);
+
+
+/*
+ * Mostly using these wrappers to avoid confusion:
+ * priv 1 is BAR2, while priv 2 is BAR0
+ */
+static inline resource_size_t p1_base(struct pci_dev *dev)
+{
+ return pci_resource_start(dev, 2);
+}
+
+static inline resource_size_t p1_size(struct pci_dev *dev)
+{
+ return pci_resource_len(dev, 2);
+}
+
+static inline resource_size_t p2_base(struct pci_dev *dev)
+{
+ return pci_resource_start(dev, 0);
+}
+
+static inline resource_size_t p2_size(struct pci_dev *dev)
+{
+ return pci_resource_len(dev, 0);
+}
+
+static int find_cxl_vsec(struct pci_dev *dev)
+{
+ int vsec = 0;
+ u16 val;
+
+ while ((vsec = pci_find_next_ext_capability(dev, vsec, PCI_EXT_CAP_ID_VNDR))) {
+ pci_read_config_word(dev, vsec + 0x4, &val);
+ if (val == CXL_PCI_VSEC_ID)
+ return vsec;
+ }
+ return 0;
+
+}
+
+static void dump_cxl_config_space(struct pci_dev *dev)
+{
+ int vsec;
+ u32 val;
+
+ dev_info(&dev->dev, "dump_cxl_config_space\n");
+
+ pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &val);
+ dev_info(&dev->dev, "BAR0: %#.8x\n", val);
+ pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &val);
+ dev_info(&dev->dev, "BAR1: %#.8x\n", val);
+ pci_read_config_dword(dev, PCI_BASE_ADDRESS_2, &val);
+ dev_info(&dev->dev, "BAR2: %#.8x\n", val);
+ pci_read_config_dword(dev, PCI_BASE_ADDRESS_3, &val);
+ dev_info(&dev->dev, "BAR3: %#.8x\n", val);
+ pci_read_config_dword(dev, PCI_BASE_ADDRESS_4, &val);
+ dev_info(&dev->dev, "BAR4: %#.8x\n", val);
+ pci_read_config_dword(dev, PCI_BASE_ADDRESS_5, &val);
+ dev_info(&dev->dev, "BAR5: %#.8x\n", val);
+
+ dev_info(&dev->dev, "p1 regs: %#llx, len: %#llx\n",
+ p1_base(dev), p1_size(dev));
+ dev_info(&dev->dev, "p2 regs: %#llx, len: %#llx\n",
+ p2_base(dev), p2_size(dev));
+ dev_info(&dev->dev, "BAR 4/5: %#llx, len: %#llx\n",
+ pci_resource_start(dev, 4), pci_resource_len(dev, 4));
+
+ if (!(vsec = find_cxl_vsec(dev)))
+ return;
+
+#define show_reg(name, what) \
+ dev_info(&dev->dev, "cxl vsec: %30s: %#x\n", name, what)
+
+ pci_read_config_dword(dev, vsec + 0x0, &val);
+ show_reg("Cap ID", (val >> 0) & 0xffff);
+ show_reg("Cap Ver", (val >> 16) & 0xf);
+ show_reg("Next Cap Ptr", (val >> 20) & 0xfff);
+ pci_read_config_dword(dev, vsec + 0x4, &val);
+ show_reg("VSEC ID", (val >> 0) & 0xffff);
+ show_reg("VSEC Rev", (val >> 16) & 0xf);
+ show_reg("VSEC Length", (val >> 20) & 0xfff);
+ pci_read_config_dword(dev, vsec + 0x8, &val);
+ show_reg("Num AFUs", (val >> 0) & 0xff);
+ show_reg("Status", (val >> 8) & 0xff);
+ show_reg("Mode Control", (val >> 16) & 0xff);
+ show_reg("Reserved", (val >> 24) & 0xff);
+ pci_read_config_dword(dev, vsec + 0xc, &val);
+ show_reg("PSL Rev", (val >> 0) & 0xffff);
+ show_reg("CAIA Ver", (val >> 16) & 0xffff);
+ pci_read_config_dword(dev, vsec + 0x10, &val);
+ show_reg("Base Image Rev", (val >> 0) & 0xffff);
+ show_reg("Reserved", (val >> 16) & 0x0fff);
+ show_reg("Image Control", (val >> 28) & 0x3);
+ show_reg("Reserved", (val >> 30) & 0x1);
+ show_reg("Image Loaded", (val >> 31) & 0x1);
+
+ pci_read_config_dword(dev, vsec + 0x14, &val);
+ show_reg("Reserved", val);
+ pci_read_config_dword(dev, vsec + 0x18, &val);
+ show_reg("Reserved", val);
+ pci_read_config_dword(dev, vsec + 0x1c, &val);
+ show_reg("Reserved", val);
+
+ pci_read_config_dword(dev, vsec + 0x20, &val);
+ show_reg("AFU Descriptor Offset", val);
+ pci_read_config_dword(dev, vsec + 0x24, &val);
+ show_reg("AFU Descriptor Size", val);
+ pci_read_config_dword(dev, vsec + 0x28, &val);
+ show_reg("Problem State Offset", val);
+ pci_read_config_dword(dev, vsec + 0x2c, &val);
+ show_reg("Problem State Size", val);
+
+ pci_read_config_dword(dev, vsec + 0x30, &val);
+ show_reg("Reserved", val);
+ pci_read_config_dword(dev, vsec + 0x34, &val);
+ show_reg("Reserved", val);
+ pci_read_config_dword(dev, vsec + 0x38, &val);
+ show_reg("Reserved", val);
+ pci_read_config_dword(dev, vsec + 0x3c, &val);
+ show_reg("Reserved", val);
+
+ pci_read_config_dword(dev, vsec + 0x40, &val);
+ show_reg("PSL Programming Port", val);
+ pci_read_config_dword(dev, vsec + 0x44, &val);
+ show_reg("PSL Programming Control", val);
+
+ pci_read_config_dword(dev, vsec + 0x48, &val);
+ show_reg("Reserved", val);
+ pci_read_config_dword(dev, vsec + 0x4c, &val);
+ show_reg("Reserved", val);
+
+ pci_read_config_dword(dev, vsec + 0x50, &val);
+ show_reg("Flash Address Register", val);
+ pci_read_config_dword(dev, vsec + 0x54, &val);
+ show_reg("Flash Size Register", val);
+ pci_read_config_dword(dev, vsec + 0x58, &val);
+ show_reg("Flash Status/Control Register", val);
+ pci_read_config_dword(dev, vsec + 0x5c, &val);
+ show_reg("Flash Data Port", val);
+
+#undef show_reg
+}
+
+static void dump_afu_descriptor(struct cxl_afu *afu)
+{
+ u64 val;
+
+#define show_reg(name, what) \
+ dev_info(&afu->dev, "afu desc: %30s: %#llx\n", name, what)
+
+ val = AFUD_READ_INFO(afu);
+ show_reg("num_ints_per_process", AFUD_NUM_INTS_PER_PROC(val));
+ show_reg("num_of_processes", AFUD_NUM_PROCS(val));
+ show_reg("num_of_afu_CRs", AFUD_NUM_CRS(val));
+ show_reg("req_prog_mode", val & 0xffffULL);
+
+ val = AFUD_READ(afu, 0x8);
+ show_reg("Reserved", val);
+ val = AFUD_READ(afu, 0x10);
+ show_reg("Reserved", val);
+ val = AFUD_READ(afu, 0x18);
+ show_reg("Reserved", val);
+
+ val = AFUD_READ_CR(afu);
+ show_reg("Reserved", (val >> (63-7)) & 0xff);
+ show_reg("AFU_CR_len", AFUD_CR_LEN(val));
+
+ val = AFUD_READ_CR_OFF(afu);
+ show_reg("AFU_CR_offset", val);
+
+ val = AFUD_READ_PPPSA(afu);
+ show_reg("PerProcessPSA_control", (val >> (63-7)) & 0xff);
+ show_reg("PerProcessPSA Length", AFUD_PPPSA_LEN(val));
+
+ val = AFUD_READ_PPPSA_OFF(afu);
+ show_reg("PerProcessPSA_offset", val);
+
+ val = AFUD_READ_EB(afu);
+ show_reg("Reserved", (val >> (63-7)) & 0xff);
+ show_reg("AFU_EB_len", AFUD_EB_LEN(val));
+
+ val = AFUD_READ_EB_OFF(afu);
+ show_reg("AFU_EB_offset", val);
+
+#undef show_reg
+}
+
+static int init_implementation_adapter_regs(struct cxl *adapter, struct pci_dev *dev)
+{
+ struct device_node *np;
+ const __be32 *prop;
+ u64 psl_dsnctl;
+ u64 chipid;
+
+ if (!(np = pnv_pci_to_phb_node(dev)))
+ return -ENODEV;
+
+ while (np && !(prop = of_get_property(np, "ibm,chip-id", NULL)))
+ np = of_get_next_parent(np);
+ if (!np)
+ return -ENODEV;
+ chipid = be32_to_cpup(prop);
+ of_node_put(np);
+
+ /* Tell PSL where to route data to */
+ psl_dsnctl = 0x02E8900002000000ULL | (chipid << (63-5));
+ cxl_p1_write(adapter, CXL_PSL_DSNDCTL, psl_dsnctl);
+ cxl_p1_write(adapter, CXL_PSL_RESLCKTO, 0x20000000200ULL);
+ /* snoop write mask */
+ cxl_p1_write(adapter, CXL_PSL_SNWRALLOC, 0x00000000FFFFFFFFULL);
+ /* set fir_accum */
+ cxl_p1_write(adapter, CXL_PSL_FIR_CNTL, 0x0800000000000000ULL);
+ /* for debugging with trace arrays */
+ cxl_p1_write(adapter, CXL_PSL_TRACE, 0x0000FF7C00000000ULL);
+
+ return 0;
+}
+
+static int init_implementation_afu_regs(struct cxl_afu *afu)
+{
+ /* read/write masks for this slice */
+ cxl_p1n_write(afu, CXL_PSL_APCALLOC_A, 0xFFFFFFFEFEFEFEFEULL);
+ /* APC read/write masks for this slice */
+ cxl_p1n_write(afu, CXL_PSL_COALLOC_A, 0xFF000000FEFEFEFEULL);
+ /* for debugging with trace arrays */
+ cxl_p1n_write(afu, CXL_PSL_SLICE_TRACE, 0x0000FFFF00000000ULL);
+ cxl_p1n_write(afu, CXL_PSL_RXCTL_A, 0xF000000000000000ULL);
+
+ return 0;
+}
+
+int cxl_setup_irq(struct cxl *adapter, unsigned int hwirq,
+ unsigned int virq)
+{
+ struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
+
+ return pnv_cxl_ioda_msi_setup(dev, hwirq, virq);
+}
+
+int cxl_alloc_one_irq(struct cxl *adapter)
+{
+ struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
+
+ return pnv_cxl_alloc_hwirqs(dev, 1);
+}
+
+void cxl_release_one_irq(struct cxl *adapter, int hwirq)
+{
+ struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
+
+ return pnv_cxl_release_hwirqs(dev, hwirq, 1);
+}
+
+int cxl_alloc_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter, unsigned int num)
+{
+ struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
+
+ return pnv_cxl_alloc_hwirq_ranges(irqs, dev, num);
+}
+
+void cxl_release_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter)
+{
+ struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
+
+ pnv_cxl_release_hwirq_ranges(irqs, dev);
+}
+
+static int setup_cxl_bars(struct pci_dev *dev)
+{
+ /* Safety check in case we get backported to < 3.17 without M64 */
+ if ((p1_base(dev) < 0x100000000ULL) ||
+ (p2_base(dev) < 0x100000000ULL)) {
+ dev_err(&dev->dev, "ABORTING: M32 BAR assignment incompatible with CXL\n");
+ return -ENODEV;
+ }
+
+ /*
+ * BAR 4/5 has a special meaning for CXL and must be programmed with a
+ * special value corresponding to the CXL protocol address range.
+ * For POWER 8 that means bits 48:49 must be set to 10
+ */
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_4, 0x00000000);
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_5, 0x00020000);
+
+ return 0;
+}
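
A quick standalone check of the comment above: writing 0x00020000 to the upper dword (BAR5) of the 64-bit BAR 4/5 pair leaves bit 49 set and bit 48 clear, i.e. the "10" pattern the comment refers to. The constants are the ones written above; the little harness is only an illustration.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* BAR4 (low dword) and BAR5 (high dword) values written above */
	uint32_t bar4 = 0x00000000, bar5 = 0x00020000;
	uint64_t bar = ((uint64_t)bar5 << 32) | bar4;

	/* Expect bit 49 = 1, bit 48 = 0 in the combined 64-bit BAR */
	printf("bar = %#018llx, bit49 = %d, bit48 = %d\n",
	       (unsigned long long)bar,
	       (int)((bar >> 49) & 1), (int)((bar >> 48) & 1));
	return 0;
}
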
+
+/* pciex node: ibm,opal-m64-window = <0x3d058 0x0 0x3d058 0x0 0x8 0x0>; */
+static int switch_card_to_cxl(struct pci_dev *dev)
+{
+ int vsec;
+ u8 val;
+ int rc;
+
+ dev_info(&dev->dev, "switch card to CXL\n");
+
+ if (!(vsec = find_cxl_vsec(dev))) {
+ dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
+ return -ENODEV;
+ }
+
+ if ((rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val))) {
+ dev_err(&dev->dev, "failed to read current mode control: %i", rc);
+ return rc;
+ }
+ val &= ~CXL_VSEC_PROTOCOL_MASK;
+ val |= CXL_VSEC_PROTOCOL_256TB | CXL_VSEC_PROTOCOL_ENABLE;
+ if ((rc = CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val))) {
+ dev_err(&dev->dev, "failed to enable CXL protocol: %i", rc);
+ return rc;
+ }
+ /*
+ * The CAIA spec (v0.12 11.6 Bi-modal Device Support) states
+ * we must wait 100ms after this mode switch before touching
+ * PCIe config space.
+ */
+ msleep(100);
+
+ return 0;
+}
+
+static int cxl_map_slice_regs(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev)
+{
+ u64 p1n_base, p2n_base, afu_desc;
+ const u64 p1n_size = 0x100;
+ const u64 p2n_size = 0x1000;
+
+ p1n_base = p1_base(dev) + 0x10000 + (afu->slice * p1n_size);
+ p2n_base = p2_base(dev) + (afu->slice * p2n_size);
+ afu->psn_phys = p2_base(dev) + (adapter->ps_off + (afu->slice * adapter->ps_size));
+ afu_desc = p2_base(dev) + adapter->afu_desc_off + (afu->slice * adapter->afu_desc_size);
+
+ if (!(afu->p1n_mmio = ioremap(p1n_base, p1n_size)))
+ goto err;
+ if (!(afu->p2n_mmio = ioremap(p2n_base, p2n_size)))
+ goto err1;
+ if (afu_desc) {
+ if (!(afu->afu_desc_mmio = ioremap(afu_desc, adapter->afu_desc_size)))
+ goto err2;
+ }
+
+ return 0;
+err2:
+ iounmap(afu->p2n_mmio);
+err1:
+ iounmap(afu->p1n_mmio);
+err:
+ dev_err(&afu->dev, "Error mapping AFU MMIO regions\n");
+ return -ENOMEM;
+}
+
+static void cxl_unmap_slice_regs(struct cxl_afu *afu)
+{
+ if (afu->p2n_mmio)
+ iounmap(afu->p2n_mmio);
+ if (afu->p1n_mmio)
+ iounmap(afu->p1n_mmio);
+}
+
+static void cxl_release_afu(struct device *dev)
+{
+ struct cxl_afu *afu = to_cxl_afu(dev);
+
+ pr_devel("cxl_release_afu\n");
+
+ kfree(afu);
+}
+
+static struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice)
+{
+ struct cxl_afu *afu;
+
+ if (!(afu = kzalloc(sizeof(struct cxl_afu), GFP_KERNEL)))
+ return NULL;
+
+ afu->adapter = adapter;
+ afu->dev.parent = &adapter->dev;
+ afu->dev.release = cxl_release_afu;
+ afu->slice = slice;
+ idr_init(&afu->contexts_idr);
+ spin_lock_init(&afu->contexts_lock);
+ spin_lock_init(&afu->afu_cntl_lock);
+ mutex_init(&afu->spa_mutex);
+
+ afu->prefault_mode = CXL_PREFAULT_NONE;
+ afu->irqs_max = afu->adapter->user_irqs;
+
+ return afu;
+}
+
+/* Expects AFU struct to have recently been zeroed out */
+static int cxl_read_afu_descriptor(struct cxl_afu *afu)
+{
+ u64 val;
+
+ val = AFUD_READ_INFO(afu);
+ afu->pp_irqs = AFUD_NUM_INTS_PER_PROC(val);
+ afu->max_procs_virtualised = AFUD_NUM_PROCS(val);
+
+ if (AFUD_AFU_DIRECTED(val))
+ afu->modes_supported |= CXL_MODE_DIRECTED;
+ if (AFUD_DEDICATED_PROCESS(val))
+ afu->modes_supported |= CXL_MODE_DEDICATED;
+ if (AFUD_TIME_SLICED(val))
+ afu->modes_supported |= CXL_MODE_TIME_SLICED;
+
+ val = AFUD_READ_PPPSA(afu);
+ afu->pp_size = AFUD_PPPSA_LEN(val) * 4096;
+ afu->psa = AFUD_PPPSA_PSA(val);
+ if ((afu->pp_psa = AFUD_PPPSA_PP(val)))
+ afu->pp_offset = AFUD_READ_PPPSA_OFF(afu);
+
+ return 0;
+}
+
+static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu)
+{
+ if (afu->psa && afu->adapter->ps_size <
+ (afu->pp_offset + afu->pp_size*afu->max_procs_virtualised)) {
+ dev_err(&afu->dev, "per-process PSA can't fit inside the PSA!\n");
+ return -ENODEV;
+ }
+
+ if (afu->pp_psa && (afu->pp_size < PAGE_SIZE))
+ dev_warn(&afu->dev, "AFU uses < PAGE_SIZE per-process PSA!");
+
+ return 0;
+}
+
+static int sanitise_afu_regs(struct cxl_afu *afu)
+{
+ u64 reg;
+
+ /*
+ * Clear out any regs that contain either an IVTE or an address, or that
+ * may be waiting on an acknowledgement, to be a bit safer as we bring
+ * it online
+ */
+ reg = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
+ if ((reg & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
+ dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#.16llx\n", reg);
+ if (cxl_afu_reset(afu))
+ return -EIO;
+ if (cxl_afu_disable(afu))
+ return -EIO;
+ if (cxl_psl_purge(afu))
+ return -EIO;
+ }
+ cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0x0000000000000000);
+ cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, 0x0000000000000000);
+ cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An, 0x0000000000000000);
+ cxl_p1n_write(afu, CXL_PSL_AMBAR_An, 0x0000000000000000);
+ cxl_p1n_write(afu, CXL_PSL_SPOffset_An, 0x0000000000000000);
+ cxl_p1n_write(afu, CXL_HAURP_An, 0x0000000000000000);
+ cxl_p2n_write(afu, CXL_CSRP_An, 0x0000000000000000);
+ cxl_p2n_write(afu, CXL_AURP1_An, 0x0000000000000000);
+ cxl_p2n_write(afu, CXL_AURP0_An, 0x0000000000000000);
+ cxl_p2n_write(afu, CXL_SSTP1_An, 0x0000000000000000);
+ cxl_p2n_write(afu, CXL_SSTP0_An, 0x0000000000000000);
+ reg = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
+ if (reg) {
+ dev_warn(&afu->dev, "AFU had pending DSISR: %#.16llx\n", reg);
+ if (reg & CXL_PSL_DSISR_TRANS)
+ cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
+ else
+ cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
+ }
+ reg = cxl_p1n_read(afu, CXL_PSL_SERR_An);
+ if (reg) {
+ if (reg & ~0xffff)
+ dev_warn(&afu->dev, "AFU had pending SERR: %#.16llx\n", reg);
+ cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff);
+ }
+ reg = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
+ if (reg) {
+ dev_warn(&afu->dev, "AFU had pending error status: %#.16llx\n", reg);
+ cxl_p2n_write(afu, CXL_PSL_ErrStat_An, reg);
+ }
+
+ return 0;
+}
+
+static int cxl_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
+{
+ struct cxl_afu *afu;
+ bool free = true;
+ int rc;
+
+ if (!(afu = cxl_alloc_afu(adapter, slice)))
+ return -ENOMEM;
+
+ if ((rc = dev_set_name(&afu->dev, "afu%i.%i", adapter->adapter_num, slice)))
+ goto err1;
+
+ if ((rc = cxl_map_slice_regs(afu, adapter, dev)))
+ goto err1;
+
+ if ((rc = sanitise_afu_regs(afu)))
+ goto err2;
+
+ /* We need to reset the AFU before we can read the AFU descriptor */
+ if ((rc = cxl_afu_reset(afu)))
+ goto err2;
+
+ if (cxl_verbose)
+ dump_afu_descriptor(afu);
+
+ if ((rc = cxl_read_afu_descriptor(afu)))
+ goto err2;
+
+ if ((rc = cxl_afu_descriptor_looks_ok(afu)))
+ goto err2;
+
+ if ((rc = init_implementation_afu_regs(afu)))
+ goto err2;
+
+ if ((rc = cxl_register_serr_irq(afu)))
+ goto err2;
+
+ if ((rc = cxl_register_psl_irq(afu)))
+ goto err3;
+
+ /* Don't care if this fails */
+ cxl_debugfs_afu_add(afu);
+
+ /*
+ * After we call this function we must not free the afu directly, even
+ * if it returns an error!
+ */
+ if ((rc = cxl_register_afu(afu)))
+ goto err_put1;
+
+ if ((rc = cxl_sysfs_afu_add(afu)))
+ goto err_put1;
+
+
+ if ((rc = cxl_afu_select_best_mode(afu)))
+ goto err_put2;
+
+ adapter->afu[afu->slice] = afu;
+
+ return 0;
+
+err_put2:
+ cxl_sysfs_afu_remove(afu);
+err_put1:
+ device_unregister(&afu->dev);
+ free = false;
+ cxl_debugfs_afu_remove(afu);
+ cxl_release_psl_irq(afu);
+err3:
+ cxl_release_serr_irq(afu);
+err2:
+ cxl_unmap_slice_regs(afu);
+err1:
+ if (free)
+ kfree(afu);
+ return rc;
+}
+
+static void cxl_remove_afu(struct cxl_afu *afu)
+{
+ pr_devel("cxl_remove_afu\n");
+
+ if (!afu)
+ return;
+
+ cxl_sysfs_afu_remove(afu);
+ cxl_debugfs_afu_remove(afu);
+
+ spin_lock(&afu->adapter->afu_list_lock);
+ afu->adapter->afu[afu->slice] = NULL;
+ spin_unlock(&afu->adapter->afu_list_lock);
+
+ cxl_context_detach_all(afu);
+ cxl_afu_deactivate_mode(afu);
+
+ cxl_release_psl_irq(afu);
+ cxl_release_serr_irq(afu);
+ cxl_unmap_slice_regs(afu);
+
+ device_unregister(&afu->dev);
+}
+
+
+static int cxl_map_adapter_regs(struct cxl *adapter, struct pci_dev *dev)
+{
+ if (pci_request_region(dev, 2, "priv 2 regs"))
+ goto err1;
+ if (pci_request_region(dev, 0, "priv 1 regs"))
+ goto err2;
+
+ pr_devel("cxl_map_adapter_regs: p1: %#.16llx %#llx, p2: %#.16llx %#llx",
+ p1_base(dev), p1_size(dev), p2_base(dev), p2_size(dev));
+
+ if (!(adapter->p1_mmio = ioremap(p1_base(dev), p1_size(dev))))
+ goto err3;
+
+ if (!(adapter->p2_mmio = ioremap(p2_base(dev), p2_size(dev))))
+ goto err4;
+
+ return 0;
+
+err4:
+ iounmap(adapter->p1_mmio);
+ adapter->p1_mmio = NULL;
+err3:
+ pci_release_region(dev, 0);
+err2:
+ pci_release_region(dev, 2);
+err1:
+ return -ENOMEM;
+}
+
+static void cxl_unmap_adapter_regs(struct cxl *adapter)
+{
+ if (adapter->p1_mmio)
+ iounmap(adapter->p1_mmio);
+ if (adapter->p2_mmio)
+ iounmap(adapter->p2_mmio);
+}
+
+static int cxl_read_vsec(struct cxl *adapter, struct pci_dev *dev)
+{
+ int vsec;
+ u32 afu_desc_off, afu_desc_size;
+ u32 ps_off, ps_size;
+ u16 vseclen;
+ u8 image_state;
+
+ if (!(vsec = find_cxl_vsec(dev))) {
+ dev_err(&adapter->dev, "ABORTING: CXL VSEC not found!\n");
+ return -ENODEV;
+ }
+
+ CXL_READ_VSEC_LENGTH(dev, vsec, &vseclen);
+ if (vseclen < CXL_VSEC_MIN_SIZE) {
+ pr_err("ABORTING: CXL VSEC too short\n");
+ return -EINVAL;
+ }
+
+ CXL_READ_VSEC_STATUS(dev, vsec, &adapter->vsec_status);
+ CXL_READ_VSEC_PSL_REVISION(dev, vsec, &adapter->psl_rev);
+ CXL_READ_VSEC_CAIA_MAJOR(dev, vsec, &adapter->caia_major);
+ CXL_READ_VSEC_CAIA_MINOR(dev, vsec, &adapter->caia_minor);
+ CXL_READ_VSEC_BASE_IMAGE(dev, vsec, &adapter->base_image);
+ CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state);
+ adapter->user_image_loaded = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
+ adapter->perst_loads_image = !!(image_state & CXL_VSEC_PERST_LOADS_IMAGE);
+ adapter->perst_select_user = !!(image_state & CXL_VSEC_PERST_SELECT_USER);
+
+ CXL_READ_VSEC_NAFUS(dev, vsec, &adapter->slices);
+ CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, &afu_desc_off);
+ CXL_READ_VSEC_AFU_DESC_SIZE(dev, vsec, &afu_desc_size);
+ CXL_READ_VSEC_PS_OFF(dev, vsec, &ps_off);
+ CXL_READ_VSEC_PS_SIZE(dev, vsec, &ps_size);
+
+ /* Convert everything to bytes, because there is NO WAY I'd look at the
+ * code a month later and forget what units these are in ;-) */
+ adapter->ps_off = ps_off * 64 * 1024;
+ adapter->ps_size = ps_size * 64 * 1024;
+ adapter->afu_desc_off = afu_desc_off * 64 * 1024;
+ adapter->afu_desc_size = afu_desc_size * 64 * 1024;
+
+ /* Total IRQs - 1 PSL ERROR - #AFU*(1 slice error + 1 DSI) */
+ adapter->user_irqs = pnv_cxl_get_irq_count(dev) - 1 - 2*adapter->slices;
+
+ return 0;
+}
+
+static int cxl_vsec_looks_ok(struct cxl *adapter, struct pci_dev *dev)
+{
+ if (adapter->vsec_status & CXL_STATUS_SECOND_PORT)
+ return -EBUSY;
+
+ if (adapter->vsec_status & CXL_UNSUPPORTED_FEATURES) {
+ dev_err(&adapter->dev, "ABORTING: CXL requires unsupported features\n");
+ return -EINVAL;
+ }
+
+ if (!adapter->slices) {
+ /* Once we support dynamic reprogramming we can use the card if
+ * it supports loadable AFUs */
+ dev_err(&adapter->dev, "ABORTING: Device has no AFUs\n");
+ return -EINVAL;
+ }
+
+ if (!adapter->afu_desc_off || !adapter->afu_desc_size) {
+ dev_err(&adapter->dev, "ABORTING: VSEC shows no AFU descriptors\n");
+ return -EINVAL;
+ }
+
+ if (adapter->ps_size > p2_size(dev) - adapter->ps_off) {
+ dev_err(&adapter->dev, "ABORTING: Problem state size larger than "
+ "available in BAR2: 0x%llx > 0x%llx\n",
+ adapter->ps_size, p2_size(dev) - adapter->ps_off);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void cxl_release_adapter(struct device *dev)
+{
+ struct cxl *adapter = to_cxl_adapter(dev);
+
+ pr_devel("cxl_release_adapter\n");
+
+ kfree(adapter);
+}
+
+static struct cxl *cxl_alloc_adapter(struct pci_dev *dev)
+{
+ struct cxl *adapter;
+
+ if (!(adapter = kzalloc(sizeof(struct cxl), GFP_KERNEL)))
+ return NULL;
+
+ adapter->dev.parent = &dev->dev;
+ adapter->dev.release = cxl_release_adapter;
+ pci_set_drvdata(dev, adapter);
+ spin_lock_init(&adapter->afu_list_lock);
+
+ return adapter;
+}
+
+static int sanitise_adapter_regs(struct cxl *adapter)
+{
+ cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
+ return cxl_tlb_slb_invalidate(adapter);
+}
+
+static struct cxl *cxl_init_adapter(struct pci_dev *dev)
+{
+ struct cxl *adapter;
+ bool free = true;
+ int rc;
+
+
+ if (!(adapter = cxl_alloc_adapter(dev)))
+ return ERR_PTR(-ENOMEM);
+
+ if ((rc = switch_card_to_cxl(dev)))
+ goto err1;
+
+ if ((rc = cxl_alloc_adapter_nr(adapter)))
+ goto err1;
+
+ if ((rc = dev_set_name(&adapter->dev, "card%i", adapter->adapter_num)))
+ goto err2;
+
+ if ((rc = cxl_read_vsec(adapter, dev)))
+ goto err2;
+
+ if ((rc = cxl_vsec_looks_ok(adapter, dev)))
+ goto err2;
+
+ if ((rc = cxl_map_adapter_regs(adapter, dev)))
+ goto err2;
+
+ if ((rc = sanitise_adapter_regs(adapter)))
+ goto err2;
+
+ if ((rc = init_implementation_adapter_regs(adapter, dev)))
+ goto err3;
+
+ if ((rc = pnv_phb_to_cxl(dev)))
+ goto err3;
+
+ if ((rc = cxl_register_psl_err_irq(adapter)))
+ goto err3;
+
+ /* Don't care if this one fails: */
+ cxl_debugfs_adapter_add(adapter);
+
+ /*
+ * After we call this function we must not free the adapter directly,
+ * even if it returns an error!
+ */
+ if ((rc = cxl_register_adapter(adapter)))
+ goto err_put1;
+
+ if ((rc = cxl_sysfs_adapter_add(adapter)))
+ goto err_put1;
+
+ return adapter;
+
+err_put1:
+ device_unregister(&adapter->dev);
+ free = false;
+ cxl_debugfs_adapter_remove(adapter);
+ cxl_release_psl_err_irq(adapter);
+err3:
+ cxl_unmap_adapter_regs(adapter);
+err2:
+ cxl_remove_adapter_nr(adapter);
+err1:
+ if (free)
+ kfree(adapter);
+ return ERR_PTR(rc);
+}
+
+static void cxl_remove_adapter(struct cxl *adapter)
+{
+ struct pci_dev *pdev = to_pci_dev(adapter->dev.parent);
+
+ pr_devel("cxl_release_adapter\n");
+
+ cxl_sysfs_adapter_remove(adapter);
+ cxl_debugfs_adapter_remove(adapter);
+ cxl_release_psl_err_irq(adapter);
+ cxl_unmap_adapter_regs(adapter);
+ cxl_remove_adapter_nr(adapter);
+
+ device_unregister(&adapter->dev);
+
+ pci_release_region(pdev, 0);
+ pci_release_region(pdev, 2);
+ pci_disable_device(pdev);
+}
+
+static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ struct cxl *adapter;
+ int slice;
+ int rc;
+
+ pci_dev_get(dev);
+
+ if (cxl_verbose)
+ dump_cxl_config_space(dev);
+
+ if ((rc = setup_cxl_bars(dev)))
+ return rc;
+
+ if ((rc = pci_enable_device(dev))) {
+ dev_err(&dev->dev, "pci_enable_device failed: %i\n", rc);
+ return rc;
+ }
+
+ adapter = cxl_init_adapter(dev);
+ if (IS_ERR(adapter)) {
+ dev_err(&dev->dev, "cxl_init_adapter failed: %li\n", PTR_ERR(adapter));
+ return PTR_ERR(adapter);
+ }
+
+ for (slice = 0; slice < adapter->slices; slice++) {
+ if ((rc = cxl_init_afu(adapter, slice, dev)))
+ dev_err(&dev->dev, "AFU %i failed to initialise: %i\n", slice, rc);
+ }
+
+ return 0;
+}
+
+static void cxl_remove(struct pci_dev *dev)
+{
+ struct cxl *adapter = pci_get_drvdata(dev);
+ int afu;
+
+ dev_warn(&dev->dev, "pci remove\n");
+
+ /*
+ * Lock to prevent someone grabbing a ref through the adapter list as
+ * we are removing it
+ */
+ for (afu = 0; afu < adapter->slices; afu++)
+ cxl_remove_afu(adapter->afu[afu]);
+ cxl_remove_adapter(adapter);
+}
+
+struct pci_driver cxl_pci_driver = {
+ .name = "cxl-pci",
+ .id_table = cxl_pci_tbl,
+ .probe = cxl_probe,
+ .remove = cxl_remove,
+};
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
new file mode 100644
index 000000000000..ce7ec06d87d1
--- /dev/null
+++ b/drivers/misc/cxl/sysfs.c
@@ -0,0 +1,385 @@
+/*
+ * Copyright 2014 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/sysfs.h>
+
+#include "cxl.h"
+
+#define to_afu_chardev_m(d) dev_get_drvdata(d)
+
+/********* Adapter attributes **********************************************/
+
+static ssize_t caia_version_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl *adapter = to_cxl_adapter(device);
+
+ return scnprintf(buf, PAGE_SIZE, "%i.%i\n", adapter->caia_major,
+ adapter->caia_minor);
+}
+
+static ssize_t psl_revision_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl *adapter = to_cxl_adapter(device);
+
+ return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_rev);
+}
+
+static ssize_t base_image_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl *adapter = to_cxl_adapter(device);
+
+ return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->base_image);
+}
+
+static ssize_t image_loaded_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl *adapter = to_cxl_adapter(device);
+
+ if (adapter->user_image_loaded)
+ return scnprintf(buf, PAGE_SIZE, "user\n");
+ return scnprintf(buf, PAGE_SIZE, "factory\n");
+}
+
+static struct device_attribute adapter_attrs[] = {
+ __ATTR_RO(caia_version),
+ __ATTR_RO(psl_revision),
+ __ATTR_RO(base_image),
+ __ATTR_RO(image_loaded),
+};
+
+
+/********* AFU master specific attributes **********************************/
+
+static ssize_t mmio_size_show_master(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_afu *afu = to_afu_chardev_m(device);
+
+ return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->adapter->ps_size);
+}
+
+static ssize_t pp_mmio_off_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_afu *afu = to_afu_chardev_m(device);
+
+ return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_offset);
+}
+
+static ssize_t pp_mmio_len_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_afu *afu = to_afu_chardev_m(device);
+
+ return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_size);
+}
+
+static struct device_attribute afu_master_attrs[] = {
+ __ATTR(mmio_size, S_IRUGO, mmio_size_show_master, NULL),
+ __ATTR_RO(pp_mmio_off),
+ __ATTR_RO(pp_mmio_len),
+};
+
+
+/********* AFU attributes **************************************************/
+
+static ssize_t mmio_size_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_afu *afu = to_cxl_afu(device);
+
+ if (afu->pp_size)
+ return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_size);
+ return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->adapter->ps_size);
+}
+
+static ssize_t reset_store_afu(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cxl_afu *afu = to_cxl_afu(device);
+ int rc;
+
+ /* Not safe to reset if it is currently in use */
+ spin_lock(&afu->contexts_lock);
+ if (!idr_is_empty(&afu->contexts_idr)) {
+ rc = -EBUSY;
+ goto err;
+ }
+
+ if ((rc = cxl_afu_reset(afu)))
+ goto err;
+
+ rc = count;
+err:
+ spin_unlock(&afu->contexts_lock);
+ return rc;
+}
+
+static ssize_t irqs_min_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_afu *afu = to_cxl_afu(device);
+
+ return scnprintf(buf, PAGE_SIZE, "%i\n", afu->pp_irqs);
+}
+
+static ssize_t irqs_max_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_afu *afu = to_cxl_afu(device);
+
+ return scnprintf(buf, PAGE_SIZE, "%i\n", afu->irqs_max);
+}
+
+static ssize_t irqs_max_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cxl_afu *afu = to_cxl_afu(device);
+ ssize_t ret;
+ int irqs_max;
+
+ ret = sscanf(buf, "%i", &irqs_max);
+ if (ret != 1)
+ return -EINVAL;
+
+ if (irqs_max < afu->pp_irqs)
+ return -EINVAL;
+
+ if (irqs_max > afu->adapter->user_irqs)
+ return -EINVAL;
+
+ afu->irqs_max = irqs_max;
+ return count;
+}
+
+static ssize_t modes_supported_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_afu *afu = to_cxl_afu(device);
+ char *p = buf, *end = buf + PAGE_SIZE;
+
+ if (afu->modes_supported & CXL_MODE_DEDICATED)
+ p += scnprintf(p, end - p, "dedicated_process\n");
+ if (afu->modes_supported & CXL_MODE_DIRECTED)
+ p += scnprintf(p, end - p, "afu_directed\n");
+ return (p - buf);
+}
+
+static ssize_t prefault_mode_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_afu *afu = to_cxl_afu(device);
+
+ switch (afu->prefault_mode) {
+ case CXL_PREFAULT_WED:
+ return scnprintf(buf, PAGE_SIZE, "work_element_descriptor\n");
+ case CXL_PREFAULT_ALL:
+ return scnprintf(buf, PAGE_SIZE, "all\n");
+ default:
+ return scnprintf(buf, PAGE_SIZE, "none\n");
+ }
+}
+
+static ssize_t prefault_mode_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cxl_afu *afu = to_cxl_afu(device);
+ enum prefault_modes mode = -1;
+
+ if (!strncmp(buf, "work_element_descriptor", 23))
+ mode = CXL_PREFAULT_WED;
+ if (!strncmp(buf, "all", 3))
+ mode = CXL_PREFAULT_ALL;
+ if (!strncmp(buf, "none", 4))
+ mode = CXL_PREFAULT_NONE;
+
+ if (mode == -1)
+ return -EINVAL;
+
+ afu->prefault_mode = mode;
+ return count;
+}
+
+static ssize_t mode_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_afu *afu = to_cxl_afu(device);
+
+ if (afu->current_mode == CXL_MODE_DEDICATED)
+ return scnprintf(buf, PAGE_SIZE, "dedicated_process\n");
+ if (afu->current_mode == CXL_MODE_DIRECTED)
+ return scnprintf(buf, PAGE_SIZE, "afu_directed\n");
+ return scnprintf(buf, PAGE_SIZE, "none\n");
+}
+
+static ssize_t mode_store(struct device *device, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cxl_afu *afu = to_cxl_afu(device);
+ int old_mode, mode = -1;
+ int rc = -EBUSY;
+
+ /* can't change this if we have a user */
+ spin_lock(&afu->contexts_lock);
+ if (!idr_is_empty(&afu->contexts_idr))
+ goto err;
+
+ if (!strncmp(buf, "dedicated_process", 17))
+ mode = CXL_MODE_DEDICATED;
+ if (!strncmp(buf, "afu_directed", 12))
+ mode = CXL_MODE_DIRECTED;
+ if (!strncmp(buf, "none", 4))
+ mode = 0;
+
+ if (mode == -1) {
+ rc = -EINVAL;
+ goto err;
+ }
+
+ /*
+ * cxl_afu_deactivate_mode needs to be done outside the lock; clearing
+ * the mode here prevents other contexts coming in before we are ready:
+ */
+ old_mode = afu->current_mode;
+ afu->current_mode = 0;
+ afu->num_procs = 0;
+
+ spin_unlock(&afu->contexts_lock);
+
+ if ((rc = _cxl_afu_deactivate_mode(afu, old_mode)))
+ return rc;
+ if ((rc = cxl_afu_activate_mode(afu, mode)))
+ return rc;
+
+ return count;
+err:
+ spin_unlock(&afu->contexts_lock);
+ return rc;
+}
+
+static ssize_t api_version_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%i\n", CXL_API_VERSION);
+}
+
+static ssize_t api_version_compatible_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%i\n", CXL_API_VERSION_COMPATIBLE);
+}
+
+static struct device_attribute afu_attrs[] = {
+ __ATTR_RO(mmio_size),
+ __ATTR_RO(irqs_min),
+ __ATTR_RW(irqs_max),
+ __ATTR_RO(modes_supported),
+ __ATTR_RW(mode),
+ __ATTR_RW(prefault_mode),
+ __ATTR_RO(api_version),
+ __ATTR_RO(api_version_compatible),
+ __ATTR(reset, S_IWUSR, NULL, reset_store_afu),
+};
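
As a usage sketch only, the AFU attributes defined above can be driven from user space like any other sysfs file; the path below is hypothetical and depends on how the class device is registered on a given system.

#include <stdio.h>

/* Illustrative path only -- adjust to wherever the AFU device appears */
#define AFU_SYSFS "/sys/class/cxl/afu0.0"

static int write_attr(const char *name, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", AFU_SYSFS, name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/* Strings match the parsing in mode_store()/prefault_mode_store() above */
	write_attr("mode", "afu_directed");
	write_attr("irqs_max", "16");
	write_attr("prefault_mode", "work_element_descriptor");
	return 0;
}
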
+
+
+
+int cxl_sysfs_adapter_add(struct cxl *adapter)
+{
+ int i, rc;
+
+ for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) {
+ if ((rc = device_create_file(&adapter->dev, &adapter_attrs[i])))
+ goto err;
+ }
+ return 0;
+err:
+ for (i--; i >= 0; i--)
+ device_remove_file(&adapter->dev, &adapter_attrs[i]);
+ return rc;
+}
+void cxl_sysfs_adapter_remove(struct cxl *adapter)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++)
+ device_remove_file(&adapter->dev, &adapter_attrs[i]);
+}
+
+int cxl_sysfs_afu_add(struct cxl_afu *afu)
+{
+ int i, rc;
+
+ for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) {
+ if ((rc = device_create_file(&afu->dev, &afu_attrs[i])))
+ goto err;
+ }
+
+ return 0;
+
+err:
+ for (i--; i >= 0; i--)
+ device_remove_file(&afu->dev, &afu_attrs[i]);
+ return rc;
+}
+
+void cxl_sysfs_afu_remove(struct cxl_afu *afu)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(afu_attrs); i++)
+ device_remove_file(&afu->dev, &afu_attrs[i]);
+}
+
+int cxl_sysfs_afu_m_add(struct cxl_afu *afu)
+{
+ int i, rc;
+
+ for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) {
+ if ((rc = device_create_file(afu->chardev_m, &afu_master_attrs[i])))
+ goto err;
+ }
+
+ return 0;
+
+err:
+ for (i--; i >= 0; i--)
+ device_remove_file(afu->chardev_m, &afu_master_attrs[i]);
+ return rc;
+}
+
+void cxl_sysfs_afu_m_remove(struct cxl_afu *afu)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++)
+ device_remove_file(afu->chardev_m, &afu_master_attrs[i]);
+}
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index ede41f05c392..1fa4c80ff886 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -977,7 +977,7 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
return ERR_CONTINUE;
/* Now for stop errors. These aren't fatal to the transfer. */
- pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
+ pr_info("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
req->rq_disk->disk_name, brq->stop.error,
brq->cmd.resp[0], status);
@@ -1398,10 +1398,15 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
if (disable_multi)
brq->data.blocks = 1;
- /* Some controllers can't do multiblock reads due to hw bugs */
- if (card->host->caps2 & MMC_CAP2_NO_MULTI_READ &&
- rq_data_dir(req) == READ)
- brq->data.blocks = 1;
+ /*
+ * Some controllers have HW issues while operating
+ * in multiple I/O mode
+ */
+ if (card->host->ops->multi_io_quirk)
+ brq->data.blocks = card->host->ops->multi_io_quirk(card,
+ (rq_data_dir(req) == READ) ?
+ MMC_DATA_READ : MMC_DATA_WRITE,
+ brq->data.blocks);
}
if (brq->data.blocks > 1 || do_rel_wr) {
@@ -1923,8 +1928,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
case MMC_BLK_ECC_ERR:
if (brq->data.blocks > 1) {
/* Redo read one sector at a time */
- pr_warning("%s: retrying using single block read\n",
- req->rq_disk->disk_name);
+ pr_warn("%s: retrying using single block read\n",
+ req->rq_disk->disk_name);
disable_multi = 1;
break;
}
@@ -2131,7 +2136,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
md->disk->queue = md->queue.queue;
md->disk->driverfs_dev = parent;
set_disk_ro(md->disk, md->read_only || default_ro);
- if (area_type & MMC_BLK_DATA_AREA_RPMB)
+ if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
md->disk->flags |= GENHD_FL_NO_PART_SCAN;
/*
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 3e049c13429c..feea926e32f8 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -229,14 +229,12 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
if (bouncesz > 512) {
mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
if (!mqrq_cur->bounce_buf) {
- pr_warning("%s: unable to "
- "allocate bounce cur buffer\n",
+ pr_warn("%s: unable to allocate bounce cur buffer\n",
mmc_card_name(card));
}
mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
if (!mqrq_prev->bounce_buf) {
- pr_warning("%s: unable to "
- "allocate bounce prev buffer\n",
+ pr_warn("%s: unable to allocate bounce prev buffer\n",
mmc_card_name(card));
kfree(mqrq_cur->bounce_buf);
mqrq_cur->bounce_buf = NULL;
diff --git a/drivers/mmc/card/sdio_uart.c b/drivers/mmc/card/sdio_uart.c
index f093cea0d060..d2de5925b73e 100644
--- a/drivers/mmc/card/sdio_uart.c
+++ b/drivers/mmc/card/sdio_uart.c
@@ -1063,8 +1063,8 @@ static int sdio_uart_probe(struct sdio_func *func,
return -ENOMEM;
if (func->class == SDIO_CLASS_UART) {
- pr_warning("%s: need info on UART class basic setup\n",
- sdio_func_id(func));
+ pr_warn("%s: need info on UART class basic setup\n",
+ sdio_func_id(func));
kfree(port);
return -ENOSYS;
} else if (func->class == SDIO_CLASS_GPS) {
@@ -1082,9 +1082,8 @@ static int sdio_uart_probe(struct sdio_func *func,
break;
}
if (!tpl) {
- pr_warning(
- "%s: can't find tuple 0x91 subtuple 0 (SUBTPL_SIOREG) for GPS class\n",
- sdio_func_id(func));
+ pr_warn("%s: can't find tuple 0x91 subtuple 0 (SUBTPL_SIOREG) for GPS class\n",
+ sdio_func_id(func));
kfree(port);
return -EINVAL;
}
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index d03a080fb9cd..f26a5f1d926d 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -433,8 +433,8 @@ static void mmc_wait_for_req_done(struct mmc_host *host,
*/
if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) {
if (!mmc_interrupt_hpi(host->card)) {
- pr_warning("%s: %s: Interrupted sanitize\n",
- mmc_hostname(host), __func__);
+ pr_warn("%s: %s: Interrupted sanitize\n",
+ mmc_hostname(host), __func__);
cmd->error = 0;
break;
} else {
@@ -995,7 +995,7 @@ void mmc_set_chip_select(struct mmc_host *host, int mode)
*/
static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
- WARN_ON(hz < host->f_min);
+ WARN_ON(hz && hz < host->f_min);
if (hz > host->f_max)
hz = host->f_max;
@@ -1221,15 +1221,14 @@ int mmc_regulator_get_ocrmask(struct regulator *supply)
int result = 0;
int count;
int i;
+ int vdd_uV;
+ int vdd_mV;
count = regulator_count_voltages(supply);
if (count < 0)
return count;
for (i = 0; i < count; i++) {
- int vdd_uV;
- int vdd_mV;
-
vdd_uV = regulator_list_voltage(supply, i);
if (vdd_uV <= 0)
continue;
@@ -1238,6 +1237,15 @@ int mmc_regulator_get_ocrmask(struct regulator *supply)
result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
}
+ if (!result) {
+ vdd_uV = regulator_get_voltage(supply);
+ if (vdd_uV <= 0)
+ return vdd_uV;
+
+ vdd_mV = vdd_uV / 1000;
+ result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
+ }
+
return result;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);
@@ -1263,7 +1271,6 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc,
if (vdd_bit) {
int tmp;
- int voltage;
/*
* REVISIT mmc_vddrange_to_ocrmask() may have set some
@@ -1280,22 +1287,7 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc,
max_uV = min_uV + 100 * 1000;
}
- /*
- * If we're using a fixed/static regulator, don't call
- * regulator_set_voltage; it would fail.
- */
- voltage = regulator_get_voltage(supply);
-
- if (!regulator_can_change_voltage(supply))
- min_uV = max_uV = voltage;
-
- if (voltage < 0)
- result = voltage;
- else if (voltage < min_uV || voltage > max_uV)
- result = regulator_set_voltage(supply, min_uV, max_uV);
- else
- result = 0;
-
+ result = regulator_set_voltage(supply, min_uV, max_uV);
if (result == 0 && !mmc->regulator_enabled) {
result = regulator_enable(supply);
if (!result)
@@ -1425,8 +1417,8 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr)
if (!host->ops->start_signal_voltage_switch)
return -EPERM;
if (!host->ops->card_busy)
- pr_warning("%s: cannot verify signal voltage switch\n",
- mmc_hostname(host));
+ pr_warn("%s: cannot verify signal voltage switch\n",
+ mmc_hostname(host));
cmd.opcode = SD_SWITCH_VOLTAGE;
cmd.arg = 0;
@@ -1761,7 +1753,7 @@ void mmc_init_erase(struct mmc_card *card)
card->erase_shift = ffs(card->ssr.au) - 1;
} else if (card->ext_csd.hc_erase_size) {
card->pref_erase = card->ext_csd.hc_erase_size;
- } else {
+ } else if (card->erase_size) {
sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
if (sz < 128)
card->pref_erase = 512 * 1024 / 512;
@@ -1778,7 +1770,8 @@ void mmc_init_erase(struct mmc_card *card)
if (sz)
card->pref_erase += card->erase_size - sz;
}
- }
+ } else
+ card->pref_erase = 0;
}
static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
@@ -2489,6 +2482,7 @@ void mmc_start_host(struct mmc_host *host)
{
host->f_init = max(freqs[0], host->f_min);
host->rescan_disable = 0;
+ host->ios.power_mode = MMC_POWER_UNDEFINED;
if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)
mmc_power_off(host);
else
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 95cceae96944..03c53b72a2d6 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -310,9 +310,8 @@ int mmc_of_parse(struct mmc_host *host)
{
struct device_node *np;
u32 bus_width;
- bool explicit_inv_wp, gpio_inv_wp = false;
- enum of_gpio_flags flags;
- int len, ret, gpio;
+ int len, ret;
+ bool cap_invert, gpio_invert;
if (!host->parent || !host->parent->of_node)
return 0;
@@ -360,59 +359,62 @@ int mmc_of_parse(struct mmc_host *host)
if (of_find_property(np, "non-removable", &len)) {
host->caps |= MMC_CAP_NONREMOVABLE;
} else {
- bool explicit_inv_cd, gpio_inv_cd = false;
-
- explicit_inv_cd = of_property_read_bool(np, "cd-inverted");
+ if (of_property_read_bool(np, "cd-inverted"))
+ cap_invert = true;
+ else
+ cap_invert = false;
if (of_find_property(np, "broken-cd", &len))
host->caps |= MMC_CAP_NEEDS_POLL;
- gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags);
- if (gpio == -EPROBE_DEFER)
- return gpio;
- if (gpio_is_valid(gpio)) {
- if (!(flags & OF_GPIO_ACTIVE_LOW))
- gpio_inv_cd = true;
-
- ret = mmc_gpio_request_cd(host, gpio, 0);
- if (ret < 0) {
- dev_err(host->parent,
- "Failed to request CD GPIO #%d: %d!\n",
- gpio, ret);
+ ret = mmc_gpiod_request_cd(host, "cd", 0, true,
+ 0, &gpio_invert);
+ if (ret) {
+ if (ret == -EPROBE_DEFER)
return ret;
- } else {
- dev_info(host->parent, "Got CD GPIO #%d.\n",
- gpio);
+ if (ret != -ENOENT) {
+ dev_err(host->parent,
+ "Failed to request CD GPIO: %d\n",
+ ret);
}
- }
-
- if (explicit_inv_cd ^ gpio_inv_cd)
+ } else
+ dev_info(host->parent, "Got CD GPIO\n");
+
+ /*
+ * There are two ways to flag that the CD line is inverted:
+ * through the cd-inverted flag and by the GPIO line itself
+ * being inverted from the GPIO subsystem. This is a leftover
+ * from the times when the GPIO subsystem did not make it
+ * possible to flag a line as inverted.
+ *
+ * If the capability on the host AND the GPIO line are
+ * both inverted, the end result is that the CD line is
+ * not inverted.
+ */
+ if (cap_invert ^ gpio_invert)
host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
}
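
A tiny standalone illustration of the inversion rule described in the comment above: the cd-inverted capability and a GPIO-level inversion cancel each other out, so only the XOR of the two marks the CD line as active-high.

#include <stdio.h>

int main(void)
{
	int cap_invert, gpio_invert;

	/* Truth table for the cap_invert ^ gpio_invert check above */
	for (cap_invert = 0; cap_invert <= 1; cap_invert++)
		for (gpio_invert = 0; gpio_invert <= 1; gpio_invert++)
			printf("cap_invert=%d gpio_invert=%d -> CD active high=%d\n",
			       cap_invert, gpio_invert,
			       cap_invert ^ gpio_invert);
	return 0;
}
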
/* Parse Write Protection */
- explicit_inv_wp = of_property_read_bool(np, "wp-inverted");
-
- gpio = of_get_named_gpio_flags(np, "wp-gpios", 0, &flags);
- if (gpio == -EPROBE_DEFER) {
- ret = -EPROBE_DEFER;
- goto out;
- }
- if (gpio_is_valid(gpio)) {
- if (!(flags & OF_GPIO_ACTIVE_LOW))
- gpio_inv_wp = true;
+ if (of_property_read_bool(np, "wp-inverted"))
+ cap_invert = true;
+ else
+ cap_invert = false;
- ret = mmc_gpio_request_ro(host, gpio);
- if (ret < 0) {
- dev_err(host->parent,
- "Failed to request WP GPIO: %d!\n", ret);
+ ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &gpio_invert);
+ if (ret) {
+ if (ret == -EPROBE_DEFER)
goto out;
- } else {
- dev_info(host->parent, "Got WP GPIO #%d.\n",
- gpio);
+ if (ret != -ENOENT) {
+ dev_err(host->parent,
+ "Failed to request WP GPIO: %d\n",
+ ret);
}
- }
- if (explicit_inv_wp ^ gpio_inv_wp)
+ } else
+ dev_info(host->parent, "Got WP GPIO\n");
+
+ /* See the comment on CD inversion above */
+ if (cap_invert ^ gpio_invert)
host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
if (of_find_property(np, "cap-sd-highspeed", &len))
@@ -452,6 +454,14 @@ int mmc_of_parse(struct mmc_host *host)
if (of_find_property(np, "mmc-hs400-1_2v", &len))
host->caps2 |= MMC_CAP2_HS400_1_2V | MMC_CAP2_HS200_1_2V_SDR;
+ host->dsr_req = !of_property_read_u32(np, "dsr", &host->dsr);
+ if (host->dsr_req && (host->dsr & ~0xffff)) {
+ dev_err(host->parent,
+ "device tree specified broken value for DSR: 0x%x, ignoring\n",
+ host->dsr);
+ host->dsr_req = 0;
+ }
+
return 0;
out:
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 1eda8dd8c867..a301a78a2bd1 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -162,6 +162,7 @@ static int mmc_decode_csd(struct mmc_card *card)
csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
+ csd->dsr_imp = UNSTUFF_BITS(resp, 76, 1);
csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
@@ -225,9 +226,7 @@ static int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
"Card will be ignored.\n",
mmc_hostname(card->host));
} else {
- pr_warning("%s: unable to read "
- "EXT_CSD, performance might "
- "suffer.\n",
+ pr_warn("%s: unable to read EXT_CSD, performance might suffer\n",
mmc_hostname(card->host));
err = 0;
}
@@ -298,6 +297,97 @@ static void mmc_select_card_type(struct mmc_card *card)
card->mmc_avail_type = avail_type;
}
+static void mmc_manage_enhanced_area(struct mmc_card *card, u8 *ext_csd)
+{
+ u8 hc_erase_grp_sz, hc_wp_grp_sz;
+
+ /*
+ * Disable these attributes by default
+ */
+ card->ext_csd.enhanced_area_offset = -EINVAL;
+ card->ext_csd.enhanced_area_size = -EINVAL;
+
+ /*
+ * Enhanced area feature support -- check whether the eMMC
+ * card has the Enhanced area enabled. If so, export enhanced
+ * area offset and size to user by adding sysfs interface.
+ */
+ if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
+ (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
+ if (card->ext_csd.partition_setting_completed) {
+ hc_erase_grp_sz =
+ ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
+ hc_wp_grp_sz =
+ ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
+
+ /*
+ * calculate the enhanced data area offset, in bytes
+ */
+ card->ext_csd.enhanced_area_offset =
+ (ext_csd[139] << 24) + (ext_csd[138] << 16) +
+ (ext_csd[137] << 8) + ext_csd[136];
+ if (mmc_card_blockaddr(card))
+ card->ext_csd.enhanced_area_offset <<= 9;
+ /*
+ * calculate the enhanced data area size, in kilobytes
+ */
+ card->ext_csd.enhanced_area_size =
+ (ext_csd[142] << 16) + (ext_csd[141] << 8) +
+ ext_csd[140];
+ card->ext_csd.enhanced_area_size *=
+ (size_t)(hc_erase_grp_sz * hc_wp_grp_sz);
+ card->ext_csd.enhanced_area_size <<= 9;
+ } else {
+ pr_warn("%s: defines enhanced area without partition setting complete\n",
+ mmc_hostname(card->host));
+ }
+ }
+}
+
+static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
+{
+ int idx;
+ u8 hc_erase_grp_sz, hc_wp_grp_sz;
+ unsigned int part_size;
+
+ /*
+ * General purpose partition feature support --
+ * If ext_csd has the size of general purpose partitions,
+ * set size, part_cfg, partition name in mmc_part.
+ */
+ if (ext_csd[EXT_CSD_PARTITION_SUPPORT] &
+ EXT_CSD_PART_SUPPORT_PART_EN) {
+ hc_erase_grp_sz =
+ ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
+ hc_wp_grp_sz =
+ ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
+
+ for (idx = 0; idx < MMC_NUM_GP_PARTITION; idx++) {
+ if (!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3] &&
+ !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] &&
+ !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2])
+ continue;
+ if (card->ext_csd.partition_setting_completed == 0) {
+ pr_warn("%s: has partition size defined without partition complete\n",
+ mmc_hostname(card->host));
+ break;
+ }
+ part_size =
+ (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2]
+ << 16) +
+ (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1]
+ << 8) +
+ ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];
+ part_size *= (size_t)(hc_erase_grp_sz *
+ hc_wp_grp_sz);
+ mmc_part_add(card, part_size << 19,
+ EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
+ "gp%d", idx, false,
+ MMC_BLK_DATA_AREA_GP);
+ }
+ }
+}
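
A standalone sketch of the size arithmetic in mmc_manage_gp_partitions() above: the three GP_SIZE_MULT bytes form a 24-bit multiplier, which is scaled by the high-capacity erase-group and write-protect-group sizes and then converted to bytes with the << 19 (512 KiB unit) shift. The sample EXT_CSD values are made up for illustration.

#include <stdint.h>
#include <stdio.h>

static uint64_t gp_part_bytes(uint8_t mult0, uint8_t mult1, uint8_t mult2,
			      uint8_t hc_erase_grp_sz, uint8_t hc_wp_grp_sz)
{
	uint64_t part_size;

	/* Same combination as the EXT_CSD_GP_SIZE_MULT handling above */
	part_size = ((uint64_t)mult2 << 16) + ((uint64_t)mult1 << 8) + mult0;
	part_size *= (uint64_t)hc_erase_grp_sz * hc_wp_grp_sz;

	/* part_size counts 512 KiB units -> bytes */
	return part_size << 19;
}

int main(void)
{
	/* Hypothetical: multiplier 2, 8 erase groups per WP group of size 1 -> 8 MiB */
	printf("gp0 size = %llu bytes\n",
	       (unsigned long long)gp_part_bytes(2, 0, 0, 8, 1));
	return 0;
}
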
+
/*
* Decode extended CSD.
*/
@@ -305,7 +395,6 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
{
int err = 0, idx;
unsigned int part_size;
- u8 hc_erase_grp_sz = 0, hc_wp_grp_sz = 0;
BUG_ON(!card);
@@ -402,80 +491,16 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
ext_csd[EXT_CSD_TRIM_MULT];
card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
if (card->ext_csd.rev >= 4) {
- /*
- * Enhanced area feature support -- check whether the eMMC
- * card has the Enhanced area enabled. If so, export enhanced
- * area offset and size to user by adding sysfs interface.
- */
- if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
- (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
- hc_erase_grp_sz =
- ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
- hc_wp_grp_sz =
- ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
+ if (ext_csd[EXT_CSD_PARTITION_SETTING_COMPLETED] &
+ EXT_CSD_PART_SETTING_COMPLETED)
+ card->ext_csd.partition_setting_completed = 1;
+ else
+ card->ext_csd.partition_setting_completed = 0;
- card->ext_csd.enhanced_area_en = 1;
- /*
- * calculate the enhanced data area offset, in bytes
- */
- card->ext_csd.enhanced_area_offset =
- (ext_csd[139] << 24) + (ext_csd[138] << 16) +
- (ext_csd[137] << 8) + ext_csd[136];
- if (mmc_card_blockaddr(card))
- card->ext_csd.enhanced_area_offset <<= 9;
- /*
- * calculate the enhanced data area size, in kilobytes
- */
- card->ext_csd.enhanced_area_size =
- (ext_csd[142] << 16) + (ext_csd[141] << 8) +
- ext_csd[140];
- card->ext_csd.enhanced_area_size *=
- (size_t)(hc_erase_grp_sz * hc_wp_grp_sz);
- card->ext_csd.enhanced_area_size <<= 9;
- } else {
- /*
- * If the enhanced area is not enabled, disable these
- * device attributes.
- */
- card->ext_csd.enhanced_area_offset = -EINVAL;
- card->ext_csd.enhanced_area_size = -EINVAL;
- }
+ mmc_manage_enhanced_area(card, ext_csd);
- /*
- * General purpose partition feature support --
- * If ext_csd has the size of general purpose partitions,
- * set size, part_cfg, partition name in mmc_part.
- */
- if (ext_csd[EXT_CSD_PARTITION_SUPPORT] &
- EXT_CSD_PART_SUPPORT_PART_EN) {
- if (card->ext_csd.enhanced_area_en != 1) {
- hc_erase_grp_sz =
- ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
- hc_wp_grp_sz =
- ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
-
- card->ext_csd.enhanced_area_en = 1;
- }
+ mmc_manage_gp_partitions(card, ext_csd);
- for (idx = 0; idx < MMC_NUM_GP_PARTITION; idx++) {
- if (!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3] &&
- !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] &&
- !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2])
- continue;
- part_size =
- (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2]
- << 16) +
- (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1]
- << 8) +
- ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];
- part_size *= (size_t)(hc_erase_grp_sz *
- hc_wp_grp_sz);
- mmc_part_add(card, part_size << 19,
- EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
- "gp%d", idx, false,
- MMC_BLK_DATA_AREA_GP);
- }
- }
card->ext_csd.sec_trim_mult =
ext_csd[EXT_CSD_SEC_TRIM_MULT];
card->ext_csd.sec_erase_mult =
@@ -789,8 +814,8 @@ static int __mmc_select_powerclass(struct mmc_card *card,
ext_csd->raw_pwr_cl_200_360;
break;
default:
- pr_warning("%s: Voltage range not supported "
- "for power class.\n", mmc_hostname(host));
+ pr_warn("%s: Voltage range not supported for power class\n",
+ mmc_hostname(host));
return -EINVAL;
}
@@ -987,19 +1012,35 @@ static int mmc_select_hs_ddr(struct mmc_card *card)
* 1.8V vccq at 3.3V core voltage (vcc) is not required
* in the JEDEC spec for DDR.
*
- * Do not force change in vccq since we are obviously
- * working and no change to vccq is needed.
+ * Even though an (e)MMC card can support 3.3v to 1.2v vccq, not all
+ * host controllers can, e.g. some SDHCI controllers that connect to
+ * an eMMC device. Some of these host controllers still need to use
+ * 1.8v vccq for supporting DDR mode.
+ *
+ * So the sequence will be:
+ * if (host and device can both support 1.2v IO)
+ * use 1.2v IO;
+ * else if (host and device can both support 1.8v IO)
+ * use 1.8v IO;
+ * so if host and device can only support 3.3v IO, this is the
+ * last choice.
*
* WARNING: eMMC rules are NOT the same as SD DDR
*/
- if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_2V) {
- err = __mmc_set_signal_voltage(host,
- MMC_SIGNAL_VOLTAGE_120);
- if (err)
- return err;
- }
+ err = -EINVAL;
+ if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_2V)
+ err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
+
+ if (err && (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_8V))
+ err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
- mmc_set_timing(host, MMC_TIMING_MMC_DDR52);
+ /* make sure vccq is 3.3v after switching disaster */
+ if (err)
+ err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330);
+
+ if (!err)
+ mmc_set_timing(host, MMC_TIMING_MMC_DDR52);
return err;
}
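
A minimal sketch of the voltage-selection ladder the comment above describes, with the card/host capability checks reduced to booleans; the real code uses __mmc_set_signal_voltage() and the EXT_CSD_CARD_TYPE_DDR_* bits, the helpers below are stand-ins.

#include <stdio.h>

/* Stand-in for __mmc_set_signal_voltage(): fails when unsupported */
static int try_voltage(int mv, int supported)
{
	if (!supported)
		return -1;
	printf("vccq switched to %d mV\n", mv);
	return 0;
}

static int select_ddr_vccq(int can_1v2, int can_1v8)
{
	int err;

	/* Prefer 1.2 V, then 1.8 V, as in mmc_select_hs_ddr() above */
	err = try_voltage(1200, can_1v2);
	if (err)
		err = try_voltage(1800, can_1v8);

	/* Fall back to 3.3 V if both switches failed */
	if (err)
		err = try_voltage(3300, 1);

	return err;
}

int main(void)
{
	return select_ddr_vccq(0, 1);
}
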
@@ -1134,6 +1175,38 @@ bus_speed:
return err;
}
+const u8 tuning_blk_pattern_4bit[MMC_TUNING_BLK_PATTERN_4BIT_SIZE] = {
+ 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
+ 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
+ 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
+ 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
+ 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
+ 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
+ 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
+ 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
+};
+EXPORT_SYMBOL(tuning_blk_pattern_4bit);
+
+const u8 tuning_blk_pattern_8bit[MMC_TUNING_BLK_PATTERN_8BIT_SIZE] = {
+ 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
+ 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
+ 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
+ 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
+ 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
+ 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
+ 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
+ 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
+ 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
+ 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
+ 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
+ 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
+ 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
+ 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
+ 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
+ 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
+};
+EXPORT_SYMBOL(tuning_blk_pattern_8bit);
+
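
For context, the two exported arrays above are the standard tuning block patterns returned by the card in response to the tuning command; a host driver's .execute_tuning callback typically sweeps its sample delay and compares each received block against the pattern. A minimal sketch of that loop is shown below; it is illustrative only, not part of this patch, and set_sample_delay()/read_tuning_block() are hypothetical stand-ins for host-specific hooks.

static int example_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	u8 blk[MMC_TUNING_BLK_PATTERN_4BIT_SIZE];
	int tap, first_ok = -1, last_ok = -1;

	for (tap = 0; tap < 32; tap++) {
		set_sample_delay(mmc, tap);	/* hypothetical host hook */
		/* hypothetical: send the tuning command, read back one block */
		if (read_tuning_block(mmc, opcode, blk, sizeof(blk)))
			continue;
		if (memcmp(blk, tuning_blk_pattern_4bit, sizeof(blk)))
			continue;		/* pattern mismatch: this tap fails */
		if (first_ok < 0)
			first_ok = tap;
		last_ok = tap;
	}

	if (first_ok < 0)
		return -EIO;

	/* centre the sample point in the passing window */
	set_sample_delay(mmc, (first_ok + last_ok) / 2);
	return 0;
}
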
/*
* Execute tuning sequence to seek the proper bus operating
* conditions for HS200 and HS400, which sends CMD21 to the device.
@@ -1272,6 +1345,13 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
}
/*
+	 * Handle DSR configuration only for cards that support DSR and
+	 * hosts that request it
+ */
+ if (card->csd.dsr_imp && host->dsr_req)
+ mmc_set_dsr(host);
+
+ /*
* Select card, as all following commands rely on that.
*/
if (!mmc_host_is_spi(host)) {
@@ -1308,7 +1388,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
* If enhanced_area_en is TRUE, host needs to enable ERASE_GRP_DEF
* bit. This bit will be lost every time after a reset or power off.
*/
- if (card->ext_csd.enhanced_area_en ||
+ if (card->ext_csd.partition_setting_completed ||
(card->ext_csd.rev >= 3 && (host->caps2 & MMC_CAP2_HC_ERASE_SZ))) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_ERASE_GROUP_DEF, 1,
@@ -1408,8 +1488,8 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
if (err && err != -EBADMSG)
goto free_card;
if (err) {
- pr_warning("%s: Enabling HPI failed\n",
- mmc_hostname(card->host));
+ pr_warn("%s: Enabling HPI failed\n",
+ mmc_hostname(card->host));
err = 0;
} else
card->ext_csd.hpi_en = 1;
@@ -1430,9 +1510,8 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
* Only if no error, cache is turned on successfully.
*/
if (err) {
- pr_warning("%s: Cache is supported, "
- "but failed to turn on (%d)\n",
- mmc_hostname(card->host), err);
+ pr_warn("%s: Cache is supported, but failed to turn on (%d)\n",
+ mmc_hostname(card->host), err);
card->ext_csd.cache_ctrl = 0;
err = 0;
} else {
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index f51b5ba3bbea..7911e0510a1d 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -93,6 +93,26 @@ int mmc_deselect_cards(struct mmc_host *host)
return _mmc_select_card(host, NULL);
}
+/*
+ * Write the value specified in the device tree or board code into the optional
+ * 16-bit Driver Stage Register. This can be used to tune the rise/fall
+ * times and drive strength of the DAT and CMD outputs. The actual meaning
+ * of a given value is hardware dependent.
+ * The presence of the DSR register can be determined from the CSD register,
+ * bit 76.
+ */
+int mmc_set_dsr(struct mmc_host *host)
+{
+ struct mmc_command cmd = {0};
+
+ cmd.opcode = MMC_SET_DSR;
+
+ cmd.arg = (host->dsr << 16) | 0xffff;
+ cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
+
+ return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
+}
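
As a usage sketch (not part of this patch): a host driver or board file that wants the core to program a DSR would set the two mmc_host fields that the card-init paths above check; the 0x0404 value is purely an example, since the meaning of the value is hardware dependent.

static void example_request_dsr(struct mmc_host *host)
{
	/* illustrative only: ask the core to program a DSR during card init */
	host->dsr = 0x0404;	/* example 16-bit value; meaning is hardware dependent */
	host->dsr_req = 1;	/* checked together with card->csd.dsr_imp at init time */
}
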
+
int mmc_go_idle(struct mmc_host *host)
{
int err;
@@ -629,8 +649,8 @@ int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
int err;
if (!card->ext_csd.hpi) {
- pr_warning("%s: Card didn't support HPI command\n",
- mmc_hostname(card->host));
+ pr_warn("%s: Card didn't support HPI command\n",
+ mmc_hostname(card->host));
return -EINVAL;
}
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index 80ae9f4e0293..390dac665b2a 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -14,6 +14,7 @@
int mmc_select_card(struct mmc_card *card);
int mmc_deselect_cards(struct mmc_host *host);
+int mmc_set_dsr(struct mmc_host *host);
int mmc_go_idle(struct mmc_host *host);
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr);
int mmc_all_send_cid(struct mmc_host *host, u32 *cid);
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 0c44510bf717..d90a6de7901d 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -127,6 +127,7 @@ static int mmc_decode_csd(struct mmc_card *card)
csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
+ csd->dsr_imp = UNSTUFF_BITS(resp, 76, 1);
csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
@@ -228,8 +229,8 @@ static int mmc_read_ssr(struct mmc_card *card)
u32 *ssr;
if (!(card->csd.cmdclass & CCC_APP_SPEC)) {
- pr_warning("%s: card lacks mandatory SD Status "
- "function.\n", mmc_hostname(card->host));
+ pr_warn("%s: card lacks mandatory SD Status function\n",
+ mmc_hostname(card->host));
return 0;
}
@@ -239,8 +240,8 @@ static int mmc_read_ssr(struct mmc_card *card)
err = mmc_app_sd_status(card, ssr);
if (err) {
- pr_warning("%s: problem reading SD Status "
- "register.\n", mmc_hostname(card->host));
+ pr_warn("%s: problem reading SD Status register\n",
+ mmc_hostname(card->host));
err = 0;
goto out;
}
@@ -264,8 +265,8 @@ static int mmc_read_ssr(struct mmc_card *card)
card->ssr.erase_offset = eo * 1000;
}
} else {
- pr_warning("%s: SD Status: Invalid Allocation Unit size.\n",
- mmc_hostname(card->host));
+ pr_warn("%s: SD Status: Invalid Allocation Unit size\n",
+ mmc_hostname(card->host));
}
}
out:
@@ -285,8 +286,7 @@ static int mmc_read_switch(struct mmc_card *card)
return 0;
if (!(card->csd.cmdclass & CCC_SWITCH)) {
- pr_warning("%s: card lacks mandatory switch "
- "function, performance might suffer.\n",
+ pr_warn("%s: card lacks mandatory switch function, performance might suffer\n",
mmc_hostname(card->host));
return 0;
}
@@ -315,7 +315,7 @@ static int mmc_read_switch(struct mmc_card *card)
if (err != -EINVAL && err != -ENOSYS && err != -EFAULT)
goto out;
- pr_warning("%s: problem reading Bus Speed modes.\n",
+ pr_warn("%s: problem reading Bus Speed modes\n",
mmc_hostname(card->host));
err = 0;
@@ -371,8 +371,7 @@ int mmc_sd_switch_hs(struct mmc_card *card)
goto out;
if ((status[16] & 0xF) != 1) {
- pr_warning("%s: Problem switching card "
- "into high-speed mode!\n",
+ pr_warn("%s: Problem switching card into high-speed mode!\n",
mmc_hostname(card->host));
err = 0;
} else {
@@ -439,7 +438,7 @@ static int sd_select_driver_type(struct mmc_card *card, u8 *status)
return err;
if ((status[15] & 0xF) != drive_strength) {
- pr_warning("%s: Problem setting drive strength!\n",
+ pr_warn("%s: Problem setting drive strength!\n",
mmc_hostname(card->host));
return 0;
}
@@ -517,7 +516,7 @@ static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status)
return err;
if ((status[16] & 0xF) != card->sd_bus_speed)
- pr_warning("%s: Problem setting bus speed mode!\n",
+ pr_warn("%s: Problem setting bus speed mode!\n",
mmc_hostname(card->host));
else {
mmc_set_timing(card->host, timing);
@@ -597,7 +596,7 @@ static int sd_set_current_limit(struct mmc_card *card, u8 *status)
return err;
if (((status[15] >> 4) & 0x0F) != current_limit)
- pr_warning("%s: Problem setting current limit!\n",
+ pr_warn("%s: Problem setting current limit!\n",
mmc_hostname(card->host));
}
@@ -726,8 +725,7 @@ int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr)
try_again:
if (!retries) {
ocr &= ~SD_OCR_S18R;
- pr_warning("%s: Skipping voltage switch\n",
- mmc_hostname(host));
+ pr_warn("%s: Skipping voltage switch\n", mmc_hostname(host));
}
/*
@@ -871,9 +869,7 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
}
if (ro < 0) {
- pr_warning("%s: host does not "
- "support reading read-only "
- "switch. assuming write-enable.\n",
+ pr_warn("%s: host does not support reading read-only switch, assuming write-enable\n",
mmc_hostname(host));
} else if (ro > 0) {
mmc_card_set_readonly(card);
@@ -954,6 +950,13 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
}
/*
+	 * Handle DSR configuration only for cards that support DSR and
+	 * hosts that request it
+ */
+ if (card->csd.dsr_imp && host->dsr_req)
+ mmc_set_dsr(host);
+
+ /*
* Select card, as all following commands rely on that.
*/
if (!mmc_host_is_spi(host)) {
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index e636d9e99e4a..2439e717655b 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -216,8 +216,8 @@ static int sdio_enable_wide(struct mmc_card *card)
return ret;
if ((ctrl & SDIO_BUS_WIDTH_MASK) == SDIO_BUS_WIDTH_RESERVED)
- pr_warning("%s: SDIO_CCCR_IF is invalid: 0x%02x\n",
- mmc_hostname(card->host), ctrl);
+ pr_warn("%s: SDIO_CCCR_IF is invalid: 0x%02x\n",
+ mmc_hostname(card->host), ctrl);
/* set as 4-bit bus width */
ctrl &= ~SDIO_BUS_WIDTH_MASK;
@@ -605,8 +605,7 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
try_again:
if (!retries) {
- pr_warning("%s: Skipping voltage switch\n",
- mmc_hostname(host));
+ pr_warn("%s: Skipping voltage switch\n", mmc_hostname(host));
ocr &= ~R4_18V_PRESENT;
}
@@ -992,8 +991,16 @@ static int mmc_sdio_resume(struct mmc_host *host)
}
}
- if (!err && host->sdio_irqs)
- wake_up_process(host->sdio_irq_thread);
+ if (!err && host->sdio_irqs) {
+ if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) {
+ wake_up_process(host->sdio_irq_thread);
+ } else if (host->caps & MMC_CAP_SDIO_IRQ) {
+ mmc_host_clk_hold(host);
+ host->ops->enable_sdio_irq(host, 1);
+ mmc_host_clk_release(host);
+ }
+ }
+
mmc_release_host(host);
host->pm_flags &= ~MMC_PM_KEEP_POWER;
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 65cf7a7e05ea..6da97b170563 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -178,8 +178,8 @@ static int sdio_bus_remove(struct device *dev)
drv->remove(func);
if (func->irq_handler) {
- pr_warning("WARNING: driver %s did not remove "
- "its interrupt handler!\n", drv->name);
+ pr_warn("WARNING: driver %s did not remove its interrupt handler!\n",
+ drv->name);
sdio_claim_host(func);
sdio_release_irq(func);
sdio_release_host(func);
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index 5cc13c8d35bb..09cc67d028f0 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -69,16 +69,15 @@ static int process_sdio_pending_irqs(struct mmc_host *host)
if (pending & (1 << i)) {
func = card->sdio_func[i - 1];
if (!func) {
- pr_warning("%s: pending IRQ for "
- "non-existent function\n",
+ pr_warn("%s: pending IRQ for non-existent function\n",
mmc_card_id(card));
ret = -EINVAL;
} else if (func->irq_handler) {
func->irq_handler(func);
count++;
} else {
- pr_warning("%s: pending IRQ with no handler\n",
- sdio_func_id(func));
+ pr_warn("%s: pending IRQ with no handler\n",
+ sdio_func_id(func));
ret = -EINVAL;
}
}
@@ -208,7 +207,7 @@ static int sdio_card_irq_get(struct mmc_card *card)
host->sdio_irqs--;
return err;
}
- } else {
+ } else if (host->caps & MMC_CAP_SDIO_IRQ) {
mmc_host_clk_hold(host);
host->ops->enable_sdio_irq(host, 1);
mmc_host_clk_release(host);
@@ -229,7 +228,7 @@ static int sdio_card_irq_put(struct mmc_card *card)
if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) {
atomic_set(&host->sdio_irq_thread_abort, 1);
kthread_stop(host->sdio_irq_thread);
- } else {
+ } else if (host->caps & MMC_CAP_SDIO_IRQ) {
mmc_host_clk_hold(host);
host->ops->enable_sdio_irq(host, 0);
mmc_host_clk_release(host);
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 5f89cb83d5f0..69bbf2adb329 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -221,8 +221,6 @@ int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio,
ctx->override_cd_active_level = true;
ctx->cd_gpio = gpio_to_desc(gpio);
- mmc_gpiod_request_cd_irq(host);
-
return 0;
}
EXPORT_SYMBOL(mmc_gpio_request_cd);
@@ -283,6 +281,8 @@ EXPORT_SYMBOL(mmc_gpio_free_cd);
* @idx: index of the GPIO to obtain in the consumer
* @override_active_level: ignore %GPIO_ACTIVE_LOW flag
* @debounce: debounce time in microseconds
+ * @gpio_invert: if not NULL, receives whether the GPIO line is inverted
*
* Use this function in place of mmc_gpio_request_cd() to use the GPIO
* descriptor API. Note that it is paired with mmc_gpiod_free_cd() not
@@ -293,7 +293,7 @@ EXPORT_SYMBOL(mmc_gpio_free_cd);
*/
int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
unsigned int idx, bool override_active_level,
- unsigned int debounce)
+ unsigned int debounce, bool *gpio_invert)
{
struct mmc_gpio *ctx;
struct gpio_desc *desc;
@@ -308,20 +308,19 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
if (!con_id)
con_id = ctx->cd_label;
- desc = devm_gpiod_get_index(host->parent, con_id, idx);
+ desc = devm_gpiod_get_index(host->parent, con_id, idx, GPIOD_IN);
if (IS_ERR(desc))
return PTR_ERR(desc);
- ret = gpiod_direction_input(desc);
- if (ret < 0)
- return ret;
-
if (debounce) {
ret = gpiod_set_debounce(desc, debounce);
if (ret < 0)
return ret;
}
+ if (gpio_invert)
+ *gpio_invert = !gpiod_is_active_low(desc);
+
ctx->override_cd_active_level = override_active_level;
ctx->cd_gpio = desc;
@@ -330,6 +329,59 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
EXPORT_SYMBOL(mmc_gpiod_request_cd);
/**
+ * mmc_gpiod_request_ro - request a gpio descriptor for write protection
+ * @host: mmc host
+ * @con_id: function within the GPIO consumer
+ * @idx: index of the GPIO to obtain in the consumer
+ * @override_active_level: ignore %GPIO_ACTIVE_LOW flag
+ * @debounce: debounce time in microseconds
+ * @gpio_invert: if not NULL, receives whether the GPIO line is inverted
+ *
+ * Use this function in place of mmc_gpio_request_ro() to use the GPIO
+ * descriptor API. Note that it is paired with mmc_gpiod_free_ro() not
+ * mmc_gpio_free_ro().
+ *
+ * Returns zero on success, else an error.
+ */
+int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
+ unsigned int idx, bool override_active_level,
+ unsigned int debounce, bool *gpio_invert)
+{
+ struct mmc_gpio *ctx;
+ struct gpio_desc *desc;
+ int ret;
+
+ ret = mmc_gpio_alloc(host);
+ if (ret < 0)
+ return ret;
+
+ ctx = host->slot.handler_priv;
+
+ if (!con_id)
+ con_id = ctx->ro_label;
+
+ desc = devm_gpiod_get_index(host->parent, con_id, idx, GPIOD_IN);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ if (debounce) {
+ ret = gpiod_set_debounce(desc, debounce);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (gpio_invert)
+ *gpio_invert = !gpiod_is_active_low(desc);
+
+ ctx->override_ro_active_level = override_active_level;
+ ctx->ro_gpio = desc;
+
+ return 0;
+}
+EXPORT_SYMBOL(mmc_gpiod_request_ro);
+
+/**
* mmc_gpiod_free_cd - free the card-detection gpio descriptor
* @host: mmc host
*
@@ -348,7 +400,7 @@ void mmc_gpiod_free_cd(struct mmc_host *host)
host->slot.cd_irq = -EINVAL;
}
- devm_gpiod_put(&host->class_dev, ctx->cd_gpio);
+ devm_gpiod_put(host->parent, ctx->cd_gpio);
ctx->cd_gpio = NULL;
}
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 451135822464..13860656104b 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -14,6 +14,17 @@ config MMC_ARMMMCI
If unsure, say N.
+config MMC_QCOM_DML
+ tristate "Qualcomm Data Mover for SD Card Controller"
+ depends on MMC_ARMMMCI && QCOM_BAM_DMA
+ default y
+ help
+	  This selects the Qualcomm Data Mover lite/local on the SD Card controller.
+	  This option enables DMA to work correctly; if you are using Qualcomm
+	  SoCs with MMC, you will most likely need this option for DMA to work.
+
+	  If unsure, say N.
+
config MMC_PXA
tristate "Intel PXA25x/26x/27x Multimedia Card Interface support"
depends on ARCH_PXA
@@ -568,7 +579,8 @@ config SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
config MMC_DW
tristate "Synopsys DesignWare Memory Card Interface"
- depends on ARC || ARM
+ depends on HAS_DMA
+ depends on ARC || ARM || MIPS || COMPILE_TEST
help
This selects support for the Synopsys DesignWare Mobile Storage IP
block, this provides host support for SD and MMC interfaces, in both
@@ -626,6 +638,15 @@ config MMC_DW_PCI
If unsure, say N.
+config MMC_DW_ROCKCHIP
+ tristate "Rockchip specific extensions for Synopsys DW Memory Card Interface"
+ depends on MMC_DW && ARCH_ROCKCHIP
+ select MMC_DW_PLTFM
+ help
+ This selects support for Rockchip SoC specific extensions to the
+ Synopsys DesignWare Memory Card Interface driver. Select this option
+	  for platforms based on RK3066, RK3188 and RK3288 SoCs.
+
config MMC_SH_MMCIF
tristate "SuperH Internal MMCIF support"
depends on MMC_BLOCK && HAS_DMA
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index f211eede8db5..b09ecfb88269 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -3,6 +3,7 @@
#
obj-$(CONFIG_MMC_ARMMMCI) += mmci.o
+obj-$(CONFIG_MMC_QCOM_DML) += mmci_qcom_dml.o
obj-$(CONFIG_MMC_PXA) += pxamci.o
obj-$(CONFIG_MMC_MXC) += mxcmmc.o
obj-$(CONFIG_MMC_MXS) += mxs-mmc.o
@@ -45,6 +46,7 @@ obj-$(CONFIG_MMC_DW_PLTFM) += dw_mmc-pltfm.o
obj-$(CONFIG_MMC_DW_EXYNOS) += dw_mmc-exynos.o
obj-$(CONFIG_MMC_DW_K3) += dw_mmc-k3.o
obj-$(CONFIG_MMC_DW_PCI) += dw_mmc-pci.o
+obj-$(CONFIG_MMC_DW_ROCKCHIP) += dw_mmc-rockchip.o
obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o
obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o
obj-$(CONFIG_MMC_VUB300) += vub300.o
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index bb585d940901..77250d4b1979 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -17,6 +17,7 @@
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -2195,7 +2196,8 @@ static int __init atmci_init_slot(struct atmel_mci *host,
/* Assume card is present initially */
set_bit(ATMCI_CARD_PRESENT, &slot->flags);
if (gpio_is_valid(slot->detect_pin)) {
- if (gpio_request(slot->detect_pin, "mmc_detect")) {
+ if (devm_gpio_request(&host->pdev->dev, slot->detect_pin,
+ "mmc_detect")) {
dev_dbg(&mmc->class_dev, "no detect pin available\n");
slot->detect_pin = -EBUSY;
} else if (gpio_get_value(slot->detect_pin) ^
@@ -2208,7 +2210,8 @@ static int __init atmci_init_slot(struct atmel_mci *host,
mmc->caps |= MMC_CAP_NEEDS_POLL;
if (gpio_is_valid(slot->wp_pin)) {
- if (gpio_request(slot->wp_pin, "mmc_wp")) {
+ if (devm_gpio_request(&host->pdev->dev, slot->wp_pin,
+ "mmc_wp")) {
dev_dbg(&mmc->class_dev, "no WP pin available\n");
slot->wp_pin = -EBUSY;
}
@@ -2232,7 +2235,6 @@ static int __init atmci_init_slot(struct atmel_mci *host,
dev_dbg(&mmc->class_dev,
"could not request IRQ %d for detect pin\n",
gpio_to_irq(slot->detect_pin));
- gpio_free(slot->detect_pin);
slot->detect_pin = -EBUSY;
}
}
@@ -2242,7 +2244,7 @@ static int __init atmci_init_slot(struct atmel_mci *host,
return 0;
}
-static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot,
+static void atmci_cleanup_slot(struct atmel_mci_slot *slot,
unsigned int id)
{
/* Debugfs stuff is cleaned up by mmc core */
@@ -2257,10 +2259,7 @@ static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot,
free_irq(gpio_to_irq(pin), slot);
del_timer_sync(&slot->detect_timer);
- gpio_free(pin);
}
- if (gpio_is_valid(slot->wp_pin))
- gpio_free(slot->wp_pin);
slot->host->slot[id] = NULL;
mmc_free_host(slot->mmc);
@@ -2344,6 +2343,7 @@ static void __init atmci_get_cap(struct atmel_mci *host)
/* keep only major version number */
switch (version & 0xf00) {
+ case 0x600:
case 0x500:
host->caps.has_odd_clk_div = 1;
case 0x400:
@@ -2377,7 +2377,7 @@ static int __init atmci_probe(struct platform_device *pdev)
struct resource *regs;
unsigned int nr_slots;
int irq;
- int ret;
+ int ret, i;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs)
@@ -2395,7 +2395,7 @@ static int __init atmci_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- host = kzalloc(sizeof(struct atmel_mci), GFP_KERNEL);
+ host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
if (!host)
return -ENOMEM;
@@ -2403,20 +2403,18 @@ static int __init atmci_probe(struct platform_device *pdev)
spin_lock_init(&host->lock);
INIT_LIST_HEAD(&host->queue);
- host->mck = clk_get(&pdev->dev, "mci_clk");
- if (IS_ERR(host->mck)) {
- ret = PTR_ERR(host->mck);
- goto err_clk_get;
- }
+ host->mck = devm_clk_get(&pdev->dev, "mci_clk");
+ if (IS_ERR(host->mck))
+ return PTR_ERR(host->mck);
- ret = -ENOMEM;
- host->regs = ioremap(regs->start, resource_size(regs));
+ host->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
if (!host->regs)
- goto err_ioremap;
+ return -ENOMEM;
ret = clk_prepare_enable(host->mck);
if (ret)
- goto err_request_irq;
+ return ret;
+
atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
host->bus_hz = clk_get_rate(host->mck);
clk_disable_unprepare(host->mck);
@@ -2427,7 +2425,7 @@ static int __init atmci_probe(struct platform_device *pdev)
ret = request_irq(irq, atmci_interrupt, 0, dev_name(&pdev->dev), host);
if (ret)
- goto err_request_irq;
+ return ret;
/* Get MCI capabilities and set operations according to it */
atmci_get_cap(host);
@@ -2485,7 +2483,7 @@ static int __init atmci_probe(struct platform_device *pdev)
if (!host->buffer) {
ret = -ENOMEM;
dev_err(&pdev->dev, "buffer allocation failed\n");
- goto err_init_slot;
+ goto err_dma_alloc;
}
}
@@ -2495,16 +2493,16 @@ static int __init atmci_probe(struct platform_device *pdev)
return 0;
+err_dma_alloc:
+ for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
+ if (host->slot[i])
+ atmci_cleanup_slot(host->slot[i], i);
+ }
err_init_slot:
+ del_timer_sync(&host->timer);
if (host->dma.chan)
dma_release_channel(host->dma.chan);
free_irq(irq, host);
-err_request_irq:
- iounmap(host->regs);
-err_ioremap:
- clk_put(host->mck);
-err_clk_get:
- kfree(host);
return ret;
}
@@ -2528,14 +2526,11 @@ static int __exit atmci_remove(struct platform_device *pdev)
atmci_readl(host, ATMCI_SR);
clk_disable_unprepare(host->mck);
+ del_timer_sync(&host->timer);
if (host->dma.chan)
dma_release_channel(host->dma.chan);
free_irq(platform_get_irq(pdev, 0), host);
- iounmap(host->regs);
-
- clk_put(host->mck);
- kfree(host);
return 0;
}
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index 9c9f6af29251..725f6a6fd89b 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -1028,9 +1028,12 @@ static int au1xmmc_probe(struct platform_device *pdev)
host->clk = clk_get(&pdev->dev, ALCHEMY_PERIPH_CLK);
if (IS_ERR(host->clk)) {
dev_err(&pdev->dev, "cannot find clock\n");
+ ret = PTR_ERR(host->clk);
goto out_irq;
}
- if (clk_prepare_enable(host->clk)) {
+
+ ret = clk_prepare_enable(host->clk);
+ if (ret) {
dev_err(&pdev->dev, "cannot enable clock\n");
goto out_clk;
}
diff --git a/drivers/mmc/host/dw_mmc-pci.c b/drivers/mmc/host/dw_mmc-pci.c
index 6ada1b36685b..4c69fbd29811 100644
--- a/drivers/mmc/host/dw_mmc-pci.c
+++ b/drivers/mmc/host/dw_mmc-pci.c
@@ -95,9 +95,6 @@ static int dw_mci_pci_resume(struct device *dev)
return dw_mci_resume(host);
}
-#else
-#define dw_mci_pci_suspend NULL
-#define dw_mci_pci_resume NULL
#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(dw_mci_pci_pmops, dw_mci_pci_suspend, dw_mci_pci_resume);
diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c
index d4a47a9f5584..8b6572162ed9 100644
--- a/drivers/mmc/host/dw_mmc-pltfm.c
+++ b/drivers/mmc/host/dw_mmc-pltfm.c
@@ -21,6 +21,7 @@
#include <linux/mmc/mmc.h>
#include <linux/mmc/dw_mmc.h>
#include <linux/of.h>
+#include <linux/clk.h>
#include "dw_mmc.h"
#include "dw_mmc-pltfm.h"
@@ -30,10 +31,6 @@ static void dw_mci_pltfm_prepare_command(struct dw_mci *host, u32 *cmdr)
*cmdr |= SDMMC_CMD_USE_HOLD_REG;
}
-static const struct dw_mci_drv_data rockchip_drv_data = {
- .prepare_command = dw_mci_pltfm_prepare_command,
-};
-
static const struct dw_mci_drv_data socfpga_drv_data = {
.prepare_command = dw_mci_pltfm_prepare_command,
};
@@ -84,9 +81,6 @@ static int dw_mci_pltfm_resume(struct device *dev)
return dw_mci_resume(host);
}
-#else
-#define dw_mci_pltfm_suspend NULL
-#define dw_mci_pltfm_resume NULL
#endif /* CONFIG_PM_SLEEP */
SIMPLE_DEV_PM_OPS(dw_mci_pltfm_pmops, dw_mci_pltfm_suspend, dw_mci_pltfm_resume);
@@ -94,8 +88,6 @@ EXPORT_SYMBOL_GPL(dw_mci_pltfm_pmops);
static const struct of_device_id dw_mci_pltfm_match[] = {
{ .compatible = "snps,dw-mshc", },
- { .compatible = "rockchip,rk2928-dw-mshc",
- .data = &rockchip_drv_data },
{ .compatible = "altr,socfpga-dw-mshc",
.data = &socfpga_drv_data },
{},
diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
new file mode 100644
index 000000000000..f0c2cb1a210d
--- /dev/null
+++ b/drivers/mmc/host/dw_mmc-rockchip.c
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/dw_mmc.h>
+#include <linux/of_address.h>
+
+#include "dw_mmc.h"
+#include "dw_mmc-pltfm.h"
+
+#define RK3288_CLKGEN_DIV 2
+
+static void dw_mci_rockchip_prepare_command(struct dw_mci *host, u32 *cmdr)
+{
+ *cmdr |= SDMMC_CMD_USE_HOLD_REG;
+}
+
+static int dw_mci_rk3288_setup_clock(struct dw_mci *host)
+{
+ host->bus_hz /= RK3288_CLKGEN_DIV;
+
+ return 0;
+}
+
+static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios)
+{
+ int ret;
+ unsigned int cclkin;
+ u32 bus_hz;
+
+ /*
+ * cclkin: source clock of mmc controller
+ * bus_hz: card interface clock generated by CLKGEN
+ * bus_hz = cclkin / RK3288_CLKGEN_DIV
+ * ios->clock = (div == 0) ? bus_hz : (bus_hz / (2 * div))
+ *
+	 * Note: div can only be 0 or 1;
+	 * in DDR52 8-bit mode (only eMMC works in 8-bit mode),
+	 * div must be set to 1
+ */
+ if (ios->bus_width == MMC_BUS_WIDTH_8 &&
+ ios->timing == MMC_TIMING_MMC_DDR52)
+ cclkin = 2 * ios->clock * RK3288_CLKGEN_DIV;
+ else
+ cclkin = ios->clock * RK3288_CLKGEN_DIV;
+
+ ret = clk_set_rate(host->ciu_clk, cclkin);
+ if (ret)
+ dev_warn(host->dev, "failed to set rate %uHz\n", ios->clock);
+
+ bus_hz = clk_get_rate(host->ciu_clk) / RK3288_CLKGEN_DIV;
+ if (bus_hz != host->bus_hz) {
+ host->bus_hz = bus_hz;
+ /* force dw_mci_setup_bus() */
+ host->current_speed = 0;
+ }
+}
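
A worked example of the clock relationships described in the comment above, assuming a 52 MHz DDR52 request in 8-bit mode (illustrative numbers only, not part of the patch):

/*
 * ios->clock = 52 MHz, 8-bit bus, MMC_TIMING_MMC_DDR52:
 *   cclkin = 2 * 52 MHz * RK3288_CLKGEN_DIV = 208 MHz  (rate set on ciu_clk)
 *   bus_hz = 208 MHz / RK3288_CLKGEN_DIV    = 104 MHz
 *   dw_mci_setup_bus() then computes div = 1, so the card sees
 *   bus_hz / (2 * div) = 52 MHz as requested.
 * In every other mode cclkin = 52 MHz * RK3288_CLKGEN_DIV = 104 MHz,
 * bus_hz = 52 MHz and div = 0, so the card clock is bus_hz directly.
 */
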
+
+static const struct dw_mci_drv_data rk2928_drv_data = {
+ .prepare_command = dw_mci_rockchip_prepare_command,
+};
+
+static const struct dw_mci_drv_data rk3288_drv_data = {
+ .prepare_command = dw_mci_rockchip_prepare_command,
+ .set_ios = dw_mci_rk3288_set_ios,
+ .setup_clock = dw_mci_rk3288_setup_clock,
+};
+
+static const struct of_device_id dw_mci_rockchip_match[] = {
+ { .compatible = "rockchip,rk2928-dw-mshc",
+ .data = &rk2928_drv_data },
+ { .compatible = "rockchip,rk3288-dw-mshc",
+ .data = &rk3288_drv_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dw_mci_rockchip_match);
+
+static int dw_mci_rockchip_probe(struct platform_device *pdev)
+{
+ const struct dw_mci_drv_data *drv_data;
+ const struct of_device_id *match;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ match = of_match_node(dw_mci_rockchip_match, pdev->dev.of_node);
+ drv_data = match->data;
+
+ return dw_mci_pltfm_register(pdev, drv_data);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int dw_mci_rockchip_suspend(struct device *dev)
+{
+ struct dw_mci *host = dev_get_drvdata(dev);
+
+ return dw_mci_suspend(host);
+}
+
+static int dw_mci_rockchip_resume(struct device *dev)
+{
+ struct dw_mci *host = dev_get_drvdata(dev);
+
+ return dw_mci_resume(host);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(dw_mci_rockchip_pmops,
+ dw_mci_rockchip_suspend,
+ dw_mci_rockchip_resume);
+
+static struct platform_driver dw_mci_rockchip_pltfm_driver = {
+ .probe = dw_mci_rockchip_probe,
+ .remove = __exit_p(dw_mci_pltfm_remove),
+ .driver = {
+ .name = "dwmmc_rockchip",
+ .of_match_table = dw_mci_rockchip_match,
+ .pm = &dw_mci_rockchip_pmops,
+ },
+};
+
+module_platform_driver(dw_mci_rockchip_pltfm_driver);
+
+MODULE_AUTHOR("Addy Ke <addy.ke@rock-chips.com>");
+MODULE_DESCRIPTION("Rockchip Specific DW-MSHC Driver Extension");
+MODULE_ALIAS("platform:dwmmc-rockchip");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 8f216edbdf08..69f0cc68d5b2 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -29,6 +29,7 @@
#include <linux/irq.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/dw_mmc.h>
#include <linux/bitops.h>
@@ -81,36 +82,6 @@ struct idmac_desc {
};
#endif /* CONFIG_MMC_DW_IDMAC */
-static const u8 tuning_blk_pattern_4bit[] = {
- 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
- 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
- 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
- 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
- 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
- 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
- 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
- 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
-};
-
-static const u8 tuning_blk_pattern_8bit[] = {
- 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
- 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
- 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
- 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
- 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
- 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
- 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
- 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
- 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
- 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
- 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
- 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
- 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
- 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
- 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
- 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
-};
-
static bool dw_mci_reset(struct dw_mci *host);
#if defined(CONFIG_DEBUG_FS)
@@ -234,10 +205,13 @@ err:
}
#endif /* defined(CONFIG_DEBUG_FS) */
+static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg);
+
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
struct mmc_data *data;
struct dw_mci_slot *slot = mmc_priv(mmc);
+ struct dw_mci *host = slot->host;
const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
u32 cmdr;
cmd->error = -EINPROGRESS;
@@ -253,6 +227,34 @@ static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
+ if (cmd->opcode == SD_SWITCH_VOLTAGE) {
+ u32 clk_en_a;
+
+ /* Special bit makes CMD11 not die */
+ cmdr |= SDMMC_CMD_VOLT_SWITCH;
+
+ /* Change state to continue to handle CMD11 weirdness */
+ WARN_ON(slot->host->state != STATE_SENDING_CMD);
+ slot->host->state = STATE_SENDING_CMD11;
+
+ /*
+ * We need to disable low power mode (automatic clock stop)
+ * while doing voltage switch so we don't confuse the card,
+ * since stopping the clock is a specific part of the UHS
+ * voltage change dance.
+ *
+ * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
+ * unconditionally turned back on in dw_mci_setup_bus() if it's
+ * ever called with a non-zero clock. That shouldn't happen
+ * until the voltage change is all done.
+ */
+ clk_en_a = mci_readl(host, CLKENA);
+ clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
+ mci_writel(host, CLKENA, clk_en_a);
+ mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
+ SDMMC_CMD_PRV_DAT_WAIT, 0);
+ }
+
if (cmd->flags & MMC_RSP_PRESENT) {
/* We expect a response, so set this bit */
cmdr |= SDMMC_CMD_RESP_EXP;
@@ -775,11 +777,15 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
unsigned int clock = slot->clock;
u32 div;
u32 clk_en_a;
+ u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;
+
+ /* We must continue to set bit 28 in CMD until the change is complete */
+ if (host->state == STATE_WAITING_CMD11_DONE)
+ sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;
if (!clock) {
mci_writel(host, CLKENA, 0);
- mci_send_cmd(slot,
- SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
+ mci_send_cmd(slot, sdmmc_cmd_bits, 0);
} else if (clock != host->current_speed || force_clkinit) {
div = host->bus_hz / clock;
if (host->bus_hz % clock && host->bus_hz > clock)
@@ -803,15 +809,13 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
mci_writel(host, CLKSRC, 0);
/* inform CIU */
- mci_send_cmd(slot,
- SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
+ mci_send_cmd(slot, sdmmc_cmd_bits, 0);
/* set clock to desired speed */
mci_writel(host, CLKDIV, div);
/* inform CIU */
- mci_send_cmd(slot,
- SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
+ mci_send_cmd(slot, sdmmc_cmd_bits, 0);
/* enable clock; only low power if no SDIO */
clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
@@ -820,8 +824,7 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
mci_writel(host, CLKENA, clk_en_a);
/* inform CIU */
- mci_send_cmd(slot,
- SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
+ mci_send_cmd(slot, sdmmc_cmd_bits, 0);
	/* keep the clock with reflecting clock divisor */
slot->__clk_old = clock << div;
@@ -897,6 +900,17 @@ static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
slot->mrq = mrq;
+ if (host->state == STATE_WAITING_CMD11_DONE) {
+ dev_warn(&slot->mmc->class_dev,
+ "Voltage change didn't complete\n");
+ /*
+ * this case isn't expected to happen, so we can
+ * either crash here or just try to continue on
+ * in the closest possible state
+ */
+ host->state = STATE_IDLE;
+ }
+
if (host->state == STATE_IDLE) {
host->state = STATE_SENDING_CMD;
dw_mci_start_request(host, slot);
@@ -936,6 +950,7 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct dw_mci_slot *slot = mmc_priv(mmc);
const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
u32 regs;
+ int ret;
switch (ios->bus_width) {
case MMC_BUS_WIDTH_4:
@@ -972,14 +987,43 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
/* Slot specific timing and width adjustment */
dw_mci_setup_bus(slot, false);
+ if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
+ slot->host->state = STATE_IDLE;
+
switch (ios->power_mode) {
case MMC_POWER_UP:
+ if (!IS_ERR(mmc->supply.vmmc)) {
+ ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
+ ios->vdd);
+ if (ret) {
+ dev_err(slot->host->dev,
+ "failed to enable vmmc regulator\n");
+				/* return if we failed to turn on vmmc */
+ return;
+ }
+ }
+ if (!IS_ERR(mmc->supply.vqmmc) && !slot->host->vqmmc_enabled) {
+ ret = regulator_enable(mmc->supply.vqmmc);
+ if (ret < 0)
+ dev_err(slot->host->dev,
+ "failed to enable vqmmc regulator\n");
+ else
+ slot->host->vqmmc_enabled = true;
+ }
set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
regs = mci_readl(slot->host, PWREN);
regs |= (1 << slot->id);
mci_writel(slot->host, PWREN, regs);
break;
case MMC_POWER_OFF:
+ if (!IS_ERR(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+
+ if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled) {
+ regulator_disable(mmc->supply.vqmmc);
+ slot->host->vqmmc_enabled = false;
+ }
+
regs = mci_readl(slot->host, PWREN);
regs &= ~(1 << slot->id);
mci_writel(slot->host, PWREN, regs);
@@ -989,6 +1033,59 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
}
}
+static int dw_mci_card_busy(struct mmc_host *mmc)
+{
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+ u32 status;
+
+ /*
+	 * Check the busy bit, which is set while the card is holding the
+	 * data lines (DAT[3:0]) low to signal busy
+ */
+ status = mci_readl(slot->host, STATUS);
+
+ return !!(status & SDMMC_STATUS_BUSY);
+}
+
+static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+ struct dw_mci *host = slot->host;
+ u32 uhs;
+ u32 v18 = SDMMC_UHS_18V << slot->id;
+ int min_uv, max_uv;
+ int ret;
+
+ /*
+ * Program the voltage. Note that some instances of dw_mmc may use
+ * the UHS_REG for this. For other instances (like exynos) the UHS_REG
+ * does no harm but you need to set the regulator directly. Try both.
+ */
+ uhs = mci_readl(host, UHS_REG);
+ if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
+ min_uv = 2700000;
+ max_uv = 3600000;
+ uhs &= ~v18;
+ } else {
+ min_uv = 1700000;
+ max_uv = 1950000;
+ uhs |= v18;
+ }
+ if (!IS_ERR(mmc->supply.vqmmc)) {
+ ret = regulator_set_voltage(mmc->supply.vqmmc, min_uv, max_uv);
+
+ if (ret) {
+ dev_err(&mmc->class_dev,
+ "Regulator set error %d: %d - %d\n",
+ ret, min_uv, max_uv);
+ return ret;
+ }
+ }
+ mci_writel(host, UHS_REG, uhs);
+
+ return 0;
+}
+
static int dw_mci_get_ro(struct mmc_host *mmc)
{
int read_only;
@@ -1131,6 +1228,9 @@ static const struct mmc_host_ops dw_mci_ops = {
.get_cd = dw_mci_get_cd,
.enable_sdio_irq = dw_mci_enable_sdio_irq,
.execute_tuning = dw_mci_execute_tuning,
+ .card_busy = dw_mci_card_busy,
+ .start_signal_voltage_switch = dw_mci_switch_voltage,
+
};
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
@@ -1154,7 +1254,11 @@ static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
dw_mci_start_request(host, slot);
} else {
dev_vdbg(host->dev, "list empty\n");
- host->state = STATE_IDLE;
+
+ if (host->state == STATE_SENDING_CMD11)
+ host->state = STATE_WAITING_CMD11_DONE;
+ else
+ host->state = STATE_IDLE;
}
spin_unlock(&host->lock);
@@ -1265,8 +1369,10 @@ static void dw_mci_tasklet_func(unsigned long priv)
switch (state) {
case STATE_IDLE:
+ case STATE_WAITING_CMD11_DONE:
break;
+ case STATE_SENDING_CMD11:
case STATE_SENDING_CMD:
if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
&host->pending_events))
@@ -1299,6 +1405,14 @@ static void dw_mci_tasklet_func(unsigned long priv)
/* fall through */
case STATE_SENDING_DATA:
+ /*
+ * We could get a data error and never a transfer
+ * complete so we'd better check for it here.
+ *
+ * Note that we don't really care if we also got a
+ * transfer complete; stopping the DMA and sending an
+ * abort won't hurt.
+ */
if (test_and_clear_bit(EVENT_DATA_ERROR,
&host->pending_events)) {
dw_mci_stop_dma(host);
@@ -1312,7 +1426,29 @@ static void dw_mci_tasklet_func(unsigned long priv)
break;
set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
+
+ /*
+ * Handle an EVENT_DATA_ERROR that might have shown up
+ * before the transfer completed. This might not have
+ * been caught by the check above because the interrupt
+ * could have gone off between the previous check and
+ * the check for transfer complete.
+ *
+ * Technically this ought not be needed assuming we
+ * get a DATA_COMPLETE eventually (we'll notice the
+ * error and end the request), but it shouldn't hurt.
+ *
+ * This has the advantage of sending the stop command.
+ */
+ if (test_and_clear_bit(EVENT_DATA_ERROR,
+ &host->pending_events)) {
+ dw_mci_stop_dma(host);
+ send_stop_abort(host, data);
+ state = STATE_DATA_ERROR;
+ break;
+ }
prev_state = state = STATE_DATA_BUSY;
+
/* fall through */
case STATE_DATA_BUSY:
@@ -1335,6 +1471,22 @@ static void dw_mci_tasklet_func(unsigned long priv)
/* stop command for open-ended transfer*/
if (data->stop)
send_stop_abort(host, data);
+ } else {
+ /*
+ * If we don't have a command complete now we'll
+ * never get one since we just reset everything;
+ * better end the request.
+ *
+ * If we do have a command complete we'll fall
+ * through to the SENDING_STOP command and
+ * everything will be peachy keen.
+ */
+ if (!test_bit(EVENT_CMD_COMPLETE,
+ &host->pending_events)) {
+ host->cmd = NULL;
+ dw_mci_request_end(host, mrq);
+ goto unlock;
+ }
}
/*
@@ -1821,6 +1973,14 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
}
if (pending) {
+ /* Check volt switch first, since it can look like an error */
+ if ((host->state == STATE_SENDING_CMD11) &&
+ (pending & SDMMC_INT_VOLT_SWITCH)) {
+ mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
+ pending &= ~SDMMC_INT_VOLT_SWITCH;
+ dw_mci_cmd_interrupt(host, pending);
+ }
+
if (pending & DW_MCI_CMD_ERROR_FLAGS) {
mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
host->cmd_status = pending;
@@ -1926,7 +2086,9 @@ static void dw_mci_work_routine_card(struct work_struct *work)
switch (host->state) {
case STATE_IDLE:
+ case STATE_WAITING_CMD11_DONE:
break;
+ case STATE_SENDING_CMD11:
case STATE_SENDING_CMD:
mrq->cmd->error = -ENOMEDIUM;
if (!mrq->data)
@@ -2028,10 +2190,6 @@ static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
{
return 0;
}
-static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
-{
- return NULL;
-}
#endif /* CONFIG_OF */
static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
@@ -2064,7 +2222,13 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
mmc->f_max = freq[1];
}
- mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+	/* if there are external regulators, get them */
+ ret = mmc_regulator_get_supply(mmc);
+ if (ret == -EPROBE_DEFER)
+ goto err_host_allocated;
+
+ if (!mmc->ocr_avail)
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
if (host->pdata->caps)
mmc->caps = host->pdata->caps;
@@ -2085,7 +2249,9 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
if (host->pdata->caps2)
mmc->caps2 = host->pdata->caps2;
- mmc_of_parse(mmc);
+ ret = mmc_of_parse(mmc);
+ if (ret)
+ goto err_host_allocated;
if (host->pdata->blk_settings) {
mmc->max_segs = host->pdata->blk_settings->max_segs;
@@ -2117,7 +2283,7 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
ret = mmc_add_host(mmc);
if (ret)
- goto err_setup_bus;
+ goto err_host_allocated;
#if defined(CONFIG_DEBUG_FS)
dw_mci_init_debugfs(slot);
@@ -2128,9 +2294,9 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
return 0;
-err_setup_bus:
+err_host_allocated:
mmc_free_host(mmc);
- return -EINVAL;
+ return ret;
}
static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
@@ -2423,24 +2589,6 @@ int dw_mci_probe(struct dw_mci *host)
}
}
- host->vmmc = devm_regulator_get_optional(host->dev, "vmmc");
- if (IS_ERR(host->vmmc)) {
- ret = PTR_ERR(host->vmmc);
- if (ret == -EPROBE_DEFER)
- goto err_clk_ciu;
-
- dev_info(host->dev, "no vmmc regulator found: %d\n", ret);
- host->vmmc = NULL;
- } else {
- ret = regulator_enable(host->vmmc);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(host->dev,
- "regulator_enable fail: %d\n", ret);
- goto err_clk_ciu;
- }
- }
-
host->quirks = host->pdata->quirks;
spin_lock_init(&host->lock);
@@ -2584,8 +2732,6 @@ err_workqueue:
err_dmaunmap:
if (host->use_dma && host->dma_ops->exit)
host->dma_ops->exit(host);
- if (host->vmmc)
- regulator_disable(host->vmmc);
err_clk_ciu:
if (!IS_ERR(host->ciu_clk))
@@ -2621,9 +2767,6 @@ void dw_mci_remove(struct dw_mci *host)
if (host->use_dma && host->dma_ops->exit)
host->dma_ops->exit(host);
- if (host->vmmc)
- regulator_disable(host->vmmc);
-
if (!IS_ERR(host->ciu_clk))
clk_disable_unprepare(host->ciu_clk);
@@ -2640,9 +2783,6 @@ EXPORT_SYMBOL(dw_mci_remove);
*/
int dw_mci_suspend(struct dw_mci *host)
{
- if (host->vmmc)
- regulator_disable(host->vmmc);
-
return 0;
}
EXPORT_SYMBOL(dw_mci_suspend);
@@ -2651,15 +2791,6 @@ int dw_mci_resume(struct dw_mci *host)
{
int i, ret;
- if (host->vmmc) {
- ret = regulator_enable(host->vmmc);
- if (ret) {
- dev_err(host->dev,
- "failed to enable regulator: %d\n", ret);
- return ret;
- }
- }
-
if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
ret = -ENODEV;
return ret;
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 08fd956d81f3..01b99e8a9190 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -99,6 +99,7 @@
#define SDMMC_INT_HLE BIT(12)
#define SDMMC_INT_FRUN BIT(11)
#define SDMMC_INT_HTO BIT(10)
+#define SDMMC_INT_VOLT_SWITCH BIT(10) /* overloads bit 10! */
#define SDMMC_INT_DRTO BIT(9)
#define SDMMC_INT_RTO BIT(8)
#define SDMMC_INT_DCRC BIT(7)
@@ -113,6 +114,7 @@
/* Command register defines */
#define SDMMC_CMD_START BIT(31)
#define SDMMC_CMD_USE_HOLD_REG BIT(29)
+#define SDMMC_CMD_VOLT_SWITCH BIT(28)
#define SDMMC_CMD_CCS_EXP BIT(23)
#define SDMMC_CMD_CEATA_RD BIT(22)
#define SDMMC_CMD_UPD_CLK BIT(21)
@@ -130,6 +132,7 @@
/* Status register defines */
#define SDMMC_GET_FCNT(x) (((x)>>17) & 0x1FFF)
#define SDMMC_STATUS_DMA_REQ BIT(31)
+#define SDMMC_STATUS_BUSY BIT(9)
/* FIFOTH register defines */
#define SDMMC_SET_FIFOTH(m, r, t) (((m) & 0x7) << 28 | \
((r) & 0xFFF) << 16 | \
@@ -150,7 +153,7 @@
#define SDMMC_GET_VERID(x) ((x) & 0xFFFF)
/* Card read threshold */
#define SDMMC_SET_RD_THLD(v, x) (((v) & 0x1FFF) << 16 | (x))
-
+#define SDMMC_UHS_18V BIT(0)
/* All ctrl reset bits */
#define SDMMC_CTRL_ALL_RESET_FLAGS \
(SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET | SDMMC_CTRL_DMA_RESET)
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 537d6c7a5ae4..76e8bce6f46e 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -30,7 +30,9 @@
#include <asm/mach-jz4740/gpio.h>
#include <asm/cacheflush.h>
#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <asm/mach-jz4740/dma.h>
#include <asm/mach-jz4740/jz4740_mmc.h>
#define JZ_REG_MMC_STRPCL 0x00
@@ -112,6 +114,11 @@ enum jz4740_mmc_state {
JZ4740_MMC_STATE_DONE,
};
+struct jz4740_mmc_host_next {
+ int sg_len;
+ s32 cookie;
+};
+
struct jz4740_mmc_host {
struct mmc_host *mmc;
struct platform_device *pdev;
@@ -122,6 +129,7 @@ struct jz4740_mmc_host {
int card_detect_irq;
void __iomem *base;
+ struct resource *mem_res;
struct mmc_request *req;
struct mmc_command *cmd;
@@ -136,8 +144,220 @@ struct jz4740_mmc_host {
struct timer_list timeout_timer;
struct sg_mapping_iter miter;
enum jz4740_mmc_state state;
+
+ /* DMA support */
+ struct dma_chan *dma_rx;
+ struct dma_chan *dma_tx;
+ struct jz4740_mmc_host_next next_data;
+ bool use_dma;
+ int sg_len;
+
+/* The DMA trigger level is 8 words: the DMA read request is raised when
+ * MSC_RXFIFO holds >= 8 data words, and the DMA write request when
+ * MSC_TXFIFO holds fewer than 8.
+ */
+#define JZ4740_MMC_FIFO_HALF_SIZE 8
};
+/*----------------------------------------------------------------------------*/
+/* DMA infrastructure */
+
+static void jz4740_mmc_release_dma_channels(struct jz4740_mmc_host *host)
+{
+ if (!host->use_dma)
+ return;
+
+ dma_release_channel(host->dma_tx);
+ dma_release_channel(host->dma_rx);
+}
+
+static int jz4740_mmc_acquire_dma_channels(struct jz4740_mmc_host *host)
+{
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ host->dma_tx = dma_request_channel(mask, NULL, host);
+ if (!host->dma_tx) {
+ dev_err(mmc_dev(host->mmc), "Failed to get dma_tx channel\n");
+ return -ENODEV;
+ }
+
+ host->dma_rx = dma_request_channel(mask, NULL, host);
+ if (!host->dma_rx) {
+ dev_err(mmc_dev(host->mmc), "Failed to get dma_rx channel\n");
+ goto free_master_write;
+ }
+
+ /* Initialize DMA pre request cookie */
+ host->next_data.cookie = 1;
+
+ return 0;
+
+free_master_write:
+ dma_release_channel(host->dma_tx);
+ return -ENODEV;
+}
+
+static inline int jz4740_mmc_get_dma_dir(struct mmc_data *data)
+{
+ return (data->flags & MMC_DATA_READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+}
+
+static inline struct dma_chan *jz4740_mmc_get_dma_chan(struct jz4740_mmc_host *host,
+ struct mmc_data *data)
+{
+ return (data->flags & MMC_DATA_READ) ? host->dma_rx : host->dma_tx;
+}
+
+static void jz4740_mmc_dma_unmap(struct jz4740_mmc_host *host,
+ struct mmc_data *data)
+{
+ struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
+ enum dma_data_direction dir = jz4740_mmc_get_dma_dir(data);
+
+ dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+}
+
+/* Prepares DMA data for current/next transfer, returns non-zero on failure */
+static int jz4740_mmc_prepare_dma_data(struct jz4740_mmc_host *host,
+ struct mmc_data *data,
+ struct jz4740_mmc_host_next *next,
+ struct dma_chan *chan)
+{
+ struct jz4740_mmc_host_next *next_data = &host->next_data;
+ enum dma_data_direction dir = jz4740_mmc_get_dma_dir(data);
+ int sg_len;
+
+ if (!next && data->host_cookie &&
+ data->host_cookie != host->next_data.cookie) {
+ dev_warn(mmc_dev(host->mmc),
+ "[%s] invalid cookie: data->host_cookie %d host->next_data.cookie %d\n",
+ __func__,
+ data->host_cookie,
+ host->next_data.cookie);
+ data->host_cookie = 0;
+ }
+
+ /* Check if next job is already prepared */
+ if (next || data->host_cookie != host->next_data.cookie) {
+ sg_len = dma_map_sg(chan->device->dev,
+ data->sg,
+ data->sg_len,
+ dir);
+
+ } else {
+ sg_len = next_data->sg_len;
+ next_data->sg_len = 0;
+ }
+
+ if (sg_len <= 0) {
+ dev_err(mmc_dev(host->mmc),
+ "Failed to map scatterlist for DMA operation\n");
+ return -EINVAL;
+ }
+
+ if (next) {
+ next->sg_len = sg_len;
+ data->host_cookie = ++next->cookie < 0 ? 1 : next->cookie;
+ } else
+ host->sg_len = sg_len;
+
+ return 0;
+}
+
+static int jz4740_mmc_start_dma_transfer(struct jz4740_mmc_host *host,
+ struct mmc_data *data)
+{
+ int ret;
+ struct dma_chan *chan;
+ struct dma_async_tx_descriptor *desc;
+ struct dma_slave_config conf = {
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .src_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
+ .dst_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
+ };
+
+ if (data->flags & MMC_DATA_WRITE) {
+ conf.direction = DMA_MEM_TO_DEV;
+ conf.dst_addr = host->mem_res->start + JZ_REG_MMC_TXFIFO;
+ conf.slave_id = JZ4740_DMA_TYPE_MMC_TRANSMIT;
+ chan = host->dma_tx;
+ } else {
+ conf.direction = DMA_DEV_TO_MEM;
+ conf.src_addr = host->mem_res->start + JZ_REG_MMC_RXFIFO;
+ conf.slave_id = JZ4740_DMA_TYPE_MMC_RECEIVE;
+ chan = host->dma_rx;
+ }
+
+ ret = jz4740_mmc_prepare_dma_data(host, data, NULL, chan);
+ if (ret)
+ return ret;
+
+ dmaengine_slave_config(chan, &conf);
+ desc = dmaengine_prep_slave_sg(chan,
+ data->sg,
+ host->sg_len,
+ conf.direction,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc) {
+ dev_err(mmc_dev(host->mmc),
+ "Failed to allocate DMA %s descriptor",
+ conf.direction == DMA_MEM_TO_DEV ? "TX" : "RX");
+ goto dma_unmap;
+ }
+
+ dmaengine_submit(desc);
+ dma_async_issue_pending(chan);
+
+ return 0;
+
+dma_unmap:
+ jz4740_mmc_dma_unmap(host, data);
+ return -ENOMEM;
+}
+
+static void jz4740_mmc_pre_request(struct mmc_host *mmc,
+ struct mmc_request *mrq,
+ bool is_first_req)
+{
+ struct jz4740_mmc_host *host = mmc_priv(mmc);
+ struct mmc_data *data = mrq->data;
+ struct jz4740_mmc_host_next *next_data = &host->next_data;
+
+ BUG_ON(data->host_cookie);
+
+ if (host->use_dma) {
+ struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
+
+ if (jz4740_mmc_prepare_dma_data(host, data, next_data, chan))
+ data->host_cookie = 0;
+ }
+}
+
+static void jz4740_mmc_post_request(struct mmc_host *mmc,
+ struct mmc_request *mrq,
+ int err)
+{
+ struct jz4740_mmc_host *host = mmc_priv(mmc);
+ struct mmc_data *data = mrq->data;
+
+ if (host->use_dma && data->host_cookie) {
+ jz4740_mmc_dma_unmap(host, data);
+ data->host_cookie = 0;
+ }
+
+ if (err) {
+ struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
+
+ dmaengine_terminate_all(chan);
+ }
+}
+
+/*----------------------------------------------------------------------------*/
+
static void jz4740_mmc_set_irq_enabled(struct jz4740_mmc_host *host,
unsigned int irq, bool enabled)
{
@@ -442,6 +662,8 @@ static void jz4740_mmc_send_command(struct jz4740_mmc_host *host,
cmdat |= JZ_MMC_CMDAT_WRITE;
if (cmd->data->flags & MMC_DATA_STREAM)
cmdat |= JZ_MMC_CMDAT_STREAM;
+ if (host->use_dma)
+ cmdat |= JZ_MMC_CMDAT_DMA_EN;
writew(cmd->data->blksz, host->base + JZ_REG_MMC_BLKLEN);
writew(cmd->data->blocks, host->base + JZ_REG_MMC_NOB);
@@ -474,6 +696,7 @@ static irqreturn_t jz_mmc_irq_worker(int irq, void *devid)
struct jz4740_mmc_host *host = (struct jz4740_mmc_host *)devid;
struct mmc_command *cmd = host->req->cmd;
struct mmc_request *req = host->req;
+ struct mmc_data *data = cmd->data;
bool timeout = false;
if (cmd->error)
@@ -484,23 +707,37 @@ static irqreturn_t jz_mmc_irq_worker(int irq, void *devid)
if (cmd->flags & MMC_RSP_PRESENT)
jz4740_mmc_read_response(host, cmd);
- if (!cmd->data)
+ if (!data)
break;
jz_mmc_prepare_data_transfer(host);
case JZ4740_MMC_STATE_TRANSFER_DATA:
- if (cmd->data->flags & MMC_DATA_READ)
- timeout = jz4740_mmc_read_data(host, cmd->data);
+ if (host->use_dma) {
+			/* Use DMA if enabled.
+			 * The transfer direction is derived from the data
+			 * flags in jz4740_mmc_prepare_dma_data() and
+			 * jz4740_mmc_start_dma_transfer().
+ */
+ timeout = jz4740_mmc_start_dma_transfer(host, data);
+ data->bytes_xfered = data->blocks * data->blksz;
+ } else if (data->flags & MMC_DATA_READ)
+			/* Use PIO if DMA is not enabled.
+			 * The transfer direction was already derived from
+			 * the data flags in jz_mmc_prepare_data_transfer().
+ */
+ timeout = jz4740_mmc_read_data(host, data);
else
- timeout = jz4740_mmc_write_data(host, cmd->data);
+ timeout = jz4740_mmc_write_data(host, data);
if (unlikely(timeout)) {
host->state = JZ4740_MMC_STATE_TRANSFER_DATA;
break;
}
- jz4740_mmc_transfer_check_state(host, cmd->data);
+ jz4740_mmc_transfer_check_state(host, data);
timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_DATA_TRAN_DONE);
if (unlikely(timeout)) {
@@ -664,6 +901,8 @@ static void jz4740_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
static const struct mmc_host_ops jz4740_mmc_ops = {
.request = jz4740_mmc_request,
+ .pre_req = jz4740_mmc_pre_request,
+ .post_req = jz4740_mmc_post_request,
.set_ios = jz4740_mmc_set_ios,
.get_ro = mmc_gpio_get_ro,
.get_cd = mmc_gpio_get_cd,
@@ -757,7 +996,6 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
struct mmc_host *mmc;
struct jz4740_mmc_host *host;
struct jz4740_mmc_platform_data *pdata;
- struct resource *res;
pdata = pdev->dev.platform_data;
@@ -784,10 +1022,11 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
goto err_free_host;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->base = devm_ioremap_resource(&pdev->dev, res);
+ host->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ host->base = devm_ioremap_resource(&pdev->dev, host->mem_res);
if (IS_ERR(host->base)) {
ret = PTR_ERR(host->base);
+ dev_err(&pdev->dev, "Failed to ioremap base memory\n");
goto err_free_host;
}
@@ -834,6 +1073,10 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
/* It is not important when it times out, it just needs to timeout. */
set_timer_slack(&host->timeout_timer, HZ);
+ host->use_dma = true;
+ if (host->use_dma && jz4740_mmc_acquire_dma_channels(host) != 0)
+ host->use_dma = false;
+
platform_set_drvdata(pdev, host);
ret = mmc_add_host(mmc);
@@ -843,6 +1086,10 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
}
dev_info(&pdev->dev, "JZ SD/MMC card driver registered\n");
+ dev_info(&pdev->dev, "Using %s, %d-bit mode\n",
+ host->use_dma ? "DMA" : "PIO",
+ (mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1);
+
return 0;
err_free_irq:
@@ -850,6 +1097,8 @@ err_free_irq:
err_free_gpios:
jz4740_mmc_free_gpios(pdev);
err_gpio_bulk_free:
+ if (host->use_dma)
+ jz4740_mmc_release_dma_channels(host);
jz_gpio_bulk_free(jz4740_mmc_pins, jz4740_mmc_num_pins(host));
err_free_host:
mmc_free_host(mmc);
@@ -872,6 +1121,9 @@ static int jz4740_mmc_remove(struct platform_device *pdev)
jz4740_mmc_free_gpios(pdev);
jz_gpio_bulk_free(jz4740_mmc_pins, jz4740_mmc_num_pins(host));
+ if (host->use_dma)
+ jz4740_mmc_release_dma_channels(host);
+
mmc_free_host(host->mmc);
return 0;
@@ -909,7 +1161,6 @@ static struct platform_driver jz4740_mmc_driver = {
.remove = jz4740_mmc_remove,
.driver = {
.name = "jz4740-mmc",
- .owner = THIS_MODULE,
.pm = JZ4740_MMC_PM_OPS,
},
};
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index cc8d4a6099cd..e4a07546f8b6 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1436,6 +1436,7 @@ static int mmc_spi_probe(struct spi_device *spi)
host->pdata->cd_debounce);
if (status != 0)
goto fail_add_host;
+ mmc_gpiod_request_cd_irq(mmc);
}
if (host->pdata && host->pdata->flags & MMC_SPI_USE_RO_GPIO) {
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index e4d470704150..43af791e2e45 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -43,6 +43,7 @@
#include <asm/sizes.h>
#include "mmci.h"
+#include "mmci_qcom_dml.h"
#define DRIVER_NAME "mmci-pl18x"
@@ -60,12 +61,13 @@ static unsigned int fmax = 515633;
* @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
* is asserted (likewise for RX)
* @data_cmd_enable: enable value for data commands.
- * @sdio: variant supports SDIO
+ * @st_sdio: enable ST specific SDIO logic
* @st_clkdiv: true if using a ST-specific clock divider algorithm
* @datactrl_mask_ddrmode: ddr mode mask in datactrl register.
* @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register
* @blksz_datactrl4: true if Block size is at b4..b16 position in datactrl
* register
+ * @datactrl_mask_sdio: SDIO enable mask in datactrl register
* @pwrreg_powerup: power up value for MMCIPOWER register
* @f_max: maximum clk frequency supported by the controller.
* @signal_direction: input/out direction of bus signals can be indicated
@@ -74,6 +76,7 @@ static unsigned int fmax = 515633;
* @pwrreg_nopower: bits in MMCIPOWER don't controls ext. power supply
* @explicit_mclk_control: enable explicit mclk control in driver.
* @qcom_fifo: enables qcom specific fifo pio read logic.
+ * @qcom_dml: enables qcom specific dma glue for dma transfers.
* @reversed_irq_handling: handle data irq before cmd irq.
*/
struct variant_data {
@@ -86,7 +89,8 @@ struct variant_data {
unsigned int fifohalfsize;
unsigned int data_cmd_enable;
unsigned int datactrl_mask_ddrmode;
- bool sdio;
+ unsigned int datactrl_mask_sdio;
+ bool st_sdio;
bool st_clkdiv;
bool blksz_datactrl16;
bool blksz_datactrl4;
@@ -98,6 +102,7 @@ struct variant_data {
bool pwrreg_nopower;
bool explicit_mclk_control;
bool qcom_fifo;
+ bool qcom_dml;
bool reversed_irq_handling;
};
@@ -133,7 +138,8 @@ static struct variant_data variant_u300 = {
.clkreg_enable = MCI_ST_U300_HWFCEN,
.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
.datalength_bits = 16,
- .sdio = true,
+ .datactrl_mask_sdio = MCI_ST_DPSM_SDIOEN,
+ .st_sdio = true,
.pwrreg_powerup = MCI_PWR_ON,
.f_max = 100000000,
.signal_direction = true,
@@ -146,7 +152,8 @@ static struct variant_data variant_nomadik = {
.fifohalfsize = 8 * 4,
.clkreg = MCI_CLK_ENABLE,
.datalength_bits = 24,
- .sdio = true,
+ .datactrl_mask_sdio = MCI_ST_DPSM_SDIOEN,
+ .st_sdio = true,
.st_clkdiv = true,
.pwrreg_powerup = MCI_PWR_ON,
.f_max = 100000000,
@@ -163,7 +170,8 @@ static struct variant_data variant_ux500 = {
.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
.clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
.datalength_bits = 24,
- .sdio = true,
+ .datactrl_mask_sdio = MCI_ST_DPSM_SDIOEN,
+ .st_sdio = true,
.st_clkdiv = true,
.pwrreg_powerup = MCI_PWR_ON,
.f_max = 100000000,
@@ -182,7 +190,8 @@ static struct variant_data variant_ux500v2 = {
.clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
.datactrl_mask_ddrmode = MCI_ST_DPSM_DDRMODE,
.datalength_bits = 24,
- .sdio = true,
+ .datactrl_mask_sdio = MCI_ST_DPSM_SDIOEN,
+ .st_sdio = true,
.st_clkdiv = true,
.blksz_datactrl16 = true,
.pwrreg_powerup = MCI_PWR_ON,
@@ -208,6 +217,7 @@ static struct variant_data variant_qcom = {
.f_max = 208000000,
.explicit_mclk_control = true,
.qcom_fifo = true,
+ .qcom_dml = true,
};
static int mmci_card_busy(struct mmc_host *mmc)
@@ -421,6 +431,7 @@ static void mmci_dma_setup(struct mmci_host *host)
{
const char *rxname, *txname;
dma_cap_mask_t mask;
+ struct variant_data *variant = host->variant;
host->dma_rx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "rx");
host->dma_tx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "tx");
@@ -471,6 +482,10 @@ static void mmci_dma_setup(struct mmci_host *host)
if (max_seg_size < host->mmc->max_seg_size)
host->mmc->max_seg_size = max_seg_size;
}
+
+ if (variant->qcom_dml && host->dma_rx_channel && host->dma_tx_channel)
+ if (dml_hw_init(host, host->mmc->parent->of_node))
+ variant->qcom_dml = false;
}
/*
@@ -572,6 +587,7 @@ static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
struct dma_async_tx_descriptor *desc;
enum dma_data_direction buffer_dirn;
int nr_sg;
+ unsigned long flags = DMA_CTRL_ACK;
if (data->flags & MMC_DATA_READ) {
conf.direction = DMA_DEV_TO_MEM;
@@ -596,9 +612,12 @@ static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
if (nr_sg == 0)
return -EINVAL;
+ if (host->variant->qcom_dml)
+ flags |= DMA_PREP_INTERRUPT;
+
dmaengine_slave_config(chan, &conf);
desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
- conf.direction, DMA_CTRL_ACK);
+ conf.direction, flags);
if (!desc)
goto unmap_exit;
@@ -647,6 +666,9 @@ static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
dmaengine_submit(host->dma_desc_current);
dma_async_issue_pending(host->dma_current);
+ if (host->variant->qcom_dml)
+ dml_start_xfer(host, data);
+
datactrl |= MCI_DPSM_DMAENABLE;
/* Trigger the DMA transfer */
@@ -792,32 +814,26 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
if (data->flags & MMC_DATA_READ)
datactrl |= MCI_DPSM_DIRECTION;
- /* The ST Micro variants has a special bit to enable SDIO */
- if (variant->sdio && host->mmc->card)
- if (mmc_card_sdio(host->mmc->card)) {
- /*
- * The ST Micro variants has a special bit
- * to enable SDIO.
- */
- u32 clk;
+ if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
+ u32 clk;
- datactrl |= MCI_ST_DPSM_SDIOEN;
+ datactrl |= variant->datactrl_mask_sdio;
- /*
- * The ST Micro variant for SDIO small write transfers
- * needs to have clock H/W flow control disabled,
- * otherwise the transfer will not start. The threshold
- * depends on the rate of MCLK.
- */
- if (data->flags & MMC_DATA_WRITE &&
- (host->size < 8 ||
- (host->size <= 8 && host->mclk > 50000000)))
- clk = host->clk_reg & ~variant->clkreg_enable;
- else
- clk = host->clk_reg | variant->clkreg_enable;
+ /*
+ * The ST Micro variant for SDIO small write transfers
+ * needs to have clock H/W flow control disabled,
+ * otherwise the transfer will not start. The threshold
+ * depends on the rate of MCLK.
+ */
+ if (variant->st_sdio && data->flags & MMC_DATA_WRITE &&
+ (host->size < 8 ||
+ (host->size <= 8 && host->mclk > 50000000)))
+ clk = host->clk_reg & ~variant->clkreg_enable;
+ else
+ clk = host->clk_reg | variant->clkreg_enable;
- mmci_write_clkreg(host, clk);
- }
+ mmci_write_clkreg(host, clk);
+ }
if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
@@ -1658,16 +1674,35 @@ static int mmci_probe(struct amba_device *dev,
writel(0, host->base + MMCIMASK1);
writel(0xfff, host->base + MMCICLEAR);
- /* If DT, cd/wp gpios must be supplied through it. */
- if (!np && gpio_is_valid(plat->gpio_cd)) {
- ret = mmc_gpio_request_cd(mmc, plat->gpio_cd, 0);
- if (ret)
- goto clk_disable;
- }
- if (!np && gpio_is_valid(plat->gpio_wp)) {
- ret = mmc_gpio_request_ro(mmc, plat->gpio_wp);
- if (ret)
- goto clk_disable;
+ /*
+	 * If:
+	 * - we are not using DT but using a descriptor table, or
+	 * - we are using a table of descriptors alongside DT,
+	 * look up the descriptors named "cd" and "wp" right here; fail
+	 * silently if these do not exist and proceed to try platform data
+ */
+ if (!np) {
+ ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
+ if (ret < 0) {
+ if (ret == -EPROBE_DEFER)
+ goto clk_disable;
+ else if (gpio_is_valid(plat->gpio_cd)) {
+ ret = mmc_gpio_request_cd(mmc, plat->gpio_cd, 0);
+ if (ret)
+ goto clk_disable;
+ }
+ }
+
+ ret = mmc_gpiod_request_ro(mmc, "wp", 0, false, 0, NULL);
+ if (ret < 0) {
+ if (ret == -EPROBE_DEFER)
+ goto clk_disable;
+ else if (gpio_is_valid(plat->gpio_wp)) {
+ ret = mmc_gpio_request_ro(mmc, plat->gpio_wp);
+ if (ret)
+ goto clk_disable;
+ }
+ }
}
ret = devm_request_irq(&dev->dev, dev->irq[0], mmci_irq, IRQF_SHARED,
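
Aside on the mmci_probe() rework above: card-detect and write-protect are now looked up as GPIO descriptors named "cd" and "wp" first, the probe defers if the descriptor provider is not ready, and only then falls back to the legacy platform-data GPIO numbers. A minimal sketch of the card-detect half, not part of the patch; it assumes <linux/mmc/slot-gpio.h> and <linux/gpio.h>:

/* Illustrative sketch only: descriptor-first card-detect lookup with
 * platform-data fallback, mirroring the hunk above. */
static int sketch_request_cd(struct mmc_host *mmc, int legacy_gpio)
{
	int ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);

	if (ret == -EPROBE_DEFER)
		return ret;	/* descriptor provider not ready yet */
	if (ret < 0 && gpio_is_valid(legacy_gpio))
		return mmc_gpio_request_cd(mmc, legacy_gpio, 0);
	return 0;		/* found a descriptor, or nothing to request */
}
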
diff --git a/drivers/mmc/host/mmci_qcom_dml.c b/drivers/mmc/host/mmci_qcom_dml.c
new file mode 100644
index 000000000000..2b7fc3764803
--- /dev/null
+++ b/drivers/mmc/host/mmci_qcom_dml.c
@@ -0,0 +1,177 @@
+/*
+ *
+ * Copyright (c) 2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/bitops.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include "mmci.h"
+
+/* Registers */
+#define DML_CONFIG 0x00
+#define PRODUCER_CRCI_MSK GENMASK(1, 0)
+#define PRODUCER_CRCI_DISABLE 0
+#define PRODUCER_CRCI_X_SEL BIT(0)
+#define PRODUCER_CRCI_Y_SEL BIT(1)
+#define CONSUMER_CRCI_MSK GENMASK(3, 2)
+#define CONSUMER_CRCI_DISABLE 0
+#define CONSUMER_CRCI_X_SEL BIT(2)
+#define CONSUMER_CRCI_Y_SEL BIT(3)
+#define PRODUCER_TRANS_END_EN BIT(4)
+#define BYPASS BIT(16)
+#define DIRECT_MODE BIT(17)
+#define INFINITE_CONS_TRANS BIT(18)
+
+#define DML_SW_RESET 0x08
+#define DML_PRODUCER_START 0x0c
+#define DML_CONSUMER_START 0x10
+#define DML_PRODUCER_PIPE_LOGICAL_SIZE 0x14
+#define DML_CONSUMER_PIPE_LOGICAL_SIZE 0x18
+#define DML_PIPE_ID 0x1c
+#define PRODUCER_PIPE_ID_SHFT 0
+#define PRODUCER_PIPE_ID_MSK GENMASK(4, 0)
+#define CONSUMER_PIPE_ID_SHFT 16
+#define CONSUMER_PIPE_ID_MSK GENMASK(20, 16)
+
+#define DML_PRODUCER_BAM_BLOCK_SIZE 0x24
+#define DML_PRODUCER_BAM_TRANS_SIZE 0x28
+
+/* other definitions */
+#define PRODUCER_PIPE_LOGICAL_SIZE 4096
+#define CONSUMER_PIPE_LOGICAL_SIZE 4096
+
+#define DML_OFFSET 0x800
+
+void dml_start_xfer(struct mmci_host *host, struct mmc_data *data)
+{
+ u32 config;
+ void __iomem *base = host->base + DML_OFFSET;
+
+ if (data->flags & MMC_DATA_READ) {
+ /* Read operation: configure DML for producer operation */
+ /* Set producer CRCI-x and disable consumer CRCI */
+ config = readl_relaxed(base + DML_CONFIG);
+ config = (config & ~PRODUCER_CRCI_MSK) | PRODUCER_CRCI_X_SEL;
+ config = (config & ~CONSUMER_CRCI_MSK) | CONSUMER_CRCI_DISABLE;
+ writel_relaxed(config, base + DML_CONFIG);
+
+ /* Set the Producer BAM block size */
+ writel_relaxed(data->blksz, base + DML_PRODUCER_BAM_BLOCK_SIZE);
+
+ /* Set Producer BAM Transaction size */
+ writel_relaxed(data->blocks * data->blksz,
+ base + DML_PRODUCER_BAM_TRANS_SIZE);
+ /* Set Producer Transaction End bit */
+ config = readl_relaxed(base + DML_CONFIG);
+ config |= PRODUCER_TRANS_END_EN;
+ writel_relaxed(config, base + DML_CONFIG);
+ /* Trigger producer */
+ writel_relaxed(1, base + DML_PRODUCER_START);
+ } else {
+ /* Write operation: configure DML for consumer operation */
+		/* Set consumer CRCI-x and disable producer CRCI */
+ config = readl_relaxed(base + DML_CONFIG);
+ config = (config & ~CONSUMER_CRCI_MSK) | CONSUMER_CRCI_X_SEL;
+ config = (config & ~PRODUCER_CRCI_MSK) | PRODUCER_CRCI_DISABLE;
+ writel_relaxed(config, base + DML_CONFIG);
+ /* Clear Producer Transaction End bit */
+ config = readl_relaxed(base + DML_CONFIG);
+ config &= ~PRODUCER_TRANS_END_EN;
+ writel_relaxed(config, base + DML_CONFIG);
+ /* Trigger consumer */
+ writel_relaxed(1, base + DML_CONSUMER_START);
+ }
+
+ /* make sure the dml is configured before dma is triggered */
+ wmb();
+}
+
+static int of_get_dml_pipe_index(struct device_node *np, const char *name)
+{
+ int index;
+ struct of_phandle_args dma_spec;
+
+ index = of_property_match_string(np, "dma-names", name);
+
+ if (index < 0)
+ return -ENODEV;
+
+ if (of_parse_phandle_with_args(np, "dmas", "#dma-cells", index,
+ &dma_spec))
+ return -ENODEV;
+
+ if (dma_spec.args_count)
+ return dma_spec.args[0];
+
+ return -ENODEV;
+}
+
+/* Initialize the DML hardware connected to the SD card controller */
+int dml_hw_init(struct mmci_host *host, struct device_node *np)
+{
+ u32 config;
+ void __iomem *base;
+ int consumer_id, producer_id;
+
+ consumer_id = of_get_dml_pipe_index(np, "tx");
+ producer_id = of_get_dml_pipe_index(np, "rx");
+
+ if (producer_id < 0 || consumer_id < 0)
+ return -ENODEV;
+
+ base = host->base + DML_OFFSET;
+
+ /* Reset the DML block */
+ writel_relaxed(1, base + DML_SW_RESET);
+
+ /* Disable the producer and consumer CRCI */
+ config = (PRODUCER_CRCI_DISABLE | CONSUMER_CRCI_DISABLE);
+ /*
+	 * Disable bypass mode. Bypass mode is only used when data transfers
+	 * happen in PIO mode and we don't want the BAM interface to connect
+	 * with the SDCC DML.
+ */
+ config &= ~BYPASS;
+ /*
+	 * Disable direct mode as we don't want the DML to master the AHB
+	 * bus. The BAM connected to the DML should master the AHB bus.
+ */
+ config &= ~DIRECT_MODE;
+ /*
+	 * Disable infinite mode transfers as we won't be doing any
+	 * infinite-size data transfers. All data transfers will be of
+	 * finite size.
+ */
+ config &= ~INFINITE_CONS_TRANS;
+ writel_relaxed(config, base + DML_CONFIG);
+
+ /*
+ * Initialize the logical BAM pipe size for producer
+ * and consumer.
+ */
+ writel_relaxed(PRODUCER_PIPE_LOGICAL_SIZE,
+ base + DML_PRODUCER_PIPE_LOGICAL_SIZE);
+ writel_relaxed(CONSUMER_PIPE_LOGICAL_SIZE,
+ base + DML_CONSUMER_PIPE_LOGICAL_SIZE);
+
+ /* Initialize Producer/consumer pipe id */
+ writel_relaxed(producer_id | (consumer_id << CONSUMER_PIPE_ID_SHFT),
+ base + DML_PIPE_ID);
+
+	/* Make sure DML initialization is finished */
+ mb();
+
+ return 0;
+}
diff --git a/drivers/mmc/host/mmci_qcom_dml.h b/drivers/mmc/host/mmci_qcom_dml.h
new file mode 100644
index 000000000000..6e405d09d534
--- /dev/null
+++ b/drivers/mmc/host/mmci_qcom_dml.h
@@ -0,0 +1,31 @@
+/*
+ *
+ * Copyright (c) 2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __MMC_QCOM_DML_H__
+#define __MMC_QCOM_DML_H__
+
+#ifdef CONFIG_MMC_QCOM_DML
+int dml_hw_init(struct mmci_host *host, struct device_node *np);
+void dml_start_xfer(struct mmci_host *host, struct mmc_data *data);
+#else
+static inline int dml_hw_init(struct mmci_host *host, struct device_node *np)
+{
+ return -ENOSYS;
+}
+static inline void dml_start_xfer(struct mmci_host *host, struct mmc_data *data)
+{
+}
+#endif /* CONFIG_MMC_QCOM_DML */
+
+#endif /* __MMC_QCOM_DML_H__ */
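
Aside on the new DML glue: mmci only calls dml_hw_init()/dml_start_xfer() on the Qcom variant, and with CONFIG_MMC_QCOM_DML unset the stubs above make dml_hw_init() return -ENOSYS so the feature disables itself. One detail worth spelling out is how dml_hw_init() packs the BAM pipe numbers into DML_PIPE_ID; a short worked example, not part of the patch, with made-up pipe numbers:

/* Illustrative sketch only: DML_PIPE_ID packing as in dml_hw_init().
 * The producer (rx) id sits in bits 4:0, the consumer (tx) id in
 * bits 20:16 (CONSUMER_PIPE_ID_SHFT == 16). */
static u32 sketch_pipe_id(int producer_id, int consumer_id)
{
	/* e.g. producer_id = 12, consumer_id = 13
	 *   => 12 | (13 << 16) = 0x000d000c */
	return producer_id | (consumer_id << CONSUMER_PIPE_ID_SHFT);
}
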
diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
index b4b1efbf6c16..f3e18d08e852 100644
--- a/drivers/mmc/host/moxart-mmc.c
+++ b/drivers/mmc/host/moxart-mmc.c
@@ -717,7 +717,6 @@ static struct platform_driver moxart_mmc_driver = {
.remove = moxart_remove,
.driver = {
.name = "mmc-moxart",
- .owner = THIS_MODULE,
.of_match_table = moxart_mmc_match,
},
};
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index ed1cb93c3784..ad111422ad55 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -1238,7 +1238,6 @@ static struct platform_driver mxcmci_driver = {
.id_table = mxcmci_devtype,
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.pm = &mxcmci_pm_ops,
.of_match_table = mxcmci_of_match,
}
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 140885a5a4e7..cd74e5143c36 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -735,7 +735,6 @@ static struct platform_driver mxs_mmc_driver = {
.id_table = mxs_ssp_ids,
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &mxs_mmc_pm_ops,
#endif
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 81974ecdfcbc..68dd6c79c378 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -1494,7 +1494,6 @@ static struct platform_driver mmc_omap_driver = {
.remove = mmc_omap_remove,
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(mmc_omap_match),
},
};
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 965672663ef0..df27bb4fc098 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1829,7 +1829,17 @@ static int omap_hsmmc_disable_fclk(struct mmc_host *mmc)
return 0;
}
-static const struct mmc_host_ops omap_hsmmc_ops = {
+static int omap_hsmmc_multi_io_quirk(struct mmc_card *card,
+ unsigned int direction, int blk_size)
+{
+ /* This controller can't do multiblock reads due to hw bugs */
+ if (direction == MMC_DATA_READ)
+ return 1;
+
+ return blk_size;
+}
+
+static struct mmc_host_ops omap_hsmmc_ops = {
.enable = omap_hsmmc_enable_fclk,
.disable = omap_hsmmc_disable_fclk,
.post_req = omap_hsmmc_post_req,
@@ -2101,7 +2111,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) {
dev_info(&pdev->dev, "multiblock reads disabled due to 35xx erratum 2.1.1.128; MMC read performance may suffer\n");
- mmc->caps2 |= MMC_CAP2_NO_MULTI_READ;
+ omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
}
pm_runtime_enable(host->dev);
@@ -2489,7 +2499,6 @@ static struct platform_driver omap_hsmmc_driver = {
.remove = omap_hsmmc_remove,
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.pm = &omap_hsmmc_dev_pm_ops,
.of_match_table = of_match_ptr(omap_mmc_of_match),
},
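
Aside on the omap_hsmmc hunk above: the old MMC_CAP2_NO_MULTI_READ capability is replaced by the multi_io_quirk host operation, through which the host can cap the block count used for a transfer. Restated as a sketch, not part of the patch:

/* Illustrative sketch only: the multi_io_quirk contract as implemented
 * by omap_hsmmc_multi_io_quirk().  Returning 1 limits reads to a
 * single block (35xx erratum 2.1.1.128); writes pass through. */
static int sketch_multi_io_quirk(struct mmc_card *card,
				 unsigned int direction, int blk_size)
{
	if (direction == MMC_DATA_READ)
		return 1;
	return blk_size;
}
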
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 32fe11323f39..1b6d0bfe35f5 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -474,7 +474,7 @@ static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
unsigned int clk = rate / ios->clock;
if (host->clkrt == CLKRT_OFF)
- clk_enable(host->clk);
+ clk_prepare_enable(host->clk);
if (ios->clock == 26000000) {
/* to support 26MHz */
@@ -501,7 +501,7 @@ static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
pxamci_stop_clock(host);
if (host->clkrt != CLKRT_OFF) {
host->clkrt = CLKRT_OFF;
- clk_disable(host->clk);
+ clk_disable_unprepare(host->clk);
}
}
@@ -885,7 +885,6 @@ static struct platform_driver pxamci_driver = {
.remove = pxamci_remove,
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(pxa_mmc_dt_ids),
},
};
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index dfde4a210238..c70b602f8f1e 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -412,6 +412,13 @@ static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host,
}
if (rsp_type == SD_RSP_TYPE_R2) {
+ /*
+ * The controller offloads the last byte {CRC-7, end bit 1'b1}
+ * of response type R2. Assign dummy CRC, 0, and end bit to the
+		 * byte (ptr[16], which goes into the LSB of resp[3] later).
+ */
+ ptr[16] = 1;
+
for (i = 0; i < 4; i++) {
cmd->resp[i] = get_unaligned_be32(ptr + 1 + i * 4);
dev_dbg(sdmmc_dev(host), "cmd->resp[%d] = 0x%08x\n",
@@ -1292,6 +1299,7 @@ static void realtek_init_host(struct realtek_pci_sdmmc *host)
mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED |
MMC_CAP_MMC_HIGHSPEED | MMC_CAP_BUS_WIDTH_TEST |
MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
+ mmc->caps2 = MMC_CAP2_NO_PRESCAN_POWERUP | MMC_CAP2_FULL_PWR_CYCLE;
mmc->max_current_330 = 400;
mmc->max_current_180 = 800;
mmc->ops = &realtek_pci_sdmmc_ops;
@@ -1416,7 +1424,6 @@ static struct platform_driver rtsx_pci_sdmmc_driver = {
.remove = rtsx_pci_sdmmc_drv_remove,
.id_table = rtsx_pci_sdmmc_ids,
.driver = {
- .owner = THIS_MODULE,
.name = DRV_NAME_RTSX_PCI_SDMMC,
},
};
diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c
index 5d3766e792f0..88af827e086b 100644
--- a/drivers/mmc/host/rtsx_usb_sdmmc.c
+++ b/drivers/mmc/host/rtsx_usb_sdmmc.c
@@ -435,6 +435,13 @@ static void sd_send_cmd_get_rsp(struct rtsx_usb_sdmmc *host,
}
if (rsp_type == SD_RSP_TYPE_R2) {
+ /*
+ * The controller offloads the last byte {CRC-7, end bit 1'b1}
+ * of response type R2. Assign dummy CRC, 0, and end bit to the
+		 * byte (ptr[16], which goes into the LSB of resp[3] later).
+ */
+ ptr[16] = 1;
+
for (i = 0; i < 4; i++) {
cmd->resp[i] = get_unaligned_be32(ptr + 1 + i * 4);
dev_dbg(sdmmc_dev(host), "cmd->resp[%d] = 0x%08x\n",
@@ -1329,6 +1336,7 @@ static void rtsx_usb_init_host(struct rtsx_usb_sdmmc *host)
MMC_CAP_MMC_HIGHSPEED | MMC_CAP_BUS_WIDTH_TEST |
MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50 |
MMC_CAP_NEEDS_POLL;
+ mmc->caps2 = MMC_CAP2_NO_PRESCAN_POWERUP | MMC_CAP2_FULL_PWR_CYCLE;
mmc->max_current_330 = 400;
mmc->max_current_180 = 800;
@@ -1445,7 +1453,6 @@ static struct platform_driver rtsx_usb_sdmmc_driver = {
.remove = rtsx_usb_sdmmc_drv_remove,
.id_table = rtsx_usb_sdmmc_ids,
.driver = {
- .owner = THIS_MODULE,
.name = "rtsx_usb_sdmmc",
},
};
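
Aside on the two identical R2 hunks above (rtsx_pci and rtsx_usb): the controller drops the trailing {CRC-7, end bit} byte of the 17-byte R2 response, and resp[3] is built from ptr[13..16] big-endian, so without the dummy byte its LSB would be undefined. A short worked example, not part of the patch, using get_unaligned_be32() as in the surrounding code:

/* Illustrative sketch only: where the synthesised byte ends up. */
u8 ptr[17];
/* ... earlier bytes filled in by the controller ... */
ptr[16] = 1;	/* dummy CRC-7 = 0b0000000, end bit = 1 -> 0x01 */
u32 resp3 = get_unaligned_be32(ptr + 1 + 3 * 4);	/* reads ptr[13..16] */
/* (resp3 & 0xff) == 0x01: the LSB of resp[3] is the synthesised byte */
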
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index e5516a226362..94cddf381ba3 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -985,7 +985,8 @@ static int s3cmci_setup_data(struct s3cmci_host *host, struct mmc_data *data)
* one block being transferred. */
if (data->blocks > 1) {
- pr_warning("%s: can't do non-word sized block transfers (blksz %d)\n", __func__, data->blksz);
+ pr_warn("%s: can't do non-word sized block transfers (blksz %d)\n",
+ __func__, data->blksz);
return -EINVAL;
}
}
@@ -1874,7 +1875,6 @@ MODULE_DEVICE_TABLE(platform, s3cmci_driver_ids);
static struct platform_driver s3cmci_driver = {
.driver = {
.name = "s3c-sdi",
- .owner = THIS_MODULE,
},
.id_table = s3cmci_driver_ids,
.probe = s3cmci_probe,
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 8c5337002c51..9cccc0e89b04 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -67,6 +67,8 @@ struct sdhci_acpi_slot {
unsigned int caps2;
mmc_pm_flag_t pm_caps;
unsigned int flags;
+ int (*probe_slot)(struct platform_device *, const char *, const char *);
+ int (*remove_slot)(struct platform_device *);
};
struct sdhci_acpi_host {
@@ -122,13 +124,67 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
.ops = &sdhci_acpi_ops_int,
};
+static int sdhci_acpi_emmc_probe_slot(struct platform_device *pdev,
+ const char *hid, const char *uid)
+{
+ struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
+ struct sdhci_host *host;
+
+ if (!c || !c->host)
+ return 0;
+
+ host = c->host;
+
+	/* Platform specific code during eMMC probe slot goes here */
+
+ if (hid && uid && !strcmp(hid, "80860F14") && !strcmp(uid, "1") &&
+ sdhci_readl(host, SDHCI_CAPABILITIES) == 0x446cc8b2 &&
+ sdhci_readl(host, SDHCI_CAPABILITIES_1) == 0x00000807)
+ host->timeout_clk = 1000; /* 1000 kHz i.e. 1 MHz */
+
+ return 0;
+}
+
+static int sdhci_acpi_sdio_probe_slot(struct platform_device *pdev,
+ const char *hid, const char *uid)
+{
+ struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
+ struct sdhci_host *host;
+
+ if (!c || !c->host)
+ return 0;
+
+ host = c->host;
+
+	/* Platform specific code during SDIO probe slot goes here */
+
+ return 0;
+}
+
+static int sdhci_acpi_sd_probe_slot(struct platform_device *pdev,
+ const char *hid, const char *uid)
+{
+ struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
+ struct sdhci_host *host;
+
+ if (!c || !c->host || !c->slot)
+ return 0;
+
+ host = c->host;
+
+	/* Platform specific code during SD probe slot goes here */
+
+ return 0;
+}
+
static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
.chip = &sdhci_acpi_chip_int,
.caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR,
.caps2 = MMC_CAP2_HC_ERASE_SZ,
.flags = SDHCI_ACPI_RUNTIME_PM,
- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | SDHCI_QUIRK2_STOP_WITH_TC,
+ .probe_slot = sdhci_acpi_emmc_probe_slot,
};
static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
@@ -137,12 +193,15 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
.caps = MMC_CAP_NONREMOVABLE | MMC_CAP_POWER_OFF_CARD,
.flags = SDHCI_ACPI_RUNTIME_PM,
.pm_caps = MMC_PM_KEEP_POWER,
+ .probe_slot = sdhci_acpi_sdio_probe_slot,
};
static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = {
.flags = SDHCI_ACPI_SD_CD | SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL |
SDHCI_ACPI_RUNTIME_PM,
- .quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON,
+ .quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON |
+ SDHCI_QUIRK2_STOP_WITH_TC,
+ .probe_slot = sdhci_acpi_sd_probe_slot,
};
struct sdhci_acpi_uid_slot {
@@ -156,6 +215,7 @@ static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
{ "80860F14" , "3" , &sdhci_acpi_slot_int_sd },
{ "80860F16" , NULL, &sdhci_acpi_slot_int_sd },
{ "INT33BB" , "2" , &sdhci_acpi_slot_int_sdio },
+ { "INT33BB" , "3" , &sdhci_acpi_slot_int_sd },
{ "INT33C6" , NULL, &sdhci_acpi_slot_int_sdio },
{ "INT3436" , NULL, &sdhci_acpi_slot_int_sdio },
{ "PNP0D40" },
@@ -173,8 +233,8 @@ static const struct acpi_device_id sdhci_acpi_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids);
-static const struct sdhci_acpi_slot *sdhci_acpi_get_slot_by_ids(const char *hid,
- const char *uid)
+static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(const char *hid,
+ const char *uid)
{
const struct sdhci_acpi_uid_slot *u;
@@ -189,24 +249,6 @@ static const struct sdhci_acpi_slot *sdhci_acpi_get_slot_by_ids(const char *hid,
return NULL;
}
-static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(acpi_handle handle,
- const char *hid)
-{
- const struct sdhci_acpi_slot *slot;
- struct acpi_device_info *info;
- const char *uid = NULL;
- acpi_status status;
-
- status = acpi_get_object_info(handle, &info);
- if (!ACPI_FAILURE(status) && (info->valid & ACPI_VALID_UID))
- uid = info->unique_id.string;
-
- slot = sdhci_acpi_get_slot_by_ids(hid, uid);
-
- kfree(info);
- return slot;
-}
-
static int sdhci_acpi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -217,6 +259,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
struct resource *iomem;
resource_size_t len;
const char *hid;
+ const char *uid;
int err;
if (acpi_bus_get_device(handle, &device))
@@ -226,6 +269,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
return -ENODEV;
hid = acpi_device_hid(device);
+ uid = device->pnp.unique_id;
iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!iomem)
@@ -244,7 +288,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
c = sdhci_priv(host);
c->host = host;
- c->slot = sdhci_acpi_get_slot(handle, hid);
+ c->slot = sdhci_acpi_get_slot(hid, uid);
c->pdev = pdev;
c->use_runtime_pm = sdhci_acpi_flag(c, SDHCI_ACPI_RUNTIME_PM);
@@ -277,6 +321,11 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
}
if (c->slot) {
+ if (c->slot->probe_slot) {
+ err = c->slot->probe_slot(pdev, hid, uid);
+ if (err)
+ goto err_free;
+ }
if (c->slot->chip) {
host->ops = c->slot->chip->ops;
host->quirks |= c->slot->chip->quirks;
@@ -297,7 +346,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
if (sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD)) {
bool v = sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL);
- if (mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0)) {
+ if (mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0, NULL)) {
dev_warn(dev, "failed to setup card detect gpio\n");
c->use_runtime_pm = false;
}
@@ -334,6 +383,9 @@ static int sdhci_acpi_remove(struct platform_device *pdev)
pm_runtime_put_noidle(dev);
}
+ if (c->slot && c->slot->remove_slot)
+ c->slot->remove_slot(pdev);
+
dead = (sdhci_readl(c->host, SDHCI_INT_STATUS) == ~0);
sdhci_remove_host(c->host, dead);
sdhci_free_host(c->host);
@@ -385,20 +437,13 @@ static int sdhci_acpi_runtime_idle(struct device *dev)
return 0;
}
-#else
-
-#define sdhci_acpi_runtime_suspend NULL
-#define sdhci_acpi_runtime_resume NULL
-#define sdhci_acpi_runtime_idle NULL
-
#endif
static const struct dev_pm_ops sdhci_acpi_pm_ops = {
.suspend = sdhci_acpi_suspend,
.resume = sdhci_acpi_resume,
- .runtime_suspend = sdhci_acpi_runtime_suspend,
- .runtime_resume = sdhci_acpi_runtime_resume,
- .runtime_idle = sdhci_acpi_runtime_idle,
+ SET_RUNTIME_PM_OPS(sdhci_acpi_runtime_suspend,
+ sdhci_acpi_runtime_resume, sdhci_acpi_runtime_idle)
};
static struct platform_driver sdhci_acpi_driver = {
diff --git a/drivers/mmc/host/sdhci-bcm-kona.c b/drivers/mmc/host/sdhci-bcm-kona.c
index dd780c315a63..e7e4fbdcbfe0 100644
--- a/drivers/mmc/host/sdhci-bcm-kona.c
+++ b/drivers/mmc/host/sdhci-bcm-kona.c
@@ -225,7 +225,7 @@ static struct sdhci_pltfm_data sdhci_pltfm_data_kona = {
SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
};
-static struct __initconst of_device_id sdhci_bcm_kona_of_match[] = {
+static const struct of_device_id sdhci_bcm_kona_of_match[] = {
{ .compatible = "brcm,kona-sdhci"},
{ .compatible = "bcm,kona-sdhci"}, /* deprecated name */
{}
@@ -359,7 +359,6 @@ static int sdhci_bcm_kona_remove(struct platform_device *pdev)
static struct platform_driver sdhci_bcm_kona_driver = {
.driver = {
.name = "sdhci-kona",
- .owner = THIS_MODULE,
.pm = SDHCI_PLTFM_PMOPS,
.of_match_table = sdhci_bcm_kona_of_match,
},
diff --git a/drivers/mmc/host/sdhci-bcm2835.c b/drivers/mmc/host/sdhci-bcm2835.c
index 46af9a439d7b..439d259fdf1d 100644
--- a/drivers/mmc/host/sdhci-bcm2835.c
+++ b/drivers/mmc/host/sdhci-bcm2835.c
@@ -194,7 +194,6 @@ MODULE_DEVICE_TABLE(of, bcm2835_sdhci_of_match);
static struct platform_driver bcm2835_sdhci_driver = {
.driver = {
.name = "sdhci-bcm2835",
- .owner = THIS_MODULE,
.of_match_table = bcm2835_sdhci_of_match,
.pm = SDHCI_PLTFM_PMOPS,
},
diff --git a/drivers/mmc/host/sdhci-cns3xxx.c b/drivers/mmc/host/sdhci-cns3xxx.c
index 14b74075589a..a7935a8d0922 100644
--- a/drivers/mmc/host/sdhci-cns3xxx.c
+++ b/drivers/mmc/host/sdhci-cns3xxx.c
@@ -106,7 +106,6 @@ static int sdhci_cns3xxx_remove(struct platform_device *pdev)
static struct platform_driver sdhci_cns3xxx_driver = {
.driver = {
.name = "sdhci-cns3xxx",
- .owner = THIS_MODULE,
.pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_cns3xxx_probe,
diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c
index e6278ec007d7..ca969d271a27 100644
--- a/drivers/mmc/host/sdhci-dove.c
+++ b/drivers/mmc/host/sdhci-dove.c
@@ -146,7 +146,6 @@ MODULE_DEVICE_TABLE(of, sdhci_dove_of_match_table);
static struct platform_driver sdhci_dove_driver = {
.driver = {
.name = "sdhci-dove",
- .owner = THIS_MODULE,
.pm = SDHCI_PLTFM_PMOPS,
.of_match_table = sdhci_dove_of_match_table,
},
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index ccec0e32590f..587ee0edeb57 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -880,6 +880,24 @@ static void esdhc_reset(struct sdhci_host *host, u8 mask)
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
+static unsigned int esdhc_get_max_timeout_count(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
+
+ return esdhc_is_usdhc(imx_data) ? 1 << 28 : 1 << 27;
+}
+
+static void esdhc_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
+
+ /* use maximum timeout counter */
+ sdhci_writeb(host, esdhc_is_usdhc(imx_data) ? 0xF : 0xE,
+ SDHCI_TIMEOUT_CONTROL);
+}
+
static struct sdhci_ops sdhci_esdhc_ops = {
.read_l = esdhc_readl_le,
.read_w = esdhc_readw_le,
@@ -889,7 +907,9 @@ static struct sdhci_ops sdhci_esdhc_ops = {
.set_clock = esdhc_pltfm_set_clock,
.get_max_clock = esdhc_pltfm_get_max_clock,
.get_min_clock = esdhc_pltfm_get_min_clock,
+ .get_max_timeout_count = esdhc_get_max_timeout_count,
.get_ro = esdhc_pltfm_get_ro,
+ .set_timeout = esdhc_set_timeout,
.set_bus_width = esdhc_pltfm_set_bus_width,
.set_uhs_signaling = esdhc_set_uhs_signaling,
.reset = esdhc_reset,
@@ -1207,7 +1227,6 @@ static const struct dev_pm_ops sdhci_esdhc_pmops = {
static struct platform_driver sdhci_esdhc_imx_driver = {
.driver = {
.name = "sdhci-esdhc-imx",
- .owner = THIS_MODULE,
.of_match_table = imx_esdhc_dt_ids,
.pm = &sdhci_esdhc_pmops,
},
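
Aside on the two new esdhc ops above: esdhc_set_timeout() always programs the largest data timeout value (0xF on uSDHC, 0xE otherwise), and esdhc_get_max_timeout_count() reports the matching counter size (1 << 28 or 1 << 27) so the sdhci core can derive mmc->max_busy_timeout from it (see the sdhci.c hunk later in this patch). A worked example, not part of the patch, with an assumed timeout clock:

/* Illustrative sketch only: turning the reported counter into a busy
 * timeout, assuming a timeout clock of 1000 kHz. */
unsigned int count = 1 << 28;		/* uSDHC maximum counter */
unsigned int timeout_clk = 1000;	/* kHz, assumed */
unsigned int max_busy_timeout = count / timeout_clk;
/* 268435456 / 1000 = 268435 ms, roughly 268 seconds */
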
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 1a6661ed6205..30804385af6d 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -46,24 +46,6 @@
#define CMUX_SHIFT_PHASE_SHIFT 24
#define CMUX_SHIFT_PHASE_MASK (7 << CMUX_SHIFT_PHASE_SHIFT)
-static const u32 tuning_block_64[] = {
- 0x00ff0fff, 0xccc3ccff, 0xffcc3cc3, 0xeffefffe,
- 0xddffdfff, 0xfbfffbff, 0xff7fffbf, 0xefbdf777,
- 0xf0fff0ff, 0x3cccfc0f, 0xcfcc33cc, 0xeeffefff,
- 0xfdfffdff, 0xffbfffdf, 0xfff7ffbb, 0xde7b7ff7
-};
-
-static const u32 tuning_block_128[] = {
- 0xff00ffff, 0x0000ffff, 0xccccffff, 0xcccc33cc,
- 0xcc3333cc, 0xffffcccc, 0xffffeeff, 0xffeeeeff,
- 0xffddffff, 0xddddffff, 0xbbffffff, 0xbbffffff,
- 0xffffffbb, 0xffffff77, 0x77ff7777, 0xffeeddbb,
- 0x00ffffff, 0x00ffffff, 0xccffff00, 0xcc33cccc,
- 0x3333cccc, 0xffcccccc, 0xffeeffff, 0xeeeeffff,
- 0xddffffff, 0xddffffff, 0xffffffdd, 0xffffffbb,
- 0xffffbbbb, 0xffff77ff, 0xff7777ff, 0xeeddbb77
-};
-
struct sdhci_msm_host {
struct platform_device *pdev;
void __iomem *core_mem; /* MSM SDCC mapped address */
@@ -358,8 +340,8 @@ static int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
{
int tuning_seq_cnt = 3;
u8 phase, *data_buf, tuned_phases[16], tuned_phase_cnt = 0;
- const u32 *tuning_block_pattern = tuning_block_64;
- int size = sizeof(tuning_block_64); /* Pattern size in bytes */
+ const u8 *tuning_block_pattern = tuning_blk_pattern_4bit;
+ int size = sizeof(tuning_blk_pattern_4bit);
int rc;
struct mmc_host *mmc = host->mmc;
struct mmc_ios ios = host->mmc->ios;
@@ -375,8 +357,8 @@ static int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
if ((opcode == MMC_SEND_TUNING_BLOCK_HS200) &&
(mmc->ios.bus_width == MMC_BUS_WIDTH_8)) {
- tuning_block_pattern = tuning_block_128;
- size = sizeof(tuning_block_128);
+ tuning_block_pattern = tuning_blk_pattern_8bit;
+ size = sizeof(tuning_blk_pattern_8bit);
}
data_buf = kmalloc(size, GFP_KERNEL);
@@ -610,7 +592,6 @@ static struct platform_driver sdhci_msm_driver = {
.remove = sdhci_msm_remove,
.driver = {
.name = "sdhci_msm",
- .owner = THIS_MODULE,
.of_match_table = sdhci_msm_dt_match,
},
};
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 5bd1092310f2..981d66e5c023 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -213,7 +213,6 @@ MODULE_DEVICE_TABLE(of, sdhci_arasan_of_match);
static struct platform_driver sdhci_arasan_driver = {
.driver = {
.name = "sdhci-arasan",
- .owner = THIS_MODULE,
.of_match_table = sdhci_arasan_of_match,
.pm = &sdhci_arasan_dev_pm_ops,
},
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 8be4dcfb49a0..8872c85c63d4 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -388,7 +388,6 @@ MODULE_DEVICE_TABLE(of, sdhci_esdhc_of_match);
static struct platform_driver sdhci_esdhc_driver = {
.driver = {
.name = "sdhci-esdhc",
- .owner = THIS_MODULE,
.of_match_table = sdhci_esdhc_of_match,
.pm = ESDHC_PMOPS,
},
diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c
index b341661369a2..be479279a1d5 100644
--- a/drivers/mmc/host/sdhci-of-hlwd.c
+++ b/drivers/mmc/host/sdhci-of-hlwd.c
@@ -89,7 +89,6 @@ MODULE_DEVICE_TABLE(of, sdhci_hlwd_of_match);
static struct platform_driver sdhci_hlwd_driver = {
.driver = {
.name = "sdhci-hlwd",
- .owner = THIS_MODULE,
.of_match_table = sdhci_hlwd_of_match,
.pm = SDHCI_PLTFM_PMOPS,
},
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index c3a1debc9289..61192973e7cb 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -24,6 +24,7 @@
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/pm_runtime.h>
+#include <linux/mmc/slot-gpio.h>
#include <linux/mmc/sdhci-pci-data.h>
#include "sdhci.h"
@@ -271,6 +272,8 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR;
slot->host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ;
slot->hw_reset = sdhci_pci_int_hw_reset;
+ if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BSW_EMMC)
+ slot->host->timeout_clk = 1000; /* 1000 kHz i.e. 1 MHz */
return 0;
}
@@ -280,22 +283,35 @@ static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
return 0;
}
+static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
+{
+ slot->cd_con_id = NULL;
+ slot->cd_idx = 0;
+ slot->cd_override_level = true;
+ return 0;
+}
+
static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
.allow_runtime_pm = true,
.probe_slot = byt_emmc_probe_slot,
- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_STOP_WITH_TC,
};
static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
- .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON,
+ .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON |
+ SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
.allow_runtime_pm = true,
.probe_slot = byt_sdio_probe_slot,
};
static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
- .quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON,
+ .quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON |
+ SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_STOP_WITH_TC,
.allow_runtime_pm = true,
.own_cd_for_runtime_pm = true,
+ .probe_slot = byt_sd_probe_slot,
};
/* Define Host controllers for Intel Merrifield platform */
@@ -317,7 +333,9 @@ static int intel_mrfl_mmc_probe_slot(struct sdhci_pci_slot *slot)
static const struct sdhci_pci_fixes sdhci_intel_mrfl_mmc = {
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
- .quirks2 = SDHCI_QUIRK2_BROKEN_HS200,
+ .quirks2 = SDHCI_QUIRK2_BROKEN_HS200 |
+ SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ .allow_runtime_pm = true,
.probe_slot = intel_mrfl_mmc_probe_slot,
};
@@ -876,6 +894,29 @@ static const struct pci_device_id pci_ids[] = {
.driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
},
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BSW_EMMC,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BSW_SDIO,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BSW_SD,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd,
+ },
{
.vendor = PCI_VENDOR_ID_INTEL,
@@ -1269,20 +1310,13 @@ static int sdhci_pci_runtime_idle(struct device *dev)
return 0;
}
-#else
-
-#define sdhci_pci_runtime_suspend NULL
-#define sdhci_pci_runtime_resume NULL
-#define sdhci_pci_runtime_idle NULL
-
#endif
static const struct dev_pm_ops sdhci_pci_pm_ops = {
.suspend = sdhci_pci_suspend,
.resume = sdhci_pci_resume,
- .runtime_suspend = sdhci_pci_runtime_suspend,
- .runtime_resume = sdhci_pci_runtime_resume,
- .runtime_idle = sdhci_pci_runtime_idle,
+ SET_RUNTIME_PM_OPS(sdhci_pci_runtime_suspend,
+ sdhci_pci_runtime_resume, sdhci_pci_runtime_idle)
};
/*****************************************************************************\
@@ -1332,6 +1366,7 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
slot->pci_bar = bar;
slot->rst_n_gpio = -EINVAL;
slot->cd_gpio = -EINVAL;
+ slot->cd_idx = -1;
/* Retrieve platform data if there is any */
if (*sdhci_pci_get_data)
@@ -1390,6 +1425,13 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
host->mmc->slotno = slotno;
host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP;
+ if (slot->cd_idx >= 0 &&
+ mmc_gpiod_request_cd(host->mmc, slot->cd_con_id, slot->cd_idx,
+ slot->cd_override_level, 0, NULL)) {
+ dev_warn(&pdev->dev, "failed to setup card detect gpio\n");
+ slot->cd_idx = -1;
+ }
+
ret = sdhci_add_host(host);
if (ret)
goto remove;
@@ -1402,7 +1444,7 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
* Note sdhci_pci_add_own_cd() sets slot->cd_gpio to -EINVAL on failure.
*/
if (chip->fixes && chip->fixes->own_cd_for_runtime_pm &&
- !gpio_is_valid(slot->cd_gpio))
+ !gpio_is_valid(slot->cd_gpio) && slot->cd_idx < 0)
chip->allow_runtime_pm = false;
return slot;
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index c101477ef3be..d57c3d169914 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -11,6 +11,9 @@
#define PCI_DEVICE_ID_INTEL_BYT_SDIO 0x0f15
#define PCI_DEVICE_ID_INTEL_BYT_SD 0x0f16
#define PCI_DEVICE_ID_INTEL_BYT_EMMC2 0x0f50
+#define PCI_DEVICE_ID_INTEL_BSW_EMMC 0x2294
+#define PCI_DEVICE_ID_INTEL_BSW_SDIO 0x2295
+#define PCI_DEVICE_ID_INTEL_BSW_SD 0x2296
#define PCI_DEVICE_ID_INTEL_MRFL_MMC 0x1190
#define PCI_DEVICE_ID_INTEL_CLV_SDIO0 0x08f9
#define PCI_DEVICE_ID_INTEL_CLV_SDIO1 0x08fa
@@ -61,6 +64,10 @@ struct sdhci_pci_slot {
int cd_gpio;
int cd_irq;
+ char *cd_con_id;
+ int cd_idx;
+ bool cd_override_level;
+
void (*hw_reset)(struct sdhci_host *host);
};
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index 7e834fb78f42..c5b01d6bb85d 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -123,7 +123,6 @@ struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev,
size_t priv_size)
{
struct sdhci_host *host;
- struct device_node *np = pdev->dev.of_node;
struct resource *iomem;
int ret;
@@ -136,13 +135,8 @@ struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev,
if (resource_size(iomem) < 0x100)
dev_err(&pdev->dev, "Invalid iomem size!\n");
- /* Some PCI-based MFD need the parent here */
- if (pdev->dev.parent != &platform_bus && !np)
- host = sdhci_alloc_host(pdev->dev.parent,
- sizeof(struct sdhci_pltfm_host) + priv_size);
- else
- host = sdhci_alloc_host(&pdev->dev,
- sizeof(struct sdhci_pltfm_host) + priv_size);
+ host = sdhci_alloc_host(&pdev->dev,
+ sizeof(struct sdhci_pltfm_host) + priv_size);
if (IS_ERR(host)) {
ret = PTR_ERR(host);
diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c
index 3c0f3c0a1cc8..b4c23e983baf 100644
--- a/drivers/mmc/host/sdhci-pxav2.c
+++ b/drivers/mmc/host/sdhci-pxav2.c
@@ -261,7 +261,6 @@ static int sdhci_pxav2_remove(struct platform_device *pdev)
static struct platform_driver sdhci_pxav2_driver = {
.driver = {
.name = "sdhci-pxav2",
- .owner = THIS_MODULE,
#ifdef CONFIG_OF
.of_match_table = sdhci_pxav2_of_match,
#endif
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index 6f842fb8e6b8..5036d7d39529 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -224,12 +224,11 @@ static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
static const struct sdhci_ops pxav3_sdhci_ops = {
.set_clock = sdhci_set_clock,
- .set_uhs_signaling = pxav3_set_uhs_signaling,
.platform_send_init_74_clocks = pxav3_gen_init_74_clocks,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.set_bus_width = sdhci_set_bus_width,
.reset = pxav3_reset,
- .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .set_uhs_signaling = pxav3_set_uhs_signaling,
};
static struct sdhci_pltfm_data sdhci_pxav3_pdata = {
@@ -381,11 +380,11 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
return 0;
-err_of_parse:
-err_cd_req:
err_add_host:
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+err_of_parse:
+err_cd_req:
clk_disable_unprepare(clk);
err_clk_get:
err_mbus_win:
@@ -492,7 +491,6 @@ static struct platform_driver sdhci_pxav3_driver = {
#ifdef CONFIG_OF
.of_match_table = sdhci_pxav3_of_match,
#endif
- .owner = THIS_MODULE,
.pm = SDHCI_PXAV3_PMOPS,
},
.probe = sdhci_pxav3_probe,
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index fa5954a05449..0ce6eb17deaf 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -606,8 +606,6 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
ret = sdhci_add_host(host);
if (ret) {
dev_err(dev, "sdhci_add_host() failed\n");
- pm_runtime_forbid(&pdev->dev);
- pm_runtime_get_noresume(&pdev->dev);
goto err_req_regs;
}
@@ -618,6 +616,8 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
return 0;
err_req_regs:
+ pm_runtime_disable(&pdev->dev);
+
err_no_busclks:
clk_disable_unprepare(sc->clk_io);
@@ -747,7 +747,6 @@ static struct platform_driver sdhci_s3c_driver = {
.remove = sdhci_s3c_remove,
.id_table = sdhci_s3c_driver_ids,
.driver = {
- .owner = THIS_MODULE,
.name = "s3c-sdhci",
.of_match_table = of_match_ptr(sdhci_s3c_dt_match),
.pm = SDHCI_S3C_PMOPS,
diff --git a/drivers/mmc/host/sdhci-sirf.c b/drivers/mmc/host/sdhci-sirf.c
index 17004531d089..dd29d47c07aa 100644
--- a/drivers/mmc/host/sdhci-sirf.c
+++ b/drivers/mmc/host/sdhci-sirf.c
@@ -15,6 +15,8 @@
#include <linux/mmc/slot-gpio.h>
#include "sdhci-pltfm.h"
+#define SDHCI_SIRF_8BITBUS BIT(3)
+
struct sdhci_sirf_priv {
struct clk *clk;
int gpio_cd;
@@ -27,10 +29,30 @@ static unsigned int sdhci_sirf_get_max_clk(struct sdhci_host *host)
return clk_get_rate(priv->clk);
}
+static void sdhci_sirf_set_bus_width(struct sdhci_host *host, int width)
+{
+ u8 ctrl;
+
+ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+ ctrl &= ~(SDHCI_CTRL_4BITBUS | SDHCI_SIRF_8BITBUS);
+
+ /*
+	 * The CSR atlas7 and prima2 SD hosts are not version 3.0: their
+	 * 8-bit bus width enable bit is bit 3, while standard hosts
+	 * use bit 5.
+ */
+ if (width == MMC_BUS_WIDTH_8)
+ ctrl |= SDHCI_SIRF_8BITBUS;
+ else if (width == MMC_BUS_WIDTH_4)
+ ctrl |= SDHCI_CTRL_4BITBUS;
+
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+}
+
static struct sdhci_ops sdhci_sirf_ops = {
.set_clock = sdhci_set_clock,
.get_max_clock = sdhci_sirf_get_max_clk,
- .set_bus_width = sdhci_set_bus_width,
+ .set_bus_width = sdhci_sirf_set_bus_width,
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
@@ -94,6 +116,7 @@ static int sdhci_sirf_probe(struct platform_device *pdev)
ret);
goto err_request_cd;
}
+ mmc_gpiod_request_cd_irq(host->mmc);
}
return 0;
@@ -167,7 +190,6 @@ MODULE_DEVICE_TABLE(of, sdhci_sirf_of_match);
static struct platform_driver sdhci_sirf_driver = {
.driver = {
.name = "sdhci-sirf",
- .owner = THIS_MODULE,
.of_match_table = sdhci_sirf_of_match,
#ifdef CONFIG_PM_SLEEP
.pm = &sdhci_sirf_pm_ops,
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index 9d535c7336ef..22e58268545f 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -230,7 +230,6 @@ MODULE_DEVICE_TABLE(of, sdhci_spear_id_table);
static struct platform_driver sdhci_driver = {
.driver = {
.name = "sdhci",
- .owner = THIS_MODULE,
.pm = &sdhci_pm_ops,
.of_match_table = of_match_ptr(sdhci_spear_id_table),
},
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 33100d10d176..59797106af93 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -318,7 +318,6 @@ static int sdhci_tegra_remove(struct platform_device *pdev)
static struct platform_driver sdhci_tegra_driver = {
.driver = {
.name = "sdhci-tegra",
- .owner = THIS_MODULE,
.of_match_table = sdhci_tegra_dt_match,
.pm = SDHCI_PLTFM_PMOPS,
},
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 37b2a9ae52ef..ada1a3ea3a87 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -707,19 +707,28 @@ static void sdhci_set_transfer_irqs(struct sdhci_host *host)
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
-static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
+static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
u8 count;
+
+ if (host->ops->set_timeout) {
+ host->ops->set_timeout(host, cmd);
+ } else {
+ count = sdhci_calc_timeout(host, cmd);
+ sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
+ }
+}
+
+static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
+{
u8 ctrl;
struct mmc_data *data = cmd->data;
int ret;
WARN_ON(host->data);
- if (data || (cmd->flags & MMC_RSP_BUSY)) {
- count = sdhci_calc_timeout(host, cmd);
- sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
- }
+ if (data || (cmd->flags & MMC_RSP_BUSY))
+ sdhci_set_timeout(host, cmd);
if (!data)
return;
@@ -1007,6 +1016,7 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
mod_timer(&host->timer, timeout);
host->cmd = cmd;
+ host->busy_handle = 0;
sdhci_prepare_data(host, cmd);
@@ -1194,7 +1204,6 @@ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
clock_set:
if (real_div)
host->mmc->actual_clock = (host->max_clk * clk_mul) / real_div;
-
clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
<< SDHCI_DIVIDER_HI_SHIFT;
@@ -1357,11 +1366,12 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
/*
* Check if the re-tuning timer has already expired and there
- * is no on-going data transfer. If so, we need to execute
- * tuning procedure before sending command.
+ * is no on-going data transfer and DAT0 is not busy. If so,
+ * we need to execute tuning procedure before sending command.
*/
if ((host->flags & SDHCI_NEEDS_RETUNING) &&
- !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ))) {
+ !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ)) &&
+ (present_state & SDHCI_DATA_0_LVL_MASK)) {
if (mmc->card) {
/* eMMC uses cmd21 but sd and sdio use cmd19 */
tuning_opcode =
@@ -1471,6 +1481,18 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
if (!ios->clock || ios->clock != host->clock) {
host->ops->set_clock(host, ios->clock);
host->clock = ios->clock;
+
+ if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
+ host->clock) {
+ host->timeout_clk = host->mmc->actual_clock ?
+ host->mmc->actual_clock / 1000 :
+ host->clock / 1000;
+ host->mmc->max_busy_timeout =
+ host->ops->get_max_timeout_count ?
+ host->ops->get_max_timeout_count(host) :
+ 1 << 27;
+ host->mmc->max_busy_timeout /= host->timeout_clk;
+ }
}
sdhci_set_power(host, ios->power_mode, ios->vdd);
@@ -1733,8 +1755,8 @@ static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
ret = regulator_set_voltage(mmc->supply.vqmmc, 2700000,
3600000);
if (ret) {
- pr_warning("%s: Switching to 3.3V signalling voltage "
- " failed\n", mmc_hostname(mmc));
+ pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
+ mmc_hostname(mmc));
return -EIO;
}
}
@@ -1746,8 +1768,8 @@ static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
if (!(ctrl & SDHCI_CTRL_VDD_180))
return 0;
- pr_warning("%s: 3.3V regulator output did not became stable\n",
- mmc_hostname(mmc));
+ pr_warn("%s: 3.3V regulator output did not became stable\n",
+ mmc_hostname(mmc));
return -EAGAIN;
case MMC_SIGNAL_VOLTAGE_180:
@@ -1755,8 +1777,8 @@ static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
ret = regulator_set_voltage(mmc->supply.vqmmc,
1700000, 1950000);
if (ret) {
- pr_warning("%s: Switching to 1.8V signalling voltage "
- " failed\n", mmc_hostname(mmc));
+ pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
+ mmc_hostname(mmc));
return -EIO;
}
}
@@ -1773,8 +1795,8 @@ static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
if (ctrl & SDHCI_CTRL_VDD_180)
return 0;
- pr_warning("%s: 1.8V regulator output did not became stable\n",
- mmc_hostname(mmc));
+ pr_warn("%s: 1.8V regulator output did not became stable\n",
+ mmc_hostname(mmc));
return -EAGAIN;
case MMC_SIGNAL_VOLTAGE_120:
@@ -1782,8 +1804,8 @@ static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
ret = regulator_set_voltage(mmc->supply.vqmmc, 1100000,
1300000);
if (ret) {
- pr_warning("%s: Switching to 1.2V signalling voltage "
- " failed\n", mmc_hostname(mmc));
+ pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
+ mmc_hostname(mmc));
return -EIO;
}
}
@@ -2203,7 +2225,7 @@ static void sdhci_tuning_timer(unsigned long data)
* *
\*****************************************************************************/
-static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
+static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
{
BUG_ON(intmask == 0);
@@ -2241,11 +2263,18 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
if (host->cmd->data)
DBG("Cannot wait for busy signal when also "
"doing a data transfer");
- else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ))
+ else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ)
+ && !host->busy_handle) {
+ /* Mark that command complete before busy is ended */
+ host->busy_handle = 1;
return;
+ }
/* The controller does not support the end-of-busy IRQ,
* fall through and take the SDHCI_INT_RESPONSE */
+ } else if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
+ host->cmd->opcode == MMC_STOP_TRANSMISSION && !host->data) {
+ *mask &= ~SDHCI_INT_DATA_END;
}
if (intmask & SDHCI_INT_RESPONSE)
@@ -2304,8 +2333,21 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
* above in sdhci_cmd_irq().
*/
if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
+ if (intmask & SDHCI_INT_DATA_TIMEOUT) {
+ host->cmd->error = -ETIMEDOUT;
+ tasklet_schedule(&host->finish_tasklet);
+ return;
+ }
if (intmask & SDHCI_INT_DATA_END) {
- sdhci_finish_command(host);
+ /*
+			 * Some cards handle the busy-end interrupt
+			 * before the command has completed, so make
+ * sure we do things in the proper order.
+ */
+ if (host->busy_handle)
+ sdhci_finish_command(host);
+ else
+ host->busy_handle = 1;
return;
}
}
@@ -2442,7 +2484,8 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
}
if (intmask & SDHCI_INT_CMD_MASK)
- sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
+ sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK,
+ &intmask);
if (intmask & SDHCI_INT_DATA_MASK)
sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
@@ -2534,7 +2577,7 @@ void sdhci_enable_irq_wakeups(struct sdhci_host *host)
}
EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
-void sdhci_disable_irq_wakeups(struct sdhci_host *host)
+static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
{
u8 val;
u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
@@ -2544,7 +2587,6 @@ void sdhci_disable_irq_wakeups(struct sdhci_host *host)
val &= ~mask;
sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
}
-EXPORT_SYMBOL_GPL(sdhci_disable_irq_wakeups);
int sdhci_suspend_host(struct sdhci_host *host)
{
@@ -2749,6 +2791,7 @@ int sdhci_add_host(struct sdhci_host *host)
u32 caps[2] = {0, 0};
u32 max_current_caps;
unsigned int ocr_avail;
+ unsigned int override_timeout_clk;
int ret;
WARN_ON(host == NULL);
@@ -2762,6 +2805,8 @@ int sdhci_add_host(struct sdhci_host *host)
if (debug_quirks2)
host->quirks2 = debug_quirks2;
+ override_timeout_clk = host->timeout_clk;
+
sdhci_do_reset(host, SDHCI_RESET_ALL);
host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
@@ -2807,8 +2852,7 @@ int sdhci_add_host(struct sdhci_host *host)
if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
if (host->ops->enable_dma) {
if (host->ops->enable_dma(host)) {
- pr_warning("%s: No suitable DMA "
- "available. Falling back to PIO.\n",
+ pr_warn("%s: No suitable DMA available - falling back to PIO\n",
mmc_hostname(mmc));
host->flags &=
~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
@@ -2830,15 +2874,14 @@ int sdhci_add_host(struct sdhci_host *host)
dma_free_coherent(mmc_dev(mmc), ADMA_SIZE,
host->adma_desc, host->adma_addr);
kfree(host->align_buffer);
- pr_warning("%s: Unable to allocate ADMA "
- "buffers. Falling back to standard DMA.\n",
+ pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
mmc_hostname(mmc));
host->flags &= ~SDHCI_USE_ADMA;
host->adma_desc = NULL;
host->align_buffer = NULL;
} else if (host->adma_addr & 3) {
- pr_warning("%s: unable to allocate aligned ADMA descriptor\n",
- mmc_hostname(mmc));
+ pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
+ mmc_hostname(mmc));
host->flags &= ~SDHCI_USE_ADMA;
dma_free_coherent(mmc_dev(mmc), ADMA_SIZE,
host->adma_desc, host->adma_addr);
@@ -2908,25 +2951,30 @@ int sdhci_add_host(struct sdhci_host *host)
} else
mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
- host->timeout_clk =
- (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
- if (host->timeout_clk == 0) {
- if (host->ops->get_timeout_clock) {
- host->timeout_clk = host->ops->get_timeout_clock(host);
- } else if (!(host->quirks &
- SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
- pr_err("%s: Hardware doesn't specify timeout clock "
- "frequency.\n", mmc_hostname(mmc));
- return -ENODEV;
+ if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
+ host->timeout_clk = (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >>
+ SDHCI_TIMEOUT_CLK_SHIFT;
+ if (host->timeout_clk == 0) {
+ if (host->ops->get_timeout_clock) {
+ host->timeout_clk =
+ host->ops->get_timeout_clock(host);
+ } else {
+ pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
+ mmc_hostname(mmc));
+ return -ENODEV;
+ }
}
- }
- if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
- host->timeout_clk *= 1000;
- if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)
- host->timeout_clk = mmc->f_max / 1000;
+ if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
+ host->timeout_clk *= 1000;
- mmc->max_busy_timeout = (1 << 27) / host->timeout_clk;
+ mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
+ host->ops->get_max_timeout_count(host) : 1 << 27;
+ mmc->max_busy_timeout /= host->timeout_clk;
+ }
+
+ if (override_timeout_clk)
+ host->timeout_clk = override_timeout_clk;
mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
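For a rough sense of the numbers produced by the reworked timeout code above (illustrative only, not part of the patch, and assuming host->timeout_clk ends up in kHz and mmc->max_busy_timeout is in ms, as the surrounding code suggests), a controller reporting a 48 MHz timeout clock with no get_max_timeout_count callback gives:

    timeout_clk      = 48 * 1000;               /* SDHCI_TIMEOUT_CLK_UNIT set: 48000 kHz */
    max_busy_timeout = (1 << 27) / timeout_clk; /* 134217728 / 48000 ~= 2796 ms */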
@@ -2998,8 +3046,13 @@ int sdhci_add_host(struct sdhci_host *host)
/* SD3.0: SDR104 is supported so (for eMMC) the caps2
* field can be promoted to support HS200.
*/
- if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
+ if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200)) {
mmc->caps2 |= MMC_CAP2_HS200;
+ if (IS_ERR(mmc->supply.vqmmc) ||
+ !regulator_is_supported_voltage
+ (mmc->supply.vqmmc, 1100000, 1300000))
+ mmc->caps2 &= ~MMC_CAP2_HS200_1_2V_SDR;
+ }
} else if (caps[1] & SDHCI_SUPPORT_SDR50)
mmc->caps |= MMC_CAP_UHS_SDR50;
@@ -3049,7 +3102,7 @@ int sdhci_add_host(struct sdhci_host *host)
*/
max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
- u32 curr = regulator_get_current_limit(mmc->supply.vmmc);
+ int curr = regulator_get_current_limit(mmc->supply.vmmc);
if (curr > 0) {
/* convert to SDHCI_MAX_CURRENT format */
@@ -3158,8 +3211,8 @@ int sdhci_add_host(struct sdhci_host *host)
mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >>
SDHCI_MAX_BLOCK_SHIFT;
if (mmc->max_blk_size >= 3) {
- pr_warning("%s: Invalid maximum block size, "
- "assuming 512 bytes\n", mmc_hostname(mmc));
+ pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
+ mmc_hostname(mmc));
mmc->max_blk_size = 0;
}
}
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 4a5cd5e3fa3e..31896a779d4e 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -72,6 +72,7 @@
#define SDHCI_WRITE_PROTECT 0x00080000
#define SDHCI_DATA_LVL_MASK 0x00F00000
#define SDHCI_DATA_LVL_SHIFT 20
+#define SDHCI_DATA_0_LVL_MASK 0x00100000
#define SDHCI_HOST_CONTROL 0x28
#define SDHCI_CTRL_LED 0x01
@@ -281,6 +282,9 @@ struct sdhci_ops {
unsigned int (*get_max_clock)(struct sdhci_host *host);
unsigned int (*get_min_clock)(struct sdhci_host *host);
unsigned int (*get_timeout_clock)(struct sdhci_host *host);
+ unsigned int (*get_max_timeout_count)(struct sdhci_host *host);
+ void (*set_timeout)(struct sdhci_host *host,
+ struct mmc_command *cmd);
void (*set_bus_width)(struct sdhci_host *host, int width);
void (*platform_send_init_74_clocks)(struct sdhci_host *host,
u8 power_mode);
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index d11708c815d7..7d9d6a321521 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -1553,7 +1553,6 @@ static struct platform_driver sh_mmcif_driver = {
.driver = {
.name = DRIVER_NAME,
.pm = &sh_mmcif_dev_pm_ops,
- .owner = THIS_MODULE,
.of_match_table = mmcif_of_match,
},
};
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index 91058dabd11a..a2e81a1ea6af 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -39,6 +39,7 @@ struct sh_mobile_sdhi_of_data {
unsigned long tmio_flags;
unsigned long capabilities;
unsigned long capabilities2;
+ dma_addr_t dma_rx_offset;
};
static const struct sh_mobile_sdhi_of_data sh_mobile_sdhi_of_cfg[] = {
@@ -48,14 +49,16 @@ static const struct sh_mobile_sdhi_of_data sh_mobile_sdhi_of_cfg[] = {
};
static const struct sh_mobile_sdhi_of_data of_rcar_gen1_compatible = {
- .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE,
+ .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
+ TMIO_MMC_CLK_ACTUAL,
.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
};
static const struct sh_mobile_sdhi_of_data of_rcar_gen2_compatible = {
- .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE,
+ .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
+ TMIO_MMC_CLK_ACTUAL,
.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
- .capabilities2 = MMC_CAP2_NO_MULTI_READ,
+ .dma_rx_offset = 0x2000,
};
static const struct of_device_id sh_mobile_sdhi_of_match[] = {
@@ -68,6 +71,9 @@ static const struct of_device_id sh_mobile_sdhi_of_match[] = {
{ .compatible = "renesas,sdhi-r8a7779", .data = &of_rcar_gen1_compatible, },
{ .compatible = "renesas,sdhi-r8a7790", .data = &of_rcar_gen2_compatible, },
{ .compatible = "renesas,sdhi-r8a7791", .data = &of_rcar_gen2_compatible, },
+ { .compatible = "renesas,sdhi-r8a7792", .data = &of_rcar_gen2_compatible, },
+ { .compatible = "renesas,sdhi-r8a7793", .data = &of_rcar_gen2_compatible, },
+ { .compatible = "renesas,sdhi-r8a7794", .data = &of_rcar_gen2_compatible, },
{},
};
MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match);
@@ -132,6 +138,24 @@ static int sh_mobile_sdhi_write16_hook(struct tmio_mmc_host *host, int addr)
return 0;
}
+static int sh_mobile_sdhi_multi_io_quirk(struct mmc_card *card,
+ unsigned int direction, int blk_size)
+{
+ /*
+ * On Renesas controllers, a multiple block read of one or
+ * two blocks may return a corrupted response value,
+ * depending on the timing with which the response
+ * register is read.
+ * Use single block reads to work around this HW bug.
+ */
+ if ((direction == MMC_DATA_READ) &&
+ blk_size == 2)
+ return 1;
+
+ return blk_size;
+}
+
static void sh_mobile_sdhi_cd_wakeup(const struct platform_device *pdev)
{
mmc_detect_change(platform_get_drvdata(pdev), msecs_to_jiffies(100));
@@ -187,6 +211,7 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
mmc_data->clk_disable = sh_mobile_sdhi_clk_disable;
mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED;
mmc_data->write16_hook = sh_mobile_sdhi_write16_hook;
+ mmc_data->multi_io_quirk = sh_mobile_sdhi_multi_io_quirk;
if (p) {
mmc_data->flags = p->tmio_flags;
mmc_data->ocr_mask = p->tmio_ocr_mask;
@@ -223,11 +248,27 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
*/
mmc_data->flags |= TMIO_MMC_SDIO_IRQ;
+ /*
+ * All SDHI have CMD12 control bit
+ */
+ mmc_data->flags |= TMIO_MMC_HAVE_CMD12_CTRL;
+
+ /*
+ * All SDHI need SDIO_INFO1 reserved bit
+ */
+ mmc_data->flags |= TMIO_MMC_SDIO_STATUS_QUIRK;
+
+ /*
+ * All SDHI have DMA control register
+ */
+ mmc_data->flags |= TMIO_MMC_HAVE_CTL_DMA_REG;
+
if (of_id && of_id->data) {
const struct sh_mobile_sdhi_of_data *of_data = of_id->data;
mmc_data->flags |= of_data->tmio_flags;
mmc_data->capabilities |= of_data->capabilities;
mmc_data->capabilities2 |= of_data->capabilities2;
+ dma_priv->dma_rx_offset = of_data->dma_rx_offset;
}
/* SD control register space size is 0x100, 0x200 for bus_shift=1 */
@@ -332,8 +373,9 @@ static int sh_mobile_sdhi_remove(struct platform_device *pdev)
}
static const struct dev_pm_ops tmio_mmc_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(tmio_mmc_host_suspend, tmio_mmc_host_resume)
- SET_RUNTIME_PM_OPS(tmio_mmc_host_runtime_suspend,
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_PM_RUNTIME_PM_OPS(tmio_mmc_host_runtime_suspend,
tmio_mmc_host_runtime_resume,
NULL)
};
@@ -341,7 +383,6 @@ static const struct dev_pm_ops tmio_mmc_dev_pm_ops = {
static struct platform_driver sh_mobile_sdhi_driver = {
.driver = {
.name = "sh_mobile_sdhi",
- .owner = THIS_MODULE,
.pm = &tmio_mmc_dev_pm_ops,
.of_match_table = sh_mobile_sdhi_of_match,
},
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 024f67c98cdc..d1663b3c4143 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -990,7 +990,8 @@ static int sunxi_mmc_probe(struct platform_device *pdev)
/* 400kHz ~ 50MHz */
mmc->f_min = 400000;
mmc->f_max = 50000000;
- mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED;
+ mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
+ MMC_CAP_ERASE;
ret = mmc_of_parse(mmc);
if (ret)
@@ -1035,7 +1036,6 @@ static int sunxi_mmc_remove(struct platform_device *pdev)
static struct platform_driver sunxi_mmc_driver = {
.driver = {
.name = "sunxi-mmc",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(sunxi_mmc_of_match),
},
.probe = sunxi_mmc_probe,
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index d1760ebcac03..93c4b40df90a 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -952,8 +952,8 @@ static int tifm_sd_probe(struct tifm_dev *sock)
if (!(TIFM_SOCK_STATE_OCCUPIED
& readl(sock->addr + SOCK_PRESENT_STATE))) {
- pr_warning("%s : card gone, unexpectedly\n",
- dev_name(&sock->dev));
+ pr_warn("%s : card gone, unexpectedly\n",
+ dev_name(&sock->dev));
return rc;
}
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index cfad844730d8..659028ddb8b1 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -30,7 +30,7 @@ static int tmio_mmc_suspend(struct device *dev)
const struct mfd_cell *cell = mfd_get_cell(pdev);
int ret;
- ret = tmio_mmc_host_suspend(dev);
+ ret = pm_runtime_force_suspend(dev);
/* Tell MFD core it can disable us now.*/
if (!ret && cell->disable)
@@ -50,7 +50,7 @@ static int tmio_mmc_resume(struct device *dev)
ret = cell->resume(pdev);
if (!ret)
- ret = tmio_mmc_host_resume(dev);
+ ret = pm_runtime_force_resume(dev);
return ret;
}
@@ -135,6 +135,9 @@ static int tmio_mmc_remove(struct platform_device *pdev)
static const struct dev_pm_ops tmio_mmc_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(tmio_mmc_suspend, tmio_mmc_resume)
+ SET_PM_RUNTIME_PM_OPS(tmio_mmc_host_runtime_suspend,
+ tmio_mmc_host_runtime_resume,
+ NULL)
};
static struct platform_driver tmio_mmc_driver = {
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 100ffe0b2faf..a34ecbe1c1ad 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -40,22 +40,6 @@
struct tmio_mmc_data;
-/*
- * We differentiate between the following 3 power states:
- * 1. card slot powered off, controller stopped. This is used, when either there
- * is no card in the slot, or the card really has to be powered down.
- * 2. card slot powered on, controller stopped. This is used, when a card is in
- * the slot, but no activity is currently taking place. This is a power-
- * saving mode with card-state preserved. This state can be entered, e.g.
- * when MMC clock-gating is used.
- * 3. card slot powered on, controller running. This is the actual active state.
- */
-enum tmio_mmc_power {
- TMIO_MMC_OFF_STOP, /* card power off, controller stopped */
- TMIO_MMC_ON_STOP, /* card power on, controller stopped */
- TMIO_MMC_ON_RUN, /* card power on, controller running */
-};
-
struct tmio_mmc_host {
void __iomem *ctl;
struct mmc_command *cmd;
@@ -63,9 +47,6 @@ struct tmio_mmc_host {
struct mmc_data *data;
struct mmc_host *mmc;
- /* Controller and card power state */
- enum tmio_mmc_power power;
-
/* Callbacks for clock / power control */
void (*set_pwr)(struct platform_device *host, int state);
void (*set_clk_div)(struct platform_device *host, int state);
@@ -92,15 +73,16 @@ struct tmio_mmc_host {
struct delayed_work delayed_reset_work;
struct work_struct done;
- /* Cache IRQ mask */
+ /* Cache */
u32 sdcard_irq_mask;
u32 sdio_irq_mask;
+ unsigned int clk_cache;
spinlock_t lock; /* protect host private data */
unsigned long last_req_ts;
struct mutex ios_lock; /* protect set_ios() context */
bool native_hotplug;
- bool resuming;
+ bool sdio_irq_enabled;
};
int tmio_mmc_host_probe(struct tmio_mmc_host **host,
@@ -162,12 +144,7 @@ static inline void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
}
#endif
-#ifdef CONFIG_PM_SLEEP
-int tmio_mmc_host_suspend(struct device *dev);
-int tmio_mmc_host_resume(struct device *dev);
-#endif
-
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
int tmio_mmc_host_runtime_suspend(struct device *dev);
int tmio_mmc_host_runtime_resume(struct device *dev);
#endif
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
index eb8f1d5c34b1..7d077388b9eb 100644
--- a/drivers/mmc/host/tmio_mmc_dma.c
+++ b/drivers/mmc/host/tmio_mmc_dma.c
@@ -28,10 +28,8 @@ void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
if (!host->chan_tx || !host->chan_rx)
return;
-#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
- /* Switch DMA mode on or off - SuperH specific? */
- sd_ctrl_write16(host, CTL_DMA_ENABLE, enable ? 2 : 0);
-#endif
+ if (host->pdata->flags & TMIO_MMC_HAVE_CTL_DMA_REG)
+ sd_ctrl_write16(host, CTL_DMA_ENABLE, enable ? 2 : 0);
}
void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
@@ -312,7 +310,7 @@ void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdat
if (pdata->dma->chan_priv_rx)
cfg.slave_id = pdata->dma->slave_id_rx;
cfg.direction = DMA_DEV_TO_MEM;
- cfg.src_addr = cfg.dst_addr;
+ cfg.src_addr = cfg.dst_addr + pdata->dma->dma_rx_offset;
cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
cfg.dst_addr = 0;
ret = dmaengine_slave_config(host->chan_rx, &cfg);
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index faf0924e71cb..250bf8c9f998 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -44,6 +44,7 @@
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/mmc/sdio.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
@@ -129,19 +130,28 @@ static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
- if (enable) {
+ if (enable && !host->sdio_irq_enabled) {
+ /* Keep device active while SDIO irq is enabled */
+ pm_runtime_get_sync(mmc_dev(mmc));
+ host->sdio_irq_enabled = true;
+
host->sdio_irq_mask = TMIO_SDIO_MASK_ALL &
~TMIO_SDIO_STAT_IOIRQ;
sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
- } else {
+ } else if (!enable && host->sdio_irq_enabled) {
host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
+
+ host->sdio_irq_enabled = false;
+ pm_runtime_mark_last_busy(mmc_dev(mmc));
+ pm_runtime_put_autosuspend(mmc_dev(mmc));
}
}
-static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
+static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
+ unsigned int new_clock)
{
u32 clk = 0, clock;
@@ -149,7 +159,11 @@ static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
for (clock = host->mmc->f_min, clk = 0x80000080;
new_clock >= (clock<<1); clk >>= 1)
clock <<= 1;
- clk |= 0x100;
+
+ /* the 1/1 clock (no division) is an option */
+ if ((host->pdata->flags & TMIO_MMC_CLK_ACTUAL) &&
+ ((clk >> 22) & 0x1))
+ clk |= 0xff;
}
if (host->set_clk_div)
@@ -245,6 +259,9 @@ static void tmio_mmc_reset_work(struct work_struct *work)
tmio_mmc_abort_dma(host);
mmc_request_done(host->mmc, mrq);
+
+ pm_runtime_mark_last_busy(mmc_dev(host->mmc));
+ pm_runtime_put_autosuspend(mmc_dev(host->mmc));
}
/* called with host->lock held, interrupts disabled */
@@ -274,6 +291,9 @@ static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
tmio_mmc_abort_dma(host);
mmc_request_done(host->mmc, mrq);
+
+ pm_runtime_mark_last_busy(mmc_dev(host->mmc));
+ pm_runtime_put_autosuspend(mmc_dev(host->mmc));
}
static void tmio_mmc_done_work(struct work_struct *work)
@@ -295,6 +315,7 @@ static void tmio_mmc_done_work(struct work_struct *work)
#define TRANSFER_READ 0x1000
#define TRANSFER_MULTI 0x2000
#define SECURITY_CMD 0x4000
+#define NO_CMD12_ISSUE 0x4000 /* TMIO_MMC_HAVE_CMD12_CTRL */
static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
{
@@ -331,6 +352,14 @@ static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command
if (data->blocks > 1) {
sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
c |= TRANSFER_MULTI;
+
+ /*
+ * Disable auto CMD12 at IO_RW_EXTENDED when
+ * multiple block transfer
+ */
+ if ((host->pdata->flags & TMIO_MMC_HAVE_CMD12_CTRL) &&
+ (cmd->opcode == SD_IO_RW_EXTENDED))
+ c |= NO_CMD12_ISSUE;
}
if (data->flags & MMC_DATA_READ)
c |= TRANSFER_READ;
@@ -347,6 +376,40 @@ static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command
return 0;
}
+static void tmio_mmc_transfer_data(struct tmio_mmc_host *host,
+ unsigned short *buf,
+ unsigned int count)
+{
+ int is_read = host->data->flags & MMC_DATA_READ;
+ u8 *buf8;
+
+ /*
+ * Transfer the data
+ */
+ if (is_read)
+ sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
+ else
+ sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
+
+ /* nothing left to do if count is an even number of bytes */
+ if (!(count & 0x1))
+ return;
+
+ /* count is odd: transfer the trailing byte separately */
+ buf8 = (u8 *)(buf + (count >> 1));
+
+ /*
+ * FIXME
+ *
+ * The driver and this function assume little-endian
+ * byte order.
+ */
+ if (is_read)
+ *buf8 = sd_ctrl_read16(host, CTL_SD_DATA_PORT) & 0xff;
+ else
+ sd_ctrl_write16(host, CTL_SD_DATA_PORT, *buf8);
+}
+
/*
* This chip always returns (at least?) as much data as you ask for.
* I'm unsure what happens if you ask for less than a block. This should be
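The tmio_mmc_transfer_data() helper added above moves data through a 16-bit data port, so an odd byte count needs one final single-byte access. A minimal stand-alone sketch of that split, with a hypothetical port_write16() accessor that is not part of this driver:

    #include <stdint.h>
    #include <stddef.h>

    /* hypothetical 16-bit FIFO accessor, for illustration only */
    extern void port_write16(uint16_t val);

    static void fifo_write_buf(const uint8_t *buf, size_t count)
    {
        size_t i;

        /* the even part of the buffer goes out as 16-bit words (little endian) */
        for (i = 0; i + 1 < count; i += 2)
            port_write16(buf[i] | (uint16_t)(buf[i + 1] << 8));

        /* an odd trailing byte is sent on its own */
        if (count & 1)
            port_write16(buf[count - 1]);
    }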
@@ -379,10 +442,7 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
count, host->sg_off, data->flags);
/* Transfer the data */
- if (data->flags & MMC_DATA_READ)
- sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
- else
- sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
+ tmio_mmc_transfer_data(host, buf, count);
host->sg_off += count;
@@ -465,6 +525,9 @@ static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
goto out;
if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
+ u32 status = sd_ctrl_read32(host, CTL_STATUS);
+ bool done = false;
+
/*
* Has all data been written out yet? Testing on SuperH showed,
* that in most cases the first interrupt comes already with the
@@ -473,7 +536,15 @@ static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
* DATAEND interrupt with the BUSY bit set, in these cases
* waiting for one more interrupt fixes the problem.
*/
- if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) {
+ if (host->pdata->flags & TMIO_MMC_HAS_IDLE_WAIT) {
+ if (status & TMIO_STAT_ILL_FUNC)
+ done = true;
+ } else {
+ if (!(status & TMIO_STAT_CMD_BUSY))
+ done = true;
+ }
+
+ if (done) {
tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
tasklet_schedule(&host->dma_complete);
}
@@ -557,6 +628,9 @@ static void tmio_mmc_card_irq_status(struct tmio_mmc_host *host,
pr_debug_status(*status);
pr_debug_status(*ireg);
+
+ /* Clear the status except the interrupt status */
+ sd_ctrl_write32(host, CTL_STATUS, TMIO_MASK_IRQ);
}
static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host,
@@ -637,6 +711,7 @@ irqreturn_t tmio_mmc_sdio_irq(int irq, void *devid)
struct mmc_host *mmc = host->mmc;
struct tmio_mmc_data *pdata = host->pdata;
unsigned int ireg, status;
+ unsigned int sdio_status;
if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
return IRQ_HANDLED;
@@ -644,7 +719,11 @@ irqreturn_t tmio_mmc_sdio_irq(int irq, void *devid)
status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdcard_irq_mask;
- sd_ctrl_write16(host, CTL_SDIO_STATUS, status & ~TMIO_SDIO_MASK_ALL);
+ sdio_status = status & ~TMIO_SDIO_MASK_ALL;
+ if (pdata->flags & TMIO_MMC_SDIO_STATUS_QUIRK)
+ sdio_status |= 6;
+
+ sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status);
if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
mmc_signal_sdio_irq(mmc);
@@ -728,6 +807,8 @@ static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
spin_unlock_irqrestore(&host->lock, flags);
+ pm_runtime_get_sync(mmc_dev(mmc));
+
if (mrq->data) {
ret = tmio_mmc_start_data(host, mrq->data);
if (ret)
@@ -746,11 +827,14 @@ fail:
host->mrq = NULL;
mrq->cmd->error = ret;
mmc_request_done(mmc, mrq);
+
+ pm_runtime_mark_last_busy(mmc_dev(mmc));
+ pm_runtime_put_autosuspend(mmc_dev(mmc));
}
-static int tmio_mmc_clk_update(struct mmc_host *mmc)
+static int tmio_mmc_clk_update(struct tmio_mmc_host *host)
{
- struct tmio_mmc_host *host = mmc_priv(mmc);
+ struct mmc_host *mmc = host->mmc;
struct tmio_mmc_data *pdata = host->pdata;
int ret;
@@ -812,6 +896,19 @@ static void tmio_mmc_power_off(struct tmio_mmc_host *host)
host->set_pwr(host->pdev, 0);
}
+static void tmio_mmc_set_bus_width(struct tmio_mmc_host *host,
+ unsigned char bus_width)
+{
+ switch (bus_width) {
+ case MMC_BUS_WIDTH_1:
+ sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
+ break;
+ case MMC_BUS_WIDTH_4:
+ sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
+ break;
+ }
+}
+
/* Set MMC clock / power.
* Note: This controller uses a simple divider scheme therefore it cannot
* run a MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as
@@ -824,6 +921,8 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct device *dev = &host->pdev->dev;
unsigned long flags;
+ pm_runtime_get_sync(mmc_dev(mmc));
+
mutex_lock(&host->ios_lock);
spin_lock_irqsave(&host->lock, flags);
@@ -850,60 +949,22 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
spin_unlock_irqrestore(&host->lock, flags);
- /*
- * host->power toggles between false and true in both cases - either
- * or not the controller can be runtime-suspended during inactivity.
- * But if the controller has to be kept on, the runtime-pm usage_count
- * is kept positive, so no suspending actually takes place.
- */
- if (ios->power_mode == MMC_POWER_ON && ios->clock) {
- if (host->power != TMIO_MMC_ON_RUN) {
- tmio_mmc_clk_update(mmc);
- pm_runtime_get_sync(dev);
- if (host->resuming) {
- tmio_mmc_reset(host);
- host->resuming = false;
- }
- }
- if (host->power == TMIO_MMC_OFF_STOP)
- tmio_mmc_reset(host);
+ switch (ios->power_mode) {
+ case MMC_POWER_OFF:
+ tmio_mmc_power_off(host);
+ tmio_mmc_clk_stop(host);
+ break;
+ case MMC_POWER_UP:
tmio_mmc_set_clock(host, ios->clock);
- if (host->power == TMIO_MMC_OFF_STOP)
- /* power up SD card and the bus */
- tmio_mmc_power_on(host, ios->vdd);
- host->power = TMIO_MMC_ON_RUN;
- /* start bus clock */
+ tmio_mmc_power_on(host, ios->vdd);
tmio_mmc_clk_start(host);
- } else if (ios->power_mode != MMC_POWER_UP) {
- struct tmio_mmc_data *pdata = host->pdata;
- unsigned int old_power = host->power;
-
- if (old_power != TMIO_MMC_OFF_STOP) {
- if (ios->power_mode == MMC_POWER_OFF) {
- tmio_mmc_power_off(host);
- host->power = TMIO_MMC_OFF_STOP;
- } else {
- host->power = TMIO_MMC_ON_STOP;
- }
- }
-
- if (old_power == TMIO_MMC_ON_RUN) {
- tmio_mmc_clk_stop(host);
- pm_runtime_put(dev);
- if (pdata->clk_disable)
- pdata->clk_disable(host->pdev);
- }
- }
-
- if (host->power != TMIO_MMC_OFF_STOP) {
- switch (ios->bus_width) {
- case MMC_BUS_WIDTH_1:
- sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
+ tmio_mmc_set_bus_width(host, ios->bus_width);
break;
- case MMC_BUS_WIDTH_4:
- sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
+ case MMC_POWER_ON:
+ tmio_mmc_set_clock(host, ios->clock);
+ tmio_mmc_clk_start(host);
+ tmio_mmc_set_bus_width(host, ios->bus_width);
break;
- }
}
/* Let things settle. delay taken from winCE driver */
@@ -915,7 +976,12 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
ios->clock, ios->power_mode);
host->mrq = NULL;
+ host->clk_cache = ios->clock;
+
mutex_unlock(&host->ios_lock);
+
+ pm_runtime_mark_last_busy(mmc_dev(mmc));
+ pm_runtime_put_autosuspend(mmc_dev(mmc));
}
static int tmio_mmc_get_ro(struct mmc_host *mmc)
@@ -926,8 +992,25 @@ static int tmio_mmc_get_ro(struct mmc_host *mmc)
if (ret >= 0)
return ret;
- return !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
- (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
+ pm_runtime_get_sync(mmc_dev(mmc));
+ ret = !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
+ (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
+ pm_runtime_mark_last_busy(mmc_dev(mmc));
+ pm_runtime_put_autosuspend(mmc_dev(mmc));
+
+ return ret;
+}
+
+static int tmio_multi_io_quirk(struct mmc_card *card,
+ unsigned int direction, int blk_size)
+{
+ struct tmio_mmc_host *host = mmc_priv(card->host);
+ struct tmio_mmc_data *pdata = host->pdata;
+
+ if (pdata->multi_io_quirk)
+ return pdata->multi_io_quirk(card, direction, blk_size);
+
+ return blk_size;
}
static const struct mmc_host_ops tmio_mmc_ops = {
@@ -936,6 +1019,7 @@ static const struct mmc_host_ops tmio_mmc_ops = {
.get_ro = tmio_mmc_get_ro,
.get_cd = mmc_gpio_get_cd,
.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
+ .multi_io_quirk = tmio_multi_io_quirk,
};
static int tmio_mmc_init_ocr(struct tmio_mmc_host *host)
@@ -1032,28 +1116,23 @@ int tmio_mmc_host_probe(struct tmio_mmc_host **host,
mmc->caps & MMC_CAP_NONREMOVABLE ||
mmc->slot.cd_irq >= 0);
- _host->power = TMIO_MMC_OFF_STOP;
- pm_runtime_enable(&pdev->dev);
- ret = pm_runtime_resume(&pdev->dev);
- if (ret < 0)
- goto pm_disable;
-
- if (tmio_mmc_clk_update(mmc) < 0) {
+ if (tmio_mmc_clk_update(_host) < 0) {
mmc->f_max = pdata->hclk;
mmc->f_min = mmc->f_max / 512;
}
/*
- * There are 4 different scenarios for the card detection:
- * 1) an external gpio irq handles the cd (best for power savings)
- * 2) internal sdhi irq handles the cd
- * 3) a worker thread polls the sdhi - indicated by MMC_CAP_NEEDS_POLL
- * 4) the medium is non-removable - indicated by MMC_CAP_NONREMOVABLE
- *
- * While we increment the runtime PM counter for all scenarios when
- * the mmc core activates us by calling an appropriate set_ios(), we
- * must additionally ensure that in case 2) the tmio mmc hardware stays
- * powered on during runtime for the card detection to work.
+ * Check the sanity of mmc->f_min to prevent tmio_mmc_set_clock() from
+ * looping forever...
+ */
+ if (mmc->f_min == 0) {
+ ret = -EINVAL;
+ goto host_free;
+ }
+
+ /*
+ * While using internal tmio hardware logic for card detection, we need
+ * to ensure it stays powered for it to work.
*/
if (_host->native_hotplug)
pm_runtime_get_noresume(&pdev->dev);
@@ -1074,8 +1153,12 @@ int tmio_mmc_host_probe(struct tmio_mmc_host **host,
_host->sdcard_irq_mask &= ~irq_mask;
- if (pdata->flags & TMIO_MMC_SDIO_IRQ)
- tmio_mmc_enable_sdio_irq(mmc, 0);
+ _host->sdio_irq_enabled = false;
+ if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
+ _host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
+ sd_ctrl_write16(_host, CTL_SDIO_IRQ_MASK, _host->sdio_irq_mask);
+ sd_ctrl_write16(_host, CTL_TRANSACTION_CTL, 0x0000);
+ }
spin_lock_init(&_host->lock);
mutex_init(&_host->ios_lock);
@@ -1087,9 +1170,12 @@ int tmio_mmc_host_probe(struct tmio_mmc_host **host,
/* See if we also get DMA */
tmio_mmc_request_dma(_host, pdata);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
ret = mmc_add_host(mmc);
- if (pdata->clk_disable)
- pdata->clk_disable(pdev);
if (ret < 0) {
tmio_mmc_host_remove(_host);
return ret;
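The probe changes above put the host on runtime PM autosuspend with a 50 ms delay; every path that touches the controller then brackets the access with a get/put pair. A minimal sketch of that pairing (illustrative only, not part of the patch):

    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    /* illustrative only: the autosuspend bracketing used by the request/set_ios paths */
    static void example_hw_access(struct device *dev)
    {
        pm_runtime_get_sync(dev);        /* make sure the controller is powered up */

        /* ... touch the hardware here ... */

        pm_runtime_mark_last_busy(dev);  /* restart the autosuspend timer */
        pm_runtime_put_autosuspend(dev); /* allow suspend once the delay expires */
    }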
@@ -1103,15 +1189,13 @@ int tmio_mmc_host_probe(struct tmio_mmc_host **host,
tmio_mmc_host_remove(_host);
return ret;
}
+ mmc_gpiod_request_cd_irq(mmc);
}
*host = _host;
return 0;
-pm_disable:
- pm_runtime_disable(&pdev->dev);
- iounmap(_host->ctl);
host_free:
mmc_free_host(mmc);
@@ -1142,34 +1226,20 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
}
EXPORT_SYMBOL(tmio_mmc_host_remove);
-#ifdef CONFIG_PM_SLEEP
-int tmio_mmc_host_suspend(struct device *dev)
+#ifdef CONFIG_PM
+int tmio_mmc_host_runtime_suspend(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
struct tmio_mmc_host *host = mmc_priv(mmc);
tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);
- return 0;
-}
-EXPORT_SYMBOL(tmio_mmc_host_suspend);
-
-int tmio_mmc_host_resume(struct device *dev)
-{
- struct mmc_host *mmc = dev_get_drvdata(dev);
- struct tmio_mmc_host *host = mmc_priv(mmc);
- tmio_mmc_enable_dma(host, true);
+ if (host->clk_cache)
+ tmio_mmc_clk_stop(host);
- /* The MMC core will perform the complete set up */
- host->resuming = true;
- return 0;
-}
-EXPORT_SYMBOL(tmio_mmc_host_resume);
-#endif
+ if (host->pdata->clk_disable)
+ host->pdata->clk_disable(host->pdev);
-#ifdef CONFIG_PM_RUNTIME
-int tmio_mmc_host_runtime_suspend(struct device *dev)
-{
return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_runtime_suspend);
@@ -1179,6 +1249,14 @@ int tmio_mmc_host_runtime_resume(struct device *dev)
struct mmc_host *mmc = dev_get_drvdata(dev);
struct tmio_mmc_host *host = mmc_priv(mmc);
+ tmio_mmc_reset(host);
+ tmio_mmc_clk_update(host);
+
+ if (host->clk_cache) {
+ tmio_mmc_set_clock(host, host->clk_cache);
+ tmio_mmc_clk_start(host);
+ }
+
tmio_mmc_enable_dma(host, true);
return 0;
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index 1defd5ed3236..9a6dfb0c4ecc 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -803,8 +803,7 @@ static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq)
default:
#ifdef CONFIG_MMC_DEBUG
- pr_warning("%s: Data command %d is not "
- "supported by this controller.\n",
+ pr_warn("%s: Data command %d is not supported by this controller\n",
mmc_hostname(host->mmc), cmd->opcode);
#endif
cmd->error = -EINVAL;
@@ -1429,8 +1428,8 @@ free:
free_dma(dma);
err:
- pr_warning(DRIVER_NAME ": Unable to allocate DMA %d. "
- "Falling back on FIFO.\n", dma);
+ pr_warn(DRIVER_NAME ": Unable to allocate DMA %d - falling back on FIFO\n",
+ dma);
}
static void wbsd_release_dma(struct wbsd_host *host)
@@ -1664,9 +1663,7 @@ static int wbsd_init(struct device *dev, int base, int irq, int dma,
ret = wbsd_scan(host);
if (ret) {
if (pnp && (ret == -ENODEV)) {
- pr_warning(DRIVER_NAME
- ": Unable to confirm device presence. You may "
- "experience lock-ups.\n");
+ pr_warn(DRIVER_NAME ": Unable to confirm device presence - you may experience lock-ups\n");
} else {
wbsd_free_mmc(dev);
return ret;
@@ -1688,10 +1685,7 @@ static int wbsd_init(struct device *dev, int base, int irq, int dma,
*/
if (pnp) {
if ((host->config != 0) && !wbsd_chip_validate(host)) {
- pr_warning(DRIVER_NAME
- ": PnP active but chip not configured! "
- "You probably have a buggy BIOS. "
- "Configuring chip manually.\n");
+ pr_warn(DRIVER_NAME ": PnP active but chip not configured! You probably have a buggy BIOS. Configuring chip manually.\n");
wbsd_chip_config(host);
}
} else
@@ -1884,10 +1878,7 @@ static int wbsd_pnp_resume(struct pnp_dev *pnp_dev)
*/
if (host->config != 0) {
if (!wbsd_chip_validate(host)) {
- pr_warning(DRIVER_NAME
- ": PnP active but chip not configured! "
- "You probably have a buggy BIOS. "
- "Configuring chip manually.\n");
+ pr_warn(DRIVER_NAME ": PnP active but chip not configured! You probably have a buggy BIOS. Configuring chip manually.\n");
wbsd_chip_config(host);
}
}
diff --git a/drivers/net/ethernet/apm/xgene/Makefile b/drivers/net/ethernet/apm/xgene/Makefile
index c643e8a0a0dc..589b35247713 100644
--- a/drivers/net/ethernet/apm/xgene/Makefile
+++ b/drivers/net/ethernet/apm/xgene/Makefile
@@ -2,5 +2,6 @@
# Makefile for APM X-Gene Ethernet Driver.
#
-xgene-enet-objs := xgene_enet_hw.o xgene_enet_main.o xgene_enet_ethtool.o
+xgene-enet-objs := xgene_enet_hw.o xgene_enet_xgmac.o \
+ xgene_enet_main.o xgene_enet_ethtool.o
obj-$(CONFIG_NET_XGENE) += xgene-enet.o
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
index 63f2aa54a594..c1c997b92342 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
@@ -59,10 +59,22 @@ static int xgene_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
struct phy_device *phydev = pdata->phy_dev;
- if (phydev == NULL)
- return -ENODEV;
+ if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
+ if (phydev == NULL)
+ return -ENODEV;
- return phy_ethtool_gset(phydev, cmd);
+ return phy_ethtool_gset(phydev, cmd);
+ }
+
+ cmd->supported = SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE;
+ cmd->advertising = cmd->supported;
+ ethtool_cmd_speed_set(cmd, SPEED_10000);
+ cmd->duplex = DUPLEX_FULL;
+ cmd->port = PORT_FIBRE;
+ cmd->transceiver = XCVR_EXTERNAL;
+ cmd->autoneg = AUTONEG_DISABLE;
+
+ return 0;
}
static int xgene_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
@@ -70,10 +82,14 @@ static int xgene_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
struct phy_device *phydev = pdata->phy_dev;
- if (phydev == NULL)
- return -ENODEV;
+ if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
+ if (phydev == NULL)
+ return -ENODEV;
+
+ return phy_ethtool_sset(phydev, cmd);
+ }
- return phy_ethtool_sset(phydev, cmd);
+ return -EINVAL;
}
static void xgene_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 812d8d65159b..c8f3824f7606 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -402,7 +402,7 @@ static int xgene_mii_phy_read(struct xgene_enet_pdata *pdata,
return data;
}
-void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata)
+static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata)
{
u32 addr0, addr1;
u8 *dev_addr = pdata->ndev->dev_addr;
@@ -436,13 +436,13 @@ static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
return 0;
}
-void xgene_gmac_reset(struct xgene_enet_pdata *pdata)
+static void xgene_gmac_reset(struct xgene_enet_pdata *pdata)
{
xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, SOFT_RESET1);
xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, 0);
}
-void xgene_gmac_init(struct xgene_enet_pdata *pdata, int speed)
+static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
{
u32 value, mc2;
u32 intf_ctl, rgmii;
@@ -456,7 +456,7 @@ void xgene_gmac_init(struct xgene_enet_pdata *pdata, int speed)
xgene_enet_rd_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, &intf_ctl);
xgene_enet_rd_csr(pdata, RGMII_REG_0_ADDR, &rgmii);
- switch (speed) {
+ switch (pdata->phy_speed) {
case SPEED_10:
ENET_INTERFACE_MODE2_SET(&mc2, 1);
CFG_MACMODE_SET(&icm0, 0);
@@ -525,8 +525,8 @@ static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata)
xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEFPQASSOC_ADDR, val);
}
-void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
- u32 dst_ring_num, u16 bufpool_id)
+static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
+ u32 dst_ring_num, u16 bufpool_id)
{
u32 cb;
u32 fpsel;
@@ -544,7 +544,7 @@ void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
xgene_enet_wr_csr(pdata, CLE_BYPASS_REG1_0_ADDR, cb);
}
-void xgene_gmac_rx_enable(struct xgene_enet_pdata *pdata)
+static void xgene_gmac_rx_enable(struct xgene_enet_pdata *pdata)
{
u32 data;
@@ -552,7 +552,7 @@ void xgene_gmac_rx_enable(struct xgene_enet_pdata *pdata)
xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | RX_EN);
}
-void xgene_gmac_tx_enable(struct xgene_enet_pdata *pdata)
+static void xgene_gmac_tx_enable(struct xgene_enet_pdata *pdata)
{
u32 data;
@@ -560,7 +560,7 @@ void xgene_gmac_tx_enable(struct xgene_enet_pdata *pdata)
xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | TX_EN);
}
-void xgene_gmac_rx_disable(struct xgene_enet_pdata *pdata)
+static void xgene_gmac_rx_disable(struct xgene_enet_pdata *pdata)
{
u32 data;
@@ -568,7 +568,7 @@ void xgene_gmac_rx_disable(struct xgene_enet_pdata *pdata)
xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~RX_EN);
}
-void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
+static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
{
u32 data;
@@ -576,7 +576,7 @@ void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN);
}
-void xgene_enet_reset(struct xgene_enet_pdata *pdata)
+static void xgene_enet_reset(struct xgene_enet_pdata *pdata)
{
u32 val;
@@ -593,7 +593,7 @@ void xgene_enet_reset(struct xgene_enet_pdata *pdata)
xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, val);
}
-void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
+static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
{
clk_disable_unprepare(pdata->clk);
}
@@ -627,10 +627,10 @@ static void xgene_enet_adjust_link(struct net_device *ndev)
if (phydev->link) {
if (pdata->phy_speed != phydev->speed) {
- xgene_gmac_init(pdata, phydev->speed);
+ pdata->phy_speed = phydev->speed;
+ xgene_gmac_init(pdata);
xgene_gmac_rx_enable(pdata);
xgene_gmac_tx_enable(pdata);
- pdata->phy_speed = phydev->speed;
phy_print_status(phydev);
}
} else {
@@ -726,3 +726,19 @@ void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata)
mdiobus_free(pdata->mdio_bus);
pdata->mdio_bus = NULL;
}
+
+struct xgene_mac_ops xgene_gmac_ops = {
+ .init = xgene_gmac_init,
+ .reset = xgene_gmac_reset,
+ .rx_enable = xgene_gmac_rx_enable,
+ .tx_enable = xgene_gmac_tx_enable,
+ .rx_disable = xgene_gmac_rx_disable,
+ .tx_disable = xgene_gmac_tx_disable,
+ .set_mac_addr = xgene_gmac_set_mac_addr,
+};
+
+struct xgene_port_ops xgene_gport_ops = {
+ .reset = xgene_enet_reset,
+ .cle_bypass = xgene_enet_cle_bypass,
+ .shutdown = xgene_gport_shutdown,
+};
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index 371e7a5b2507..15ec4267779c 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -42,6 +42,11 @@ static inline u32 xgene_get_bits(u32 val, u32 start, u32 end)
return (val & GENMASK(end, start)) >> start;
}
+enum xgene_enet_rm {
+ RM0,
+ RM3 = 3
+};
+
#define CSR_RING_ID 0x0008
#define OVERWRITE BIT(31)
#define IS_BUFFER_POOL BIT(20)
@@ -52,7 +57,6 @@ static inline u32 xgene_get_bits(u32 val, u32 start, u32 end)
#define CSR_RING_WR_BASE 0x0070
#define NUM_RING_CONFIG 5
#define BUFPOOL_MODE 3
-#define RM3 3
#define INC_DEC_CMD_ADDR 0x002c
#define UDP_HDR_SIZE 2
#define BUF_LEN_CODE_2K 0x5000
@@ -94,11 +98,9 @@ static inline u32 xgene_get_bits(u32 val, u32 start, u32 end)
#define BLOCK_ETH_CSR_OFFSET 0x2000
#define BLOCK_ETH_RING_IF_OFFSET 0x9000
-#define BLOCK_ETH_CLKRST_CSR_OFFSET 0xC000
#define BLOCK_ETH_DIAG_CSR_OFFSET 0xD000
#define BLOCK_ETH_MAC_OFFSET 0x0000
-#define BLOCK_ETH_STATS_OFFSET 0x0014
#define BLOCK_ETH_MAC_CSR_OFFSET 0x2800
#define MAC_ADDR_REG_OFFSET 0x00
@@ -107,12 +109,6 @@ static inline u32 xgene_get_bits(u32 val, u32 start, u32 end)
#define MAC_READ_REG_OFFSET 0x0c
#define MAC_COMMAND_DONE_REG_OFFSET 0x10
-#define STAT_ADDR_REG_OFFSET 0x00
-#define STAT_COMMAND_REG_OFFSET 0x04
-#define STAT_WRITE_REG_OFFSET 0x08
-#define STAT_READ_REG_OFFSET 0x0c
-#define STAT_COMMAND_DONE_REG_OFFSET 0x10
-
#define MII_MGMT_CONFIG_ADDR 0x20
#define MII_MGMT_COMMAND_ADDR 0x24
#define MII_MGMT_ADDRESS_ADDR 0x28
@@ -318,20 +314,10 @@ void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
struct xgene_enet_pdata *pdata,
enum xgene_enet_err_code status);
-void xgene_enet_reset(struct xgene_enet_pdata *priv);
-void xgene_gmac_reset(struct xgene_enet_pdata *priv);
-void xgene_gmac_init(struct xgene_enet_pdata *priv, int speed);
-void xgene_gmac_tx_enable(struct xgene_enet_pdata *priv);
-void xgene_gmac_rx_enable(struct xgene_enet_pdata *priv);
-void xgene_gmac_tx_disable(struct xgene_enet_pdata *priv);
-void xgene_gmac_rx_disable(struct xgene_enet_pdata *priv);
-void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata);
-void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
- u32 dst_ring_num, u16 bufpool_id);
-void xgene_gport_shutdown(struct xgene_enet_pdata *priv);
-void xgene_gmac_get_tx_stats(struct xgene_enet_pdata *pdata);
-
int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata);
void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata);
+extern struct xgene_mac_ops xgene_gmac_ops;
+extern struct xgene_port_ops xgene_gport_ops;
+
#endif /* __XGENE_ENET_HW_H__ */
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index e4222af2baa6..9b85239ceedf 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -21,6 +21,7 @@
#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
+#include "xgene_enet_xgmac.h"
static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
@@ -390,7 +391,7 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
}
}
- return budget;
+ return count;
}
static int xgene_enet_napi(struct napi_struct *napi, const int budget)
@@ -413,7 +414,7 @@ static void xgene_enet_timeout(struct net_device *ndev)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
- xgene_gmac_reset(pdata);
+ pdata->mac_ops->reset(pdata);
}
static int xgene_enet_register_irq(struct net_device *ndev)
@@ -445,18 +446,21 @@ static void xgene_enet_free_irq(struct net_device *ndev)
static int xgene_enet_open(struct net_device *ndev)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+ struct xgene_mac_ops *mac_ops = pdata->mac_ops;
int ret;
- xgene_gmac_tx_enable(pdata);
- xgene_gmac_rx_enable(pdata);
+ mac_ops->tx_enable(pdata);
+ mac_ops->rx_enable(pdata);
ret = xgene_enet_register_irq(ndev);
if (ret)
return ret;
napi_enable(&pdata->rx_ring->napi);
- if (pdata->phy_dev)
+ if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
phy_start(pdata->phy_dev);
+ else
+ schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);
netif_start_queue(ndev);
@@ -466,18 +470,21 @@ static int xgene_enet_open(struct net_device *ndev)
static int xgene_enet_close(struct net_device *ndev)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+ struct xgene_mac_ops *mac_ops = pdata->mac_ops;
netif_stop_queue(ndev);
- if (pdata->phy_dev)
+ if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
phy_stop(pdata->phy_dev);
+ else
+ cancel_delayed_work_sync(&pdata->link_work);
napi_disable(&pdata->rx_ring->napi);
xgene_enet_free_irq(ndev);
xgene_enet_process_ring(pdata->rx_ring, -1);
- xgene_gmac_tx_disable(pdata);
- xgene_gmac_rx_disable(pdata);
+ mac_ops->tx_disable(pdata);
+ mac_ops->rx_disable(pdata);
return 0;
}
@@ -613,7 +620,6 @@ static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
ring->cmd_base = pdata->ring_cmd_addr + (ring->num << 6);
ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
- pdata->rm = RM3;
ring = xgene_enet_setup_ring(ring);
netdev_dbg(ndev, "ring info: num=%d size=%d id=%d slots=%d\n",
ring->num, ring->size, ring->id, ring->slots);
@@ -724,7 +730,7 @@ static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
ret = eth_mac_addr(ndev, addr);
if (ret)
return ret;
- xgene_gmac_set_mac_addr(pdata);
+ pdata->mac_ops->set_mac_addr(pdata);
return ret;
}
@@ -803,8 +809,13 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
pdata->phy_mode = of_get_phy_mode(pdev->dev.of_node);
if (pdata->phy_mode < 0) {
- dev_err(dev, "Incorrect phy-connection-type in DTS\n");
- return -EINVAL;
+ dev_err(dev, "Unable to get phy-connection-type\n");
+ return pdata->phy_mode;
+ }
+ if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII &&
+ pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
+ dev_err(dev, "Incorrect phy-connection-type specified\n");
+ return -ENODEV;
}
pdata->clk = devm_clk_get(&pdev->dev, NULL);
@@ -819,12 +830,18 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
- pdata->mcx_mac_addr = base_addr + BLOCK_ETH_MAC_OFFSET;
- pdata->mcx_stats_addr = base_addr + BLOCK_ETH_STATS_OFFSET;
- pdata->mcx_mac_csr_addr = base_addr + BLOCK_ETH_MAC_CSR_OFFSET;
+ if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
+ pdata->mcx_mac_addr = base_addr + BLOCK_ETH_MAC_OFFSET;
+ pdata->mcx_mac_csr_addr = base_addr + BLOCK_ETH_MAC_CSR_OFFSET;
+ pdata->rm = RM3;
+ } else {
+ pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
+ pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
+ pdata->rm = RM0;
+ }
pdata->rx_buff_cnt = NUM_PKT_BUF;
- return ret;
+ return 0;
}
static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
@@ -834,8 +851,7 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
u16 dst_ring_num;
int ret;
- xgene_gmac_tx_disable(pdata);
- xgene_gmac_rx_disable(pdata);
+ pdata->port_ops->reset(pdata);
ret = xgene_enet_create_desc_rings(ndev);
if (ret) {
@@ -853,11 +869,26 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
}
dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring);
- xgene_enet_cle_bypass(pdata, dst_ring_num, buf_pool->id);
+ pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id);
+ pdata->mac_ops->init(pdata);
return ret;
}
+static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
+{
+ switch (pdata->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ pdata->mac_ops = &xgene_gmac_ops;
+ pdata->port_ops = &xgene_gport_ops;
+ break;
+ default:
+ pdata->mac_ops = &xgene_xgmac_ops;
+ pdata->port_ops = &xgene_xgport_ops;
+ break;
+ }
+}
+
static int xgene_enet_probe(struct platform_device *pdev)
{
struct net_device *ndev;
@@ -886,8 +917,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
if (ret)
goto err;
- xgene_enet_reset(pdata);
- xgene_gmac_init(pdata, SPEED_1000);
+ xgene_enet_setup_ops(pdata);
ret = register_netdev(ndev);
if (ret) {
@@ -907,7 +937,10 @@ static int xgene_enet_probe(struct platform_device *pdev)
napi = &pdata->rx_ring->napi;
netif_napi_add(ndev, napi, xgene_enet_napi, NAPI_POLL_WEIGHT);
- ret = xgene_enet_mdio_config(pdata);
+ if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
+ ret = xgene_enet_mdio_config(pdata);
+ else
+ INIT_DELAYED_WORK(&pdata->link_work, xgene_enet_link_state);
return ret;
err:
@@ -918,19 +951,21 @@ err:
static int xgene_enet_remove(struct platform_device *pdev)
{
struct xgene_enet_pdata *pdata;
+ struct xgene_mac_ops *mac_ops;
struct net_device *ndev;
pdata = platform_get_drvdata(pdev);
+ mac_ops = pdata->mac_ops;
ndev = pdata->ndev;
- xgene_gmac_rx_disable(pdata);
- xgene_gmac_tx_disable(pdata);
+ mac_ops->rx_disable(pdata);
+ mac_ops->tx_disable(pdata);
netif_napi_del(&pdata->rx_ring->napi);
xgene_enet_mdio_remove(pdata);
xgene_enet_delete_desc_rings(pdata);
unregister_netdev(ndev);
- xgene_gport_shutdown(pdata);
+ pdata->port_ops->shutdown(pdata);
free_netdev(ndev);
return 0;
@@ -956,5 +991,6 @@ module_platform_driver(xgene_enet_driver);
MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
MODULE_VERSION(XGENE_DRV_VERSION);
+MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 0815866986b0..86cf68b65584 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -68,6 +68,23 @@ struct xgene_enet_desc_ring {
};
};
+struct xgene_mac_ops {
+ void (*init)(struct xgene_enet_pdata *pdata);
+ void (*reset)(struct xgene_enet_pdata *pdata);
+ void (*tx_enable)(struct xgene_enet_pdata *pdata);
+ void (*rx_enable)(struct xgene_enet_pdata *pdata);
+ void (*tx_disable)(struct xgene_enet_pdata *pdata);
+ void (*rx_disable)(struct xgene_enet_pdata *pdata);
+ void (*set_mac_addr)(struct xgene_enet_pdata *pdata);
+};
+
+struct xgene_port_ops {
+ void (*reset)(struct xgene_enet_pdata *pdata);
+ void (*cle_bypass)(struct xgene_enet_pdata *pdata,
+ u32 dst_ring_num, u16 bufpool_id);
+ void (*shutdown)(struct xgene_enet_pdata *pdata);
+};
+
/* ethernet private data */
struct xgene_enet_pdata {
struct net_device *ndev;
@@ -88,16 +105,17 @@ struct xgene_enet_pdata {
void __iomem *eth_ring_if_addr;
void __iomem *eth_diag_csr_addr;
void __iomem *mcx_mac_addr;
- void __iomem *mcx_stats_addr;
void __iomem *mcx_mac_csr_addr;
void __iomem *base_addr;
void __iomem *ring_csr_addr;
void __iomem *ring_cmd_addr;
u32 phy_addr;
int phy_mode;
- u32 speed;
- u16 rm;
+ enum xgene_enet_rm rm;
struct rtnl_link_stats64 stats;
+ struct xgene_mac_ops *mac_ops;
+ struct xgene_port_ops *port_ops;
+ struct delayed_work link_work;
};
/* Set the specified value into a bit-field defined by its starting position
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
new file mode 100644
index 000000000000..cd64b9f18b58
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -0,0 +1,331 @@
+/* Applied Micro X-Gene SoC Ethernet Driver
+ *
+ * Copyright (c) 2014, Applied Micro Circuits Corporation
+ * Authors: Iyappan Subramanian <isubramanian@apm.com>
+ * Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "xgene_enet_main.h"
+#include "xgene_enet_hw.h"
+#include "xgene_enet_xgmac.h"
+
+static void xgene_enet_wr_csr(struct xgene_enet_pdata *pdata,
+ u32 offset, u32 val)
+{
+ void __iomem *addr = pdata->eth_csr_addr + offset;
+
+ iowrite32(val, addr);
+}
+
+static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *pdata,
+ u32 offset, u32 val)
+{
+ void __iomem *addr = pdata->eth_ring_if_addr + offset;
+
+ iowrite32(val, addr);
+}
+
+static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *pdata,
+ u32 offset, u32 val)
+{
+ void __iomem *addr = pdata->eth_diag_csr_addr + offset;
+
+ iowrite32(val, addr);
+}
+
+static bool xgene_enet_wr_indirect(void __iomem *addr, void __iomem *wr,
+ void __iomem *cmd, void __iomem *cmd_done,
+ u32 wr_addr, u32 wr_data)
+{
+ u32 done;
+ u8 wait = 10;
+
+ iowrite32(wr_addr, addr);
+ iowrite32(wr_data, wr);
+ iowrite32(XGENE_ENET_WR_CMD, cmd);
+
+ /* wait for write command to complete */
+ while (!(done = ioread32(cmd_done)) && wait--)
+ udelay(1);
+
+ if (!done)
+ return false;
+
+ iowrite32(0, cmd);
+
+ return true;
+}
+
+static void xgene_enet_wr_mac(struct xgene_enet_pdata *pdata,
+ u32 wr_addr, u32 wr_data)
+{
+ void __iomem *addr, *wr, *cmd, *cmd_done;
+
+ addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
+ wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET;
+ cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
+ cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;
+
+ if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data))
+ netdev_err(pdata->ndev, "MCX mac write failed, addr: %04x\n",
+ wr_addr);
+}
+
+static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata,
+ u32 offset, u32 *val)
+{
+ void __iomem *addr = pdata->eth_csr_addr + offset;
+
+ *val = ioread32(addr);
+}
+
+static void xgene_enet_rd_diag_csr(struct xgene_enet_pdata *pdata,
+ u32 offset, u32 *val)
+{
+ void __iomem *addr = pdata->eth_diag_csr_addr + offset;
+
+ *val = ioread32(addr);
+}
+
+static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd,
+ void __iomem *cmd, void __iomem *cmd_done,
+ u32 rd_addr, u32 *rd_data)
+{
+ u32 done;
+ u8 wait = 10;
+
+ iowrite32(rd_addr, addr);
+ iowrite32(XGENE_ENET_RD_CMD, cmd);
+
+ /* wait for read command to complete */
+ while (!(done = ioread32(cmd_done)) && wait--)
+ udelay(1);
+
+ if (!done)
+ return false;
+
+ *rd_data = ioread32(rd);
+ iowrite32(0, cmd);
+
+ return true;
+}
+
+static void xgene_enet_rd_mac(struct xgene_enet_pdata *pdata,
+ u32 rd_addr, u32 *rd_data)
+{
+ void __iomem *addr, *rd, *cmd, *cmd_done;
+
+ addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
+ rd = pdata->mcx_mac_addr + MAC_READ_REG_OFFSET;
+ cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
+ cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;
+
+ if (!xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data))
+ netdev_err(pdata->ndev, "MCX mac read failed, addr: %04x\n",
+ rd_addr);
+}
+
+static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
+{
+ struct net_device *ndev = pdata->ndev;
+ u32 data;
+ u8 wait = 10;
+
+ xgene_enet_wr_diag_csr(pdata, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0x0);
+ do {
+ usleep_range(100, 110);
+ xgene_enet_rd_diag_csr(pdata, ENET_BLOCK_MEM_RDY_ADDR, &data);
+ } while ((data != 0xffffffff) && wait--);
+
+ if (data != 0xffffffff) {
+ netdev_err(ndev, "Failed to release memory from shutdown\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata)
+{
+ xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQASSOC_ADDR, 0);
+ xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPQASSOC_ADDR, 0);
+ xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEWQASSOC_ADDR, 0);
+ xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEFPQASSOC_ADDR, 0);
+}
+
+static void xgene_xgmac_reset(struct xgene_enet_pdata *pdata)
+{
+ xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_0, HSTMACRST);
+ xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_0, 0);
+}
+
+static void xgene_xgmac_set_mac_addr(struct xgene_enet_pdata *pdata)
+{
+ u32 addr0, addr1;
+ u8 *dev_addr = pdata->ndev->dev_addr;
+
+ addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
+ (dev_addr[1] << 8) | dev_addr[0];
+ addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16);
+
+ xgene_enet_wr_mac(pdata, HSTMACADR_LSW_ADDR, addr0);
+ xgene_enet_wr_mac(pdata, HSTMACADR_MSW_ADDR, addr1);
+}
+
+static u32 xgene_enet_link_status(struct xgene_enet_pdata *pdata)
+{
+ u32 data;
+
+ xgene_enet_rd_csr(pdata, XG_LINK_STATUS_ADDR, &data);
+
+ return data;
+}
+
+static void xgene_xgmac_init(struct xgene_enet_pdata *pdata)
+{
+ u32 data;
+
+ xgene_xgmac_reset(pdata);
+
+ xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
+ data |= HSTPPEN;
+ data &= ~HSTLENCHK;
+ xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data);
+
+ xgene_enet_wr_mac(pdata, HSTMAXFRAME_LENGTH_ADDR, 0x06000600);
+ xgene_xgmac_set_mac_addr(pdata);
+
+ xgene_enet_rd_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, &data);
+ data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
+ xgene_enet_wr_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, data);
+
+ xgene_enet_wr_csr(pdata, XG_CFG_BYPASS_ADDR, RESUME_TX);
+ xgene_enet_wr_csr(pdata, XGENET_RX_DV_GATE_REG_0_ADDR, 0);
+ xgene_enet_rd_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, &data);
+ data |= BIT(12);
+ xgene_enet_wr_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, data);
+ xgene_enet_wr_csr(pdata, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x82);
+}
+
+static void xgene_xgmac_rx_enable(struct xgene_enet_pdata *pdata)
+{
+ u32 data;
+
+ xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
+ xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data | HSTRFEN);
+}
+
+static void xgene_xgmac_tx_enable(struct xgene_enet_pdata *pdata)
+{
+ u32 data;
+
+ xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
+ xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data | HSTTFEN);
+}
+
+static void xgene_xgmac_rx_disable(struct xgene_enet_pdata *pdata)
+{
+ u32 data;
+
+ xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
+ xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTRFEN);
+}
+
+static void xgene_xgmac_tx_disable(struct xgene_enet_pdata *pdata)
+{
+ u32 data;
+
+ xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
+ xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTTFEN);
+}
+
+static void xgene_enet_reset(struct xgene_enet_pdata *pdata)
+{
+ clk_prepare_enable(pdata->clk);
+ clk_disable_unprepare(pdata->clk);
+ clk_prepare_enable(pdata->clk);
+
+ xgene_enet_ecc_init(pdata);
+ xgene_enet_config_ring_if_assoc(pdata);
+}
+
+static void xgene_enet_xgcle_bypass(struct xgene_enet_pdata *pdata,
+ u32 dst_ring_num, u16 bufpool_id)
+{
+ u32 cb, fpsel;
+
+ xgene_enet_rd_csr(pdata, XCLE_BYPASS_REG0_ADDR, &cb);
+ cb |= CFG_CLE_BYPASS_EN0;
+ CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
+ xgene_enet_wr_csr(pdata, XCLE_BYPASS_REG0_ADDR, cb);
+
+ fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;
+ xgene_enet_rd_csr(pdata, XCLE_BYPASS_REG1_ADDR, &cb);
+ CFG_CLE_DSTQID0_SET(&cb, dst_ring_num);
+ CFG_CLE_FPSEL0_SET(&cb, fpsel);
+ xgene_enet_wr_csr(pdata, XCLE_BYPASS_REG1_ADDR, cb);
+}
+
+static void xgene_enet_shutdown(struct xgene_enet_pdata *pdata)
+{
+ clk_disable_unprepare(pdata->clk);
+}
+
+void xgene_enet_link_state(struct work_struct *work)
+{
+ struct xgene_enet_pdata *pdata = container_of(to_delayed_work(work),
+ struct xgene_enet_pdata, link_work);
+ struct net_device *ndev = pdata->ndev;
+ u32 link_status, poll_interval;
+
+ link_status = xgene_enet_link_status(pdata);
+ if (link_status) {
+ if (!netif_carrier_ok(ndev)) {
+ netif_carrier_on(ndev);
+ xgene_xgmac_init(pdata);
+ xgene_xgmac_rx_enable(pdata);
+ xgene_xgmac_tx_enable(pdata);
+ netdev_info(ndev, "Link is Up - 10Gbps\n");
+ }
+ poll_interval = PHY_POLL_LINK_ON;
+ } else {
+ if (netif_carrier_ok(ndev)) {
+ xgene_xgmac_rx_disable(pdata);
+ xgene_xgmac_tx_disable(pdata);
+ netif_carrier_off(ndev);
+ netdev_info(ndev, "Link is Down\n");
+ }
+ poll_interval = PHY_POLL_LINK_OFF;
+ }
+
+ schedule_delayed_work(&pdata->link_work, poll_interval);
+}
+
+struct xgene_mac_ops xgene_xgmac_ops = {
+ .init = xgene_xgmac_init,
+ .reset = xgene_xgmac_reset,
+ .rx_enable = xgene_xgmac_rx_enable,
+ .tx_enable = xgene_xgmac_tx_enable,
+ .rx_disable = xgene_xgmac_rx_disable,
+ .tx_disable = xgene_xgmac_tx_disable,
+ .set_mac_addr = xgene_xgmac_set_mac_addr,
+};
+
+struct xgene_port_ops xgene_xgport_ops = {
+ .reset = xgene_enet_reset,
+ .cle_bypass = xgene_enet_xgcle_bypass,
+ .shutdown = xgene_enet_shutdown,
+};
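
The set_mac_addr hook above packs the six station-address bytes into two 32-bit registers: the first four bytes, low byte first, go into the LSW register, and the last two bytes into the upper half of the MSW register. A minimal user-space sketch of the same packing follows; the sample address is hypothetical and not taken from the patch.

/* Stand-alone illustration of the LSW/MSW packing used above (sketch). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* hypothetical station address in transmission order */
        uint8_t dev_addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        uint32_t addr0, addr1;

        addr0 = ((uint32_t)dev_addr[3] << 24) | ((uint32_t)dev_addr[2] << 16) |
                ((uint32_t)dev_addr[1] << 8) | dev_addr[0];
        addr1 = ((uint32_t)dev_addr[5] << 24) | ((uint32_t)dev_addr[4] << 16);

        /* prints 33221100 and 55440000 for the address above */
        printf("LSW=%08x MSW=%08x\n", addr0, addr1);
        return 0;
}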
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
new file mode 100644
index 000000000000..d2d59e7ed9ab
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
@@ -0,0 +1,57 @@
+/* Applied Micro X-Gene SoC Ethernet Driver
+ *
+ * Copyright (c) 2014, Applied Micro Circuits Corporation
+ * Authors: Iyappan Subramanian <isubramanian@apm.com>
+ * Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XGENE_ENET_XGMAC_H__
+#define __XGENE_ENET_XGMAC_H__
+
+#define BLOCK_AXG_MAC_OFFSET 0x0800
+#define BLOCK_AXG_MAC_CSR_OFFSET 0x2000
+
+#define AXGMAC_CONFIG_0 0x0000
+#define AXGMAC_CONFIG_1 0x0004
+#define HSTMACRST BIT(31)
+#define HSTTCTLEN BIT(31)
+#define HSTTFEN BIT(30)
+#define HSTRCTLEN BIT(29)
+#define HSTRFEN BIT(28)
+#define HSTPPEN BIT(7)
+#define HSTDRPLT64 BIT(5)
+#define HSTLENCHK BIT(3)
+#define HSTMACADR_LSW_ADDR 0x0010
+#define HSTMACADR_MSW_ADDR 0x0014
+#define HSTMAXFRAME_LENGTH_ADDR 0x0020
+
+#define XG_RSIF_CONFIG_REG_ADDR 0x00a0
+#define XCLE_BYPASS_REG0_ADDR 0x0160
+#define XCLE_BYPASS_REG1_ADDR 0x0164
+#define XG_CFG_BYPASS_ADDR 0x0204
+#define XG_LINK_STATUS_ADDR 0x0228
+#define XG_ENET_SPARE_CFG_REG_ADDR 0x040c
+#define XG_ENET_SPARE_CFG_REG_1_ADDR 0x0410
+#define XGENET_RX_DV_GATE_REG_0_ADDR 0x0804
+
+#define PHY_POLL_LINK_ON (10 * HZ)
+#define PHY_POLL_LINK_OFF (PHY_POLL_LINK_ON / 5)
+
+void xgene_enet_link_state(struct work_struct *work);
+extern struct xgene_mac_ops xgene_xgmac_ops;
+extern struct xgene_port_ops xgene_xgport_ops;
+
+#endif /* __XGENE_ENET_XGMAC_H__ */
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 075688188644..9ae36979bdee 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -436,7 +436,8 @@ static int bcm_sysport_set_wol(struct net_device *dev,
/* Flag the device and relevant IRQ as wakeup capable */
if (wol->wolopts) {
device_set_wakeup_enable(kdev, 1);
- enable_irq_wake(priv->wol_irq);
+ if (priv->wol_irq_disabled)
+ enable_irq_wake(priv->wol_irq);
priv->wol_irq_disabled = 0;
} else {
device_set_wakeup_enable(kdev, 0);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index fff2634b6f34..fdc9ec09e453 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1285,11 +1285,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
cb = &priv->rx_cbs[priv->rx_read_ptr];
skb = cb->skb;
- rxpktprocessed++;
-
- priv->rx_read_ptr++;
- priv->rx_read_ptr &= (priv->num_rx_bds - 1);
-
/* We do not have a backing SKB, so we do not have a
* corresponding DMA mapping for this incoming packet since
* bcmgenet_rx_refill always either has both skb and mapping or
@@ -1404,6 +1399,10 @@ refill:
err = bcmgenet_rx_refill(priv, cb);
if (err)
netif_err(priv, rx_err, dev, "Rx refill failed\n");
+
+ rxpktprocessed++;
+ priv->rx_read_ptr++;
+ priv->rx_read_ptr &= (priv->num_rx_bds - 1);
}
return rxpktprocessed;
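
The hunk above moves the consumer-index advance to after the refill; the index is wrapped with a bit-mask, which only works because the ring size is a power of two. A short sketch of that wrap, assuming a hypothetical 256-entry ring:

/* Ring-index wrap with a power-of-two mask (illustrative only). */
#include <stdio.h>

#define NUM_RX_BDS 256          /* must be a power of two for the mask trick */

int main(void)
{
        unsigned int rx_read_ptr = NUM_RX_BDS - 2;      /* near the end */
        int i;

        for (i = 0; i < 4; i++) {
                rx_read_ptr++;
                rx_read_ptr &= (NUM_RX_BDS - 1);        /* 255, 0, 1, 2 */
                printf("%u\n", rx_read_ptr);
        }
        return 0;
}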
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index b82b7e4e06b2..149a0d70c108 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -86,7 +86,9 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
/* Flag the device and relevant IRQ as wakeup capable */
if (wol->wolopts) {
device_set_wakeup_enable(kdev, 1);
- enable_irq_wake(priv->wol_irq);
+ /* Avoid unbalanced enable_irq_wake calls */
+ if (priv->wol_irq_disabled)
+ enable_irq_wake(priv->wol_irq);
priv->wol_irq_disabled = false;
} else {
device_set_wakeup_enable(kdev, 0);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 9b2c669b6522..410ed5805a9a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -968,7 +968,7 @@ void t4_intr_enable(struct adapter *adapter);
void t4_intr_disable(struct adapter *adapter);
int t4_slow_intr_handler(struct adapter *adapter);
-int t4_wait_dev_ready(struct adapter *adap);
+int t4_wait_dev_ready(void __iomem *regs);
int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
struct link_config *lc);
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 321f3d9385c9..5b38e955af6e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -6137,7 +6137,7 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
pci_save_state(pdev);
pci_cleanup_aer_uncorrect_error_status(pdev);
- if (t4_wait_dev_ready(adap) < 0)
+ if (t4_wait_dev_ready(adap->regs) < 0)
return PCI_ERS_RESULT_DISCONNECT;
if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
return PCI_ERS_RESULT_DISCONNECT;
@@ -6530,6 +6530,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_disable_device;
}
+ err = t4_wait_dev_ready(regs);
+ if (err < 0)
+ goto out_unmap_bar0;
+
/* We control everything through one PF */
func = SOURCEPF_GET(readl(regs + PL_WHOAMI));
if (func != ent->driver_data) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index fab4c84a1da4..5e1b314e11af 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1123,7 +1123,10 @@ out_free: dev_kfree_skb_any(skb);
lso->c.ipid_ofst = htons(0);
lso->c.mss = htons(ssi->gso_size);
lso->c.seqno_offset = htonl(0);
- lso->c.len = htonl(skb->len);
+ if (is_t4(adap->params.chip))
+ lso->c.len = htonl(skb->len);
+ else
+ lso->c.len = htonl(LSO_T5_XFER_SIZE(skb->len));
cpl = (void *)(lso + 1);
cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
TXPKT_IPHDR_LEN(l3hdr_len) |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 22d7581341a9..1fff1495fe31 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3845,12 +3845,19 @@ static void init_link_config(struct link_config *lc, unsigned int caps)
}
}
-int t4_wait_dev_ready(struct adapter *adap)
+#define CIM_PF_NOACCESS 0xeeeeeeee
+
+int t4_wait_dev_ready(void __iomem *regs)
{
- if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
+ u32 whoami;
+
+ whoami = readl(regs + PL_WHOAMI);
+ if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS)
return 0;
+
msleep(500);
- return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
+ whoami = readl(regs + PL_WHOAMI);
+ return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO);
}
struct flash_desc {
@@ -3919,10 +3926,6 @@ int t4_prep_adapter(struct adapter *adapter)
uint16_t device_id;
u32 pl_rev;
- ret = t4_wait_dev_ready(adapter);
- if (ret < 0)
- return ret;
-
get_pci_mode(adapter, &adapter->params.pci);
pl_rev = G_REV(t4_read_reg(adapter, PL_REV));
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 52e08103f221..5f4db2398c71 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -527,6 +527,7 @@ struct cpl_tx_pkt_lso_core {
#define LSO_LAST_SLICE (1 << 22)
#define LSO_FIRST_SLICE (1 << 23)
#define LSO_OPCODE(x) ((x) << 24)
+#define LSO_T5_XFER_SIZE(x) ((x) << 0)
__be16 ipid_ofst;
__be16 mss;
__be32 seqno_offset;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index eee272883027..a1024db5dc13 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -72,9 +72,8 @@
#define PIDX_MASK 0x00003fffU
#define PIDX_SHIFT 0
#define PIDX(x) ((x) << PIDX_SHIFT)
-#define S_PIDX_T5 0
-#define M_PIDX_T5 0x1fffU
-#define PIDX_T5(x) (((x) >> S_PIDX_T5) & M_PIDX_T5)
+#define PIDX_SHIFT_T5 0
+#define PIDX_T5(x) ((x) << PIDX_SHIFT_T5)
#define SGE_TIMERREGS 6
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 8498a641b2e3..bfa398d91826 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -163,15 +163,19 @@ void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
netif_carrier_on(dev);
switch (pi->link_cfg.speed) {
- case SPEED_10000:
+ case 40000:
+ s = "40Gbps";
+ break;
+
+ case 10000:
s = "10Gbps";
break;
- case SPEED_1000:
+ case 1000:
s = "1000Mbps";
break;
- case SPEED_100:
+ case 100:
s = "100Mbps";
break;
@@ -2351,7 +2355,7 @@ static void cfg_queues(struct adapter *adapter)
struct port_info *pi = adap2pinfo(adapter, pidx);
pi->first_qset = qidx;
- pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
+ pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
qidx += pi->nqsets;
}
s->ethqsets = qidx;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index a5fb9493dee8..85036e6b42c4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1208,7 +1208,10 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
lso->ipid_ofst = cpu_to_be16(0);
lso->mss = cpu_to_be16(ssi->gso_size);
lso->seqno_offset = cpu_to_be32(0);
- lso->len = cpu_to_be32(skb->len);
+ if (is_t4(adapter->params.chip))
+ lso->len = cpu_to_be32(skb->len);
+ else
+ lso->len = cpu_to_be32(LSO_T5_XFER_SIZE(skb->len));
/*
* Set up TX Packet CPL pointer, control word and perform
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index f412d0fa0850..95df61dcb4ce 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -228,6 +228,12 @@ static inline bool is_10g_port(const struct link_config *lc)
return (lc->supported & SUPPORTED_10000baseT_Full) != 0;
}
+static inline bool is_x_10g_port(const struct link_config *lc)
+{
+ return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
+ (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
+}
+
static inline unsigned int core_ticks_per_usec(const struct adapter *adapter)
{
return adapter->params.vpd.cclk / 1000;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 25dfeb8f28ed..e984fdc48ba2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -327,6 +327,8 @@ int t4vf_port_init(struct adapter *adapter, int pidx)
v |= SUPPORTED_1000baseT_Full;
if (word & FW_PORT_CAP_SPEED_10G)
v |= SUPPORTED_10000baseT_Full;
+ if (word & FW_PORT_CAP_SPEED_40G)
+ v |= SUPPORTED_40000baseSR4_Full;
if (word & FW_PORT_CAP_ANEG)
v |= SUPPORTED_Autoneg;
init_link_config(&pi->link_cfg, v);
@@ -1352,11 +1354,13 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
if (word & FW_PORT_CMD_TXPAUSE)
fc |= PAUSE_TX;
if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
- speed = SPEED_100;
+ speed = 100;
else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
- speed = SPEED_1000;
+ speed = 1000;
else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
- speed = SPEED_10000;
+ speed = 10000;
+ else if (word & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
+ speed = 40000;
/*
* Scan all of our "ports" (Virtual Interfaces) looking for
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
index 2c578db401e8..08f5b911d96b 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
@@ -125,7 +125,7 @@ out:
}
#define FCC_NAPI_RX_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB)
-#define FCC_NAPI_TX_EVENT_MSK (FCC_ENET_TXF | FCC_ENET_TXB)
+#define FCC_NAPI_TX_EVENT_MSK (FCC_ENET_TXB)
#define FCC_RX_EVENT (FCC_ENET_RXF)
#define FCC_TX_EVENT (FCC_ENET_TXB)
#define FCC_ERR_EVENT_MSK (FCC_ENET_TXE)
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
index 41aa0b475ca0..f30411f0701f 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
@@ -116,7 +116,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
}
#define SCC_NAPI_RX_EVENT_MSK (SCCE_ENET_RXF | SCCE_ENET_RXB)
-#define SCC_NAPI_TX_EVENT_MSK (SCCE_ENET_TXF | SCCE_ENET_TXB)
+#define SCC_NAPI_TX_EVENT_MSK (SCCE_ENET_TXB)
#define SCC_RX_EVENT (SCCE_ENET_RXF)
#define SCC_TX_EVENT (SCCE_ENET_TXB)
#define SCC_ERR_EVENT_MSK (SCCE_ENET_TXE | SCCE_ENET_BSY)
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 583e71ab7f51..964c6bf37710 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -28,7 +28,9 @@
#include <linux/of_device.h>
#include <asm/io.h>
+#if IS_ENABLED(CONFIG_UCC_GETH)
#include <asm/ucc.h> /* for ucc_set_qe_mux_mii_mng() */
+#endif
#include "gianfar.h"
@@ -102,19 +104,22 @@ static int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
{
struct fsl_pq_mdio_priv *priv = bus->priv;
struct fsl_pq_mii __iomem *regs = priv->regs;
- u32 status;
+ unsigned int timeout;
/* Set the PHY address and the register address we want to write */
- out_be32(&regs->miimadd, (mii_id << 8) | regnum);
+ iowrite32be((mii_id << 8) | regnum, &regs->miimadd);
/* Write out the value we want */
- out_be32(&regs->miimcon, value);
+ iowrite32be(value, &regs->miimcon);
/* Wait for the transaction to finish */
- status = spin_event_timeout(!(in_be32(&regs->miimind) & MIIMIND_BUSY),
- MII_TIMEOUT, 0);
+ timeout = MII_TIMEOUT;
+ while ((ioread32be(&regs->miimind) & MIIMIND_BUSY) && timeout) {
+ cpu_relax();
+ timeout--;
+ }
- return status ? 0 : -ETIMEDOUT;
+ return timeout ? 0 : -ETIMEDOUT;
}
/*
@@ -131,25 +136,29 @@ static int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
struct fsl_pq_mdio_priv *priv = bus->priv;
struct fsl_pq_mii __iomem *regs = priv->regs;
- u32 status;
+ unsigned int timeout;
u16 value;
/* Set the PHY address and the register address we want to read */
- out_be32(&regs->miimadd, (mii_id << 8) | regnum);
+ iowrite32be((mii_id << 8) | regnum, &regs->miimadd);
/* Clear miimcom, and then initiate a read */
- out_be32(&regs->miimcom, 0);
- out_be32(&regs->miimcom, MII_READ_COMMAND);
+ iowrite32be(0, &regs->miimcom);
+ iowrite32be(MII_READ_COMMAND, &regs->miimcom);
/* Wait for the transaction to finish, normally less than 100us */
- status = spin_event_timeout(!(in_be32(&regs->miimind) &
- (MIIMIND_NOTVALID | MIIMIND_BUSY)),
- MII_TIMEOUT, 0);
- if (!status)
+ timeout = MII_TIMEOUT;
+ while ((ioread32be(&regs->miimind) &
+ (MIIMIND_NOTVALID | MIIMIND_BUSY)) && timeout) {
+ cpu_relax();
+ timeout--;
+ }
+
+ if (!timeout)
return -ETIMEDOUT;
/* Grab the value of the register from miimstat */
- value = in_be32(&regs->miimstat);
+ value = ioread32be(&regs->miimstat);
dev_dbg(&bus->dev, "read %04x from address %x/%x\n", value, mii_id, regnum);
return value;
@@ -160,23 +169,26 @@ static int fsl_pq_mdio_reset(struct mii_bus *bus)
{
struct fsl_pq_mdio_priv *priv = bus->priv;
struct fsl_pq_mii __iomem *regs = priv->regs;
- u32 status;
+ unsigned int timeout;
mutex_lock(&bus->mdio_lock);
/* Reset the management interface */
- out_be32(&regs->miimcfg, MIIMCFG_RESET);
+ iowrite32be(MIIMCFG_RESET, &regs->miimcfg);
/* Setup the MII Mgmt clock speed */
- out_be32(&regs->miimcfg, MIIMCFG_INIT_VALUE);
+ iowrite32be(MIIMCFG_INIT_VALUE, &regs->miimcfg);
/* Wait until the bus is free */
- status = spin_event_timeout(!(in_be32(&regs->miimind) & MIIMIND_BUSY),
- MII_TIMEOUT, 0);
+ timeout = MII_TIMEOUT;
+ while ((ioread32be(&regs->miimind) & MIIMIND_BUSY) && timeout) {
+ cpu_relax();
+ timeout--;
+ }
mutex_unlock(&bus->mdio_lock);
- if (!status) {
+ if (!timeout) {
dev_err(&bus->dev, "timeout waiting for MII bus\n");
return -EBUSY;
}
@@ -433,7 +445,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
tbipa = data->get_tbipa(priv->map);
- out_be32(tbipa, be32_to_cpup(prop));
+ iowrite32be(be32_to_cpup(prop), tbipa);
}
}
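
The conversion above replaces the PowerPC-only spin_event_timeout() helper with an open-coded, iteration-bounded busy wait so the MDIO code also builds on other architectures. A stand-alone sketch of that pattern; poll_busy() is a hypothetical stand-in for the MIIMIND_BUSY register read, and the timeout counts loop iterations, not time.

/* Bounded busy-wait, mirroring the MII_TIMEOUT loops above (sketch). */
#include <stdio.h>

#define MII_TIMEOUT 1000        /* iterations, not time units */

/* hypothetical: returns non-zero while the hardware is still busy */
static int poll_busy(void)
{
        static int countdown = 5;       /* pretend the HW finishes after 5 polls */
        return countdown-- > 0;
}

static int wait_not_busy(void)
{
        unsigned int timeout = MII_TIMEOUT;

        while (poll_busy() && timeout)
                timeout--;      /* the driver also calls cpu_relax() here */

        return timeout ? 0 : -1;        /* -ETIMEDOUT in the driver */
}

int main(void)
{
        printf("wait_not_busy() = %d\n", wait_not_busy());
        return 0;
}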
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index fb29d049f4e1..379b1a578d3d 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -88,8 +88,10 @@
#include <linux/net_tstamp.h>
#include <asm/io.h>
+#ifdef CONFIG_PPC
#include <asm/reg.h>
#include <asm/mpc85xx.h>
+#endif
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
@@ -100,6 +102,8 @@
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include "gianfar.h"
@@ -161,7 +165,7 @@ static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
lstatus |= BD_LFLAG(RXBD_WRAP);
- eieio();
+ gfar_wmb();
bdp->lstatus = lstatus;
}
@@ -1061,6 +1065,7 @@ static void gfar_init_filer_table(struct gfar_private *priv)
}
}
+#ifdef CONFIG_PPC
static void __gfar_detect_errata_83xx(struct gfar_private *priv)
{
unsigned int pvr = mfspr(SPRN_PVR);
@@ -1093,6 +1098,7 @@ static void __gfar_detect_errata_85xx(struct gfar_private *priv)
((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
}
+#endif
static void gfar_detect_errata(struct gfar_private *priv)
{
@@ -1101,10 +1107,12 @@ static void gfar_detect_errata(struct gfar_private *priv)
/* no plans to fix */
priv->errata |= GFAR_ERRATA_A002;
+#ifdef CONFIG_PPC
if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
__gfar_detect_errata_85xx(priv);
else /* non-mpc85xx parts, i.e. e300 core based */
__gfar_detect_errata_83xx(priv);
+#endif
if (priv->errata)
dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
@@ -1754,26 +1762,32 @@ static void gfar_halt_nodisable(struct gfar_private *priv)
{
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval;
+ unsigned int timeout;
+ int stopped;
gfar_ints_disable(priv);
+ if (gfar_is_dma_stopped(priv))
+ return;
+
/* Stop the DMA, and wait for it to stop */
tempval = gfar_read(&regs->dmactrl);
- if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) !=
- (DMACTRL_GRS | DMACTRL_GTS)) {
- int ret;
-
- tempval |= (DMACTRL_GRS | DMACTRL_GTS);
- gfar_write(&regs->dmactrl, tempval);
+ tempval |= (DMACTRL_GRS | DMACTRL_GTS);
+ gfar_write(&regs->dmactrl, tempval);
- do {
- ret = spin_event_timeout(((gfar_read(&regs->ievent) &
- (IEVENT_GRSC | IEVENT_GTSC)) ==
- (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0);
- if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC))
- ret = __gfar_is_rx_idle(priv);
- } while (!ret);
+retry:
+ timeout = 1000;
+ while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
+ cpu_relax();
+ timeout--;
}
+
+ if (!timeout)
+ stopped = gfar_is_dma_stopped(priv);
+
+ if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
+ !__gfar_is_rx_idle(priv))
+ goto retry;
}
/* Halt the receive and transmit queues */
@@ -2357,18 +2371,11 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
spin_lock_irqsave(&tx_queue->txlock, flags);
- /* The powerpc-specific eieio() is used, as wmb() has too strong
- * semantics (it requires synchronization between cacheable and
- * uncacheable mappings, which eieio doesn't provide and which we
- * don't need), thus requiring a more expensive sync instruction. At
- * some point, the set of architecture-independent barrier functions
- * should be expanded to include weaker barriers.
- */
- eieio();
+ gfar_wmb();
txbdp_start->lstatus = lstatus;
- eieio(); /* force lstatus write before tx_skbuff */
+ gfar_wmb(); /* force lstatus write before tx_skbuff */
tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
@@ -3240,22 +3247,21 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num,
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = priv->gfargrp[0].regs;
- int idx;
- char tmpbuf[ETH_ALEN];
u32 tempval;
u32 __iomem *macptr = &regs->macstnaddr1;
macptr += num*2;
- /* Now copy it into the mac registers backwards, cuz
- * little endian is silly
+ /* For a station address of 0x12345678ABCD in transmission
+ * order (BE), MACnADDR1 is set to 0xCDAB7856 and
+ * MACnADDR2 is set to 0x34120000.
*/
- for (idx = 0; idx < ETH_ALEN; idx++)
- tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];
+ tempval = (addr[5] << 24) | (addr[4] << 16) |
+ (addr[3] << 8) | addr[2];
- gfar_write(macptr, *((u32 *) (tmpbuf)));
+ gfar_write(macptr, tempval);
- tempval = *((u32 *) (tmpbuf + 4));
+ tempval = (addr[1] << 24) | (addr[0] << 16);
gfar_write(macptr+1, tempval);
}
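
The new gfar_set_mac_for_addr() builds the two MAC registers by shifting bytes directly instead of byte-swapping through a temporary buffer. The example given in the in-code comment can be checked with a few lines of arithmetic:

/* Verifies the MACnADDR example from the comment above (sketch). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* station address 0x12345678ABCD in transmission order */
        uint8_t addr[6] = { 0x12, 0x34, 0x56, 0x78, 0xAB, 0xCD };
        uint32_t macstnaddr1, macstnaddr2;

        macstnaddr1 = ((uint32_t)addr[5] << 24) | ((uint32_t)addr[4] << 16) |
                      ((uint32_t)addr[3] << 8) | addr[2];
        macstnaddr2 = ((uint32_t)addr[1] << 24) | ((uint32_t)addr[0] << 16);

        /* prints cdab7856 and 34120000, matching the comment */
        printf("MACnADDR1=%08x MACnADDR2=%08x\n", macstnaddr1, macstnaddr2);
        return 0;
}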
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 84632c569f2c..2805cfbf1765 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -1226,6 +1226,37 @@ static inline void gfar_write_isrg(struct gfar_private *priv)
}
}
+static inline int gfar_is_dma_stopped(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+
+ return ((gfar_read(&regs->ievent) & (IEVENT_GRSC | IEVENT_GTSC)) ==
+ (IEVENT_GRSC | IEVENT_GTSC));
+}
+
+static inline int gfar_is_rx_dma_stopped(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+
+ return gfar_read(&regs->ievent) & IEVENT_GRSC;
+}
+
+static inline void gfar_wmb(void)
+{
+#if defined(CONFIG_PPC)
+ /* The powerpc-specific eieio() is used, as wmb() has too strong
+ * semantics (it requires synchronization between cacheable and
+ * uncacheable mappings, which eieio() doesn't provide and which we
+ * don't need), thus requiring a more expensive sync instruction. At
+ * some point, the set of architecture-independent barrier functions
+ * should be expanded to include weaker barriers.
+ */
+ eieio();
+#else
+ wmb(); /* order write accesses for BD (or FCB) fields */
+#endif
+}
+
irqreturn_t gfar_receive(int irq, void *dev_id);
int startup_gfar(struct net_device *dev);
void stop_gfar(struct net_device *dev);
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 6a6d5ee51e6a..6919adb66f53 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -304,6 +304,7 @@ config FM10K
tristate "Intel(R) FM10000 Ethernet Switch Host Interface Support"
default n
depends on PCI_MSI
+ select PTP_1588_CLOCK
---help---
This driver supports Intel(R) FM10000 Ethernet Switch Host
Interface. For more information on how to identify your adapter,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 6c800a330d66..9d7118a0d67a 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -219,11 +219,10 @@ static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
/* flip page offset to other buffer */
rx_buffer->page_offset ^= FM10K_RX_BUFSZ;
- /* since we are the only owner of the page and we need to
- * increment it, just set the value to 2 in order to avoid
- * an unnecessary locked operation
+ /* Even if we own the page, we are not allowed to use atomic_set();
+ * this would break get_page_unless_zero() users.
*/
- atomic_set(&page->_count, 2);
+ atomic_inc(&page->_count);
#else
/* move offset up to the next cache line */
rx_buffer->page_offset += truesize;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index ae59c0b108c5..a21b14495ebd 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -6545,11 +6545,10 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
/* flip page offset to other buffer */
rx_buffer->page_offset ^= IGB_RX_BUFSZ;
- /* since we are the only owner of the page and we need to
- * increment it, just set the value to 2 in order to avoid
- * an unnecessary locked operation
+ /* Even if we own the page, we are not allowed to use atomic_set();
+ * this would break get_page_unless_zero() users.
*/
- atomic_set(&page->_count, 2);
+ atomic_inc(&page->_count);
#else
/* move offset up to the next cache line */
rx_buffer->page_offset += truesize;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index d677b5a23b58..fec5212d4337 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1865,12 +1865,10 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
/* flip page offset to other buffer */
rx_buffer->page_offset ^= truesize;
- /*
- * since we are the only owner of the page and we need to
- * increment it, just set the value to 2 in order to avoid
- * an unecessary locked operation
+ /* Even if we own the page, we are not allowed to use atomic_set();
+ * this would break get_page_unless_zero() users.
*/
- atomic_set(&page->_count, 2);
+ atomic_inc(&page->_count);
#else
/* move offset up to the next cache line */
rx_buffer->page_offset += truesize;
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index b3b72ad92d4a..d323a695dfbc 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -64,7 +64,8 @@ config MVPP2
config PXA168_ETH
tristate "Marvell pxa168 ethernet support"
- depends on (CPU_PXA168 || ARCH_BERLIN || COMPILE_TEST) && HAS_IOMEM
+ depends on HAS_IOMEM && HAS_DMA
+ depends on CPU_PXA168 || ARCH_BERLIN || COMPILE_TEST
select PHYLIB
---help---
This driver supports the pxa168 Ethernet ports.
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index a33048ee9621..01660c595f5c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -76,10 +76,10 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
page_alloc->dma = dma;
page_alloc->page_offset = frag_info->frag_align;
/* Not doing get_page() for each frag is a big win
- * on asymetric workloads.
+ * on asymmetric workloads. Note we cannot use atomic_set().
*/
- atomic_set(&page->_count,
- page_alloc->page_size / frag_info->frag_stride);
+ atomic_add(page_alloc->page_size / frag_info->frag_stride - 1,
+ &page->_count);
return 0;
}
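
The fm10k/igb/ixgbe and mlx4 hunks above make the same correctness fix: a page shared by several RX fragments must have its reference count raised with an atomic add or increment rather than overwritten with atomic_set(), because another context may take a reference (get_page_unless_zero()) concurrently. A minimal sketch of the difference, using plain integers rather than struct page and ignoring atomicity itself:

/* Why adding to a shared refcount beats overwriting it (sketch). */
#include <stdio.h>

int main(void)
{
        int refcount = 1;               /* our initial reference */
        int concurrent_get = 1;         /* a reference taken by another context */
        int extra_users = 3;            /* references we want to hand out */

        /* another context grabs a reference before our update lands */
        refcount += concurrent_get;

        /* atomic_set()-style overwrite loses that reference */
        int wrong = 1 + extra_users;    /* 4: concurrent_get is gone */

        /* atomic_add()/atomic_inc()-style update preserves it */
        refcount += extra_users;        /* 5 */

        printf("overwrite=%d add=%d\n", wrong, refcount);
        return 0;
}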
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 5efe60ea6526..0adcf73cf722 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -134,7 +134,7 @@ static void dwmac1000_set_filter(struct mac_device_info *hw,
void __iomem *ioaddr = (void __iomem *)dev->base_addr;
unsigned int value = 0;
unsigned int perfect_addr_number = hw->unicast_filter_entries;
- u32 mc_filter[2];
+ u32 mc_filter[8];
int mcbitslog2 = hw->mcast_bits_log2;
pr_debug("%s: # mcasts %d, # unicast %d\n", __func__,
@@ -182,7 +182,7 @@ static void dwmac1000_set_filter(struct mac_device_info *hw,
struct netdev_hw_addr *ha;
netdev_for_each_uc_addr(ha, dev) {
- stmmac_get_mac_addr(ioaddr, ha->addr,
+ stmmac_set_mac_addr(ioaddr, ha->addr,
GMAC_ADDR_HIGH(reg),
GMAC_ADDR_LOW(reg));
reg++;
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 15396720f489..3652afd3ec78 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -954,7 +954,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_lock_irqsave(&port->vio.lock, flags);
dr = &port->vio.drings[VIO_DRIVER_TX_RING];
- if (unlikely(vnet_tx_dring_avail(dr) < 2)) {
+ if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
if (!netif_queue_stopped(dev)) {
netif_stop_queue(dev);
@@ -1049,7 +1049,7 @@ ldc_start_done:
dev->stats.tx_bytes += port->tx_bufs[txi].skb->len;
dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
- if (unlikely(vnet_tx_dring_avail(dr) < 2)) {
+ if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
netif_stop_queue(dev);
if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
netif_wake_queue(dev);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 38b4fae61f04..29b3bb410781 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -260,7 +260,7 @@ static void macvlan_broadcast(struct sk_buff *skb,
mode == MACVLAN_MODE_BRIDGE) ?:
netif_rx_ni(nskb);
macvlan_count_rx(vlan, skb->len + ETH_HLEN,
- err == NET_RX_SUCCESS, 1);
+ err == NET_RX_SUCCESS, true);
}
}
}
@@ -379,7 +379,7 @@ static void macvlan_forward_source_one(struct sk_buff *skb,
nskb->pkt_type = PACKET_HOST;
ret = netif_rx(nskb);
- macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, 0);
+ macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, false);
}
static void macvlan_forward_source(struct sk_buff *skb,
@@ -407,7 +407,8 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
const struct macvlan_dev *src;
struct net_device *dev;
unsigned int len = 0;
- int ret = NET_RX_DROP;
+ int ret;
+ rx_handler_result_t handle_res;
port = macvlan_port_get_rcu(skb->dev);
if (is_multicast_ether_addr(eth->h_dest)) {
@@ -423,6 +424,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
vlan = src;
ret = macvlan_broadcast_one(skb, vlan, eth, 0) ?:
netif_rx(skb);
+ handle_res = RX_HANDLER_CONSUMED;
goto out;
}
@@ -448,17 +450,20 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
}
len = skb->len + ETH_HLEN;
skb = skb_share_check(skb, GFP_ATOMIC);
- if (!skb)
+ if (!skb) {
+ ret = NET_RX_DROP;
+ handle_res = RX_HANDLER_CONSUMED;
goto out;
+ }
skb->dev = dev;
skb->pkt_type = PACKET_HOST;
- ret = netif_rx(skb);
-
+ ret = NET_RX_SUCCESS;
+ handle_res = RX_HANDLER_ANOTHER;
out:
- macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, 0);
- return RX_HANDLER_CONSUMED;
+ macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, false);
+ return handle_res;
}
static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 011dbda2b2f1..492435fce1d4 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -26,6 +26,7 @@
#include <linux/phy.h>
#include <linux/micrel_phy.h>
#include <linux/of.h>
+#include <linux/clk.h>
/* Operation Mode Strap Override */
#define MII_KSZPHY_OMSO 0x16
@@ -72,9 +73,12 @@ static int ksz_config_flags(struct phy_device *phydev)
{
int regval;
- if (phydev->dev_flags & MICREL_PHY_50MHZ_CLK) {
+ if (phydev->dev_flags & (MICREL_PHY_50MHZ_CLK | MICREL_PHY_25MHZ_CLK)) {
regval = phy_read(phydev, MII_KSZPHY_CTRL);
- regval |= KSZ8051_RMII_50MHZ_CLK;
+ if (phydev->dev_flags & MICREL_PHY_50MHZ_CLK)
+ regval |= KSZ8051_RMII_50MHZ_CLK;
+ else
+ regval &= ~KSZ8051_RMII_50MHZ_CLK;
return phy_write(phydev, MII_KSZPHY_CTRL, regval);
}
return 0;
@@ -440,6 +444,27 @@ ksz9021_wr_mmd_phyreg(struct phy_device *phydev, int ptrad, int devnum,
{
}
+static int ksz8021_probe(struct phy_device *phydev)
+{
+ struct clk *clk;
+
+ clk = devm_clk_get(&phydev->dev, "rmii-ref");
+ if (!IS_ERR(clk)) {
+ unsigned long rate = clk_get_rate(clk);
+
+ if (rate > 24500000 && rate < 25500000) {
+ phydev->dev_flags |= MICREL_PHY_25MHZ_CLK;
+ } else if (rate > 49500000 && rate < 50500000) {
+ phydev->dev_flags |= MICREL_PHY_50MHZ_CLK;
+ } else {
+ dev_err(&phydev->dev, "Clock rate out of range: %ld\n", rate);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static struct phy_driver ksphy_driver[] = {
{
.phy_id = PHY_ID_KS8737,
@@ -462,6 +487,7 @@ static struct phy_driver ksphy_driver[] = {
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause |
SUPPORTED_Asym_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .probe = ksz8021_probe,
.config_init = ksz8021_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
@@ -477,6 +503,7 @@ static struct phy_driver ksphy_driver[] = {
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause |
SUPPORTED_Asym_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .probe = ksz8021_probe,
.config_init = ksz8021_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 80e6f3430f65..68c3a3f4e0ab 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -594,7 +594,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (file == ppp->owner)
ppp_shutdown_interface(ppp);
}
- if (atomic_long_read(&file->f_count) <= 2) {
+ if (atomic_long_read(&file->f_count) < 2) {
ppp_release(NULL, file);
err = 0;
} else
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index acaaf6784179..186ce541c657 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -2152,9 +2152,7 @@ static int tun_chr_fasync(int fd, struct file *file, int on)
goto out;
if (on) {
- ret = __f_setown(file, task_pid(current), PIDTYPE_PID, 0);
- if (ret)
- goto out;
+ __f_setown(file, task_pid(current), PIDTYPE_PID, 0);
tfile->flags |= TUN_FASYNC;
} else
tfile->flags &= ~TUN_FASYNC;
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 5cfd414b9a3e..864159eb744e 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -26,7 +26,7 @@
#include <linux/mdio.h>
/* Version Information */
-#define DRIVER_VERSION "v1.06.1 (2014/10/01)"
+#define DRIVER_VERSION "v1.07.0 (2014/10/09)"
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
#define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
#define MODULENAME "r8152"
@@ -566,6 +566,7 @@ struct r8152 {
spinlock_t rx_lock, tx_lock;
struct delayed_work schedule;
struct mii_if_info mii;
+ struct mutex control; /* use for hw setting */
struct rtl_ops {
void (*init)(struct r8152 *);
@@ -942,15 +943,8 @@ static int read_mii_word(struct net_device *netdev, int phy_id, int reg)
if (phy_id != R8152_PHY_ID)
return -EINVAL;
- ret = usb_autopm_get_interface(tp->intf);
- if (ret < 0)
- goto out;
-
ret = r8152_mdio_read(tp, reg);
- usb_autopm_put_interface(tp->intf);
-
-out:
return ret;
}
@@ -965,12 +959,7 @@ void write_mii_word(struct net_device *netdev, int phy_id, int reg, int val)
if (phy_id != R8152_PHY_ID)
return;
- if (usb_autopm_get_interface(tp->intf) < 0)
- return;
-
r8152_mdio_write(tp, reg, val);
-
- usb_autopm_put_interface(tp->intf);
}
static int
@@ -989,12 +978,16 @@ static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
if (ret < 0)
goto out1;
+ mutex_lock(&tp->control);
+
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
pla_ocp_write(tp, PLA_IDR, BYTE_EN_SIX_BYTES, 8, addr->sa_data);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
+ mutex_unlock(&tp->control);
+
usb_autopm_put_interface(tp->intf);
out1:
return ret;
@@ -2145,6 +2138,13 @@ static int rtl8152_set_features(struct net_device *dev,
{
netdev_features_t changed = features ^ dev->features;
struct r8152 *tp = netdev_priv(dev);
+ int ret;
+
+ ret = usb_autopm_get_interface(tp->intf);
+ if (ret < 0)
+ goto out;
+
+ mutex_lock(&tp->control);
if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
if (features & NETIF_F_HW_VLAN_CTAG_RX)
@@ -2153,7 +2153,12 @@ static int rtl8152_set_features(struct net_device *dev,
rtl_rx_vlan_en(tp, false);
}
- return 0;
+ mutex_unlock(&tp->control);
+
+ usb_autopm_put_interface(tp->intf);
+
+out:
+ return ret;
}
#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
@@ -2851,6 +2856,11 @@ static void rtl_work_func_t(struct work_struct *work)
if (test_bit(RTL8152_UNPLUG, &tp->flags))
goto out1;
+ if (!mutex_trylock(&tp->control)) {
+ schedule_delayed_work(&tp->schedule, 0);
+ goto out1;
+ }
+
if (test_bit(RTL8152_LINK_CHG, &tp->flags))
set_carrier(tp);
@@ -2866,6 +2876,8 @@ static void rtl_work_func_t(struct work_struct *work)
if (test_bit(PHY_RESET, &tp->flags))
rtl_phy_reset(tp);
+ mutex_unlock(&tp->control);
+
out1:
usb_autopm_put_interface(tp->intf);
}
@@ -2885,6 +2897,8 @@ static int rtl8152_open(struct net_device *netdev)
goto out;
}
+ mutex_lock(&tp->control);
+
/* The WORK_ENABLE may be set when autoresume occurs */
if (test_bit(WORK_ENABLE, &tp->flags)) {
clear_bit(WORK_ENABLE, &tp->flags);
@@ -2913,6 +2927,8 @@ static int rtl8152_open(struct net_device *netdev)
free_all_mem(tp);
}
+ mutex_unlock(&tp->control);
+
usb_autopm_put_interface(tp->intf);
out:
@@ -2933,6 +2949,8 @@ static int rtl8152_close(struct net_device *netdev)
if (res < 0) {
rtl_drop_queued_tx(tp);
} else {
+ mutex_lock(&tp->control);
+
/* The autosuspend may have been enabled and wouldn't
* be disable when autoresume occurs, because the
* netif_running() would be false.
@@ -2945,6 +2963,9 @@ static int rtl8152_close(struct net_device *netdev)
tasklet_disable(&tp->tl);
tp->rtl_ops.down(tp);
tasklet_enable(&tp->tl);
+
+ mutex_unlock(&tp->control);
+
usb_autopm_put_interface(tp->intf);
}
@@ -3169,6 +3190,8 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
{
struct r8152 *tp = usb_get_intfdata(intf);
+ mutex_lock(&tp->control);
+
if (PMSG_IS_AUTO(message))
set_bit(SELECTIVE_SUSPEND, &tp->flags);
else
@@ -3188,6 +3211,8 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
tasklet_enable(&tp->tl);
}
+ mutex_unlock(&tp->control);
+
return 0;
}
@@ -3195,6 +3220,8 @@ static int rtl8152_resume(struct usb_interface *intf)
{
struct r8152 *tp = usb_get_intfdata(intf);
+ mutex_lock(&tp->control);
+
if (!test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
tp->rtl_ops.init(tp);
netif_device_attach(tp->netdev);
@@ -3220,6 +3247,8 @@ static int rtl8152_resume(struct usb_interface *intf)
usb_submit_urb(tp->intr_urb, GFP_KERNEL);
}
+ mutex_unlock(&tp->control);
+
return 0;
}
@@ -3230,9 +3259,13 @@ static void rtl8152_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
if (usb_autopm_get_interface(tp->intf) < 0)
return;
+ mutex_lock(&tp->control);
+
wol->supported = WAKE_ANY;
wol->wolopts = __rtl_get_wol(tp);
+ mutex_unlock(&tp->control);
+
usb_autopm_put_interface(tp->intf);
}
@@ -3245,9 +3278,13 @@ static int rtl8152_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
if (ret < 0)
goto out_set_wol;
+ mutex_lock(&tp->control);
+
__rtl_set_wol(tp, wol->wolopts);
tp->saved_wolopts = wol->wolopts & WAKE_ANY;
+ mutex_unlock(&tp->control);
+
usb_autopm_put_interface(tp->intf);
out_set_wol:
@@ -3282,11 +3319,25 @@ static
int rtl8152_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
struct r8152 *tp = netdev_priv(netdev);
+ int ret;
if (!tp->mii.mdio_read)
return -EOPNOTSUPP;
- return mii_ethtool_gset(&tp->mii, cmd);
+ ret = usb_autopm_get_interface(tp->intf);
+ if (ret < 0)
+ goto out;
+
+ mutex_lock(&tp->control);
+
+ ret = mii_ethtool_gset(&tp->mii, cmd);
+
+ mutex_unlock(&tp->control);
+
+ usb_autopm_put_interface(tp->intf);
+
+out:
+ return ret;
}
static int rtl8152_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -3298,8 +3349,12 @@ static int rtl8152_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if (ret < 0)
goto out;
+ mutex_lock(&tp->control);
+
ret = rtl8152_set_speed(tp, cmd->autoneg, cmd->speed, cmd->duplex);
+ mutex_unlock(&tp->control);
+
usb_autopm_put_interface(tp->intf);
out:
@@ -3459,8 +3514,12 @@ rtl_ethtool_get_eee(struct net_device *net, struct ethtool_eee *edata)
if (ret < 0)
goto out;
+ mutex_lock(&tp->control);
+
ret = tp->rtl_ops.eee_get(tp, edata);
+ mutex_unlock(&tp->control);
+
usb_autopm_put_interface(tp->intf);
out:
@@ -3477,10 +3536,14 @@ rtl_ethtool_set_eee(struct net_device *net, struct ethtool_eee *edata)
if (ret < 0)
goto out;
+ mutex_lock(&tp->control);
+
ret = tp->rtl_ops.eee_set(tp, edata);
if (!ret)
ret = mii_nway_restart(&tp->mii);
+ mutex_unlock(&tp->control);
+
usb_autopm_put_interface(tp->intf);
out:
@@ -3522,7 +3585,9 @@ static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
break;
case SIOCGMIIREG:
+ mutex_lock(&tp->control);
data->val_out = r8152_mdio_read(tp, data->reg_num);
+ mutex_unlock(&tp->control);
break;
case SIOCSMIIREG:
@@ -3530,7 +3595,9 @@ static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
res = -EPERM;
break;
}
+ mutex_lock(&tp->control);
r8152_mdio_write(tp, data->reg_num, data->val_in);
+ mutex_unlock(&tp->control);
break;
default:
@@ -3723,6 +3790,7 @@ static int rtl8152_probe(struct usb_interface *intf,
goto out;
tasklet_init(&tp->tl, bottom_half, (unsigned long)tp);
+ mutex_init(&tp->control);
INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t);
netdev->netdev_ops = &rtl8152_netdev_ops;
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index bfa0b1518da1..01a7db061c6a 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -294,7 +294,6 @@ struct ath_tx_control {
* (axq_qnum).
*/
struct ath_tx {
- u16 seq_no;
u32 txqsetup;
spinlock_t txbuflock;
struct list_head txbuf;
@@ -563,6 +562,7 @@ int ath_tx_init(struct ath_softc *sc, int nbufs);
int ath_txq_update(struct ath_softc *sc, int qnum,
struct ath9k_tx_queue_info *q);
void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop);
+void ath_assign_seq(struct ath_common *common, struct sk_buff *skb);
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ath_tx_control *txctl);
void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -592,6 +592,8 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
struct ath_vif {
struct list_head list;
+ u16 seq_no;
+
/* BSS info */
u8 bssid[ETH_ALEN];
u16 aid;
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index a6af855ef6ed..ecb783beeec2 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -144,16 +144,8 @@ static struct ath_buf *ath9k_beacon_generate(struct ieee80211_hw *hw,
mgmt_hdr->u.beacon.timestamp = avp->tsf_adjust;
info = IEEE80211_SKB_CB(skb);
- if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
- /*
- * TODO: make sure the seq# gets assigned properly (vs. other
- * TX frames)
- */
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- sc->tx.seq_no += 0x10;
- hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
- hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
- }
+
+ ath_assign_seq(common, skb);
if (vif->p2p)
ath9k_beacon_add_noa(sc, avp, skb);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index d779f4fa50e3..4014c4be6e79 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -464,6 +464,7 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
return -ENOMEM;
ah->dev = priv->dev;
+ ah->hw = priv->hw;
ah->hw_version.devid = devid;
ah->hw_version.usbdev = drv_info;
ah->ah_flags |= AH_USE_EEPROM;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 205162449b72..6f6a974f7fdb 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -2332,7 +2332,7 @@ static void ath9k_remove_chanctx(struct ieee80211_hw *hw,
conf->def.chan->center_freq);
ctx->assigned = false;
- ctx->hw_queue_base = -1;
+ ctx->hw_queue_base = 0;
ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_UNASSIGN);
mutex_unlock(&sc->mutex);
diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c
index 8a69d08ec55c..40ab65e6882f 100644
--- a/drivers/net/wireless/ath/ath9k/tx99.c
+++ b/drivers/net/wireless/ath/ath9k/tx99.c
@@ -54,6 +54,12 @@ static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc)
struct ieee80211_hdr *hdr;
struct ieee80211_tx_info *tx_info;
struct sk_buff *skb;
+ struct ath_vif *avp;
+
+ if (!sc->tx99_vif)
+ return NULL;
+
+ avp = (struct ath_vif *)sc->tx99_vif->drv_priv;
skb = alloc_skb(len, GFP_KERNEL);
if (!skb)
@@ -71,7 +77,7 @@ static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc)
memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
- hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
+ hdr->seq_ctrl |= cpu_to_le16(avp->seq_no);
tx_info = IEEE80211_SKB_CB(skb);
memset(tx_info, 0, sizeof(*tx_info));
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 151ae49fa57e..493a183d0aaf 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -2139,6 +2139,28 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
return bf;
}
+void ath_assign_seq(struct ath_common *common, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct ath_vif *avp;
+
+ if (!(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
+ return;
+
+ if (!vif)
+ return;
+
+ avp = (struct ath_vif *)vif->drv_priv;
+
+ if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
+ avp->seq_no += 0x10;
+
+ hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+ hdr->seq_ctrl |= cpu_to_le16(avp->seq_no);
+}
+
static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ath_tx_control *txctl)
{
@@ -2162,17 +2184,7 @@ static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
if (info->control.hw_key)
frmlen += info->control.hw_key->icv_len;
- /*
- * As a temporary workaround, assign seq# here; this will likely need
- * to be cleaned up to work better with Beacon transmission and virtual
- * BSSes.
- */
- if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
- if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
- sc->tx.seq_no += 0x10;
- hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
- hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
- }
+ ath_assign_seq(ath9k_hw_common(sc->sc_ah), skb);
if ((vif && vif->type != NL80211_IFTYPE_AP &&
vif->type != NL80211_IFTYPE_AP_VLAN) ||
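
ath_assign_seq() above bumps the per-vif counter by 0x10 because, in the 802.11 Sequence Control field, the fragment number occupies the low four bits and the sequence number the upper twelve; masking with IEEE80211_SCTL_FRAG keeps the fragment bits, and adding 0x10 advances the sequence number by one. A small sketch of that field layout; the constants are written out here rather than taken from the mac80211 headers.

/* 802.11 Sequence Control packing used by ath_assign_seq() (sketch). */
#include <stdint.h>
#include <stdio.h>

#define SCTL_FRAG 0x000f        /* fragment number: bits 0-3 */
#define SCTL_SEQ  0xfff0        /* sequence number: bits 4-15 */

int main(void)
{
        uint16_t seq_ctrl = 0;  /* host order here; the driver works on the
                                 * little-endian on-air field */
        uint16_t seq_no = 0;
        int i;

        for (i = 0; i < 3; i++) {
                seq_no += 0x10;                 /* next sequence number */
                seq_ctrl &= SCTL_FRAG;          /* keep fragment bits */
                seq_ctrl |= seq_no;             /* install new sequence */
                printf("seq_ctrl=0x%04x seq=%d frag=%d\n", seq_ctrl,
                       (seq_ctrl & SCTL_SEQ) >> 4, seq_ctrl & SCTL_FRAG);
        }
        return 0;
}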
diff --git a/drivers/net/wireless/ath/main.c b/drivers/net/wireless/ath/main.c
index 83f47af19280..338d72337604 100644
--- a/drivers/net/wireless/ath/main.c
+++ b/drivers/net/wireless/ath/main.c
@@ -79,13 +79,13 @@ void ath_printk(const char *level, const struct ath_common* common,
vaf.fmt = fmt;
vaf.va = &args;
- if (common && common->hw && common->hw->wiphy)
+ if (common && common->hw && common->hw->wiphy) {
printk("%sath: %s: %pV",
level, wiphy_name(common->hw->wiphy), &vaf);
- else
+ trace_ath_log(common->hw->wiphy, &vaf);
+ } else {
printk("%sath: %pV", level, &vaf);
-
- trace_ath_log(common->hw->wiphy, &vaf);
+ }
va_end(args);
}
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index ded967aa6ecb..706b844bce00 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -742,35 +742,49 @@ static void rtl8180_int_disable(struct ieee80211_hw *dev)
}
static void rtl8180_conf_basic_rates(struct ieee80211_hw *dev,
- u32 rates_mask)
+ u32 basic_mask)
{
struct rtl8180_priv *priv = dev->priv;
-
- u8 max, min;
u16 reg;
-
- max = fls(rates_mask) - 1;
- min = ffs(rates_mask) - 1;
+ u32 resp_mask;
+ u8 basic_max;
+ u8 resp_max, resp_min;
+
+ resp_mask = basic_mask;
+ /* IEEE80211 says the response rate should be equal to the highest basic
+ * rate that is not faster than the received frame. It also says that if
+ * the basic rate set does not contain any rate for the current
+ * modulation class, then the mandatory rate set must be used for that
+ * modulation class. So add the OFDM mandatory rates when needed.
+ */
+ if ((resp_mask & 0xf) == resp_mask)
+ resp_mask |= 0x150; /* 6, 12, 24Mbps */
switch (priv->chip_family) {
case RTL818X_CHIP_FAMILY_RTL8180:
/* in 8180 this is NOT a BITMAP */
+ basic_max = fls(basic_mask) - 1;
reg = rtl818x_ioread16(priv, &priv->map->BRSR);
reg &= ~3;
- reg |= max;
+ reg |= basic_max;
rtl818x_iowrite16(priv, &priv->map->BRSR, reg);
break;
case RTL818X_CHIP_FAMILY_RTL8185:
+ resp_max = fls(resp_mask) - 1;
+ resp_min = ffs(resp_mask) - 1;
/* in 8185 this is a BITMAP */
- rtl818x_iowrite16(priv, &priv->map->BRSR, rates_mask);
- rtl818x_iowrite8(priv, &priv->map->RESP_RATE, (max << 4) | min);
+ rtl818x_iowrite16(priv, &priv->map->BRSR, basic_mask);
+ rtl818x_iowrite8(priv, &priv->map->RESP_RATE, (resp_max << 4) |
+ resp_min);
break;
case RTL818X_CHIP_FAMILY_RTL8187SE:
- /* in 8187se this is a BITMAP */
- rtl818x_iowrite16(priv, &priv->map->BRSR_8187SE, rates_mask);
+ /* in 8187se this is a BITMAP. BRSR reg actually sets
+ * response rates.
+ */
+ rtl818x_iowrite16(priv, &priv->map->BRSR_8187SE, resp_mask);
break;
}
}
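
rtl8180_conf_basic_rates() above derives the response-rate window from the basic-rate bitmap: if only the four CCK bits (mask 0xf) are set, it ORs in 0x150 so the OFDM mandatory 6/12/24 Mbps rates become available, then packs the highest and lowest set bits into the RESP_RATE register as a high/low nibble pair. A short sketch of that computation for a hypothetical CCK-only basic set, with a simple fls() substitute since the kernel helper is not available in user space:

/* Response-rate mask handling as in rtl8180_conf_basic_rates() (sketch). */
#include <stdio.h>
#include <strings.h>    /* ffs() */

/* highest set bit, 1-based, mirroring the kernel's fls() */
static int fls_u32(unsigned int x)
{
        int r = 0;

        while (x) {
                x >>= 1;
                r++;
        }
        return r;
}

int main(void)
{
        unsigned int basic_mask = 0x5;  /* hypothetical: two CCK basic rates */
        unsigned int resp_mask = basic_mask;
        unsigned char resp_max, resp_min;

        /* CCK-only basic set: add the OFDM mandatory 6/12/24 Mbps bits */
        if ((resp_mask & 0xf) == resp_mask)
                resp_mask |= 0x150;

        resp_max = fls_u32(resp_mask) - 1;      /* bit 8 -> 24 Mbps */
        resp_min = ffs(resp_mask) - 1;          /* bit 0 -> lowest CCK rate */

        /* RESP_RATE register value: high nibble = max, low nibble = min */
        printf("resp_mask=0x%03x RESP_RATE=0x%02x\n", resp_mask,
               (resp_max << 4) | resp_min);     /* 0x155, 0x80 */
        return 0;
}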
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 976667ae8549..6866dcf24340 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -1370,7 +1370,7 @@ struct rtl_mac {
bool rdg_en;
/*AP*/
- u8 bssid[6];
+ u8 bssid[ETH_ALEN] __aligned(2);
u32 vendor;
u8 mcs[16]; /* 16 bytes mcs for HT rates. */
u32 basic_rates; /* b/g rates */
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 9c47b897b6d2..8079c31ac5e6 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -937,22 +937,18 @@ static int read_xenbus_vif_flags(struct backend_info *be)
return 0;
}
-
-/* ** Driver Registration ** */
-
-
static const struct xenbus_device_id netback_ids[] = {
{ "vif" },
{ "" }
};
-
-static DEFINE_XENBUS_DRIVER(netback, ,
+static struct xenbus_driver netback_driver = {
+ .ids = netback_ids,
.probe = netback_probe,
.remove = netback_remove,
.uevent = netback_uevent,
.otherend_changed = frontend_changed,
-);
+};
int xenvif_xenbus_init(void)
{
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index ca82f545ec2c..fa671442f420 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -2300,12 +2300,6 @@ static void xennet_sysfs_delif(struct net_device *netdev)
#endif /* CONFIG_SYSFS */
-static const struct xenbus_device_id netfront_ids[] = {
- { "vif" },
- { "" }
-};
-
-
static int xennet_remove(struct xenbus_device *dev)
{
struct netfront_info *info = dev_get_drvdata(&dev->dev);
@@ -2338,12 +2332,18 @@ static int xennet_remove(struct xenbus_device *dev)
return 0;
}
-static DEFINE_XENBUS_DRIVER(netfront, ,
+static const struct xenbus_device_id netfront_ids[] = {
+ { "vif" },
+ { "" }
+};
+
+static struct xenbus_driver netfront_driver = {
+ .ids = netfront_ids,
.probe = netfront_probe,
.remove = xennet_remove,
.resume = netfront_resume,
.otherend_changed = netback_changed,
-);
+};
static int __init netif_init(void)
{
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 5160c4eb73c2..1a13f5b722c5 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -11,6 +11,7 @@ config OF_SELFTEST
bool "Device Tree Runtime self tests"
depends on OF_IRQ && OF_EARLY_FLATTREE
select OF_DYNAMIC
+ select OF_RESOLVE
help
This option builds in test cases for the device tree infrastructure
that are executed once at boot time, and the results dumped to the
@@ -79,4 +80,7 @@ config OF_RESERVED_MEM
help
Helpers to allow for reservation of memory regions
+config OF_RESOLVE
+ bool
+
endmenu # OF
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index 2b6a7b129d10..ca9209ce50cd 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_OF_PCI) += of_pci.o
obj-$(CONFIG_OF_PCI_IRQ) += of_pci_irq.o
obj-$(CONFIG_OF_MTD) += of_mtd.o
obj-$(CONFIG_OF_RESERVED_MEM) += of_reserved_mem.o
+obj-$(CONFIG_OF_RESOLVE) += resolver.o
CFLAGS_fdt.o = -I$(src)/../../scripts/dtc/libfdt
CFLAGS_fdt_address.o = -I$(src)/../../scripts/dtc/libfdt
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 293ed4b687ba..2305dc0382bc 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -1021,6 +1021,9 @@ struct device_node *of_find_node_by_phandle(phandle handle)
struct device_node *np;
unsigned long flags;
+ if (!handle)
+ return NULL;
+
raw_spin_lock_irqsave(&devtree_lock, flags);
for (np = of_allnodes; np; np = np->allnext)
if (np->phandle == handle)
diff --git a/drivers/of/resolver.c b/drivers/of/resolver.c
new file mode 100644
index 000000000000..aed7959f800d
--- /dev/null
+++ b/drivers/of/resolver.c
@@ -0,0 +1,336 @@
+/*
+ * Functions for dealing with DT resolution
+ *
+ * Copyright (C) 2012 Pantelis Antoniou <panto@antoniou-consulting.com>
+ * Copyright (C) 2012 Texas Instruments Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+
+/* illegal phandle value (set when unresolved) */
+#define OF_PHANDLE_ILLEGAL 0xdeadbeef
+
+/**
+ * Find a node with the given full name by recursively following any of
+ * the child node links.
+ */
+static struct device_node *__of_find_node_by_full_name(struct device_node *node,
+ const char *full_name)
+{
+ struct device_node *child, *found;
+
+ if (node == NULL)
+ return NULL;
+
+ /* check */
+ if (of_node_cmp(node->full_name, full_name) == 0)
+ return node;
+
+ for_each_child_of_node(node, child) {
+ found = __of_find_node_by_full_name(child, full_name);
+ if (found != NULL)
+ return found;
+ }
+
+ return NULL;
+}
+
+/*
+ * Find live tree's maximum phandle value.
+ */
+static phandle of_get_tree_max_phandle(void)
+{
+ struct device_node *node;
+ phandle phandle;
+ unsigned long flags;
+
+ /* now search recursively */
+ raw_spin_lock_irqsave(&devtree_lock, flags);
+ phandle = 0;
+ for_each_of_allnodes(node) {
+ if (node->phandle != OF_PHANDLE_ILLEGAL &&
+ node->phandle > phandle)
+ phandle = node->phandle;
+ }
+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+
+ return phandle;
+}
+
+/*
+ * Adjust a subtree's phandle values by a given delta.
+ * Makes sure not just to adjust the device node's phandle value,
+ * but to modify the phandle property values as well.
+ */
+static void __of_adjust_tree_phandles(struct device_node *node,
+ int phandle_delta)
+{
+ struct device_node *child;
+ struct property *prop;
+ phandle phandle;
+
+ /* first adjust the node's phandle direct value */
+ if (node->phandle != 0 && node->phandle != OF_PHANDLE_ILLEGAL)
+ node->phandle += phandle_delta;
+
+ /* now adjust phandle & linux,phandle values */
+ for_each_property_of_node(node, prop) {
+
+ /* only look for these two */
+ if (of_prop_cmp(prop->name, "phandle") != 0 &&
+ of_prop_cmp(prop->name, "linux,phandle") != 0)
+ continue;
+
+ /* must be big enough */
+ if (prop->length < 4)
+ continue;
+
+ /* read phandle value */
+ phandle = be32_to_cpup(prop->value);
+ if (phandle == OF_PHANDLE_ILLEGAL) /* unresolved */
+ continue;
+
+ /* adjust */
+ *(uint32_t *)prop->value = cpu_to_be32(node->phandle);
+ }
+
+ /* now do the children recursively */
+ for_each_child_of_node(node, child)
+ __of_adjust_tree_phandles(child, phandle_delta);
+}
+
+static int __of_adjust_phandle_ref(struct device_node *node, struct property *rprop, int value, bool is_delta)
+{
+ phandle phandle;
+ struct device_node *refnode;
+ struct property *sprop;
+ char *propval, *propcur, *propend, *nodestr, *propstr, *s;
+ int offset, propcurlen;
+ int err = 0;
+
+ /* make a copy */
+ propval = kmalloc(rprop->length, GFP_KERNEL);
+ if (!propval) {
+ pr_err("%s: Could not copy value of '%s'\n",
+ __func__, rprop->name);
+ return -ENOMEM;
+ }
+ memcpy(propval, rprop->value, rprop->length);
+
+ propend = propval + rprop->length;
+ for (propcur = propval; propcur < propend; propcur += propcurlen + 1) {
+ propcurlen = strlen(propcur);
+
+ nodestr = propcur;
+ s = strchr(propcur, ':');
+ if (!s) {
+ pr_err("%s: Illegal symbol entry '%s' (1)\n",
+ __func__, propcur);
+ err = -EINVAL;
+ goto err_fail;
+ }
+ *s++ = '\0';
+
+ propstr = s;
+ s = strchr(s, ':');
+ if (!s) {
+ pr_err("%s: Illegal symbol entry '%s' (2)\n",
+ __func__, (char *)rprop->value);
+ err = -EINVAL;
+ goto err_fail;
+ }
+
+ *s++ = '\0';
+ err = kstrtoint(s, 10, &offset);
+ if (err != 0) {
+ pr_err("%s: Could not get offset '%s'\n",
+ __func__, (char *)rprop->value);
+ goto err_fail;
+ }
+
+ /* look into the resolve node for the full path */
+ refnode = __of_find_node_by_full_name(node, nodestr);
+ if (!refnode) {
+ pr_warn("%s: Could not find refnode '%s'\n",
+ __func__, (char *)rprop->value);
+ continue;
+ }
+
+ /* now find the property */
+ for_each_property_of_node(refnode, sprop) {
+ if (of_prop_cmp(sprop->name, propstr) == 0)
+ break;
+ }
+
+ if (!sprop) {
+ pr_err("%s: Could not find property '%s'\n",
+ __func__, (char *)rprop->value);
+ err = -ENOENT;
+ goto err_fail;
+ }
+
+ phandle = is_delta ? be32_to_cpup(sprop->value + offset) + value : value;
+ *(__be32 *)(sprop->value + offset) = cpu_to_be32(phandle);
+ }
+
+err_fail:
+ kfree(propval);
+ return err;
+}
+
+/*
+ * Adjust the local phandle references by the given phandle delta.
+ * Assumes the existence of a __local_fixups__ node at the root
+ * of the tree. Does not take any devtree locks, so make sure to
+ * call this only on a tree that is in the detached state.
+ */
+static int __of_adjust_tree_phandle_references(struct device_node *node,
+ int phandle_delta)
+{
+ struct device_node *child;
+ struct property *rprop;
+ int err;
+
+ /* locate the __local_fixups__ node */
+ for_each_child_of_node(node, child)
+ if (of_node_cmp(child->name, "__local_fixups__") == 0)
+ break;
+
+ /* no local fixups */
+ if (!child)
+ return 0;
+
+ /* find the local fixups property */
+ for_each_property_of_node(child, rprop) {
+ /* skip properties added automatically */
+ if (of_prop_cmp(rprop->name, "name") == 0)
+ continue;
+
+ err = __of_adjust_phandle_ref(node, rprop, phandle_delta, true);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * of_resolve - Resolve the given node against the live tree.
+ *
+ * @resolve: Node to resolve
+ *
+ * Perform dynamic Device Tree resolution of the given node against
+ * the live tree. This depends on the live tree having a __symbols__
+ * node, and on the resolve node having __fixups__ &
+ * __local_fixups__ nodes (if needed).
+ * On success the resolve node's contents are fit to be inserted
+ * into, or operated upon within, the live tree.
+ * Returns 0 on success or a negative error value on error.
+ */
+int of_resolve_phandles(struct device_node *resolve)
+{
+ struct device_node *child, *refnode;
+ struct device_node *root_sym, *resolve_sym, *resolve_fix;
+ struct property *rprop;
+ const char *refpath;
+ phandle phandle, phandle_delta;
+ int err;
+
+ /* the resolve node must exist, and be detached */
+ if (!resolve || !of_node_check_flag(resolve, OF_DETACHED))
+ return -EINVAL;
+
+ /* first we need to adjust the phandles */
+ phandle_delta = of_get_tree_max_phandle() + 1;
+ __of_adjust_tree_phandles(resolve, phandle_delta);
+ err = __of_adjust_tree_phandle_references(resolve, phandle_delta);
+ if (err != 0)
+ return err;
+
+ root_sym = NULL;
+ resolve_sym = NULL;
+ resolve_fix = NULL;
+
+ /* this may fail (if no fixups are required) */
+ root_sym = of_find_node_by_path("/__symbols__");
+
+ /* locate the symbols & fixups nodes on resolve */
+ for_each_child_of_node(resolve, child) {
+
+ if (!resolve_sym &&
+ of_node_cmp(child->name, "__symbols__") == 0)
+ resolve_sym = child;
+
+ if (!resolve_fix &&
+ of_node_cmp(child->name, "__fixups__") == 0)
+ resolve_fix = child;
+
+ /* both found, don't bother anymore */
+ if (resolve_sym && resolve_fix)
+ break;
+ }
+
+ /* we do allow for the case where no fixups are needed */
+ if (!resolve_fix) {
+ err = 0; /* no error */
+ goto out;
+ }
+
+ /* we need to fix up, but there are no root symbols... */
+ if (!root_sym) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ for_each_property_of_node(resolve_fix, rprop) {
+
+ /* skip properties added automatically */
+ if (of_prop_cmp(rprop->name, "name") == 0)
+ continue;
+
+ err = of_property_read_string(root_sym,
+ rprop->name, &refpath);
+ if (err != 0) {
+ pr_err("%s: Could not find symbol '%s'\n",
+ __func__, rprop->name);
+ goto out;
+ }
+
+ refnode = of_find_node_by_path(refpath);
+ if (!refnode) {
+ pr_err("%s: Could not find node by path '%s'\n",
+ __func__, refpath);
+ err = -ENOENT;
+ goto out;
+ }
+
+ phandle = refnode->phandle;
+ of_node_put(refnode);
+
+ pr_debug("%s: %s phandle is 0x%08x\n",
+ __func__, rprop->name, phandle);
+
+ err = __of_adjust_phandle_ref(resolve, rprop, phandle, false);
+ if (err)
+ break;
+ }
+
+out:
+ /* NULL is handled by of_node_put as NOP */
+ of_node_put(root_sym);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(of_resolve_phandles);
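A rough usage sketch (assumed caller, not part of this patch): the selftest change below shows the intended pattern — mark the detached tree with OF_DETACHED and resolve its phandles before attaching it to the live tree. The attach_overlay() wrapper name and the source of the detached tree are illustrative assumptions only.

	/* sketch: 'overlay' is a detached tree unflattened elsewhere
	 * (e.g. via of_fdt_unflatten_tree()); error handling trimmed */
	static int attach_overlay(struct device_node *overlay)
	{
		int rc;

		of_node_set_flag(overlay, OF_DETACHED);
		rc = of_resolve_phandles(overlay);
		if (rc) {
			pr_err("phandle resolution failed (rc=%i)\n", rc);
			return rc;
		}
		/* contents are now safe to graft onto the live tree */
		return 0;
	}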
diff --git a/drivers/of/selftest.c b/drivers/of/selftest.c
index a737cb5974de..78001270a598 100644
--- a/drivers/of/selftest.c
+++ b/drivers/of/selftest.c
@@ -7,6 +7,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/errno.h>
+#include <linux/hashtable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
@@ -24,7 +25,7 @@ static struct selftest_results {
int failed;
} selftest_results;
-#define NO_OF_NODES 2
+#define NO_OF_NODES 3
static struct device_node *nodes[NO_OF_NODES];
static int last_node_index;
static bool selftest_live_tree;
@@ -145,6 +146,97 @@ static void __init of_selftest_dynamic(void)
"Adding a large property should have passed\n");
}
+static int __init of_selftest_check_node_linkage(struct device_node *np)
+{
+ struct device_node *child, *allnext_index = np;
+ int count = 0, rc;
+
+ for_each_child_of_node(np, child) {
+ if (child->parent != np) {
+ pr_err("Child node %s links to wrong parent %s\n",
+ child->name, np->name);
+ return -EINVAL;
+ }
+
+ while (allnext_index && allnext_index != child)
+ allnext_index = allnext_index->allnext;
+ if (allnext_index != child) {
+ pr_err("Node %s is ordered differently in sibling and allnode lists\n",
+ child->name);
+ return -EINVAL;
+ }
+
+ rc = of_selftest_check_node_linkage(child);
+ if (rc < 0)
+ return rc;
+ count += rc;
+ }
+
+ return count + 1;
+}
+
+static void __init of_selftest_check_tree_linkage(void)
+{
+ struct device_node *np;
+ int allnode_count = 0, child_count;
+
+ if (!of_allnodes)
+ return;
+
+ for_each_of_allnodes(np)
+ allnode_count++;
+ child_count = of_selftest_check_node_linkage(of_allnodes);
+
+ selftest(child_count > 0, "Device node data structure is corrupted\n");
+ selftest(child_count == allnode_count, "allnodes list size (%i) doesn't match "
+ "sibling lists size (%i)\n", allnode_count, child_count);
+ pr_debug("allnodes list size (%i); sibling lists size (%i)\n", allnode_count, child_count);
+}
+
+struct node_hash {
+ struct hlist_node node;
+ struct device_node *np;
+};
+
+static DEFINE_HASHTABLE(phandle_ht, 8);
+static void __init of_selftest_check_phandles(void)
+{
+ struct device_node *np;
+ struct node_hash *nh;
+ struct hlist_node *tmp;
+ int i, dup_count = 0, phandle_count = 0;
+
+ for_each_of_allnodes(np) {
+ if (!np->phandle)
+ continue;
+
+ hash_for_each_possible(phandle_ht, nh, node, np->phandle) {
+ if (nh->np->phandle == np->phandle) {
+ pr_info("Duplicate phandle! %i used by %s and %s\n",
+ np->phandle, nh->np->full_name, np->full_name);
+ dup_count++;
+ break;
+ }
+ }
+
+ nh = kzalloc(sizeof(*nh), GFP_KERNEL);
+ if (WARN_ON(!nh))
+ return;
+
+ nh->np = np;
+ hash_add(phandle_ht, &nh->node, np->phandle);
+ phandle_count++;
+ }
+ selftest(dup_count == 0, "Found %i duplicates in %i phandles\n",
+ dup_count, phandle_count);
+
+ /* Clean up */
+ hash_for_each_safe(phandle_ht, i, tmp, nh, node) {
+ hash_del(&nh->node);
+ kfree(nh);
+ }
+}
+
static void __init of_selftest_parse_phandle_with_args(void)
{
struct device_node *np;
@@ -637,6 +729,8 @@ static int attach_node_and_children(struct device_node *np)
dup = np;
while (dup) {
+ if (WARN_ON(last_node_index >= NO_OF_NODES))
+ return -EINVAL;
nodes[last_node_index++] = dup;
dup = dup->sibling;
}
@@ -670,6 +764,7 @@ static int __init selftest_data_add(void)
extern uint8_t __dtb_testcases_begin[];
extern uint8_t __dtb_testcases_end[];
const int size = __dtb_testcases_end - __dtb_testcases_begin;
+ int rc;
if (!size) {
pr_warn("%s: No testcase data to attach; not running tests\n",
@@ -690,6 +785,12 @@ static int __init selftest_data_add(void)
pr_warn("%s: No tree to attach; not running tests\n", __func__);
return -ENODATA;
}
+ of_node_set_flag(selftest_data_node, OF_DETACHED);
+ rc = of_resolve_phandles(selftest_data_node);
+ if (rc) {
+ pr_err("%s: Failed to resolve phandles (rc=%i)\n", __func__, rc);
+ return -EINVAL;
+ }
if (!of_allnodes) {
/* enabling flag for removing nodes */
@@ -717,10 +818,6 @@ static void detach_node_and_children(struct device_node *np)
{
while (np->child)
detach_node_and_children(np->child);
-
- while (np->sibling)
- detach_node_and_children(np->sibling);
-
of_detach_node(np);
}
@@ -749,8 +846,7 @@ static void selftest_data_remove(void)
if (nodes[last_node_index]) {
np = of_find_node_by_path(nodes[last_node_index]->full_name);
if (strcmp(np->full_name, "/aliases") != 0) {
- detach_node_and_children(np->child);
- of_detach_node(np);
+ detach_node_and_children(np);
} else {
for_each_property_of_node(np, prop) {
if (strcmp(prop->name, "testcase-alias") == 0)
@@ -780,6 +876,8 @@ static int __init of_selftest(void)
of_node_put(np);
pr_info("start of selftest - you will see error messages\n");
+ of_selftest_check_tree_linkage();
+ of_selftest_check_phandles();
of_selftest_find_node_by_name();
of_selftest_dynamic();
of_selftest_parse_phandle_with_args();
@@ -790,12 +888,16 @@ static int __init of_selftest(void)
of_selftest_parse_interrupts_extended();
of_selftest_match_node();
of_selftest_platform_populate();
- pr_info("end of selftest - %i passed, %i failed\n",
- selftest_results.passed, selftest_results.failed);
/* removing selftest data from live tree */
selftest_data_remove();
+ /* Double check linkage after removing testcase data */
+ of_selftest_check_tree_linkage();
+
+ pr_info("end of selftest - %i passed, %i failed\n",
+ selftest_results.passed, selftest_results.failed);
+
return 0;
}
late_initcall(of_selftest);
diff --git a/drivers/of/testcase-data/testcases.dts b/drivers/of/testcase-data/testcases.dts
index 219ef9324e9c..6994e15c24bf 100644
--- a/drivers/of/testcase-data/testcases.dts
+++ b/drivers/of/testcase-data/testcases.dts
@@ -13,3 +13,38 @@
#include "tests-interrupts.dtsi"
#include "tests-match.dtsi"
#include "tests-platform.dtsi"
+
+/*
+ * phandle fixup data - generated by dtc patches that aren't upstream.
+ * This data must be regenerated whenever phandle references are modified in
+ * the testdata tree.
+ *
+ * The format of this data may be subject to change. For the time being consider
+ * this a kernel-internal data format.
+ */
+/ { __local_fixups__ {
+ fixup = "/testcase-data/testcase-device2:interrupt-parent:0",
+ "/testcase-data/testcase-device1:interrupt-parent:0",
+ "/testcase-data/interrupts/interrupts-extended0:interrupts-extended:60",
+ "/testcase-data/interrupts/interrupts-extended0:interrupts-extended:52",
+ "/testcase-data/interrupts/interrupts-extended0:interrupts-extended:44",
+ "/testcase-data/interrupts/interrupts-extended0:interrupts-extended:36",
+ "/testcase-data/interrupts/interrupts-extended0:interrupts-extended:24",
+ "/testcase-data/interrupts/interrupts-extended0:interrupts-extended:8",
+ "/testcase-data/interrupts/interrupts-extended0:interrupts-extended:0",
+ "/testcase-data/interrupts/interrupts1:interrupt-parent:0",
+ "/testcase-data/interrupts/interrupts0:interrupt-parent:0",
+ "/testcase-data/interrupts/intmap1:interrupt-map:12",
+ "/testcase-data/interrupts/intmap0:interrupt-map:52",
+ "/testcase-data/interrupts/intmap0:interrupt-map:36",
+ "/testcase-data/interrupts/intmap0:interrupt-map:16",
+ "/testcase-data/interrupts/intmap0:interrupt-map:4",
+ "/testcase-data/phandle-tests/consumer-a:phandle-list-bad-args:12",
+ "/testcase-data/phandle-tests/consumer-a:phandle-list-bad-args:0",
+ "/testcase-data/phandle-tests/consumer-a:phandle-list:56",
+ "/testcase-data/phandle-tests/consumer-a:phandle-list:52",
+ "/testcase-data/phandle-tests/consumer-a:phandle-list:40",
+ "/testcase-data/phandle-tests/consumer-a:phandle-list:24",
+ "/testcase-data/phandle-tests/consumer-a:phandle-list:8",
+ "/testcase-data/phandle-tests/consumer-a:phandle-list:0";
+}; };
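For reference, each __local_fixups__ entry above is a "node-path:property-name:byte-offset" triple: __of_adjust_phandle_ref() splits it on ':' and bumps the big-endian cell at that byte offset by the phandle delta. A minimal userspace sketch of the same split (the main() harness is illustrative only; the kernel code uses strchr() and kstrtoint() for the same job):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	int main(void)
	{
		char entry[] = "/testcase-data/interrupts/intmap0:interrupt-map:16";
		char *node = entry, *prop, *off;

		prop = strchr(node, ':');	/* end of node path */
		*prop++ = '\0';
		off = strchr(prop, ':');	/* end of property name */
		*off++ = '\0';

		/* prints: node=/testcase-data/interrupts/intmap0
		 *         prop=interrupt-map offset=16 */
		printf("node=%s prop=%s offset=%ld\n",
		       node, prop, strtol(off, NULL, 10));
		return 0;
	}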
diff --git a/drivers/parisc/power.c b/drivers/parisc/power.c
index 90cca5e3805f..ef31b77404ef 100644
--- a/drivers/parisc/power.c
+++ b/drivers/parisc/power.c
@@ -121,7 +121,6 @@ static int kpowerswd(void *param)
unsigned long soft_power_reg = (unsigned long) param;
schedule_timeout_interruptible(pwrsw_enabled ? HZ : HZ/POWERSWITCH_POLL_PER_SEC);
- __set_current_state(TASK_RUNNING);
if (unlikely(!pwrsw_enabled))
continue;
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 2f7c92c4757a..9fab30af0e75 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -302,6 +302,7 @@ void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
__get_cached_msi_msg(entry, msg);
}
+EXPORT_SYMBOL_GPL(get_cached_msi_msg);
void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
@@ -346,6 +347,7 @@ void write_msi_msg(unsigned int irq, struct msi_msg *msg)
__write_msi_msg(entry, msg);
}
+EXPORT_SYMBOL_GPL(write_msi_msg);
static void free_msi_irqs(struct pci_dev *dev)
{
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index 53df39a22c8a..116ca3746adb 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -1136,11 +1136,13 @@ static const struct xenbus_device_id xenpci_ids[] = {
{""},
};
-static DEFINE_XENBUS_DRIVER(xenpci, "pcifront",
+static struct xenbus_driver xenpci_driver = {
+ .name = "pcifront",
+ .ids = xenpci_ids,
.probe = pcifront_xenbus_probe,
.remove = pcifront_xenbus_remove,
.otherend_changed = pcifront_backend_changed,
-);
+};
static int __init pcifront_init(void)
{
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index 390e8e33d5e3..25721bf20092 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -163,18 +163,24 @@ static void dell_wmi_notify(u32 value, void *context)
const struct key_entry *key;
int reported_key;
u16 *buffer_entry = (u16 *)obj->buffer.pointer;
+ int buffer_size = obj->buffer.length/2;
- if (dell_new_hk_type && (buffer_entry[1] != 0x10)) {
+ if (buffer_size >= 2 && dell_new_hk_type && buffer_entry[1] != 0x10) {
pr_info("Received unknown WMI event (0x%x)\n",
buffer_entry[1]);
kfree(obj);
return;
}
- if (dell_new_hk_type || buffer_entry[1] == 0x0)
+ if (buffer_size >= 3 && (dell_new_hk_type || buffer_entry[1] == 0x0))
reported_key = (int)buffer_entry[2];
- else
+ else if (buffer_size >= 2)
reported_key = (int)buffer_entry[1] & 0xffff;
+ else {
+ pr_info("Received unknown WMI event\n");
+ kfree(obj);
+ return;
+ }
key = sparse_keymap_entry_from_scancode(dell_wmi_input_dev,
reported_key);
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index bd533c22be57..db79902c4a8e 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -263,13 +263,11 @@ static int acpi_setter_handle(struct eeepc_laptop *eeepc, int cm,
/*
* Sys helpers
*/
-static int parse_arg(const char *buf, unsigned long count, int *val)
+static int parse_arg(const char *buf, int *val)
{
- if (!count)
- return 0;
if (sscanf(buf, "%i", val) != 1)
return -EINVAL;
- return count;
+ return 0;
}
static ssize_t store_sys_acpi(struct device *dev, int cm,
@@ -278,12 +276,13 @@ static ssize_t store_sys_acpi(struct device *dev, int cm,
struct eeepc_laptop *eeepc = dev_get_drvdata(dev);
int rv, value;
- rv = parse_arg(buf, count, &value);
- if (rv > 0)
- value = set_acpi(eeepc, cm, value);
- if (value < 0)
+ rv = parse_arg(buf, &value);
+ if (rv < 0)
+ return rv;
+ rv = set_acpi(eeepc, cm, value);
+ if (rv < 0)
return -EIO;
- return rv;
+ return count;
}
static ssize_t show_sys_acpi(struct device *dev, int cm, char *buf)
@@ -296,30 +295,34 @@ static ssize_t show_sys_acpi(struct device *dev, int cm, char *buf)
return sprintf(buf, "%d\n", value);
}
-#define EEEPC_CREATE_DEVICE_ATTR(_name, _mode, _cm) \
- static ssize_t show_##_name(struct device *dev, \
+#define EEEPC_ACPI_SHOW_FUNC(_name, _cm) \
+ static ssize_t _name##_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
return show_sys_acpi(dev, _cm, buf); \
- } \
- static ssize_t store_##_name(struct device *dev, \
+ }
+
+#define EEEPC_ACPI_STORE_FUNC(_name, _cm) \
+ static ssize_t _name##_store(struct device *dev, \
struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
return store_sys_acpi(dev, _cm, buf, count); \
- } \
- static struct device_attribute dev_attr_##_name = { \
- .attr = { \
- .name = __stringify(_name), \
- .mode = _mode }, \
- .show = show_##_name, \
- .store = store_##_name, \
}
-EEEPC_CREATE_DEVICE_ATTR(camera, 0644, CM_ASL_CAMERA);
-EEEPC_CREATE_DEVICE_ATTR(cardr, 0644, CM_ASL_CARDREADER);
-EEEPC_CREATE_DEVICE_ATTR(disp, 0200, CM_ASL_DISPLAYSWITCH);
+#define EEEPC_CREATE_DEVICE_ATTR_RW(_name, _cm) \
+ EEEPC_ACPI_SHOW_FUNC(_name, _cm) \
+ EEEPC_ACPI_STORE_FUNC(_name, _cm) \
+ static DEVICE_ATTR_RW(_name)
+
+#define EEEPC_CREATE_DEVICE_ATTR_WO(_name, _cm) \
+ EEEPC_ACPI_STORE_FUNC(_name, _cm) \
+ static DEVICE_ATTR_WO(_name)
+
+EEEPC_CREATE_DEVICE_ATTR_RW(camera, CM_ASL_CAMERA);
+EEEPC_CREATE_DEVICE_ATTR_RW(cardr, CM_ASL_CARDREADER);
+EEEPC_CREATE_DEVICE_ATTR_WO(disp, CM_ASL_DISPLAYSWITCH);
struct eeepc_cpufv {
int num;
@@ -329,14 +332,17 @@ struct eeepc_cpufv {
static int get_cpufv(struct eeepc_laptop *eeepc, struct eeepc_cpufv *c)
{
c->cur = get_acpi(eeepc, CM_ASL_CPUFV);
+ if (c->cur < 0)
+ return -ENODEV;
+
c->num = (c->cur >> 8) & 0xff;
c->cur &= 0xff;
- if (c->cur < 0 || c->num <= 0 || c->num > 12)
+ if (c->num == 0 || c->num > 12)
return -ENODEV;
return 0;
}
-static ssize_t show_available_cpufv(struct device *dev,
+static ssize_t available_cpufv_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
@@ -353,7 +359,7 @@ static ssize_t show_available_cpufv(struct device *dev,
return len;
}
-static ssize_t show_cpufv(struct device *dev,
+static ssize_t cpufv_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
@@ -365,7 +371,7 @@ static ssize_t show_cpufv(struct device *dev,
return sprintf(buf, "%#x\n", (c.num << 8) | c.cur);
}
-static ssize_t store_cpufv(struct device *dev,
+static ssize_t cpufv_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -377,16 +383,18 @@ static ssize_t store_cpufv(struct device *dev,
return -EPERM;
if (get_cpufv(eeepc, &c))
return -ENODEV;
- rv = parse_arg(buf, count, &value);
+ rv = parse_arg(buf, &value);
if (rv < 0)
return rv;
- if (!rv || value < 0 || value >= c.num)
+ if (value < 0 || value >= c.num)
return -EINVAL;
- set_acpi(eeepc, CM_ASL_CPUFV, value);
- return rv;
+ rv = set_acpi(eeepc, CM_ASL_CPUFV, value);
+ if (rv)
+ return rv;
+ return count;
}
-static ssize_t show_cpufv_disabled(struct device *dev,
+static ssize_t cpufv_disabled_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
@@ -395,14 +403,14 @@ static ssize_t show_cpufv_disabled(struct device *dev,
return sprintf(buf, "%d\n", eeepc->cpufv_disabled);
}
-static ssize_t store_cpufv_disabled(struct device *dev,
+static ssize_t cpufv_disabled_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct eeepc_laptop *eeepc = dev_get_drvdata(dev);
int rv, value;
- rv = parse_arg(buf, count, &value);
+ rv = parse_arg(buf, &value);
if (rv < 0)
return rv;
@@ -412,7 +420,7 @@ static ssize_t store_cpufv_disabled(struct device *dev,
pr_warn("cpufv enabled (not officially supported "
"on this model)\n");
eeepc->cpufv_disabled = false;
- return rv;
+ return count;
case 1:
return -EPERM;
default:
@@ -421,29 +429,9 @@ static ssize_t store_cpufv_disabled(struct device *dev,
}
-static struct device_attribute dev_attr_cpufv = {
- .attr = {
- .name = "cpufv",
- .mode = 0644 },
- .show = show_cpufv,
- .store = store_cpufv
-};
-
-static struct device_attribute dev_attr_available_cpufv = {
- .attr = {
- .name = "available_cpufv",
- .mode = 0444 },
- .show = show_available_cpufv
-};
-
-static struct device_attribute dev_attr_cpufv_disabled = {
- .attr = {
- .name = "cpufv_disabled",
- .mode = 0644 },
- .show = show_cpufv_disabled,
- .store = store_cpufv_disabled
-};
-
+static DEVICE_ATTR_RW(cpufv);
+static DEVICE_ATTR_RO(available_cpufv);
+static DEVICE_ATTR_RW(cpufv_disabled);
static struct attribute *platform_attributes[] = {
&dev_attr_camera.attr,
@@ -545,7 +533,7 @@ static int eeepc_led_init(struct eeepc_laptop *eeepc)
eeepc->tpd_led.name = "eeepc::touchpad";
eeepc->tpd_led.brightness_set = tpd_led_set;
if (get_acpi(eeepc, CM_ASL_TPD) >= 0) /* if method is available */
- eeepc->tpd_led.brightness_get = tpd_led_get;
+ eeepc->tpd_led.brightness_get = tpd_led_get;
eeepc->tpd_led.max_brightness = 1;
rv = led_classdev_register(&eeepc->platform_device->dev,
@@ -680,22 +668,21 @@ static int eeepc_register_rfkill_notifier(struct eeepc_laptop *eeepc,
status = acpi_get_handle(NULL, node, &handle);
- if (ACPI_SUCCESS(status)) {
- status = acpi_install_notify_handler(handle,
- ACPI_SYSTEM_NOTIFY,
- eeepc_rfkill_notify,
- eeepc);
- if (ACPI_FAILURE(status))
- pr_warn("Failed to register notify on %s\n", node);
-
- /*
- * Refresh pci hotplug in case the rfkill state was
- * changed during setup.
- */
- eeepc_rfkill_hotplug(eeepc, handle);
- } else
+ if (ACPI_FAILURE(status))
return -ENODEV;
+ status = acpi_install_notify_handler(handle,
+ ACPI_SYSTEM_NOTIFY,
+ eeepc_rfkill_notify,
+ eeepc);
+ if (ACPI_FAILURE(status))
+ pr_warn("Failed to register notify on %s\n", node);
+
+ /*
+ * Refresh pci hotplug in case the rfkill state was
+ * changed during setup.
+ */
+ eeepc_rfkill_hotplug(eeepc, handle);
return 0;
}
@@ -707,20 +694,21 @@ static void eeepc_unregister_rfkill_notifier(struct eeepc_laptop *eeepc,
status = acpi_get_handle(NULL, node, &handle);
- if (ACPI_SUCCESS(status)) {
- status = acpi_remove_notify_handler(handle,
- ACPI_SYSTEM_NOTIFY,
- eeepc_rfkill_notify);
- if (ACPI_FAILURE(status))
- pr_err("Error removing rfkill notify handler %s\n",
- node);
- /*
- * Refresh pci hotplug in case the rfkill
- * state was changed after
- * eeepc_unregister_rfkill_notifier()
- */
- eeepc_rfkill_hotplug(eeepc, handle);
- }
+ if (ACPI_FAILURE(status))
+ return;
+
+ status = acpi_remove_notify_handler(handle,
+ ACPI_SYSTEM_NOTIFY,
+ eeepc_rfkill_notify);
+ if (ACPI_FAILURE(status))
+ pr_err("Error removing rfkill notify handler %s\n",
+ node);
+ /*
+ * Refresh pci hotplug in case the rfkill
+ * state was changed after
+ * eeepc_unregister_rfkill_notifier()
+ */
+ eeepc_rfkill_hotplug(eeepc, handle);
}
static int eeepc_get_adapter_status(struct hotplug_slot *hotplug_slot,
@@ -1042,10 +1030,11 @@ static ssize_t store_sys_hwmon(void (*set)(int), const char *buf, size_t count)
{
int rv, value;
- rv = parse_arg(buf, count, &value);
- if (rv > 0)
- set(value);
- return rv;
+ rv = parse_arg(buf, &value);
+ if (rv < 0)
+ return rv;
+ set(value);
+ return count;
}
static ssize_t show_sys_hwmon(int (*get)(void), char *buf)
@@ -1053,26 +1042,36 @@ static ssize_t show_sys_hwmon(int (*get)(void), char *buf)
return sprintf(buf, "%d\n", get());
}
-#define EEEPC_CREATE_SENSOR_ATTR(_name, _mode, _get, _set) \
- static ssize_t show_##_name(struct device *dev, \
+#define EEEPC_SENSOR_SHOW_FUNC(_name, _get) \
+ static ssize_t _name##_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
return show_sys_hwmon(_get, buf); \
- } \
- static ssize_t store_##_name(struct device *dev, \
+ }
+
+#define EEEPC_SENSOR_STORE_FUNC(_name, _set) \
+ static ssize_t _name##_store(struct device *dev, \
struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
return store_sys_hwmon(_set, buf, count); \
- } \
- static DEVICE_ATTR(_name, _mode, show_##_name, store_##_name)
+ }
+
+#define EEEPC_CREATE_SENSOR_ATTR_RW(_name, _get, _set) \
+ EEEPC_SENSOR_SHOW_FUNC(_name, _get) \
+ EEEPC_SENSOR_STORE_FUNC(_name, _set) \
+ static DEVICE_ATTR_RW(_name)
-EEEPC_CREATE_SENSOR_ATTR(fan1_input, S_IRUGO, eeepc_get_fan_rpm, NULL);
-EEEPC_CREATE_SENSOR_ATTR(pwm1, S_IRUGO | S_IWUSR,
- eeepc_get_fan_pwm, eeepc_set_fan_pwm);
-EEEPC_CREATE_SENSOR_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
- eeepc_get_fan_ctrl, eeepc_set_fan_ctrl);
+#define EEEPC_CREATE_SENSOR_ATTR_RO(_name, _get) \
+ EEEPC_SENSOR_SHOW_FUNC(_name, _get) \
+ static DEVICE_ATTR_RO(_name)
+
+EEEPC_CREATE_SENSOR_ATTR_RO(fan1_input, eeepc_get_fan_rpm);
+EEEPC_CREATE_SENSOR_ATTR_RW(pwm1, eeepc_get_fan_pwm,
+ eeepc_set_fan_pwm);
+EEEPC_CREATE_SENSOR_ATTR_RW(pwm1_enable, eeepc_get_fan_ctrl,
+ eeepc_set_fan_ctrl);
static struct attribute *hwmon_attrs[] = {
&dev_attr_pwm1.attr,
@@ -1424,8 +1423,9 @@ static int eeepc_acpi_add(struct acpi_device *device)
result = eeepc_backlight_init(eeepc);
if (result)
goto fail_backlight;
- } else
+ } else {
pr_info("Backlight controlled by ACPI video driver\n");
+ }
result = eeepc_input_init(eeepc);
if (result)
diff --git a/drivers/platform/x86/intel-rst.c b/drivers/platform/x86/intel-rst.c
index d45bca34bf1b..7344d841f4d9 100644
--- a/drivers/platform/x86/intel-rst.c
+++ b/drivers/platform/x86/intel-rst.c
@@ -35,7 +35,7 @@ static ssize_t irst_show_wakeup_events(struct device *dev,
acpi = to_acpi_device(dev);
status = acpi_evaluate_integer(acpi->handle, "GFFS", NULL, &value);
- if (!ACPI_SUCCESS(status))
+ if (ACPI_FAILURE(status))
return -EINVAL;
return sprintf(buf, "%lld\n", value);
@@ -59,7 +59,7 @@ static ssize_t irst_store_wakeup_events(struct device *dev,
status = acpi_execute_simple_method(acpi->handle, "SFFS", value);
- if (!ACPI_SUCCESS(status))
+ if (ACPI_FAILURE(status))
return -EINVAL;
return count;
@@ -81,7 +81,7 @@ static ssize_t irst_show_wakeup_time(struct device *dev,
acpi = to_acpi_device(dev);
status = acpi_evaluate_integer(acpi->handle, "GFTV", NULL, &value);
- if (!ACPI_SUCCESS(status))
+ if (ACPI_FAILURE(status))
return -EINVAL;
return sprintf(buf, "%lld\n", value);
@@ -105,7 +105,7 @@ static ssize_t irst_store_wakeup_time(struct device *dev,
status = acpi_execute_simple_method(acpi->handle, "SFTV", value);
- if (!ACPI_SUCCESS(status))
+ if (ACPI_FAILURE(status))
return -EINVAL;
return count;
@@ -119,21 +119,16 @@ static struct device_attribute irst_timeout_attr = {
static int irst_add(struct acpi_device *acpi)
{
- int error = 0;
+ int error;
error = device_create_file(&acpi->dev, &irst_timeout_attr);
- if (error)
- goto out;
+ if (unlikely(error))
+ return error;
error = device_create_file(&acpi->dev, &irst_wakeup_attr);
- if (error)
- goto out_timeout;
+ if (unlikely(error))
+ device_remove_file(&acpi->dev, &irst_timeout_attr);
- return 0;
-
-out_timeout:
- device_remove_file(&acpi->dev, &irst_timeout_attr);
-out:
return error;
}
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 3bbc6eb60de5..f959978c7aac 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -3440,7 +3440,7 @@ err_exit:
delete_attr_set(hotkey_dev_attributes, &tpacpi_pdev->dev.kobj);
hotkey_dev_attributes = NULL;
- return (res < 0)? res : 1;
+ return (res < 0) ? res : 1;
}
/* Thinkpad X1 Carbon support 5 modes including Home mode, Web browser
@@ -4576,7 +4576,7 @@ static int __init video_init(struct ibm_init_struct *iibm)
str_supported(video_supported != TPACPI_VIDEO_NONE),
video_supported);
- return (video_supported != TPACPI_VIDEO_NONE)? 0 : 1;
+ return (video_supported != TPACPI_VIDEO_NONE) ? 0 : 1;
}
static void video_exit(void)
@@ -4669,7 +4669,7 @@ static int video_outputsw_set(int status)
return -ENOSYS;
}
- return (res)? 0 : -EIO;
+ return (res) ? 0 : -EIO;
}
static int video_autosw_get(void)
@@ -4695,7 +4695,7 @@ static int video_autosw_get(void)
static int video_autosw_set(int enable)
{
- if (!acpi_evalf(vid_handle, NULL, "_DOS", "vd", (enable)? 1 : 0))
+ if (!acpi_evalf(vid_handle, NULL, "_DOS", "vd", (enable) ? 1 : 0))
return -EIO;
return 0;
}
@@ -4730,20 +4730,20 @@ static int video_outputsw_cycle(void)
return -EIO;
}
- return (res)? 0 : -EIO;
+ return (res) ? 0 : -EIO;
}
static int video_expand_toggle(void)
{
switch (video_supported) {
case TPACPI_VIDEO_570:
- return acpi_evalf(ec_handle, NULL, "_Q17", "v")?
+ return acpi_evalf(ec_handle, NULL, "_Q17", "v") ?
0 : -EIO;
case TPACPI_VIDEO_770:
- return acpi_evalf(vid_handle, NULL, "VEXP", "v")?
+ return acpi_evalf(vid_handle, NULL, "VEXP", "v") ?
0 : -EIO;
case TPACPI_VIDEO_NEW:
- return acpi_evalf(NULL, NULL, "\\VEXP", "v")?
+ return acpi_evalf(NULL, NULL, "\\VEXP", "v") ?
0 : -EIO;
default:
return -ENOSYS;
@@ -4887,14 +4887,14 @@ static int light_set_status(int status)
if (tp_features.light) {
if (cmos_handle) {
rc = acpi_evalf(cmos_handle, NULL, NULL, "vd",
- (status)?
+ (status) ?
TP_CMOS_THINKLIGHT_ON :
TP_CMOS_THINKLIGHT_OFF);
} else {
rc = acpi_evalf(lght_handle, NULL, NULL, "vd",
- (status)? 1 : 0);
+ (status) ? 1 : 0);
}
- return (rc)? 0 : -EIO;
+ return (rc) ? 0 : -EIO;
}
return -ENXIO;
@@ -4923,7 +4923,7 @@ static void light_sysfs_set(struct led_classdev *led_cdev,
static enum led_brightness light_sysfs_get(struct led_classdev *led_cdev)
{
- return (light_get_status() == 1)? LED_FULL : LED_OFF;
+ return (light_get_status() == 1) ? LED_FULL : LED_OFF;
}
static struct tpacpi_led_classdev tpacpi_led_thinklight = {
@@ -5045,7 +5045,7 @@ static ssize_t cmos_command_store(struct device *dev,
return -EINVAL;
res = issue_thinkpad_cmos_command(cmos_cmd);
- return (res)? res : count;
+ return (res) ? res : count;
}
static struct device_attribute dev_attr_cmos_command =
@@ -5069,7 +5069,7 @@ static int __init cmos_init(struct ibm_init_struct *iibm)
if (res)
return res;
- return (cmos_handle)? 0 : 1;
+ return (cmos_handle) ? 0 : 1;
}
static void cmos_exit(void)
@@ -5179,9 +5179,9 @@ static int led_get_status(const unsigned int led)
if (!acpi_evalf(ec_handle,
&status, "GLED", "dd", 1 << led))
return -EIO;
- led_s = (status == 0)?
+ led_s = (status == 0) ?
TPACPI_LED_OFF :
- ((status == 1)?
+ ((status == 1) ?
TPACPI_LED_ON :
TPACPI_LED_BLINK);
tpacpi_led_state_cache[led] = led_s;
@@ -5578,7 +5578,7 @@ static int __init beep_init(struct ibm_init_struct *iibm)
tp_features.beep_needs_two_args = !!(quirks & TPACPI_BEEP_Q1);
- return (beep_handle)? 0 : 1;
+ return (beep_handle) ? 0 : 1;
}
static int beep_read(struct seq_file *m)
@@ -6527,7 +6527,7 @@ static int brightness_write(char *buf)
if (!rc && ibm_backlight_device)
backlight_force_update(ibm_backlight_device,
BACKLIGHT_UPDATE_SYSFS);
- return (rc == -EINTR)? -ERESTARTSYS : rc;
+ return (rc == -EINTR) ? -ERESTARTSYS : rc;
}
static struct ibm_struct brightness_driver_data = {
@@ -7984,7 +7984,7 @@ static ssize_t fan_pwm1_store(struct device *dev,
}
mutex_unlock(&fan_mutex);
- return (rc)? rc : count;
+ return (rc) ? rc : count;
}
static struct device_attribute dev_attr_fan_pwm1 =
@@ -8662,7 +8662,7 @@ static const char * __init str_supported(int is_supported)
{
static char text_unsupported[] __initdata = "not supported";
- return (is_supported)? &text_unsupported[4] : &text_unsupported[0];
+ return (is_supported) ? &text_unsupported[4] : &text_unsupported[0];
}
#endif /* CONFIG_THINKPAD_ACPI_DEBUG */
@@ -8783,7 +8783,7 @@ err_out:
ibm->name, ret);
ibm_exit(ibm);
- return (ret < 0)? ret : 0;
+ return (ret < 0) ? ret : 0;
}
/* Probing */
@@ -8794,7 +8794,7 @@ static bool __pure __init tpacpi_is_fw_digit(const char c)
}
/* Most models: xxyTkkWW (#.##c); Ancient 570/600 and -SL lacks (#.##c) */
-static bool __pure __init tpacpi_is_valid_fw_id(const char* const s,
+static bool __pure __init tpacpi_is_valid_fw_id(const char * const s,
const char t)
{
return s && strlen(s) >= 8 &&
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index d0dce734b2ed..ef3a1904e92f 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -71,7 +71,8 @@ MODULE_LICENSE("GPL");
/* Toshiba ACPI method paths */
#define METHOD_VIDEO_OUT "\\_SB_.VALX.DSSX"
-/* Toshiba HCI interface definitions
+/* The Toshiba configuration interface is composed of the HCI and the SCI,
+ * which are defined as follows:
*
* HCI is Toshiba's "Hardware Control Interface" which is supposed to
* be uniform across all their models. Ideally we would just call
@@ -84,7 +85,7 @@ MODULE_LICENSE("GPL");
* conceal differences in hardware between different models.
*/
-#define HCI_WORDS 6
+#define TCI_WORDS 6
/* operations */
#define HCI_SET 0xff00
@@ -95,17 +96,18 @@ MODULE_LICENSE("GPL");
#define SCI_SET 0xf400
/* return codes */
-#define HCI_SUCCESS 0x0000
-#define HCI_FAILURE 0x1000
-#define HCI_NOT_SUPPORTED 0x8000
-#define HCI_EMPTY 0x8c00
-#define HCI_DATA_NOT_AVAILABLE 0x8d20
-#define HCI_NOT_INITIALIZED 0x8d50
-#define SCI_OPEN_CLOSE_OK 0x0044
-#define SCI_ALREADY_OPEN 0x8100
-#define SCI_NOT_OPENED 0x8200
-#define SCI_INPUT_DATA_ERROR 0x8300
-#define SCI_NOT_PRESENT 0x8600
+#define TOS_SUCCESS 0x0000
+#define TOS_OPEN_CLOSE_OK 0x0044
+#define TOS_FAILURE 0x1000
+#define TOS_NOT_SUPPORTED 0x8000
+#define TOS_ALREADY_OPEN 0x8100
+#define TOS_NOT_OPENED 0x8200
+#define TOS_INPUT_DATA_ERROR 0x8300
+#define TOS_WRITE_PROTECTED 0x8400
+#define TOS_NOT_PRESENT 0x8600
+#define TOS_FIFO_EMPTY 0x8c00
+#define TOS_DATA_NOT_AVAILABLE 0x8d20
+#define TOS_NOT_INITIALIZED 0x8d50
/* registers */
#define HCI_FAN 0x0004
@@ -138,8 +140,12 @@ MODULE_LICENSE("GPL");
#define HCI_WIRELESS_BT_PRESENT 0x0f
#define HCI_WIRELESS_BT_ATTACH 0x40
#define HCI_WIRELESS_BT_POWER 0x80
+#define SCI_KBD_MODE_MASK 0x1f
#define SCI_KBD_MODE_FNZ 0x1
#define SCI_KBD_MODE_AUTO 0x2
+#define SCI_KBD_MODE_ON 0x8
+#define SCI_KBD_MODE_OFF 0x10
+#define SCI_KBD_TIME_MAX 0x3c001a
struct toshiba_acpi_dev {
struct acpi_device *acpi_dev;
@@ -155,6 +161,7 @@ struct toshiba_acpi_dev {
int force_fan;
int last_key_event;
int key_event_valid;
+ int kbd_type;
int kbd_mode;
int kbd_time;
@@ -190,6 +197,7 @@ static const struct key_entry toshiba_acpi_keymap[] = {
{ KE_KEY, 0x101, { KEY_MUTE } },
{ KE_KEY, 0x102, { KEY_ZOOMOUT } },
{ KE_KEY, 0x103, { KEY_ZOOMIN } },
+ { KE_KEY, 0x10f, { KEY_TAB } },
{ KE_KEY, 0x12c, { KEY_KBDILLUMTOGGLE } },
{ KE_KEY, 0x139, { KEY_ZOOMRESET } },
{ KE_KEY, 0x13b, { KEY_COFFEE } },
@@ -210,7 +218,11 @@ static const struct key_entry toshiba_acpi_keymap[] = {
{ KE_KEY, 0xb32, { KEY_NEXTSONG } },
{ KE_KEY, 0xb33, { KEY_PLAYPAUSE } },
{ KE_KEY, 0xb5a, { KEY_MEDIA } },
- { KE_IGNORE, 0x1430, { KEY_RESERVED } },
+ { KE_IGNORE, 0x1430, { KEY_RESERVED } }, /* Wake from sleep */
+ { KE_IGNORE, 0x1501, { KEY_RESERVED } }, /* Output changed */
+ { KE_IGNORE, 0x1502, { KEY_RESERVED } }, /* HDMI plugged/unplugged */
+ { KE_IGNORE, 0x1ABE, { KEY_RESERVED } }, /* Protection level set */
+ { KE_IGNORE, 0x1ABF, { KEY_RESERVED } }, /* Protection level off */
{ KE_END, 0 },
};
@@ -264,22 +276,22 @@ static int write_acpi_int(const char *methodName, int val)
return (status == AE_OK) ? 0 : -EIO;
}
-/* Perform a raw HCI call. Here we don't care about input or output buffer
- * format.
+/* Perform a raw configuration call. Here we don't care about input or output
+ * buffer format.
*/
-static acpi_status hci_raw(struct toshiba_acpi_dev *dev,
- const u32 in[HCI_WORDS], u32 out[HCI_WORDS])
+static acpi_status tci_raw(struct toshiba_acpi_dev *dev,
+ const u32 in[TCI_WORDS], u32 out[TCI_WORDS])
{
struct acpi_object_list params;
- union acpi_object in_objs[HCI_WORDS];
+ union acpi_object in_objs[TCI_WORDS];
struct acpi_buffer results;
- union acpi_object out_objs[HCI_WORDS + 1];
+ union acpi_object out_objs[TCI_WORDS + 1];
acpi_status status;
int i;
- params.count = HCI_WORDS;
+ params.count = TCI_WORDS;
params.pointer = in_objs;
- for (i = 0; i < HCI_WORDS; ++i) {
+ for (i = 0; i < TCI_WORDS; ++i) {
in_objs[i].type = ACPI_TYPE_INTEGER;
in_objs[i].integer.value = in[i];
}
@@ -290,7 +302,7 @@ static acpi_status hci_raw(struct toshiba_acpi_dev *dev,
status = acpi_evaluate_object(dev->acpi_dev->handle,
(char *)dev->method_hci, &params,
&results);
- if ((status == AE_OK) && (out_objs->package.count <= HCI_WORDS)) {
+ if ((status == AE_OK) && (out_objs->package.count <= TCI_WORDS)) {
for (i = 0; i < out_objs->package.count; ++i) {
out[i] = out_objs->package.elements[i].integer.value;
}
@@ -305,47 +317,49 @@ static acpi_status hci_raw(struct toshiba_acpi_dev *dev,
* may be useful (such as "not supported").
*/
-static acpi_status hci_write1(struct toshiba_acpi_dev *dev, u32 reg,
- u32 in1, u32 *result)
+static u32 hci_write1(struct toshiba_acpi_dev *dev, u32 reg, u32 in1)
{
- u32 in[HCI_WORDS] = { HCI_SET, reg, in1, 0, 0, 0 };
- u32 out[HCI_WORDS];
- acpi_status status = hci_raw(dev, in, out);
- *result = (status == AE_OK) ? out[0] : HCI_FAILURE;
- return status;
+ u32 in[TCI_WORDS] = { HCI_SET, reg, in1, 0, 0, 0 };
+ u32 out[TCI_WORDS];
+ acpi_status status = tci_raw(dev, in, out);
+
+ return ACPI_SUCCESS(status) ? out[0] : TOS_FAILURE;
}
-static acpi_status hci_read1(struct toshiba_acpi_dev *dev, u32 reg,
- u32 *out1, u32 *result)
+static u32 hci_read1(struct toshiba_acpi_dev *dev, u32 reg, u32 *out1)
{
- u32 in[HCI_WORDS] = { HCI_GET, reg, 0, 0, 0, 0 };
- u32 out[HCI_WORDS];
- acpi_status status = hci_raw(dev, in, out);
+ u32 in[TCI_WORDS] = { HCI_GET, reg, 0, 0, 0, 0 };
+ u32 out[TCI_WORDS];
+ acpi_status status = tci_raw(dev, in, out);
+ if (ACPI_FAILURE(status))
+ return TOS_FAILURE;
+
*out1 = out[2];
- *result = (status == AE_OK) ? out[0] : HCI_FAILURE;
- return status;
+
+ return out[0];
}
-static acpi_status hci_write2(struct toshiba_acpi_dev *dev, u32 reg,
- u32 in1, u32 in2, u32 *result)
+static u32 hci_write2(struct toshiba_acpi_dev *dev, u32 reg, u32 in1, u32 in2)
{
- u32 in[HCI_WORDS] = { HCI_SET, reg, in1, in2, 0, 0 };
- u32 out[HCI_WORDS];
- acpi_status status = hci_raw(dev, in, out);
- *result = (status == AE_OK) ? out[0] : HCI_FAILURE;
- return status;
+ u32 in[TCI_WORDS] = { HCI_SET, reg, in1, in2, 0, 0 };
+ u32 out[TCI_WORDS];
+ acpi_status status = tci_raw(dev, in, out);
+
+ return ACPI_SUCCESS(status) ? out[0] : TOS_FAILURE;
}
-static acpi_status hci_read2(struct toshiba_acpi_dev *dev, u32 reg,
- u32 *out1, u32 *out2, u32 *result)
+static u32 hci_read2(struct toshiba_acpi_dev *dev, u32 reg, u32 *out1, u32 *out2)
{
- u32 in[HCI_WORDS] = { HCI_GET, reg, *out1, *out2, 0, 0 };
- u32 out[HCI_WORDS];
- acpi_status status = hci_raw(dev, in, out);
+ u32 in[TCI_WORDS] = { HCI_GET, reg, *out1, *out2, 0, 0 };
+ u32 out[TCI_WORDS];
+ acpi_status status = tci_raw(dev, in, out);
+ if (ACPI_FAILURE(status))
+ return TOS_FAILURE;
+
*out1 = out[2];
*out2 = out[3];
- *result = (status == AE_OK) ? out[0] : HCI_FAILURE;
- return status;
+
+ return out[0];
}
/* common sci tasks
@@ -353,22 +367,22 @@ static acpi_status hci_read2(struct toshiba_acpi_dev *dev, u32 reg,
static int sci_open(struct toshiba_acpi_dev *dev)
{
- u32 in[HCI_WORDS] = { SCI_OPEN, 0, 0, 0, 0, 0 };
- u32 out[HCI_WORDS];
+ u32 in[TCI_WORDS] = { SCI_OPEN, 0, 0, 0, 0, 0 };
+ u32 out[TCI_WORDS];
acpi_status status;
- status = hci_raw(dev, in, out);
- if (ACPI_FAILURE(status) || out[0] == HCI_FAILURE) {
+ status = tci_raw(dev, in, out);
+ if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
pr_err("ACPI call to open SCI failed\n");
return 0;
}
- if (out[0] == SCI_OPEN_CLOSE_OK) {
+ if (out[0] == TOS_OPEN_CLOSE_OK) {
return 1;
- } else if (out[0] == SCI_ALREADY_OPEN) {
+ } else if (out[0] == TOS_ALREADY_OPEN) {
pr_info("Toshiba SCI already opened\n");
return 1;
- } else if (out[0] == SCI_NOT_PRESENT) {
+ } else if (out[0] == TOS_NOT_PRESENT) {
pr_info("Toshiba SCI is not present\n");
}
@@ -377,61 +391,62 @@ static int sci_open(struct toshiba_acpi_dev *dev)
static void sci_close(struct toshiba_acpi_dev *dev)
{
- u32 in[HCI_WORDS] = { SCI_CLOSE, 0, 0, 0, 0, 0 };
- u32 out[HCI_WORDS];
+ u32 in[TCI_WORDS] = { SCI_CLOSE, 0, 0, 0, 0, 0 };
+ u32 out[TCI_WORDS];
acpi_status status;
- status = hci_raw(dev, in, out);
- if (ACPI_FAILURE(status) || out[0] == HCI_FAILURE) {
+ status = tci_raw(dev, in, out);
+ if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
pr_err("ACPI call to close SCI failed\n");
return;
}
- if (out[0] == SCI_OPEN_CLOSE_OK)
+ if (out[0] == TOS_OPEN_CLOSE_OK)
return;
- else if (out[0] == SCI_NOT_OPENED)
+ else if (out[0] == TOS_NOT_OPENED)
pr_info("Toshiba SCI not opened\n");
- else if (out[0] == SCI_NOT_PRESENT)
+ else if (out[0] == TOS_NOT_PRESENT)
pr_info("Toshiba SCI is not present\n");
}
-static acpi_status sci_read(struct toshiba_acpi_dev *dev, u32 reg,
- u32 *out1, u32 *result)
+static u32 sci_read(struct toshiba_acpi_dev *dev, u32 reg, u32 *out1)
{
- u32 in[HCI_WORDS] = { SCI_GET, reg, 0, 0, 0, 0 };
- u32 out[HCI_WORDS];
- acpi_status status = hci_raw(dev, in, out);
+ u32 in[TCI_WORDS] = { SCI_GET, reg, 0, 0, 0, 0 };
+ u32 out[TCI_WORDS];
+ acpi_status status = tci_raw(dev, in, out);
+ if (ACPI_FAILURE(status))
+ return TOS_FAILURE;
+
*out1 = out[2];
- *result = (ACPI_SUCCESS(status)) ? out[0] : HCI_FAILURE;
- return status;
+
+ return out[0];
}
-static acpi_status sci_write(struct toshiba_acpi_dev *dev, u32 reg,
- u32 in1, u32 *result)
+static u32 sci_write(struct toshiba_acpi_dev *dev, u32 reg, u32 in1)
{
- u32 in[HCI_WORDS] = { SCI_SET, reg, in1, 0, 0, 0 };
- u32 out[HCI_WORDS];
- acpi_status status = hci_raw(dev, in, out);
- *result = (ACPI_SUCCESS(status)) ? out[0] : HCI_FAILURE;
- return status;
+ u32 in[TCI_WORDS] = { SCI_SET, reg, in1, 0, 0, 0 };
+ u32 out[TCI_WORDS];
+ acpi_status status = tci_raw(dev, in, out);
+
+ return ACPI_SUCCESS(status) ? out[0] : TOS_FAILURE;
}
/* Illumination support */
static int toshiba_illumination_available(struct toshiba_acpi_dev *dev)
{
- u32 in[HCI_WORDS] = { SCI_GET, SCI_ILLUMINATION, 0, 0, 0, 0 };
- u32 out[HCI_WORDS];
+ u32 in[TCI_WORDS] = { SCI_GET, SCI_ILLUMINATION, 0, 0, 0, 0 };
+ u32 out[TCI_WORDS];
acpi_status status;
if (!sci_open(dev))
return 0;
- status = hci_raw(dev, in, out);
+ status = tci_raw(dev, in, out);
sci_close(dev);
- if (ACPI_FAILURE(status) || out[0] == HCI_FAILURE) {
+ if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
pr_err("ACPI call to query Illumination support failed\n");
return 0;
- } else if (out[0] == HCI_NOT_SUPPORTED || out[1] != 1) {
+ } else if (out[0] == TOS_NOT_SUPPORTED) {
pr_info("Illumination device not available\n");
return 0;
}
@@ -445,7 +460,6 @@ static void toshiba_illumination_set(struct led_classdev *cdev,
struct toshiba_acpi_dev *dev = container_of(cdev,
struct toshiba_acpi_dev, led_dev);
u32 state, result;
- acpi_status status;
/* First request : initialize communication. */
if (!sci_open(dev))
@@ -453,12 +467,12 @@ static void toshiba_illumination_set(struct led_classdev *cdev,
/* Switch the illumination on/off */
state = brightness ? 1 : 0;
- status = sci_write(dev, SCI_ILLUMINATION, state, &result);
+ result = sci_write(dev, SCI_ILLUMINATION, state);
sci_close(dev);
- if (ACPI_FAILURE(status)) {
+ if (result == TOS_FAILURE) {
pr_err("ACPI call for illumination failed\n");
return;
- } else if (result == HCI_NOT_SUPPORTED) {
+ } else if (result == TOS_NOT_SUPPORTED) {
pr_info("Illumination not supported\n");
return;
}
@@ -469,19 +483,18 @@ static enum led_brightness toshiba_illumination_get(struct led_classdev *cdev)
struct toshiba_acpi_dev *dev = container_of(cdev,
struct toshiba_acpi_dev, led_dev);
u32 state, result;
- acpi_status status;
/* First request : initialize communication. */
if (!sci_open(dev))
return LED_OFF;
/* Check the illumination */
- status = sci_read(dev, SCI_ILLUMINATION, &state, &result);
+ result = sci_read(dev, SCI_ILLUMINATION, &state);
sci_close(dev);
- if (ACPI_FAILURE(status) || result == SCI_INPUT_DATA_ERROR) {
+ if (result == TOS_FAILURE || result == TOS_INPUT_DATA_ERROR) {
pr_err("ACPI call for illumination failed\n");
return LED_OFF;
- } else if (result == HCI_NOT_SUPPORTED) {
+ } else if (result == TOS_NOT_SUPPORTED) {
pr_info("Illumination not supported\n");
return LED_OFF;
}
@@ -490,20 +503,55 @@ static enum led_brightness toshiba_illumination_get(struct led_classdev *cdev)
}
/* KBD Illumination */
+static int toshiba_kbd_illum_available(struct toshiba_acpi_dev *dev)
+{
+ u32 in[TCI_WORDS] = { SCI_GET, SCI_KBD_ILLUM_STATUS, 0, 0, 0, 0 };
+ u32 out[TCI_WORDS];
+ acpi_status status;
+
+ if (!sci_open(dev))
+ return 0;
+
+ status = tci_raw(dev, in, out);
+ sci_close(dev);
+ if (ACPI_FAILURE(status) || out[0] == TOS_INPUT_DATA_ERROR) {
+ pr_err("ACPI call to query kbd illumination support failed\n");
+ return 0;
+ } else if (out[0] == TOS_NOT_SUPPORTED) {
+ pr_info("Keyboard illumination not available\n");
+ return 0;
+ }
+
+ /* Check the keyboard backlight timeout max value:
+ * the previous kbd backlight implementation set this to
+ * 0x3c0003, while the new implementation sets it to
+ * 0x3c001a; use this to distinguish between them.
+ */
+ if (out[3] == SCI_KBD_TIME_MAX)
+ dev->kbd_type = 2;
+ else
+ dev->kbd_type = 1;
+ /* Get the current keyboard backlight mode */
+ dev->kbd_mode = out[2] & SCI_KBD_MODE_MASK;
+ /* Get the current time (1-60 seconds) */
+ dev->kbd_time = out[2] >> HCI_MISC_SHIFT;
+
+ return 1;
+}
+
static int toshiba_kbd_illum_status_set(struct toshiba_acpi_dev *dev, u32 time)
{
u32 result;
- acpi_status status;
if (!sci_open(dev))
return -EIO;
- status = sci_write(dev, SCI_KBD_ILLUM_STATUS, time, &result);
+ result = sci_write(dev, SCI_KBD_ILLUM_STATUS, time);
sci_close(dev);
- if (ACPI_FAILURE(status) || result == SCI_INPUT_DATA_ERROR) {
+ if (result == TOS_FAILURE || result == TOS_INPUT_DATA_ERROR) {
pr_err("ACPI call to set KBD backlight status failed\n");
return -EIO;
- } else if (result == HCI_NOT_SUPPORTED) {
+ } else if (result == TOS_NOT_SUPPORTED) {
pr_info("Keyboard backlight status not supported\n");
return -ENODEV;
}
@@ -514,17 +562,16 @@ static int toshiba_kbd_illum_status_set(struct toshiba_acpi_dev *dev, u32 time)
static int toshiba_kbd_illum_status_get(struct toshiba_acpi_dev *dev, u32 *time)
{
u32 result;
- acpi_status status;
if (!sci_open(dev))
return -EIO;
- status = sci_read(dev, SCI_KBD_ILLUM_STATUS, time, &result);
+ result = sci_read(dev, SCI_KBD_ILLUM_STATUS, time);
sci_close(dev);
- if (ACPI_FAILURE(status) || result == SCI_INPUT_DATA_ERROR) {
+ if (result == TOS_FAILURE || result == TOS_INPUT_DATA_ERROR) {
pr_err("ACPI call to get KBD backlight status failed\n");
return -EIO;
- } else if (result == HCI_NOT_SUPPORTED) {
+ } else if (result == TOS_NOT_SUPPORTED) {
pr_info("Keyboard backlight status not supported\n");
return -ENODEV;
}
@@ -537,14 +584,13 @@ static enum led_brightness toshiba_kbd_backlight_get(struct led_classdev *cdev)
struct toshiba_acpi_dev *dev = container_of(cdev,
struct toshiba_acpi_dev, kbd_led);
u32 state, result;
- acpi_status status;
/* Check the keyboard backlight state */
- status = hci_read1(dev, HCI_KBD_ILLUMINATION, &state, &result);
- if (ACPI_FAILURE(status) || result == SCI_INPUT_DATA_ERROR) {
+ result = hci_read1(dev, HCI_KBD_ILLUMINATION, &state);
+ if (result == TOS_FAILURE || result == TOS_INPUT_DATA_ERROR) {
pr_err("ACPI call to get the keyboard backlight failed\n");
return LED_OFF;
- } else if (result == HCI_NOT_SUPPORTED) {
+ } else if (result == TOS_NOT_SUPPORTED) {
pr_info("Keyboard backlight not supported\n");
return LED_OFF;
}
@@ -558,15 +604,14 @@ static void toshiba_kbd_backlight_set(struct led_classdev *cdev,
struct toshiba_acpi_dev *dev = container_of(cdev,
struct toshiba_acpi_dev, kbd_led);
u32 state, result;
- acpi_status status;
/* Set the keyboard backlight state */
state = brightness ? 1 : 0;
- status = hci_write1(dev, HCI_KBD_ILLUMINATION, state, &result);
- if (ACPI_FAILURE(status) || result == SCI_INPUT_DATA_ERROR) {
+ result = hci_write1(dev, HCI_KBD_ILLUMINATION, state);
+ if (result == TOS_FAILURE || result == TOS_INPUT_DATA_ERROR) {
pr_err("ACPI call to set KBD Illumination mode failed\n");
return;
- } else if (result == HCI_NOT_SUPPORTED) {
+ } else if (result == TOS_NOT_SUPPORTED) {
pr_info("Keyboard backlight not supported\n");
return;
}
@@ -576,17 +621,16 @@ static void toshiba_kbd_backlight_set(struct led_classdev *cdev,
static int toshiba_touchpad_set(struct toshiba_acpi_dev *dev, u32 state)
{
u32 result;
- acpi_status status;
if (!sci_open(dev))
return -EIO;
- status = sci_write(dev, SCI_TOUCHPAD, state, &result);
+ result = sci_write(dev, SCI_TOUCHPAD, state);
sci_close(dev);
- if (ACPI_FAILURE(status)) {
+ if (result == TOS_FAILURE) {
pr_err("ACPI call to set the touchpad failed\n");
return -EIO;
- } else if (result == HCI_NOT_SUPPORTED) {
+ } else if (result == TOS_NOT_SUPPORTED) {
return -ENODEV;
}
@@ -596,17 +640,16 @@ static int toshiba_touchpad_set(struct toshiba_acpi_dev *dev, u32 state)
static int toshiba_touchpad_get(struct toshiba_acpi_dev *dev, u32 *state)
{
u32 result;
- acpi_status status;
if (!sci_open(dev))
return -EIO;
- status = sci_read(dev, SCI_TOUCHPAD, state, &result);
+ result = sci_read(dev, SCI_TOUCHPAD, state);
sci_close(dev);
- if (ACPI_FAILURE(status)) {
+ if (result == TOS_FAILURE) {
pr_err("ACPI call to query the touchpad failed\n");
return -EIO;
- } else if (result == HCI_NOT_SUPPORTED) {
+ } else if (result == TOS_NOT_SUPPORTED) {
return -ENODEV;
}
@@ -617,11 +660,11 @@ static int toshiba_touchpad_get(struct toshiba_acpi_dev *dev, u32 *state)
static int toshiba_eco_mode_available(struct toshiba_acpi_dev *dev)
{
acpi_status status;
- u32 in[HCI_WORDS] = { HCI_GET, HCI_ECO_MODE, 0, 1, 0, 0 };
- u32 out[HCI_WORDS];
+ u32 in[TCI_WORDS] = { HCI_GET, HCI_ECO_MODE, 0, 1, 0, 0 };
+ u32 out[TCI_WORDS];
- status = hci_raw(dev, in, out);
- if (ACPI_FAILURE(status) || out[0] == SCI_INPUT_DATA_ERROR) {
+ status = tci_raw(dev, in, out);
+ if (ACPI_FAILURE(status) || out[0] == TOS_INPUT_DATA_ERROR) {
pr_info("ACPI call to get ECO led failed\n");
return 0;
}
@@ -633,12 +676,12 @@ static enum led_brightness toshiba_eco_mode_get_status(struct led_classdev *cdev
{
struct toshiba_acpi_dev *dev = container_of(cdev,
struct toshiba_acpi_dev, eco_led);
- u32 in[HCI_WORDS] = { HCI_GET, HCI_ECO_MODE, 0, 1, 0, 0 };
- u32 out[HCI_WORDS];
+ u32 in[TCI_WORDS] = { HCI_GET, HCI_ECO_MODE, 0, 1, 0, 0 };
+ u32 out[TCI_WORDS];
acpi_status status;
- status = hci_raw(dev, in, out);
- if (ACPI_FAILURE(status) || out[0] == SCI_INPUT_DATA_ERROR) {
+ status = tci_raw(dev, in, out);
+ if (ACPI_FAILURE(status) || out[0] == TOS_INPUT_DATA_ERROR) {
pr_err("ACPI call to get ECO led failed\n");
return LED_OFF;
}
@@ -651,14 +694,14 @@ static void toshiba_eco_mode_set_status(struct led_classdev *cdev,
{
struct toshiba_acpi_dev *dev = container_of(cdev,
struct toshiba_acpi_dev, eco_led);
- u32 in[HCI_WORDS] = { HCI_SET, HCI_ECO_MODE, 0, 1, 0, 0 };
- u32 out[HCI_WORDS];
+ u32 in[TCI_WORDS] = { HCI_SET, HCI_ECO_MODE, 0, 1, 0, 0 };
+ u32 out[TCI_WORDS];
acpi_status status;
/* Switch the Eco Mode led on/off */
in[2] = (brightness) ? 1 : 0;
- status = hci_raw(dev, in, out);
- if (ACPI_FAILURE(status) || out[0] == SCI_INPUT_DATA_ERROR) {
+ status = tci_raw(dev, in, out);
+ if (ACPI_FAILURE(status) || out[0] == TOS_INPUT_DATA_ERROR) {
pr_err("ACPI call to set ECO led failed\n");
return;
}
@@ -667,22 +710,22 @@ static void toshiba_eco_mode_set_status(struct led_classdev *cdev,
/* Accelerometer support */
static int toshiba_accelerometer_supported(struct toshiba_acpi_dev *dev)
{
- u32 in[HCI_WORDS] = { HCI_GET, HCI_ACCELEROMETER2, 0, 0, 0, 0 };
- u32 out[HCI_WORDS];
+ u32 in[TCI_WORDS] = { HCI_GET, HCI_ACCELEROMETER2, 0, 0, 0, 0 };
+ u32 out[TCI_WORDS];
acpi_status status;
/* Check if the accelerometer call exists,
* this call also serves as initialization
*/
- status = hci_raw(dev, in, out);
- if (ACPI_FAILURE(status) || out[0] == SCI_INPUT_DATA_ERROR) {
+ status = tci_raw(dev, in, out);
+ if (ACPI_FAILURE(status) || out[0] == TOS_INPUT_DATA_ERROR) {
pr_err("ACPI call to query the accelerometer failed\n");
return -EIO;
- } else if (out[0] == HCI_DATA_NOT_AVAILABLE ||
- out[0] == HCI_NOT_INITIALIZED) {
+ } else if (out[0] == TOS_DATA_NOT_AVAILABLE ||
+ out[0] == TOS_NOT_INITIALIZED) {
pr_err("Accelerometer not initialized\n");
return -EIO;
- } else if (out[0] == HCI_NOT_SUPPORTED) {
+ } else if (out[0] == TOS_NOT_SUPPORTED) {
pr_info("Accelerometer not supported\n");
return -ENODEV;
}
@@ -693,13 +736,13 @@ static int toshiba_accelerometer_supported(struct toshiba_acpi_dev *dev)
static int toshiba_accelerometer_get(struct toshiba_acpi_dev *dev,
u32 *xy, u32 *z)
{
- u32 in[HCI_WORDS] = { HCI_GET, HCI_ACCELEROMETER, 0, 1, 0, 0 };
- u32 out[HCI_WORDS];
+ u32 in[TCI_WORDS] = { HCI_GET, HCI_ACCELEROMETER, 0, 1, 0, 0 };
+ u32 out[TCI_WORDS];
acpi_status status;
/* Check the Accelerometer status */
- status = hci_raw(dev, in, out);
- if (ACPI_FAILURE(status) || out[0] == SCI_INPUT_DATA_ERROR) {
+ status = tci_raw(dev, in, out);
+ if (ACPI_FAILURE(status) || out[0] == TOS_INPUT_DATA_ERROR) {
pr_err("ACPI call to query the accelerometer failed\n");
return -EIO;
}
@@ -719,8 +762,8 @@ static u32 hci_get_bt_present(struct toshiba_acpi_dev *dev, bool *present)
value = 0;
value2 = 0;
- hci_read2(dev, HCI_WIRELESS, &value, &value2, &hci_result);
- if (hci_result == HCI_SUCCESS)
+ hci_result = hci_read2(dev, HCI_WIRELESS, &value, &value2);
+ if (hci_result == TOS_SUCCESS)
*present = (value & HCI_WIRELESS_BT_PRESENT) ? true : false;
return hci_result;
@@ -733,7 +776,7 @@ static u32 hci_get_radio_state(struct toshiba_acpi_dev *dev, bool *radio_state)
value = 0;
value2 = 0x0001;
- hci_read2(dev, HCI_WIRELESS, &value, &value2, &hci_result);
+ hci_result = hci_read2(dev, HCI_WIRELESS, &value, &value2);
*radio_state = value & HCI_WIRELESS_KILL_SWITCH;
return hci_result;
@@ -750,7 +793,7 @@ static int bt_rfkill_set_block(void *data, bool blocked)
value = (blocked == false);
mutex_lock(&dev->mutex);
- if (hci_get_radio_state(dev, &radio_state) != HCI_SUCCESS) {
+ if (hci_get_radio_state(dev, &radio_state) != TOS_SUCCESS) {
err = -EIO;
goto out;
}
@@ -760,10 +803,10 @@ static int bt_rfkill_set_block(void *data, bool blocked)
goto out;
}
- hci_write2(dev, HCI_WIRELESS, value, HCI_WIRELESS_BT_POWER, &result1);
- hci_write2(dev, HCI_WIRELESS, value, HCI_WIRELESS_BT_ATTACH, &result2);
+ result1 = hci_write2(dev, HCI_WIRELESS, value, HCI_WIRELESS_BT_POWER);
+ result2 = hci_write2(dev, HCI_WIRELESS, value, HCI_WIRELESS_BT_ATTACH);
- if (result1 != HCI_SUCCESS || result2 != HCI_SUCCESS)
+ if (result1 != TOS_SUCCESS || result2 != TOS_SUCCESS)
err = -EIO;
else
err = 0;
@@ -782,7 +825,7 @@ static void bt_rfkill_poll(struct rfkill *rfkill, void *data)
mutex_lock(&dev->mutex);
hci_result = hci_get_radio_state(dev, &value);
- if (hci_result != HCI_SUCCESS) {
+ if (hci_result != TOS_SUCCESS) {
/* Can't do anything useful */
mutex_unlock(&dev->mutex);
return;
@@ -806,9 +849,9 @@ static int get_tr_backlight_status(struct toshiba_acpi_dev *dev, bool *enabled)
u32 hci_result;
u32 status;
- hci_read1(dev, HCI_TR_BACKLIGHT, &status, &hci_result);
+ hci_result = hci_read1(dev, HCI_TR_BACKLIGHT, &status);
*enabled = !status;
- return hci_result == HCI_SUCCESS ? 0 : -EIO;
+ return hci_result == TOS_SUCCESS ? 0 : -EIO;
}
static int set_tr_backlight_status(struct toshiba_acpi_dev *dev, bool enable)
@@ -816,8 +859,8 @@ static int set_tr_backlight_status(struct toshiba_acpi_dev *dev, bool enable)
u32 hci_result;
u32 value = !enable;
- hci_write1(dev, HCI_TR_BACKLIGHT, value, &hci_result);
- return hci_result == HCI_SUCCESS ? 0 : -EIO;
+ hci_result = hci_write1(dev, HCI_TR_BACKLIGHT, value);
+ return hci_result == TOS_SUCCESS ? 0 : -EIO;
}
static struct proc_dir_entry *toshiba_proc_dir /*= 0*/ ;
@@ -838,8 +881,8 @@ static int __get_lcd_brightness(struct toshiba_acpi_dev *dev)
brightness++;
}
- hci_read1(dev, HCI_LCD_BRIGHTNESS, &value, &hci_result);
- if (hci_result == HCI_SUCCESS)
+ hci_result = hci_read1(dev, HCI_LCD_BRIGHTNESS, &value);
+ if (hci_result == TOS_SUCCESS)
return brightness + (value >> HCI_LCD_BRIGHTNESS_SHIFT);
return -EIO;
@@ -879,8 +922,8 @@ static int lcd_proc_open(struct inode *inode, struct file *file)
static int set_lcd_brightness(struct toshiba_acpi_dev *dev, int value)
{
- u32 in[HCI_WORDS] = { HCI_SET, HCI_LCD_BRIGHTNESS, 0, 0, 0, 0 };
- u32 out[HCI_WORDS];
+ u32 in[TCI_WORDS] = { HCI_SET, HCI_LCD_BRIGHTNESS, 0, 0, 0, 0 };
+ u32 out[TCI_WORDS];
acpi_status status;
if (dev->tr_backlight_supported) {
@@ -893,19 +936,19 @@ static int set_lcd_brightness(struct toshiba_acpi_dev *dev, int value)
}
in[2] = value << HCI_LCD_BRIGHTNESS_SHIFT;
- status = hci_raw(dev, in, out);
- if (ACPI_FAILURE(status) || out[0] == HCI_FAILURE) {
+ status = tci_raw(dev, in, out);
+ if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
pr_err("ACPI call to set brightness failed");
return -EIO;
}
/* Extra check for "incomplete" backlight method, where the AML code
- * doesn't check for HCI_SET or HCI_GET and returns HCI_SUCCESS,
+ * doesn't check for HCI_SET or HCI_GET and returns TOS_SUCCESS,
* the actual brightness, and in some cases the max brightness.
*/
if (out[2] > 0 || out[3] == 0xE000)
return -ENODEV;
- return out[0] == HCI_SUCCESS ? 0 : -EIO;
+ return out[0] == TOS_SUCCESS ? 0 : -EIO;
}
static int set_lcd_status(struct backlight_device *bd)
@@ -953,8 +996,8 @@ static int get_video_status(struct toshiba_acpi_dev *dev, u32 *status)
{
u32 hci_result;
- hci_read1(dev, HCI_VIDEO_OUT, status, &hci_result);
- return hci_result == HCI_SUCCESS ? 0 : -EIO;
+ hci_result = hci_read1(dev, HCI_VIDEO_OUT, status);
+ return hci_result == TOS_SUCCESS ? 0 : -EIO;
}
static int video_proc_show(struct seq_file *m, void *v)
@@ -1057,8 +1100,8 @@ static int get_fan_status(struct toshiba_acpi_dev *dev, u32 *status)
{
u32 hci_result;
- hci_read1(dev, HCI_FAN, status, &hci_result);
- return hci_result == HCI_SUCCESS ? 0 : -EIO;
+ hci_result = hci_read1(dev, HCI_FAN, status);
+ return hci_result == TOS_SUCCESS ? 0 : -EIO;
}
static int fan_proc_show(struct seq_file *m, void *v)
@@ -1097,8 +1140,8 @@ static ssize_t fan_proc_write(struct file *file, const char __user *buf,
if (sscanf(cmd, " force_on : %i", &value) == 1 &&
value >= 0 && value <= 1) {
- hci_write1(dev, HCI_FAN, value, &hci_result);
- if (hci_result != HCI_SUCCESS)
+ hci_result = hci_write1(dev, HCI_FAN, value);
+ if (hci_result != TOS_SUCCESS)
return -EIO;
else
dev->force_fan = value;
@@ -1125,17 +1168,17 @@ static int keys_proc_show(struct seq_file *m, void *v)
u32 value;
if (!dev->key_event_valid && dev->system_event_supported) {
- hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result);
- if (hci_result == HCI_SUCCESS) {
+ hci_result = hci_read1(dev, HCI_SYSTEM_EVENT, &value);
+ if (hci_result == TOS_SUCCESS) {
dev->key_event_valid = 1;
dev->last_key_event = value;
- } else if (hci_result == HCI_EMPTY) {
+ } else if (hci_result == TOS_FIFO_EMPTY) {
/* better luck next time */
- } else if (hci_result == HCI_NOT_SUPPORTED) {
+ } else if (hci_result == TOS_NOT_SUPPORTED) {
/* This is a workaround for an unresolved issue on
* some machines where system events sporadically
* become disabled. */
- hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result);
+ hci_result = hci_write1(dev, HCI_SYSTEM_EVENT, 1);
pr_notice("Re-enabled hotkeys\n");
} else {
pr_err("Error reading hotkey status\n");
@@ -1249,6 +1292,62 @@ static const struct backlight_ops toshiba_backlight_data = {
/*
* Sysfs files
*/
+static ssize_t toshiba_kbd_bl_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count);
+static ssize_t toshiba_kbd_bl_mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+static ssize_t toshiba_kbd_type_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+static ssize_t toshiba_available_kbd_modes_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+static ssize_t toshiba_kbd_bl_timeout_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count);
+static ssize_t toshiba_kbd_bl_timeout_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+static ssize_t toshiba_touchpad_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count);
+static ssize_t toshiba_touchpad_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+static ssize_t toshiba_position_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+static DEVICE_ATTR(kbd_backlight_mode, S_IRUGO | S_IWUSR,
+ toshiba_kbd_bl_mode_show, toshiba_kbd_bl_mode_store);
+static DEVICE_ATTR(kbd_type, S_IRUGO, toshiba_kbd_type_show, NULL);
+static DEVICE_ATTR(available_kbd_modes, S_IRUGO,
+ toshiba_available_kbd_modes_show, NULL);
+static DEVICE_ATTR(kbd_backlight_timeout, S_IRUGO | S_IWUSR,
+ toshiba_kbd_bl_timeout_show, toshiba_kbd_bl_timeout_store);
+static DEVICE_ATTR(touchpad, S_IRUGO | S_IWUSR,
+ toshiba_touchpad_show, toshiba_touchpad_store);
+static DEVICE_ATTR(position, S_IRUGO, toshiba_position_show, NULL);
+
+static struct attribute *toshiba_attributes[] = {
+ &dev_attr_kbd_backlight_mode.attr,
+ &dev_attr_kbd_type.attr,
+ &dev_attr_available_kbd_modes.attr,
+ &dev_attr_kbd_backlight_timeout.attr,
+ &dev_attr_touchpad.attr,
+ &dev_attr_position.attr,
+ NULL,
+};
+
+static umode_t toshiba_sysfs_is_visible(struct kobject *,
+ struct attribute *, int);
+
+static struct attribute_group toshiba_attr_group = {
+ .is_visible = toshiba_sysfs_is_visible,
+ .attrs = toshiba_attributes,
+};
static ssize_t toshiba_kbd_bl_mode_store(struct device *dev,
struct device_attribute *attr,
@@ -1263,20 +1362,50 @@ static ssize_t toshiba_kbd_bl_mode_store(struct device *dev,
ret = kstrtoint(buf, 0, &mode);
if (ret)
return ret;
- if (mode != SCI_KBD_MODE_FNZ && mode != SCI_KBD_MODE_AUTO)
- return -EINVAL;
+
+ /* Check for supported modes depending on keyboard backlight type */
+ if (toshiba->kbd_type == 1) {
+ /* Type 1 supports SCI_KBD_MODE_FNZ and SCI_KBD_MODE_AUTO */
+ if (mode != SCI_KBD_MODE_FNZ && mode != SCI_KBD_MODE_AUTO)
+ return -EINVAL;
+ } else if (toshiba->kbd_type == 2) {
+ /* Type 2 doesn't support SCI_KBD_MODE_FNZ */
+ if (mode != SCI_KBD_MODE_AUTO && mode != SCI_KBD_MODE_ON &&
+ mode != SCI_KBD_MODE_OFF)
+ return -EINVAL;
+ }
/* Set the Keyboard Backlight Mode where:
- * Mode - Auto (2) | FN-Z (1)
* Auto - KBD backlight turns off automatically in given time
* FN-Z - KBD backlight "toggles" when hotkey pressed
+ * ON - KBD backlight is always on
+ * OFF - KBD backlight is always off
*/
+
+ /* Only make a change if the actual mode has changed */
if (toshiba->kbd_mode != mode) {
+ /* Shift the time to "base time" (0x3c0000 == 60 seconds) */
time = toshiba->kbd_time << HCI_MISC_SHIFT;
- time = time + toshiba->kbd_mode;
+
+ /* OR the "base time" to the actual method format */
+ if (toshiba->kbd_type == 1) {
+ /* Type 1 requires the current mode */
+ time |= toshiba->kbd_mode;
+ } else if (toshiba->kbd_type == 2) {
+ /* Type 2 requires the desired mode */
+ time |= mode;
+ }
+
ret = toshiba_kbd_illum_status_set(toshiba, time);
if (ret)
return ret;
+
+ /* Update sysfs entries on successful mode change */
+ ret = sysfs_update_group(&toshiba->acpi_dev->dev.kobj,
+ &toshiba_attr_group);
+ if (ret)
+ return ret;
+
toshiba->kbd_mode = mode;
}
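(For reference, a minimal standalone sketch of how the mode/timeout word handed to toshiba_kbd_illum_status_set() is packed; the same layout is used by toshiba_kbd_bl_timeout_store() below. HCI_MISC_SHIFT is assumed to be 16 and SCI_KBD_MODE_AUTO to be 2, consistent with the "0x3c0000 == 60 seconds" comment above; these constants are illustrative, not taken from this diff.)

#include <stdio.h>
#include <stdint.h>

/* Assumed values, for illustration only (see the 0x3c0000 comment above). */
#define HCI_MISC_SHIFT    16
#define SCI_KBD_MODE_AUTO 2

int main(void)
{
	uint32_t timeout = 60;	/* seconds */
	uint32_t word = (timeout << HCI_MISC_SHIFT) | SCI_KBD_MODE_AUTO;

	/* prints 0x3c0002: the upper bits carry the timeout, the low bits the mode */
	printf("0x%x\n", word);
	return 0;
}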
@@ -1293,7 +1422,30 @@ static ssize_t toshiba_kbd_bl_mode_show(struct device *dev,
if (toshiba_kbd_illum_status_get(toshiba, &time) < 0)
return -EIO;
- return sprintf(buf, "%i\n", time & 0x07);
+ return sprintf(buf, "%i\n", time & SCI_KBD_MODE_MASK);
+}
+
+static ssize_t toshiba_kbd_type_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", toshiba->kbd_type);
+}
+
+static ssize_t toshiba_available_kbd_modes_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+
+ if (toshiba->kbd_type == 1)
+ return sprintf(buf, "%x %x\n",
+ SCI_KBD_MODE_FNZ, SCI_KBD_MODE_AUTO);
+
+ return sprintf(buf, "%x %x %x\n",
+ SCI_KBD_MODE_AUTO, SCI_KBD_MODE_ON, SCI_KBD_MODE_OFF);
}
static ssize_t toshiba_kbd_bl_timeout_store(struct device *dev,
@@ -1301,18 +1453,38 @@ static ssize_t toshiba_kbd_bl_timeout_store(struct device *dev,
const char *buf, size_t count)
{
struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
- int time = -1;
+ int time;
+ int ret;
- if (sscanf(buf, "%i", &time) != 1 && (time < 0 || time > 60))
- return -EINVAL;
+ ret = kstrtoint(buf, 0, &time);
+ if (ret)
+ return ret;
- /* Set the Keyboard Backlight Timeout: 0-60 seconds */
- if (time != -1 && toshiba->kbd_time != time) {
+ /* Check for supported values depending on kbd_type */
+ if (toshiba->kbd_type == 1) {
+ if (time < 0 || time > 60)
+ return -EINVAL;
+ } else if (toshiba->kbd_type == 2) {
+ if (time < 1 || time > 60)
+ return -EINVAL;
+ }
+
+ /* Set the Keyboard Backlight Timeout */
+
+ /* Only make a change if the actual timeout has changed */
+ if (toshiba->kbd_time != time) {
+ /* Shift the time to "base time" (0x3c0000 == 60 seconds) */
time = time << HCI_MISC_SHIFT;
- time = (toshiba->kbd_mode == SCI_KBD_MODE_AUTO) ?
- time + 1 : time + 2;
- if (toshiba_kbd_illum_status_set(toshiba, time) < 0)
- return -EIO;
+ /* OR the "base time" to the actual method format */
+ if (toshiba->kbd_type == 1)
+ time |= SCI_KBD_MODE_FNZ;
+ else if (toshiba->kbd_type == 2)
+ time |= SCI_KBD_MODE_AUTO;
+
+ ret = toshiba_kbd_illum_status_set(toshiba, time);
+ if (ret)
+ return ret;
+
toshiba->kbd_time = time >> HCI_MISC_SHIFT;
}
@@ -1338,12 +1510,18 @@ static ssize_t toshiba_touchpad_store(struct device *dev,
{
struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
int state;
+ int ret;
/* Set the TouchPad on/off, 0 - Disable | 1 - Enable */
- if (sscanf(buf, "%i", &state) == 1 && (state == 0 || state == 1)) {
- if (toshiba_touchpad_set(toshiba, state) < 0)
- return -EIO;
- }
+ ret = kstrtoint(buf, 0, &state);
+ if (ret)
+ return ret;
+ if (state != 0 && state != 1)
+ return -EINVAL;
+
+ ret = toshiba_touchpad_set(toshiba, state);
+ if (ret)
+ return ret;
return count;
}
@@ -1383,22 +1561,6 @@ static ssize_t toshiba_position_show(struct device *dev,
return sprintf(buf, "%d %d %d\n", x, y, z);
}
-static DEVICE_ATTR(kbd_backlight_mode, S_IRUGO | S_IWUSR,
- toshiba_kbd_bl_mode_show, toshiba_kbd_bl_mode_store);
-static DEVICE_ATTR(kbd_backlight_timeout, S_IRUGO | S_IWUSR,
- toshiba_kbd_bl_timeout_show, toshiba_kbd_bl_timeout_store);
-static DEVICE_ATTR(touchpad, S_IRUGO | S_IWUSR,
- toshiba_touchpad_show, toshiba_touchpad_store);
-static DEVICE_ATTR(position, S_IRUGO, toshiba_position_show, NULL);
-
-static struct attribute *toshiba_attributes[] = {
- &dev_attr_kbd_backlight_mode.attr,
- &dev_attr_kbd_backlight_timeout.attr,
- &dev_attr_touchpad.attr,
- &dev_attr_position.attr,
- NULL,
-};
-
static umode_t toshiba_sysfs_is_visible(struct kobject *kobj,
struct attribute *attr, int idx)
{
@@ -1418,11 +1580,6 @@ static umode_t toshiba_sysfs_is_visible(struct kobject *kobj,
return exists ? attr->mode : 0;
}
-static struct attribute_group toshiba_attr_group = {
- .is_visible = toshiba_sysfs_is_visible,
- .attrs = toshiba_attributes,
-};
-
static bool toshiba_acpi_i8042_filter(unsigned char data, unsigned char str,
struct serio *port)
{
@@ -1535,8 +1692,8 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
if (acpi_has_method(dev->acpi_dev->handle, "INFO"))
dev->info_supported = 1;
else {
- hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result);
- if (hci_result == HCI_SUCCESS)
+ hci_result = hci_write1(dev, HCI_SYSTEM_EVENT, 1);
+ if (hci_result == TOS_SUCCESS)
dev->system_event_supported = 1;
}
@@ -1558,7 +1715,7 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
goto err_remove_filter;
}
- hci_write1(dev, HCI_HOTKEY_EVENT, HCI_HOTKEY_ENABLE, &hci_result);
+ hci_result = hci_write1(dev, HCI_HOTKEY_EVENT, HCI_HOTKEY_ENABLE);
return 0;
err_remove_filter:
@@ -1716,7 +1873,7 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
goto error;
/* Register rfkill switch for Bluetooth */
- if (hci_get_bt_present(dev, &bt_present) == HCI_SUCCESS && bt_present) {
+ if (hci_get_bt_present(dev, &bt_present) == TOS_SUCCESS && bt_present) {
dev->bt_rfk = rfkill_alloc("Toshiba Bluetooth",
&acpi_dev->dev,
RFKILL_TYPE_BLUETOOTH,
@@ -1754,12 +1911,7 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
dev->eco_supported = 1;
}
- ret = toshiba_kbd_illum_status_get(dev, &dummy);
- if (!ret) {
- dev->kbd_time = dummy >> HCI_MISC_SHIFT;
- dev->kbd_mode = dummy & 0x07;
- }
- dev->kbd_illum_supported = !ret;
+ dev->kbd_illum_supported = toshiba_kbd_illum_available(dev);
/*
* Only register the LED if KBD illumination is supported
* and the keyboard backlight operation mode is set to FN-Z
@@ -1824,26 +1976,26 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
toshiba_acpi_report_hotkey(dev, scancode);
} else if (dev->system_event_supported) {
do {
- hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result);
+ hci_result = hci_read1(dev, HCI_SYSTEM_EVENT, &value);
switch (hci_result) {
- case HCI_SUCCESS:
+ case TOS_SUCCESS:
toshiba_acpi_report_hotkey(dev, (int)value);
break;
- case HCI_NOT_SUPPORTED:
+ case TOS_NOT_SUPPORTED:
/*
* This is a workaround for an unresolved
* issue on some machines where system events
* sporadically become disabled.
*/
- hci_write1(dev, HCI_SYSTEM_EVENT, 1,
- &hci_result);
+ hci_result =
+ hci_write1(dev, HCI_SYSTEM_EVENT, 1);
pr_notice("Re-enabled hotkeys\n");
/* fall through */
default:
retries--;
break;
}
- } while (retries && hci_result != HCI_EMPTY);
+ } while (retries && hci_result != TOS_FIFO_EMPTY);
}
}
@@ -1854,7 +2006,7 @@ static int toshiba_acpi_suspend(struct device *device)
u32 result;
if (dev->hotkey_dev)
- hci_write1(dev, HCI_HOTKEY_EVENT, HCI_HOTKEY_DISABLE, &result);
+ result = hci_write1(dev, HCI_HOTKEY_EVENT, HCI_HOTKEY_DISABLE);
return 0;
}
@@ -1871,7 +2023,7 @@ static int toshiba_acpi_resume(struct device *device)
if (ACPI_FAILURE(status))
pr_info("Unable to re-enable hotkeys\n");
- hci_write1(dev, HCI_HOTKEY_EVENT, HCI_HOTKEY_ENABLE, &result);
+ result = hci_write1(dev, HCI_HOTKEY_EVENT, HCI_HOTKEY_ENABLE);
}
return 0;
diff --git a/drivers/power/reset/restart-poweroff.c b/drivers/power/reset/restart-poweroff.c
index 3e51f8d29bfe..edd707ee7281 100644
--- a/drivers/power/reset/restart-poweroff.c
+++ b/drivers/power/reset/restart-poweroff.c
@@ -20,7 +20,8 @@
static void restart_poweroff_do_poweroff(void)
{
- arm_pm_restart(REBOOT_HARD, NULL);
+ reboot_mode = REBOOT_HARD;
+ machine_restart(NULL);
}
static int restart_poweroff_probe(struct platform_device *pdev)
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 5df05f26b7d9..329db997ee66 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1660,6 +1660,14 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
device->discipline->check_for_device_change(device, cqr, irb);
dasd_put_device(device);
}
+
+ /* check for attention message */
+ if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
+ device = dasd_device_from_cdev_locked(cdev);
+ device->discipline->check_attention(device, irb->esw.esw1.lpum);
+ dasd_put_device(device);
+ }
+
if (!cqr)
return;
@@ -2261,8 +2269,8 @@ static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue)
static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible)
{
struct dasd_device *device;
- int rc;
struct dasd_ccw_req *cqr, *n;
+ int rc;
retry:
list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
@@ -2310,21 +2318,26 @@ retry:
/*
* for alias devices simplify error recovery and
* return to upper layer
+ * do not skip ERP requests
*/
- if (cqr->startdev != cqr->basedev &&
+ if (cqr->startdev != cqr->basedev && !cqr->refers &&
(cqr->status == DASD_CQR_TERMINATED ||
cqr->status == DASD_CQR_NEED_ERP))
return -EAGAIN;
- else {
- /* normal recovery for basedev IO */
- if (__dasd_sleep_on_erp(cqr)) {
- if (!cqr->status == DASD_CQR_TERMINATED &&
- !cqr->status == DASD_CQR_NEED_ERP)
- break;
- rc = 1;
- }
+
+ /* normal recovery for basedev IO */
+ if (__dasd_sleep_on_erp(cqr)) {
+ /* remember that ERP was needed */
+ rc = 1;
+ /* skip processing for active cqr */
+ if (cqr->status != DASD_CQR_TERMINATED &&
+ cqr->status != DASD_CQR_NEED_ERP)
+ break;
}
}
+
+ /* start ERP requests in upper loop */
if (rc)
goto retry;
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 14ba80bfa571..8286f742436b 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -1432,6 +1432,29 @@ static ssize_t dasd_reservation_state_store(struct device *dev,
static DEVICE_ATTR(last_known_reservation_state, 0644,
dasd_reservation_state_show, dasd_reservation_state_store);
+static ssize_t dasd_pm_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dasd_device *device;
+ u8 opm, nppm, cablepm, cuirpm, hpfpm;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return sprintf(buf, "0\n");
+
+ opm = device->path_data.opm;
+ nppm = device->path_data.npm;
+ cablepm = device->path_data.cablepm;
+ cuirpm = device->path_data.cuirpm;
+ hpfpm = device->path_data.hpfpm;
+ dasd_put_device(device);
+
+ return sprintf(buf, "%02x %02x %02x %02x %02x\n", opm, nppm,
+ cablepm, cuirpm, hpfpm);
+}
+
+static DEVICE_ATTR(path_masks, 0444, dasd_pm_show, NULL);
+
static struct attribute * dasd_attrs[] = {
&dev_attr_readonly.attr,
&dev_attr_discipline.attr,
@@ -1450,6 +1473,7 @@ static struct attribute * dasd_attrs[] = {
&dev_attr_reservation_policy.attr,
&dev_attr_last_known_reservation_state.attr,
&dev_attr_safe_offline.attr,
+ &dev_attr_path_masks.attr,
NULL,
};
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 51dea7baf02c..d47f5b99623a 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -29,6 +29,8 @@
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/itcw.h>
+#include <asm/schid.h>
+#include <asm/chpid.h>
#include "dasd_int.h"
#include "dasd_eckd.h"
@@ -112,6 +114,12 @@ struct path_verification_work_data {
static struct path_verification_work_data *path_verification_worker;
static DEFINE_MUTEX(dasd_path_verification_mutex);
+struct check_attention_work_data {
+ struct work_struct worker;
+ struct dasd_device *device;
+ __u8 lpum;
+};
+
/* initial attempt at a probe function. this can be simplified once
* the other detection code is gone */
static int
@@ -1126,6 +1134,7 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
"device %s instead of %s\n", lpm,
print_path_uid, print_device_uid);
path_err = -EINVAL;
+ path_data->cablepm |= lpm;
continue;
}
@@ -1141,6 +1150,13 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
break;
}
path_data->opm |= lpm;
+ /*
+ * if the path is used
+ * it should not be in one of the negative lists
+ */
+ path_data->cablepm &= ~lpm;
+ path_data->hpfpm &= ~lpm;
+ path_data->cuirpm &= ~lpm;
if (conf_data != private->conf_data)
kfree(conf_data);
@@ -1230,7 +1246,7 @@ static void do_path_verification_work(struct work_struct *work)
struct dasd_eckd_private path_private;
struct dasd_uid *uid;
__u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
- __u8 lpm, opm, npm, ppm, epm;
+ __u8 lpm, opm, npm, ppm, epm, hpfpm, cablepm;
unsigned long flags;
char print_uid[60];
int rc;
@@ -1248,6 +1264,9 @@ static void do_path_verification_work(struct work_struct *work)
npm = 0;
ppm = 0;
epm = 0;
+ hpfpm = 0;
+ cablepm = 0;
+
for (lpm = 0x80; lpm; lpm >>= 1) {
if (!(lpm & data->tbvpm))
continue;
@@ -1289,6 +1308,7 @@ static void do_path_verification_work(struct work_struct *work)
opm &= ~lpm;
npm &= ~lpm;
ppm &= ~lpm;
+ hpfpm |= lpm;
continue;
}
@@ -1350,6 +1370,7 @@ static void do_path_verification_work(struct work_struct *work)
opm &= ~lpm;
npm &= ~lpm;
ppm &= ~lpm;
+ cablepm |= lpm;
continue;
}
}
@@ -1364,12 +1385,21 @@ static void do_path_verification_work(struct work_struct *work)
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
if (!device->path_data.opm && opm) {
device->path_data.opm = opm;
+ device->path_data.cablepm &= ~opm;
+ device->path_data.cuirpm &= ~opm;
+ device->path_data.hpfpm &= ~opm;
dasd_generic_path_operational(device);
- } else
+ } else {
device->path_data.opm |= opm;
+ device->path_data.cablepm &= ~opm;
+ device->path_data.cuirpm &= ~opm;
+ device->path_data.hpfpm &= ~opm;
+ }
device->path_data.npm |= npm;
device->path_data.ppm |= ppm;
device->path_data.tbvpm |= epm;
+ device->path_data.cablepm |= cablepm;
+ device->path_data.hpfpm |= hpfpm;
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
@@ -4475,6 +4505,343 @@ out_err:
return -1;
}
+static int dasd_eckd_read_message_buffer(struct dasd_device *device,
+ struct dasd_rssd_messages *messages,
+ __u8 lpum)
+{
+ struct dasd_rssd_messages *message_buf;
+ struct dasd_psf_prssd_data *prssdp;
+ struct dasd_eckd_private *private;
+ struct dasd_ccw_req *cqr;
+ struct ccw1 *ccw;
+ int rc;
+
+ private = (struct dasd_eckd_private *) device->private;
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
+ (sizeof(struct dasd_psf_prssd_data) +
+ sizeof(struct dasd_rssd_messages)),
+ device);
+ if (IS_ERR(cqr)) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ "Could not allocate read message buffer request");
+ return PTR_ERR(cqr);
+ }
+
+ cqr->startdev = device;
+ cqr->memdev = device;
+ cqr->block = NULL;
+ cqr->retries = 256;
+ cqr->expires = 10 * HZ;
+
+ /* we need to check for messages on exactly this path */
+ set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
+ cqr->lpm = lpum;
+
+ /* Prepare for Read Subsystem Data */
+ prssdp = (struct dasd_psf_prssd_data *) cqr->data;
+ memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
+ prssdp->order = PSF_ORDER_PRSSD;
+ prssdp->suborder = 0x03; /* Message Buffer */
+ /* all other bytes of prssdp must be zero */
+
+ ccw = cqr->cpaddr;
+ ccw->cmd_code = DASD_ECKD_CCW_PSF;
+ ccw->count = sizeof(struct dasd_psf_prssd_data);
+ ccw->flags |= CCW_FLAG_CC;
+ ccw->flags |= CCW_FLAG_SLI;
+ ccw->cda = (__u32)(addr_t) prssdp;
+
+ /* Read Subsystem Data - message buffer */
+ message_buf = (struct dasd_rssd_messages *) (prssdp + 1);
+ memset(message_buf, 0, sizeof(struct dasd_rssd_messages));
+
+ ccw++;
+ ccw->cmd_code = DASD_ECKD_CCW_RSSD;
+ ccw->count = sizeof(struct dasd_rssd_messages);
+ ccw->flags |= CCW_FLAG_SLI;
+ ccw->cda = (__u32)(addr_t) message_buf;
+
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+ rc = dasd_sleep_on_immediatly(cqr);
+ if (rc == 0) {
+ prssdp = (struct dasd_psf_prssd_data *) cqr->data;
+ message_buf = (struct dasd_rssd_messages *)
+ (prssdp + 1);
+ memcpy(messages, message_buf,
+ sizeof(struct dasd_rssd_messages));
+ } else
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+ "Reading messages failed with rc=%d\n"
+ , rc);
+ dasd_sfree_request(cqr, cqr->memdev);
+ return rc;
+}
+
+/*
+ * Perform Subsystem Function - CUIR response
+ */
+static int
+dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
+ __u32 message_id,
+ struct channel_path_desc *desc,
+ struct subchannel_id sch_id)
+{
+ struct dasd_psf_cuir_response *psf_cuir;
+ struct dasd_ccw_req *cqr;
+ struct ccw1 *ccw;
+ int rc;
+
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
+ sizeof(struct dasd_psf_cuir_response),
+ device);
+
+ if (IS_ERR(cqr)) {
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "Could not allocate PSF-CUIR request");
+ return PTR_ERR(cqr);
+ }
+
+ psf_cuir = (struct dasd_psf_cuir_response *)cqr->data;
+ psf_cuir->order = PSF_ORDER_CUIR_RESPONSE;
+ psf_cuir->cc = response;
+ if (desc)
+ psf_cuir->chpid = desc->chpid;
+ psf_cuir->message_id = message_id;
+ psf_cuir->cssid = sch_id.cssid;
+ psf_cuir->ssid = sch_id.ssid;
+
+ ccw = cqr->cpaddr;
+ ccw->cmd_code = DASD_ECKD_CCW_PSF;
+ ccw->cda = (__u32)(addr_t)psf_cuir;
+ ccw->count = sizeof(struct dasd_psf_cuir_response);
+
+ cqr->startdev = device;
+ cqr->memdev = device;
+ cqr->block = NULL;
+ cqr->retries = 256;
+ cqr->expires = 10*HZ;
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+
+ rc = dasd_sleep_on(cqr);
+
+ dasd_sfree_request(cqr, cqr->memdev);
+ return rc;
+}
+
+static int dasd_eckd_cuir_change_state(struct dasd_device *device, __u8 lpum)
+{
+ unsigned long flags;
+ __u8 tbcpm;
+
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ tbcpm = device->path_data.opm & ~lpum;
+ if (tbcpm) {
+ device->path_data.opm = tbcpm;
+ device->path_data.cuirpm |= lpum;
+ }
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+ return tbcpm ? 0 : PSF_CUIR_LAST_PATH;
+}
+
+/*
+ * walk through all devices and quiesce them
+ * if it is the last path, return an error
+ *
+ * if only some of the devices are quiesced and an error
+ * occurs, no onlining is necessary; the storage server will
+ * notify the devices that were already set offline again
+ */
+static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
+ struct channel_path_desc *desc,
+ struct subchannel_id sch_id)
+{
+ struct alias_pav_group *pavgroup, *tempgroup;
+ struct dasd_eckd_private *private;
+ struct dasd_device *dev, *n;
+ int rc;
+
+ private = (struct dasd_eckd_private *) device->private;
+ rc = 0;
+
+ /* active devices */
+ list_for_each_entry_safe(dev, n,
+ &private->lcu->active_devices,
+ alias_list) {
+ rc = dasd_eckd_cuir_change_state(dev, lpum);
+ if (rc)
+ goto out;
+ }
+
+ /* inactive devices */
+ list_for_each_entry_safe(dev, n,
+ &private->lcu->inactive_devices,
+ alias_list) {
+ rc = dasd_eckd_cuir_change_state(dev, lpum);
+ if (rc)
+ goto out;
+ }
+
+ /* devices in PAV groups */
+ list_for_each_entry_safe(pavgroup, tempgroup,
+ &private->lcu->grouplist, group) {
+ list_for_each_entry_safe(dev, n, &pavgroup->baselist,
+ alias_list) {
+ rc = dasd_eckd_cuir_change_state(dev, lpum);
+ if (rc)
+ goto out;
+ }
+ list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
+ alias_list) {
+ rc = dasd_eckd_cuir_change_state(dev, lpum);
+ if (rc)
+ goto out;
+ }
+ }
+
+ pr_warn("Service on the storage server caused path %x.%02x to go offline",
+ sch_id.cssid, desc ? desc->chpid : 0);
+ rc = PSF_CUIR_COMPLETED;
+out:
+ return rc;
+}
+
+static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
+ struct channel_path_desc *desc,
+ struct subchannel_id sch_id)
+{
+ struct alias_pav_group *pavgroup, *tempgroup;
+ struct dasd_eckd_private *private;
+ struct dasd_device *dev, *n;
+
+ pr_info("Path %x.%02x is back online after service on the storage server",
+ sch_id.cssid, desc ? desc->chpid : 0);
+ private = (struct dasd_eckd_private *) device->private;
+
+ /*
+ * the path may have been added through a generic path event before;
+ * only trigger path verification if the path is not already in use
+ */
+
+ list_for_each_entry_safe(dev, n,
+ &private->lcu->active_devices,
+ alias_list) {
+ if (!(dev->path_data.opm & lpum)) {
+ dev->path_data.tbvpm |= lpum;
+ dasd_schedule_device_bh(dev);
+ }
+ }
+
+ list_for_each_entry_safe(dev, n,
+ &private->lcu->inactive_devices,
+ alias_list) {
+ if (!(dev->path_data.opm & lpum)) {
+ dev->path_data.tbvpm |= lpum;
+ dasd_schedule_device_bh(dev);
+ }
+ }
+
+ /* devices in PAV groups */
+ list_for_each_entry_safe(pavgroup, tempgroup,
+ &private->lcu->grouplist,
+ group) {
+ list_for_each_entry_safe(dev, n,
+ &pavgroup->baselist,
+ alias_list) {
+ if (!(dev->path_data.opm & lpum)) {
+ dev->path_data.tbvpm |= lpum;
+ dasd_schedule_device_bh(dev);
+ }
+ }
+ list_for_each_entry_safe(dev, n,
+ &pavgroup->aliaslist,
+ alias_list) {
+ if (!(dev->path_data.opm & lpum)) {
+ dev->path_data.tbvpm |= lpum;
+ dasd_schedule_device_bh(dev);
+ }
+ }
+ }
+ return PSF_CUIR_COMPLETED;
+}
+
+static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages,
+ __u8 lpum)
+{
+ struct dasd_cuir_message *cuir = messages;
+ struct channel_path_desc *desc;
+ struct subchannel_id sch_id;
+ int pos, response;
+ ccw_device_get_schid(device->cdev, &sch_id);
+
+ /* get position of path in mask */
+ pos = 8 - ffs(lpum);
+ /* get channel path descriptor from this position */
+ desc = ccw_device_get_chp_desc(device->cdev, pos);
+
+ if (cuir->code == CUIR_QUIESCE) {
+ /* quiesce */
+ response = dasd_eckd_cuir_quiesce(device, lpum, desc, sch_id);
+ } else if (cuir->code == CUIR_RESUME) {
+ /* resume */
+ response = dasd_eckd_cuir_resume(device, lpum, desc, sch_id);
+ } else
+ response = PSF_CUIR_NOT_SUPPORTED;
+
+ dasd_eckd_psf_cuir_response(device, response, cuir->message_id,
+ desc, sch_id);
+
+ /* free descriptor copy */
+ kfree(desc);
+}
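(A small standalone illustration, not driver code, of the mask arithmetic used above: the single-bit lpum is turned into a channel-path position with 8 - ffs(lpum), and dasd_eckd_cuir_change_state() clears that bit from the operational path mask, returning PSF_CUIR_LAST_PATH when no path would remain. The example values are assumptions for demonstration only.)

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned char lpum = 0x80;	/* single-bit logical path mask */
	unsigned char opm = 0xc0;	/* two operational paths (example values) */
	int pos = 8 - ffs(lpum);	/* 0x80 -> position 0, 0x01 -> position 7 */
	unsigned char tbcpm = opm & ~lpum;	/* paths left after the quiesce */

	/* prints "pos=0 remaining=0x40": one path remains, so not the last path */
	printf("pos=%d remaining=0x%02x\n", pos, tbcpm);
	return 0;
}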
+
+static void dasd_eckd_check_attention_work(struct work_struct *work)
+{
+ struct check_attention_work_data *data;
+ struct dasd_rssd_messages *messages;
+ struct dasd_device *device;
+ int rc;
+
+ data = container_of(work, struct check_attention_work_data, worker);
+ device = data->device;
+
+ messages = kzalloc(sizeof(*messages), GFP_KERNEL);
+ if (!messages) {
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "Could not allocate attention message buffer");
+ goto out;
+ }
+
+ rc = dasd_eckd_read_message_buffer(device, messages, data->lpum);
+ if (rc)
+ goto out;
+
+ if (messages->length == ATTENTION_LENGTH_CUIR &&
+ messages->format == ATTENTION_FORMAT_CUIR)
+ dasd_eckd_handle_cuir(device, messages, data->lpum);
+
+out:
+ dasd_put_device(device);
+ kfree(messages);
+ kfree(data);
+}
+
+static int dasd_eckd_check_attention(struct dasd_device *device, __u8 lpum)
+{
+ struct check_attention_work_data *data;
+
+ data = kzalloc(sizeof(*data), GFP_ATOMIC);
+ if (!data)
+ return -ENOMEM;
+ INIT_WORK(&data->worker, dasd_eckd_check_attention_work);
+ dasd_get_device(device);
+ data->device = device;
+ data->lpum = lpum;
+ schedule_work(&data->worker);
+ return 0;
+}
+
static struct ccw_driver dasd_eckd_driver = {
.driver = {
.name = "dasd-eckd",
@@ -4539,6 +4906,7 @@ static struct dasd_discipline dasd_eckd_discipline = {
.reload = dasd_eckd_reload_device,
.get_uid = dasd_eckd_get_uid,
.kick_validate = dasd_eckd_kick_validate_server,
+ .check_attention = dasd_eckd_check_attention,
};
static int __init
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index 2555e494591f..ddab7df36e25 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -51,8 +51,35 @@
/*
* Perform Subsystem Function / Sub-Orders
*/
-#define PSF_ORDER_PRSSD 0x18
-#define PSF_ORDER_SSC 0x1D
+#define PSF_ORDER_PRSSD 0x18
+#define PSF_ORDER_CUIR_RESPONSE 0x1A
+#define PSF_ORDER_SSC 0x1D
+
+/*
+ * CUIR response condition codes
+ */
+#define PSF_CUIR_INVALID 0x00
+#define PSF_CUIR_COMPLETED 0x01
+#define PSF_CUIR_NOT_SUPPORTED 0x02
+#define PSF_CUIR_ERROR_IN_REQ 0x03
+#define PSF_CUIR_DENIED 0x04
+#define PSF_CUIR_LAST_PATH 0x05
+#define PSF_CUIR_DEVICE_ONLINE 0x06
+#define PSF_CUIR_VARY_FAILURE 0x07
+#define PSF_CUIR_SOFTWARE_FAILURE 0x08
+#define PSF_CUIR_NOT_RECOGNIZED 0x09
+
+/*
+ * CUIR codes
+ */
+#define CUIR_QUIESCE 0x01
+#define CUIR_RESUME 0x02
+
+/*
+ * attention message definitions
+ */
+#define ATTENTION_LENGTH_CUIR 0x0e
+#define ATTENTION_FORMAT_CUIR 0x01
/*
* Size that is reported for large volumes in the old 16-bit no_cyl field
@@ -342,6 +369,38 @@ struct dasd_rssd_features {
char feature[256];
} __attribute__((packed));
+struct dasd_rssd_messages {
+ __u16 length;
+ __u8 format;
+ __u8 code;
+ __u32 message_id;
+ __u8 flags;
+ char messages[4087];
+} __packed;
+
+struct dasd_cuir_message {
+ __u16 length;
+ __u8 format;
+ __u8 code;
+ __u32 message_id;
+ __u8 flags;
+ __u8 neq_map[3];
+ __u8 ned_map;
+ __u8 record_selector;
+} __packed;
+
+struct dasd_psf_cuir_response {
+ __u8 order;
+ __u8 flags;
+ __u8 cc;
+ __u8 chpid;
+ __u16 device_nr;
+ __u16 reserved;
+ __u32 message_id;
+ __u64 system_id;
+ __u8 cssid;
+ __u8 ssid;
+} __packed;
/*
* Perform Subsystem Function - Prepare for Read Subsystem Data
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index c20170166909..8b5d4100abf7 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -357,6 +357,7 @@ struct dasd_discipline {
int (*get_uid) (struct dasd_device *, struct dasd_uid *);
void (*kick_validate) (struct dasd_device *);
+ int (*check_attention)(struct dasd_device *, __u8);
};
extern struct dasd_discipline *dasd_diag_discipline_pointer;
@@ -382,6 +383,10 @@ struct dasd_path {
__u8 tbvpm;
__u8 ppm;
__u8 npm;
+ /* paths that are not used because of a special condition */
+ __u8 cablepm; /* mis-cabled */
+ __u8 hpfpm; /* the HPF requirements of the other paths are not met */
+ __u8 cuirpm; /* CUIR varied offline */
};
struct dasd_profile_info {
@@ -501,7 +506,10 @@ struct dasd_block {
struct dasd_profile profile;
};
-
+struct dasd_attention_data {
+ struct dasd_device *device;
+ __u8 lpum;
+};
/* reasons why device (ccw_device_start) was stopped */
#define DASD_STOPPED_NOT_ACC 1 /* not accessible */
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index 71bf959732fe..dc24ecfac2d1 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -102,6 +102,19 @@ config SCLP_ASYNC
want for inform other people about your kernel panics,
need this feature and intend to run your kernel in LPAR.
+config HMC_DRV
+ def_tristate m
+ prompt "Support for file transfers from HMC drive CD/DVD-ROM"
+ depends on 64BIT
+ select CRC16
+ help
+ This option enables support for file transfers from a Hardware
+ Management Console (HMC) drive CD/DVD-ROM. It is available as a
+ module, called 'hmcdrv', and also as a kernel built-in. There is one
+ optional parameter for this module: cachesize=N, which modifies the
+ transfer cache size from its default value of 0.5MB to N bytes. If N
+ is zero, then no caching is performed.
+
config S390_TAPE
def_tristate m
prompt "S/390 tape device support"
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 78b6ace7edcb..6fa9364d1c07 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -33,3 +33,6 @@ obj-$(CONFIG_S390_VMUR) += vmur.o
zcore_mod-objs := sclp_sdias.o zcore.o
obj-$(CONFIG_CRASH_DUMP) += zcore_mod.o
+
+hmcdrv-objs := hmcdrv_mod.o hmcdrv_dev.o hmcdrv_ftp.o hmcdrv_cache.o diag_ftp.o sclp_ftp.o
+obj-$(CONFIG_HMC_DRV) += hmcdrv.o
diff --git a/drivers/s390/char/diag_ftp.c b/drivers/s390/char/diag_ftp.c
new file mode 100644
index 000000000000..93889632fdf9
--- /dev/null
+++ b/drivers/s390/char/diag_ftp.c
@@ -0,0 +1,237 @@
+/*
+ * DIAGNOSE X'2C4' instruction based HMC FTP services, usable on z/VM
+ *
+ * Copyright IBM Corp. 2013
+ * Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
+ *
+ */
+
+#define KMSG_COMPONENT "hmcdrv"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/irq.h>
+#include <linux/wait.h>
+#include <linux/string.h>
+#include <asm/ctl_reg.h>
+
+#include "hmcdrv_ftp.h"
+#include "diag_ftp.h"
+
+/* DIAGNOSE X'2C4' return codes in Ry */
+#define DIAG_FTP_RET_OK 0 /* HMC FTP started successfully */
+#define DIAG_FTP_RET_EBUSY 4 /* HMC FTP service currently busy */
+#define DIAG_FTP_RET_EIO 8 /* HMC FTP service I/O error */
+/* and an artificial extension */
+#define DIAG_FTP_RET_EPERM 2 /* HMC FTP service privilege error */
+
+/* FTP service status codes (after INTR at guest real location 133) */
+#define DIAG_FTP_STAT_OK 0U /* request completed successfully */
+#define DIAG_FTP_STAT_PGCC 4U /* program check condition */
+#define DIAG_FTP_STAT_PGIOE 8U /* paging I/O error */
+#define DIAG_FTP_STAT_TIMEOUT 12U /* timeout */
+#define DIAG_FTP_STAT_EBASE 16U /* base of error codes from SCLP */
+#define DIAG_FTP_STAT_LDFAIL (DIAG_FTP_STAT_EBASE + 1U) /* failed */
+#define DIAG_FTP_STAT_LDNPERM (DIAG_FTP_STAT_EBASE + 2U) /* not allowed */
+#define DIAG_FTP_STAT_LDRUNS (DIAG_FTP_STAT_EBASE + 3U) /* runs */
+#define DIAG_FTP_STAT_LDNRUNS (DIAG_FTP_STAT_EBASE + 4U) /* not runs */
+
+/**
+ * struct diag_ftp_ldfpl - load file FTP parameter list (LDFPL)
+ * @bufaddr: real buffer address (at 4k boundary)
+ * @buflen: length of buffer
+ * @offset: dir/file offset
+ * @intparm: interruption parameter (unused)
+ * @transferred: bytes transferred
+ * @fsize: file size, filled on GET
+ * @failaddr: failing address
+ * @spare: padding
+ * @fident: file name - ASCII
+ */
+struct diag_ftp_ldfpl {
+ u64 bufaddr;
+ u64 buflen;
+ u64 offset;
+ u64 intparm;
+ u64 transferred;
+ u64 fsize;
+ u64 failaddr;
+ u64 spare;
+ u8 fident[HMCDRV_FTP_FIDENT_MAX];
+} __packed;
+
+static DECLARE_COMPLETION(diag_ftp_rx_complete);
+static int diag_ftp_subcode;
+
+/**
+ * diag_ftp_handler() - FTP services IRQ handler
+ * @extirq: external interrupt (sub-) code
+ * @param32: 32-bit interruption parameter from &struct diag_ftp_ldfpl
+ * @param64: unused (for 64-bit interrupt parameters)
+ */
+static void diag_ftp_handler(struct ext_code extirq,
+ unsigned int param32,
+ unsigned long param64)
+{
+ if ((extirq.subcode >> 8) != 8)
+ return; /* not a FTP services sub-code */
+
+ inc_irq_stat(IRQEXT_FTP);
+ diag_ftp_subcode = extirq.subcode & 0xffU;
+ complete(&diag_ftp_rx_complete);
+}
+
+/**
+ * diag_ftp_2c4() - DIAGNOSE X'2C4' service call
+ * @fpl: pointer to prepared LDFPL
+ * @cmd: FTP command to be executed
+ *
+ * Performs a DIAGNOSE X'2C4' call with (input/output) FTP parameter list
+ * @fpl and FTP function code @cmd. In case of an error the function does
+ * nothing and returns a (negative) error code.
+ *
+ * Notes:
+ * 1. This function only initiates a transfer, so the caller must wait
+ * for completion (asynchronous execution).
+ * 2. The FTP parameter list @fpl must be aligned to a double-word boundary.
+ * 3. fpl->bufaddr must be a real address, 4k aligned
+ */
+static int diag_ftp_2c4(struct diag_ftp_ldfpl *fpl,
+ enum hmcdrv_ftp_cmdid cmd)
+{
+ int rc;
+
+ asm volatile(
+ " diag %[addr],%[cmd],0x2c4\n"
+ "0: j 2f\n"
+ "1: la %[rc],%[err]\n"
+ "2:\n"
+ EX_TABLE(0b, 1b)
+ : [rc] "=d" (rc), "+m" (*fpl)
+ : [cmd] "0" (cmd), [addr] "d" (virt_to_phys(fpl)),
+ [err] "i" (DIAG_FTP_RET_EPERM)
+ : "cc");
+
+ switch (rc) {
+ case DIAG_FTP_RET_OK:
+ return 0;
+ case DIAG_FTP_RET_EBUSY:
+ return -EBUSY;
+ case DIAG_FTP_RET_EPERM:
+ return -EPERM;
+ case DIAG_FTP_RET_EIO:
+ default:
+ return -EIO;
+ }
+}
+
+/**
+ * diag_ftp_cmd() - executes a DIAG X'2C4' FTP command, targeting a HMC
+ * @ftp: pointer to FTP command specification
+ * @fsize: return of file size (or NULL if undesirable)
+ *
+ * Attention: Notice that this function is not reentrant - so the caller
+ * must ensure locking.
+ *
+ * Return: number of bytes read/written or a (negative) error code
+ */
+ssize_t diag_ftp_cmd(const struct hmcdrv_ftp_cmdspec *ftp, size_t *fsize)
+{
+ struct diag_ftp_ldfpl *ldfpl;
+ ssize_t len;
+#ifdef DEBUG
+ unsigned long start_jiffies;
+
+ pr_debug("starting DIAG X'2C4' on '%s', requesting %zd bytes\n",
+ ftp->fname, ftp->len);
+ start_jiffies = jiffies;
+#endif
+ init_completion(&diag_ftp_rx_complete);
+
+ ldfpl = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!ldfpl) {
+ len = -ENOMEM;
+ goto out;
+ }
+
+ len = strlcpy(ldfpl->fident, ftp->fname, sizeof(ldfpl->fident));
+ if (len >= HMCDRV_FTP_FIDENT_MAX) {
+ len = -EINVAL;
+ goto out_free;
+ }
+
+ ldfpl->transferred = 0;
+ ldfpl->fsize = 0;
+ ldfpl->offset = ftp->ofs;
+ ldfpl->buflen = ftp->len;
+ ldfpl->bufaddr = virt_to_phys(ftp->buf);
+
+ len = diag_ftp_2c4(ldfpl, ftp->id);
+ if (len)
+ goto out_free;
+
+ /*
+ * There is no way to cancel the running diag X'2C4', the code
+ * needs to wait unconditionally until the transfer is complete.
+ */
+ wait_for_completion(&diag_ftp_rx_complete);
+
+#ifdef DEBUG
+ pr_debug("completed DIAG X'2C4' after %lu ms\n",
+ (jiffies - start_jiffies) * 1000 / HZ);
+ pr_debug("status of DIAG X'2C4' is %u, with %lld/%lld bytes\n",
+ diag_ftp_subcode, ldfpl->transferred, ldfpl->fsize);
+#endif
+
+ switch (diag_ftp_subcode) {
+ case DIAG_FTP_STAT_OK: /* success */
+ len = ldfpl->transferred;
+ if (fsize)
+ *fsize = ldfpl->fsize;
+ break;
+ case DIAG_FTP_STAT_LDNPERM:
+ len = -EPERM;
+ break;
+ case DIAG_FTP_STAT_LDRUNS:
+ len = -EBUSY;
+ break;
+ case DIAG_FTP_STAT_LDFAIL:
+ len = -ENOENT; /* no such file or media */
+ break;
+ default:
+ len = -EIO;
+ break;
+ }
+
+out_free:
+ free_page((unsigned long) ldfpl);
+out:
+ return len;
+}
+
+/**
+ * diag_ftp_startup() - startup of FTP services, when running on z/VM
+ *
+ * Return: 0 on success, else a (negative) error code
+ */
+int diag_ftp_startup(void)
+{
+ int rc;
+
+ rc = register_external_irq(EXT_IRQ_CP_SERVICE, diag_ftp_handler);
+ if (rc)
+ return rc;
+
+ ctl_set_bit(0, 63 - 22);
+ return 0;
+}
+
+/**
+ * diag_ftp_shutdown() - shutdown of FTP services, when running on z/VM
+ */
+void diag_ftp_shutdown(void)
+{
+ ctl_clear_bit(0, 63 - 22);
+ unregister_external_irq(EXT_IRQ_CP_SERVICE, diag_ftp_handler);
+}
diff --git a/drivers/s390/char/diag_ftp.h b/drivers/s390/char/diag_ftp.h
new file mode 100644
index 000000000000..3abd2614053a
--- /dev/null
+++ b/drivers/s390/char/diag_ftp.h
@@ -0,0 +1,21 @@
+/*
+ * DIAGNOSE X'2C4' instruction based SE/HMC FTP Services, usable on z/VM
+ *
+ * Notice that all functions exported here are not reentrant.
+ * So usage should be exclusive, ensured by the caller (e.g. using a
+ * mutex).
+ *
+ * Copyright IBM Corp. 2013
+ * Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
+ */
+
+#ifndef __DIAG_FTP_H__
+#define __DIAG_FTP_H__
+
+#include "hmcdrv_ftp.h"
+
+int diag_ftp_startup(void);
+void diag_ftp_shutdown(void);
+ssize_t diag_ftp_cmd(const struct hmcdrv_ftp_cmdspec *ftp, size_t *fsize);
+
+#endif /* __DIAG_FTP_H__ */
diff --git a/drivers/s390/char/hmcdrv_cache.c b/drivers/s390/char/hmcdrv_cache.c
new file mode 100644
index 000000000000..4cda5ada143a
--- /dev/null
+++ b/drivers/s390/char/hmcdrv_cache.c
@@ -0,0 +1,252 @@
+/*
+ * SE/HMC Drive (Read) Cache Functions
+ *
+ * Copyright IBM Corp. 2013
+ * Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
+ *
+ */
+
+#define KMSG_COMPONENT "hmcdrv"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/jiffies.h>
+
+#include "hmcdrv_ftp.h"
+#include "hmcdrv_cache.h"
+
+#define HMCDRV_CACHE_TIMEOUT 30 /* aging timeout in seconds */
+
+/**
+ * struct hmcdrv_cache_entry - file cache (only used on read/dir)
+ * @id: FTP command ID
+ * @content: kernel-space buffer, 4k aligned
+ * @len: size of @content cache (0 if caching disabled)
+ * @ofs: start of content within file (-1 if no cached content)
+ * @fname: file name
+ * @fsize: file size
+ * @timeout: cache timeout in jiffies
+ *
+ * Notice that the first three members (id, fname, fsize) are cached on all
+ * read/dir requests. But content is cached only under some preconditions.
+ * Uncached content is signalled by a negative value of @ofs.
+ */
+struct hmcdrv_cache_entry {
+ enum hmcdrv_ftp_cmdid id;
+ char fname[HMCDRV_FTP_FIDENT_MAX];
+ size_t fsize;
+ loff_t ofs;
+ unsigned long timeout;
+ void *content;
+ size_t len;
+};
+
+static int hmcdrv_cache_order; /* cache allocated page order */
+
+static struct hmcdrv_cache_entry hmcdrv_cache_file = {
+ .fsize = SIZE_MAX,
+ .ofs = -1,
+ .len = 0,
+ .fname = {'\0'}
+};
+
+/**
+ * hmcdrv_cache_get() - looks for file data/content in read cache
+ * @ftp: pointer to FTP command specification
+ *
+ * Return: number of bytes read from cache or a negative number if nothing
+ * in content cache (for the file/cmd specified in @ftp)
+ */
+static ssize_t hmcdrv_cache_get(const struct hmcdrv_ftp_cmdspec *ftp)
+{
+ loff_t pos; /* position in cache (signed) */
+ ssize_t len;
+
+ if ((ftp->id != hmcdrv_cache_file.id) ||
+ strcmp(hmcdrv_cache_file.fname, ftp->fname))
+ return -1;
+
+ if (ftp->ofs >= hmcdrv_cache_file.fsize) /* EOF ? */
+ return 0;
+
+ if ((hmcdrv_cache_file.ofs < 0) || /* has content? */
+ time_after(jiffies, hmcdrv_cache_file.timeout))
+ return -1;
+
+ /* there seems to be cached content - calculate the maximum number
+ * of bytes that can be returned (regarding file size and offset)
+ */
+ len = hmcdrv_cache_file.fsize - ftp->ofs;
+
+ if (len > ftp->len)
+ len = ftp->len;
+
+ /* check if the requested chunk falls into our cache (which starts
+ * at offset 'hmcdrv_cache_file.ofs' in the file of interest)
+ */
+ pos = ftp->ofs - hmcdrv_cache_file.ofs;
+
+ if ((pos >= 0) &&
+ ((pos + len) <= hmcdrv_cache_file.len)) {
+
+ memcpy(ftp->buf,
+ hmcdrv_cache_file.content + pos,
+ len);
+ pr_debug("using cached content of '%s', returning %zd/%zd bytes\n",
+ hmcdrv_cache_file.fname, len,
+ hmcdrv_cache_file.fsize);
+
+ return len;
+ }
+
+ return -1;
+}
+
+/**
+ * hmcdrv_cache_do() - do a HMC drive CD/DVD transfer with cache update
+ * @ftp: pointer to FTP command specification
+ * @func: FTP transfer function to be used
+ *
+ * Return: number of bytes read/written or a (negative) error code
+ */
+static ssize_t hmcdrv_cache_do(const struct hmcdrv_ftp_cmdspec *ftp,
+ hmcdrv_cache_ftpfunc func)
+{
+ ssize_t len;
+
+ /* only cache content if the read/dir cache really exists
+ * (hmcdrv_cache_file.len > 0), is large enough to handle the
+ * request (hmcdrv_cache_file.len >= ftp->len) and there is a need
+ * to do so (ftp->len > 0)
+ */
+ if ((ftp->len > 0) && (hmcdrv_cache_file.len >= ftp->len)) {
+
+ /* because the cache is not located at ftp->buf, we have to
+ * assemble a new HMC drive FTP cmd specification (pointing
+ * to our cache, and using the increased size)
+ */
+ struct hmcdrv_ftp_cmdspec cftp = *ftp; /* make a copy */
+ cftp.buf = hmcdrv_cache_file.content; /* and update */
+ cftp.len = hmcdrv_cache_file.len; /* buffer data */
+
+ len = func(&cftp, &hmcdrv_cache_file.fsize); /* now do */
+
+ if (len > 0) {
+ pr_debug("caching %zd bytes content for '%s'\n",
+ len, ftp->fname);
+
+ if (len > ftp->len)
+ len = ftp->len;
+
+ hmcdrv_cache_file.ofs = ftp->ofs;
+ hmcdrv_cache_file.timeout = jiffies +
+ HMCDRV_CACHE_TIMEOUT * HZ;
+ memcpy(ftp->buf, hmcdrv_cache_file.content, len);
+ }
+ } else {
+ len = func(ftp, &hmcdrv_cache_file.fsize);
+ hmcdrv_cache_file.ofs = -1; /* invalidate content */
+ }
+
+ if (len > 0) {
+ /* cache some file info (FTP command, file name and file
+ * size) unconditionally
+ */
+ strlcpy(hmcdrv_cache_file.fname, ftp->fname,
+ HMCDRV_FTP_FIDENT_MAX);
+ hmcdrv_cache_file.id = ftp->id;
+ pr_debug("caching cmd %d, file size %zu for '%s'\n",
+ ftp->id, hmcdrv_cache_file.fsize, ftp->fname);
+ }
+
+ return len;
+}
+
+/**
+ * hmcdrv_cache_cmd() - perform a cached HMC drive CD/DVD transfer
+ * @ftp: pointer to FTP command specification
+ * @func: FTP transfer function to be used
+ *
+ * Attention: Notice that this function is not reentrant - so the caller
+ * must ensure exclusive execution.
+ *
+ * Return: number of bytes read/written or a (negative) error code
+ */
+ssize_t hmcdrv_cache_cmd(const struct hmcdrv_ftp_cmdspec *ftp,
+ hmcdrv_cache_ftpfunc func)
+{
+ ssize_t len;
+
+ if ((ftp->id == HMCDRV_FTP_DIR) || /* read cache */
+ (ftp->id == HMCDRV_FTP_NLIST) ||
+ (ftp->id == HMCDRV_FTP_GET)) {
+
+ len = hmcdrv_cache_get(ftp);
+
+ if (len >= 0) /* got it from cache ? */
+ return len; /* yes */
+
+ len = hmcdrv_cache_do(ftp, func);
+
+ if (len >= 0)
+ return len;
+
+ } else {
+ len = func(ftp, NULL); /* simply do original command */
+ }
+
+ /* invalidate the (read) cache in case there was a write operation
+ * or an error on read/dir
+ */
+ hmcdrv_cache_file.id = HMCDRV_FTP_NOOP;
+ hmcdrv_cache_file.fsize = LLONG_MAX;
+ hmcdrv_cache_file.ofs = -1;
+
+ return len;
+}
+
+/**
+ * hmcdrv_cache_startup() - startup of HMC drive cache
+ * @cachesize: cache size
+ *
+ * Return: 0 on success, else a (negative) error code
+ */
+int hmcdrv_cache_startup(size_t cachesize)
+{
+ if (cachesize > 0) { /* perform caching ? */
+ hmcdrv_cache_order = get_order(cachesize);
+ hmcdrv_cache_file.content =
+ (void *) __get_free_pages(GFP_KERNEL | GFP_DMA,
+ hmcdrv_cache_order);
+
+ if (!hmcdrv_cache_file.content) {
+ pr_err("Allocating the requested cache size of %zu bytes failed\n",
+ cachesize);
+ return -ENOMEM;
+ }
+
+ pr_debug("content cache enabled, size is %zu bytes\n",
+ cachesize);
+ }
+
+ hmcdrv_cache_file.len = cachesize;
+ return 0;
+}
+
+/**
+ * hmcdrv_cache_shutdown() - shutdown of HMC drive cache
+ */
+void hmcdrv_cache_shutdown(void)
+{
+ if (hmcdrv_cache_file.content) {
+ free_pages((unsigned long) hmcdrv_cache_file.content,
+ hmcdrv_cache_order);
+ hmcdrv_cache_file.content = NULL;
+ }
+
+ hmcdrv_cache_file.id = HMCDRV_FTP_NOOP;
+ hmcdrv_cache_file.fsize = LLONG_MAX;
+ hmcdrv_cache_file.ofs = -1;
+ hmcdrv_cache_file.len = 0; /* no cache */
+}
diff --git a/drivers/s390/char/hmcdrv_cache.h b/drivers/s390/char/hmcdrv_cache.h
new file mode 100644
index 000000000000..a14b57526781
--- /dev/null
+++ b/drivers/s390/char/hmcdrv_cache.h
@@ -0,0 +1,24 @@
+/*
+ * SE/HMC Drive (Read) Cache Functions
+ *
+ * Copyright IBM Corp. 2013
+ * Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
+ */
+
+#ifndef __HMCDRV_CACHE_H__
+#define __HMCDRV_CACHE_H__
+
+#include <linux/mmzone.h>
+#include "hmcdrv_ftp.h"
+
+#define HMCDRV_CACHE_SIZE_DFLT (MAX_ORDER_NR_PAGES * PAGE_SIZE / 2UL)
+
+typedef ssize_t (*hmcdrv_cache_ftpfunc)(const struct hmcdrv_ftp_cmdspec *ftp,
+ size_t *fsize);
+
+ssize_t hmcdrv_cache_cmd(const struct hmcdrv_ftp_cmdspec *ftp,
+ hmcdrv_cache_ftpfunc func);
+int hmcdrv_cache_startup(size_t cachesize);
+void hmcdrv_cache_shutdown(void);
+
+#endif /* __HMCDRV_CACHE_H__ */
diff --git a/drivers/s390/char/hmcdrv_dev.c b/drivers/s390/char/hmcdrv_dev.c
new file mode 100644
index 000000000000..0c5176179c17
--- /dev/null
+++ b/drivers/s390/char/hmcdrv_dev.c
@@ -0,0 +1,370 @@
+/*
+ * HMC Drive CD/DVD Device
+ *
+ * Copyright IBM Corp. 2013
+ * Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
+ *
+ * This file provides a Linux "misc" character device for access to an
+ * assigned HMC drive CD/DVD-ROM. It works as follows: First create the
+ * device by calling hmcdrv_dev_init(). After open() a lseek(fd, 0,
+ * SEEK_END) indicates that a new FTP command follows (not needed on the
+ * first command after open). Then write() the FTP command ASCII string
+ * to it, e.g. "dir /" or "nls <directory>" or "get <filename>". At the
+ * end read() the response.
+ */
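(A hedged user-space sketch, not part of the driver, showing the command/response protocol described in the comment above. It assumes the hmcdrv module is loaded and /dev/hmcdrv exists; error handling is trimmed for brevity.)

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/dev/hmcdrv", O_RDWR);	/* read-only open is rejected */

	if (fd < 0)
		return 1;
	/* the first write() after open() sets the FTP command string */
	if (write(fd, "dir /", strlen("dir /")) < 0)
		return 1;
	/* subsequent read()s return the response */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	/* lseek(fd, 0, SEEK_END) would clear the command before issuing a new one */
	close(fd);
	return 0;
}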
+
+#define KMSG_COMPONENT "hmcdrv"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/miscdevice.h>
+#include <linux/device.h>
+#include <linux/capability.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+
+#include "hmcdrv_dev.h"
+#include "hmcdrv_ftp.h"
+
+/* If the following macro is defined, then the HMC device creates its own
+ * separate device class (and dynamically assigns a major number). If not
+ * defined, then the HMC device is assigned to the "misc" device class.
+ *
+#define HMCDRV_DEV_CLASS "hmcftp"
+ */
+
+#define HMCDRV_DEV_NAME "hmcdrv"
+#define HMCDRV_DEV_BUSY_DELAY 500 /* delay between -EBUSY trials in ms */
+#define HMCDRV_DEV_BUSY_RETRIES 3 /* number of retries on -EBUSY */
+
+struct hmcdrv_dev_node {
+
+#ifdef HMCDRV_DEV_CLASS
+ struct cdev dev; /* character device structure */
+ umode_t mode; /* mode of device node (unused, zero) */
+#else
+ struct miscdevice dev; /* "misc" device structure */
+#endif
+
+};
+
+static int hmcdrv_dev_open(struct inode *inode, struct file *fp);
+static int hmcdrv_dev_release(struct inode *inode, struct file *fp);
+static loff_t hmcdrv_dev_seek(struct file *fp, loff_t pos, int whence);
+static ssize_t hmcdrv_dev_read(struct file *fp, char __user *ubuf,
+ size_t len, loff_t *pos);
+static ssize_t hmcdrv_dev_write(struct file *fp, const char __user *ubuf,
+ size_t len, loff_t *pos);
+static ssize_t hmcdrv_dev_transfer(char __kernel *cmd, loff_t offset,
+ char __user *buf, size_t len);
+
+/*
+ * device operations
+ */
+static const struct file_operations hmcdrv_dev_fops = {
+ .open = hmcdrv_dev_open,
+ .llseek = hmcdrv_dev_seek,
+ .release = hmcdrv_dev_release,
+ .read = hmcdrv_dev_read,
+ .write = hmcdrv_dev_write,
+};
+
+static struct hmcdrv_dev_node hmcdrv_dev; /* HMC device struct (static) */
+
+#ifdef HMCDRV_DEV_CLASS
+
+static struct class *hmcdrv_dev_class; /* device class pointer */
+static dev_t hmcdrv_dev_no; /* device number (major/minor) */
+
+/**
+ * hmcdrv_dev_name() - provides a naming hint for a device node in /dev
+ * @dev: device for which the naming/mode hint is given
+ * @mode: file mode for device node created in /dev
+ *
+ * See: devtmpfs.c, function devtmpfs_create_node()
+ *
+ * Return: recommended device file name in /dev
+ */
+static char *hmcdrv_dev_name(struct device *dev, umode_t *mode)
+{
+ char *nodename = NULL;
+ const char *devname = dev_name(dev); /* kernel device name */
+
+ if (devname)
+ nodename = kasprintf(GFP_KERNEL, "%s", devname);
+
+ /* on device destroy (rmmod) the mode pointer may be NULL
+ */
+ if (mode)
+ *mode = hmcdrv_dev.mode;
+
+ return nodename;
+}
+
+#endif /* HMCDRV_DEV_CLASS */
+
+/*
+ * open()
+ */
+static int hmcdrv_dev_open(struct inode *inode, struct file *fp)
+{
+ int rc;
+
+ /* check for non-blocking access, which is really unsupported
+ */
+ if (fp->f_flags & O_NONBLOCK)
+ return -EINVAL;
+
+ /* Because it makes no sense to open this device read-only (then a
+ * FTP command cannot be emitted), we respond with an error.
+ */
+ if ((fp->f_flags & O_ACCMODE) == O_RDONLY)
+ return -EINVAL;
+
+ /* prevent unloading this module as long as anyone holds the
+ * device file open - so increment the reference count here
+ */
+ if (!try_module_get(THIS_MODULE))
+ return -ENODEV;
+
+ fp->private_data = NULL; /* no command yet */
+ rc = hmcdrv_ftp_startup();
+ if (rc)
+ module_put(THIS_MODULE);
+
+ pr_debug("open file '/dev/%s' with return code %d\n",
+ fp->f_dentry->d_name.name, rc);
+ return rc;
+}
+
+/*
+ * release()
+ */
+static int hmcdrv_dev_release(struct inode *inode, struct file *fp)
+{
+ pr_debug("closing file '/dev/%s'\n", fp->f_dentry->d_name.name);
+ kfree(fp->private_data);
+ fp->private_data = NULL;
+ hmcdrv_ftp_shutdown();
+ module_put(THIS_MODULE);
+ return 0;
+}
+
+/*
+ * lseek()
+ */
+static loff_t hmcdrv_dev_seek(struct file *fp, loff_t pos, int whence)
+{
+ switch (whence) {
+ case SEEK_CUR: /* relative to current file position */
+ pos += fp->f_pos; /* new position stored in 'pos' */
+ break;
+
+ case SEEK_SET: /* absolute (relative to beginning of file) */
+ break; /* SEEK_SET */
+
+ /* We use SEEK_END as a special indicator for a SEEK_SET
+ * (set absolute position), combined with a FTP command
+ * clear.
+ */
+ case SEEK_END:
+ if (fp->private_data) {
+ kfree(fp->private_data);
+ fp->private_data = NULL;
+ }
+
+ break; /* SEEK_END */
+
+ default: /* SEEK_DATA, SEEK_HOLE: unsupported */
+ return -EINVAL;
+ }
+
+ if (pos < 0)
+ return -EINVAL;
+
+ if (fp->f_pos != pos)
+ ++fp->f_version;
+
+ fp->f_pos = pos;
+ return pos;
+}
+
+/*
+ * transfer (helper function)
+ */
+static ssize_t hmcdrv_dev_transfer(char __kernel *cmd, loff_t offset,
+ char __user *buf, size_t len)
+{
+ ssize_t retlen;
+ unsigned trials = HMCDRV_DEV_BUSY_RETRIES;
+
+ do {
+ retlen = hmcdrv_ftp_cmd(cmd, offset, buf, len);
+
+ if (retlen != -EBUSY)
+ break;
+
+ msleep(HMCDRV_DEV_BUSY_DELAY);
+
+ } while (--trials > 0);
+
+ return retlen;
+}
+
+/*
+ * read()
+ */
+static ssize_t hmcdrv_dev_read(struct file *fp, char __user *ubuf,
+ size_t len, loff_t *pos)
+{
+ ssize_t retlen;
+
+ if (((fp->f_flags & O_ACCMODE) == O_WRONLY) ||
+ (fp->private_data == NULL)) { /* no FTP cmd defined ? */
+ return -EBADF;
+ }
+
+ retlen = hmcdrv_dev_transfer((char *) fp->private_data,
+ *pos, ubuf, len);
+
+ pr_debug("read from file '/dev/%s' at %lld returns %zd/%zu\n",
+ fp->f_dentry->d_name.name, (long long) *pos, retlen, len);
+
+ if (retlen > 0)
+ *pos += retlen;
+
+ return retlen;
+}
+
+/*
+ * write()
+ */
+static ssize_t hmcdrv_dev_write(struct file *fp, const char __user *ubuf,
+ size_t len, loff_t *pos)
+{
+ ssize_t retlen;
+
+ pr_debug("writing file '/dev/%s' at pos. %lld with length %zd\n",
+ fp->f_dentry->d_name.name, (long long) *pos, len);
+
+ if (!fp->private_data) { /* first expect a cmd write */
+ fp->private_data = kmalloc(len + 1, GFP_KERNEL);
+
+ if (!fp->private_data)
+ return -ENOMEM;
+
+ if (!copy_from_user(fp->private_data, ubuf, len)) {
+ ((char *)fp->private_data)[len] = '\0';
+ return len;
+ }
+
+ kfree(fp->private_data);
+ fp->private_data = NULL;
+ return -EFAULT;
+ }
+
+ retlen = hmcdrv_dev_transfer((char *) fp->private_data,
+ *pos, (char __user *) ubuf, len);
+ if (retlen > 0)
+ *pos += retlen;
+
+ pr_debug("write to file '/dev/%s' returned %zd\n",
+ fp->f_dentry->d_name.name, retlen);
+
+ return retlen;
+}
+
+/**
+ * hmcdrv_dev_init() - creates a HMC drive CD/DVD device
+ *
+ * This function creates a HMC drive CD/DVD kernel device and an associated
+ * device under /dev, using a dynamically allocated major number.
+ *
+ * Return: 0 on success, else an error code.
+ */
+int hmcdrv_dev_init(void)
+{
+ int rc;
+
+#ifdef HMCDRV_DEV_CLASS
+ struct device *dev;
+
+ rc = alloc_chrdev_region(&hmcdrv_dev_no, 0, 1, HMCDRV_DEV_NAME);
+
+ if (rc)
+ goto out_err;
+
+ cdev_init(&hmcdrv_dev.dev, &hmcdrv_dev_fops);
+ hmcdrv_dev.dev.owner = THIS_MODULE;
+ rc = cdev_add(&hmcdrv_dev.dev, hmcdrv_dev_no, 1);
+
+ if (rc)
+ goto out_unreg;
+
+ /* At this point the character device exists in the kernel (see
+ * /proc/devices), but not under /dev nor /sys/devices/virtual. So
+ * we have to create an associated class (see /sys/class).
+ */
+ hmcdrv_dev_class = class_create(THIS_MODULE, HMCDRV_DEV_CLASS);
+
+ if (IS_ERR(hmcdrv_dev_class)) {
+ rc = PTR_ERR(hmcdrv_dev_class);
+ goto out_devdel;
+ }
+
+ /* Finally a device node in /dev has to be established (as 'mknod'
+ * does from the command line). Note that assigning a device node
+ * name/mode callback is optional (only needed for a mode other
+ * than 0600).
+ */
+ hmcdrv_dev.mode = 0; /* "unset" */
+ hmcdrv_dev_class->devnode = hmcdrv_dev_name;
+
+ dev = device_create(hmcdrv_dev_class, NULL, hmcdrv_dev_no, NULL,
+ "%s", HMCDRV_DEV_NAME);
+ if (!IS_ERR(dev))
+ return 0;
+
+ rc = PTR_ERR(dev);
+ class_destroy(hmcdrv_dev_class);
+ hmcdrv_dev_class = NULL;
+
+out_devdel:
+ cdev_del(&hmcdrv_dev.dev);
+
+out_unreg:
+ unregister_chrdev_region(hmcdrv_dev_no, 1);
+
+out_err:
+
+#else /* !HMCDRV_DEV_CLASS */
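+ /* Without a device class, register as a simple misc character
+ * device with a dynamically assigned minor number.
+ */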
+ hmcdrv_dev.dev.minor = MISC_DYNAMIC_MINOR;
+ hmcdrv_dev.dev.name = HMCDRV_DEV_NAME;
+ hmcdrv_dev.dev.fops = &hmcdrv_dev_fops;
+ hmcdrv_dev.dev.mode = 0; /* finally produces 0600 */
+ rc = misc_register(&hmcdrv_dev.dev);
+#endif /* HMCDRV_DEV_CLASS */
+
+ return rc;
+}
+
+/**
+ * hmcdrv_dev_exit() - destroys a HMC drive CD/DVD device
+ */
+void hmcdrv_dev_exit(void)
+{
+#ifdef HMCDRV_DEV_CLASS
+ if (!IS_ERR_OR_NULL(hmcdrv_dev_class)) {
+ device_destroy(hmcdrv_dev_class, hmcdrv_dev_no);
+ class_destroy(hmcdrv_dev_class);
+ }
+
+ cdev_del(&hmcdrv_dev.dev);
+ unregister_chrdev_region(hmcdrv_dev_no, 1);
+#else /* !HMCDRV_DEV_CLASS */
+ misc_deregister(&hmcdrv_dev.dev);
+#endif /* HMCDRV_DEV_CLASS */
+}
diff --git a/drivers/s390/char/hmcdrv_dev.h b/drivers/s390/char/hmcdrv_dev.h
new file mode 100644
index 000000000000..cb17f07e02de
--- /dev/null
+++ b/drivers/s390/char/hmcdrv_dev.h
@@ -0,0 +1,14 @@
+/*
+ * SE/HMC Drive FTP Device
+ *
+ * Copyright IBM Corp. 2013
+ * Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
+ */
+
+#ifndef __HMCDRV_DEV_H__
+#define __HMCDRV_DEV_H__
+
+int hmcdrv_dev_init(void);
+void hmcdrv_dev_exit(void);
+
+#endif /* __HMCDRV_DEV_H__ */
diff --git a/drivers/s390/char/hmcdrv_ftp.c b/drivers/s390/char/hmcdrv_ftp.c
new file mode 100644
index 000000000000..4bd63322fc29
--- /dev/null
+++ b/drivers/s390/char/hmcdrv_ftp.c
@@ -0,0 +1,343 @@
+/*
+ * HMC Drive FTP Services
+ *
+ * Copyright IBM Corp. 2013
+ * Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
+ */
+
+#define KMSG_COMPONENT "hmcdrv"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/export.h>
+
+#include <linux/ctype.h>
+#include <linux/crc16.h>
+
+#include "hmcdrv_ftp.h"
+#include "hmcdrv_cache.h"
+#include "sclp_ftp.h"
+#include "diag_ftp.h"
+
+/**
+ * struct hmcdrv_ftp_ops - HMC drive FTP operations
+ * @startup: startup function
+ * @shutdown: shutdown function
+ * @transfer: FTP transfer function
+ */
+struct hmcdrv_ftp_ops {
+ int (*startup)(void);
+ void (*shutdown)(void);
+ ssize_t (*transfer)(const struct hmcdrv_ftp_cmdspec *ftp,
+ size_t *fsize);
+};
+
+static enum hmcdrv_ftp_cmdid hmcdrv_ftp_cmd_getid(const char *cmd, int len);
+static int hmcdrv_ftp_parse(char *cmd, struct hmcdrv_ftp_cmdspec *ftp);
+
+static struct hmcdrv_ftp_ops *hmcdrv_ftp_funcs; /* current operations */
+static DEFINE_MUTEX(hmcdrv_ftp_mutex); /* mutex for hmcdrv_ftp_funcs */
+static unsigned hmcdrv_ftp_refcnt; /* start/shutdown reference counter */
+
+/**
+ * hmcdrv_ftp_cmd_getid() - determine FTP command ID from a command string
+ * @cmd: FTP command string (NOT zero-terminated)
+ * @len: length of FTP command string in @cmd
+ */
+static enum hmcdrv_ftp_cmdid hmcdrv_ftp_cmd_getid(const char *cmd, int len)
+{
+ /* HMC FTP command descriptor */
+ struct hmcdrv_ftp_cmd_desc {
+ const char *str; /* command string */
+ enum hmcdrv_ftp_cmdid cmd; /* associated command as enum */
+ };
+
+ /* Description of all HMC drive FTP commands
+ *
+ * Notes:
+ * 1. Array size should be a prime number.
+ * 2. Do not change the order of commands in the table (the
+ * index is determined by CRC % ARRAY_SIZE).
+ * 3. Original command 'nlist' was renamed, else the CRC would
+ * collide with 'append' (see point 2).
+ */
+ static const struct hmcdrv_ftp_cmd_desc ftpcmds[7] = {
+
+ {.str = "get", /* [0] get (CRC = 0x68eb) */
+ .cmd = HMCDRV_FTP_GET},
+ {.str = "dir", /* [1] dir (CRC = 0x6a9e) */
+ .cmd = HMCDRV_FTP_DIR},
+ {.str = "delete", /* [2] delete (CRC = 0x53ae) */
+ .cmd = HMCDRV_FTP_DELETE},
+ {.str = "nls", /* [3] nls (CRC = 0xf87c) */
+ .cmd = HMCDRV_FTP_NLIST},
+ {.str = "put", /* [4] put (CRC = 0xac56) */
+ .cmd = HMCDRV_FTP_PUT},
+ {.str = "append", /* [5] append (CRC = 0xf56e) */
+ .cmd = HMCDRV_FTP_APPEND},
+ {.str = NULL} /* [6] unused */
+ };
+
+ const struct hmcdrv_ftp_cmd_desc *pdesc;
+
+ u16 crc = 0xffffU;
+
+ if (len == 0)
+ return HMCDRV_FTP_NOOP; /* error indicator */
+
+ crc = crc16(crc, cmd, len);
+ pdesc = ftpcmds + (crc % ARRAY_SIZE(ftpcmds));
+ pr_debug("FTP command '%s' has CRC 0x%04x, at table pos. %lu\n",
+ cmd, crc, (crc % ARRAY_SIZE(ftpcmds)));
+
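+ /* The CRC only selects a table slot; verify that the string
+ * really matches, to reject unknown commands that collide.
+ */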
+ if (!pdesc->str || strncmp(pdesc->str, cmd, len))
+ return HMCDRV_FTP_NOOP;
+
+ pr_debug("FTP command '%s' found, with ID %d\n",
+ pdesc->str, pdesc->cmd);
+
+ return pdesc->cmd;
+}
+
+/**
+ * hmcdrv_ftp_parse() - HMC drive FTP command parser
+ * @cmd: FTP command string "<cmd> <filename>"
+ * @ftp: Pointer to FTP command specification buffer (output)
+ *
+ * Return: 0 on success, else a (negative) error code
+ */
+static int hmcdrv_ftp_parse(char *cmd, struct hmcdrv_ftp_cmdspec *ftp)
+{
+ char *start;
+ int argc = 0;
+
+ ftp->id = HMCDRV_FTP_NOOP;
+ ftp->fname = NULL;
+
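+ /* Parse in place: the first word selects the FTP command, the
+ * rest of the line (up to a control character) is the file name.
+ */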
+ while (*cmd != '\0') {
+
+ while (isspace(*cmd))
+ ++cmd;
+
+ if (*cmd == '\0')
+ break;
+
+ start = cmd;
+
+ switch (argc) {
+ case 0: /* 1st argument (FTP command) */
+ while ((*cmd != '\0') && !isspace(*cmd))
+ ++cmd;
+ ftp->id = hmcdrv_ftp_cmd_getid(start, cmd - start);
+ break;
+ case 1: /* 2nd / last argument (rest of line) */
+ while ((*cmd != '\0') && !iscntrl(*cmd))
+ ++cmd;
+ ftp->fname = start;
+ /* fall through */
+ default:
+ *cmd = '\0';
+ break;
+ } /* switch */
+
+ ++argc;
+ } /* while */
+
+ if (!ftp->fname || (ftp->id == HMCDRV_FTP_NOOP))
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * hmcdrv_ftp_do() - perform a HMC drive FTP, with data from kernel-space
+ * @ftp: pointer to FTP command specification
+ *
+ * Return: number of bytes read/written or a negative error code
+ */
+ssize_t hmcdrv_ftp_do(const struct hmcdrv_ftp_cmdspec *ftp)
+{
+ ssize_t len;
+
+ mutex_lock(&hmcdrv_ftp_mutex);
+
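+ /* Transfers are only allowed while a backend is selected and at
+ * least one user has called hmcdrv_ftp_startup().
+ */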
+ if (hmcdrv_ftp_funcs && hmcdrv_ftp_refcnt) {
+ pr_debug("starting transfer, cmd %d for '%s' at %lld with %zd bytes\n",
+ ftp->id, ftp->fname, (long long) ftp->ofs, ftp->len);
+ len = hmcdrv_cache_cmd(ftp, hmcdrv_ftp_funcs->transfer);
+ } else {
+ len = -ENXIO;
+ }
+
+ mutex_unlock(&hmcdrv_ftp_mutex);
+ return len;
+}
+EXPORT_SYMBOL(hmcdrv_ftp_do);
+
+/**
+ * hmcdrv_ftp_probe() - probe for the HMC drive FTP service
+ *
+ * Return: 0 if service is available, else a (negative) error code
+ */
+int hmcdrv_ftp_probe(void)
+{
+ int rc;
+
+ struct hmcdrv_ftp_cmdspec ftp = {
+ .id = HMCDRV_FTP_NOOP,
+ .ofs = 0,
+ .fname = "",
+ .len = PAGE_SIZE
+ };
+
+ ftp.buf = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+
+ if (!ftp.buf)
+ return -ENOMEM;
+
+ rc = hmcdrv_ftp_startup();
+
+ if (rc)
+ return rc;
+
+ rc = hmcdrv_ftp_do(&ftp);
+ free_page((unsigned long) ftp.buf);
+ hmcdrv_ftp_shutdown();
+
+ switch (rc) {
+ case -ENOENT: /* no such file/media or currently busy, */
+ case -EBUSY: /* but service seems to be available */
+ rc = 0;
+ break;
+ default: /* leave 'rc' as it is for [0, -EPERM, -E...] */
+ if (rc > 0)
+ rc = 0; /* clear length (success) */
+ break;
+ } /* switch */
+
+ return rc;
+}
+EXPORT_SYMBOL(hmcdrv_ftp_probe);
+
+/**
+ * hmcdrv_ftp_cmd() - Perform a HMC drive FTP, with data from user-space
+ *
+ * @cmd: FTP command string "<cmd> <filename>"
+ * @offset: file position to read/write
+ * @buf: user-space buffer for read/written directory/file
+ * @len: size of @buf (read/dir) or number of bytes to write
+ *
+ * This function must not be called before hmcdrv_ftp_startup() has been called.
+ *
+ * Return: number of bytes read/written or a negative error code
+ */
+ssize_t hmcdrv_ftp_cmd(char __kernel *cmd, loff_t offset,
+ char __user *buf, size_t len)
+{
+ int order;
+
+ struct hmcdrv_ftp_cmdspec ftp = {.len = len, .ofs = offset};
+ ssize_t retlen = hmcdrv_ftp_parse(cmd, &ftp);
+
+ if (retlen)
+ return retlen;
+
+ order = get_order(ftp.len);
+ ftp.buf = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, order);
+
+ if (!ftp.buf)
+ return -ENOMEM;
+
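+ /* The copy direction depends on the command: "get"/"dir"/"nls"
+ * copy the result to user space, "put"/"append" copy the payload
+ * from user space first, and "delete" needs no data copy at all.
+ */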
+ switch (ftp.id) {
+ case HMCDRV_FTP_DIR:
+ case HMCDRV_FTP_NLIST:
+ case HMCDRV_FTP_GET:
+ retlen = hmcdrv_ftp_do(&ftp);
+
+ if ((retlen >= 0) &&
+ copy_to_user(buf, ftp.buf, retlen))
+ retlen = -EFAULT;
+ break;
+
+ case HMCDRV_FTP_PUT:
+ case HMCDRV_FTP_APPEND:
+ if (!copy_from_user(ftp.buf, buf, ftp.len))
+ retlen = hmcdrv_ftp_do(&ftp);
+ else
+ retlen = -EFAULT;
+ break;
+
+ case HMCDRV_FTP_DELETE:
+ retlen = hmcdrv_ftp_do(&ftp);
+ break;
+
+ default:
+ retlen = -EOPNOTSUPP;
+ break;
+ }
+
+ free_pages((unsigned long) ftp.buf, order);
+ return retlen;
+}
+
+/**
+ * hmcdrv_ftp_startup() - startup of HMC drive FTP functionality for a
+ * dedicated (owner) instance
+ *
+ * Return: 0 on success, else a (negative) error code
+ */
+int hmcdrv_ftp_startup(void)
+{
+ static struct hmcdrv_ftp_ops hmcdrv_ftp_zvm = {
+ .startup = diag_ftp_startup,
+ .shutdown = diag_ftp_shutdown,
+ .transfer = diag_ftp_cmd
+ };
+
+ static struct hmcdrv_ftp_ops hmcdrv_ftp_lpar = {
+ .startup = sclp_ftp_startup,
+ .shutdown = sclp_ftp_shutdown,
+ .transfer = sclp_ftp_cmd
+ };
+
+ int rc = 0;
+
+ mutex_lock(&hmcdrv_ftp_mutex); /* block transfers while start-up */
+
+ if (hmcdrv_ftp_refcnt == 0) {
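+ /* First user: select the transport for the current environment,
+ * DIAG-based under z/VM, SCLP-based on LPAR or KVM.
+ */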
+ if (MACHINE_IS_VM)
+ hmcdrv_ftp_funcs = &hmcdrv_ftp_zvm;
+ else if (MACHINE_IS_LPAR || MACHINE_IS_KVM)
+ hmcdrv_ftp_funcs = &hmcdrv_ftp_lpar;
+ else
+ rc = -EOPNOTSUPP;
+
+ if (hmcdrv_ftp_funcs)
+ rc = hmcdrv_ftp_funcs->startup();
+ }
+
+ if (!rc)
+ ++hmcdrv_ftp_refcnt;
+
+ mutex_unlock(&hmcdrv_ftp_mutex);
+ return rc;
+}
+EXPORT_SYMBOL(hmcdrv_ftp_startup);
+
+/**
+ * hmcdrv_ftp_shutdown() - shutdown of HMC drive FTP functionality for a
+ * dedicated (owner) instance
+ */
+void hmcdrv_ftp_shutdown(void)
+{
+ mutex_lock(&hmcdrv_ftp_mutex);
+ --hmcdrv_ftp_refcnt;
+
+ if ((hmcdrv_ftp_refcnt == 0) && hmcdrv_ftp_funcs)
+ hmcdrv_ftp_funcs->shutdown();
+
+ mutex_unlock(&hmcdrv_ftp_mutex);
+}
+EXPORT_SYMBOL(hmcdrv_ftp_shutdown);
diff --git a/drivers/s390/char/hmcdrv_ftp.h b/drivers/s390/char/hmcdrv_ftp.h
new file mode 100644
index 000000000000..f3643a7b3676
--- /dev/null
+++ b/drivers/s390/char/hmcdrv_ftp.h
@@ -0,0 +1,63 @@
+/*
+ * SE/HMC Drive FTP Services
+ *
+ * Copyright IBM Corp. 2013
+ * Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
+ */
+
+#ifndef __HMCDRV_FTP_H__
+#define __HMCDRV_FTP_H__
+
+#include <linux/types.h> /* size_t, loff_t */
+
+/*
+ * HMC drive FTP service: maximum length of a path (incl. the terminating NUL)
+ */
+#define HMCDRV_FTP_FIDENT_MAX 192
+
+/**
+ * enum hmcdrv_ftp_cmdid - HMC drive FTP commands
+ * @HMCDRV_FTP_NOOP: do nothing (only for probing)
+ * @HMCDRV_FTP_GET: read a file
+ * @HMCDRV_FTP_PUT: (over-) write a file
+ * @HMCDRV_FTP_APPEND: append to a file
+ * @HMCDRV_FTP_DIR: list directory long (ls -l)
+ * @HMCDRV_FTP_NLIST: list files, no directories (name list)
+ * @HMCDRV_FTP_DELETE: delete a file
+ * @HMCDRV_FTP_CANCEL: cancel operation (SCLP/LPAR only)
+ */
+enum hmcdrv_ftp_cmdid {
+ HMCDRV_FTP_NOOP = 0,
+ HMCDRV_FTP_GET = 1,
+ HMCDRV_FTP_PUT = 2,
+ HMCDRV_FTP_APPEND = 3,
+ HMCDRV_FTP_DIR = 4,
+ HMCDRV_FTP_NLIST = 5,
+ HMCDRV_FTP_DELETE = 6,
+ HMCDRV_FTP_CANCEL = 7
+};
+
+/**
+ * struct hmcdrv_ftp_cmdspec - FTP command specification
+ * @id: FTP command ID
+ * @ofs: offset in file
+ * @fname: filename (ASCII), null-terminated
+ * @buf: kernel-space transfer data buffer, 4k aligned
+ * @len: (max) number of bytes to transfer from/to @buf
+ */
+struct hmcdrv_ftp_cmdspec {
+ enum hmcdrv_ftp_cmdid id;
+ loff_t ofs;
+ const char *fname;
+ void __kernel *buf;
+ size_t len;
+};
+
+int hmcdrv_ftp_startup(void);
+void hmcdrv_ftp_shutdown(void);
+int hmcdrv_ftp_probe(void);
+ssize_t hmcdrv_ftp_do(const struct hmcdrv_ftp_cmdspec *ftp);
+ssize_t hmcdrv_ftp_cmd(char __kernel *cmd, loff_t offset,
+ char __user *buf, size_t len);
+
+#endif /* __HMCDRV_FTP_H__ */
diff --git a/drivers/s390/char/hmcdrv_mod.c b/drivers/s390/char/hmcdrv_mod.c
new file mode 100644
index 000000000000..505c6a78ee1a
--- /dev/null
+++ b/drivers/s390/char/hmcdrv_mod.c
@@ -0,0 +1,64 @@
+/*
+ * HMC Drive DVD Module
+ *
+ * Copyright IBM Corp. 2013
+ * Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
+ */
+
+#define KMSG_COMPONENT "hmcdrv"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/version.h>
+#include <linux/stat.h>
+
+#include "hmcdrv_ftp.h"
+#include "hmcdrv_dev.h"
+#include "hmcdrv_cache.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Copyright 2013 IBM Corporation");
+MODULE_DESCRIPTION("HMC drive DVD access");
+
+/*
+ * module parameter 'cachesize'
+ */
+static size_t hmcdrv_mod_cachesize = HMCDRV_CACHE_SIZE_DFLT;
+module_param_named(cachesize, hmcdrv_mod_cachesize, ulong, S_IRUGO);
+
+/**
+ * hmcdrv_mod_init() - module init function
+ */
+static int __init hmcdrv_mod_init(void)
+{
+ int rc = hmcdrv_ftp_probe(); /* perform w/o cache */
+
+ if (rc)
+ return rc;
+
+ rc = hmcdrv_cache_startup(hmcdrv_mod_cachesize);
+
+ if (rc)
+ return rc;
+
+ rc = hmcdrv_dev_init();
+
+ if (rc)
+ hmcdrv_cache_shutdown();
+
+ return rc;
+}
+
+/**
+ * hmcdrv_mod_exit() - module exit function
+ */
+static void __exit hmcdrv_mod_exit(void)
+{
+ hmcdrv_dev_exit();
+ hmcdrv_cache_shutdown();
+}
+
+module_init(hmcdrv_mod_init);
+module_exit(hmcdrv_mod_exit);
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index a68b5ec7d042..a88069f8c677 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -19,6 +19,7 @@
#define EVTYP_OPCMD 0x01
#define EVTYP_MSG 0x02
+#define EVTYP_DIAG_TEST 0x07
#define EVTYP_STATECHANGE 0x08
#define EVTYP_PMSGCMD 0x09
#define EVTYP_CNTLPROGOPCMD 0x20
@@ -32,6 +33,7 @@
#define EVTYP_OPCMD_MASK 0x80000000
#define EVTYP_MSG_MASK 0x40000000
+#define EVTYP_DIAG_TEST_MASK 0x02000000
#define EVTYP_STATECHANGE_MASK 0x01000000
#define EVTYP_PMSGCMD_MASK 0x00800000
#define EVTYP_CTLPROGOPCMD_MASK 0x00000001
diff --git a/drivers/s390/char/sclp_diag.h b/drivers/s390/char/sclp_diag.h
new file mode 100644
index 000000000000..59c4afa5e670
--- /dev/null
+++ b/drivers/s390/char/sclp_diag.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright IBM Corp. 2013
+ * Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
+ */
+
+#ifndef _SCLP_DIAG_H
+#define _SCLP_DIAG_H
+
+#include <linux/types.h>
+
+/* return codes for Diagnostic Test FTP Service, as indicated in member
+ * sclp_diag_ftp::ldflg
+ */
+#define SCLP_DIAG_FTP_OK 0x80U /* success */
+#define SCLP_DIAG_FTP_LDFAIL 0x01U /* load failed */
+#define SCLP_DIAG_FTP_LDNPERM 0x02U /* not allowed */
+#define SCLP_DIAG_FTP_LDRUNS 0x03U /* LD runs */
+#define SCLP_DIAG_FTP_LDNRUNS 0x04U /* LD does not run */
+
+#define SCLP_DIAG_FTP_XPCX 0x80 /* PCX communication code */
+#define SCLP_DIAG_FTP_ROUTE 4 /* routing code for new FTP service */
+
+/*
+ * length of Diagnostic Test FTP Service event buffer
+ */
+#define SCLP_DIAG_FTP_EVBUF_LEN \
+ (offsetof(struct sclp_diag_evbuf, mdd) + \
+ sizeof(struct sclp_diag_ftp))
+
+/**
+ * struct sclp_diag_ftp - Diagnostic Test FTP Service model-dependent data
+ * @pcx: code for PCX communication (should be 0x80)
+ * @ldflg: load flag (see defines above)
+ * @cmd: FTP command
+ * @pgsize: page size (0 = 4kB, 1 = large page size)
+ * @srcflg: source flag
+ * @spare: reserved (zeroes)
+ * @offset: file offset
+ * @fsize: file size
+ * @length: buffer size or number of bytes transferred, respectively
+ * @failaddr: failing address
+ * @bufaddr: buffer address, virtual
+ * @asce: region or segment table designation
+ * @fident: file name (ASCII, zero-terminated)
+ */
+struct sclp_diag_ftp {
+ u8 pcx;
+ u8 ldflg;
+ u8 cmd;
+ u8 pgsize;
+ u8 srcflg;
+ u8 spare;
+ u64 offset;
+ u64 fsize;
+ u64 length;
+ u64 failaddr;
+ u64 bufaddr;
+ u64 asce;
+
+ u8 fident[256];
+} __packed;
+
+/**
+ * struct sclp_diag_evbuf - Diagnostic Test (ET7) Event Buffer
+ * @hdr: event buffer header
+ * @route: diagnostic route
+ * @mdd: model-dependent data (@route dependent)
+ */
+struct sclp_diag_evbuf {
+ struct evbuf_header hdr;
+ u16 route;
+
+ union {
+ struct sclp_diag_ftp ftp;
+ } mdd;
+} __packed;
+
+/**
+ * struct sclp_diag_sccb - Diagnostic Test (ET7) SCCB
+ * @hdr: SCCB header
+ * @evbuf: event buffer
+ */
+struct sclp_diag_sccb {
+
+ struct sccb_header hdr;
+ struct sclp_diag_evbuf evbuf;
+} __packed;
+
+#endif /* _SCLP_DIAG_H */
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index 1918d9dff45d..5bd6cb145a87 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -281,7 +281,7 @@ out:
static unsigned int __init sclp_con_check_linemode(struct init_sccb *sccb)
{
- if (!(sccb->sclp_send_mask & (EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK)))
+ if (!(sccb->sclp_send_mask & EVTYP_OPCMD_MASK))
return 0;
if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)))
return 0;
diff --git a/drivers/s390/char/sclp_ftp.c b/drivers/s390/char/sclp_ftp.c
new file mode 100644
index 000000000000..6561cc5b2d5d
--- /dev/null
+++ b/drivers/s390/char/sclp_ftp.c
@@ -0,0 +1,275 @@
+/*
+ * SCLP Event Type (ET) 7 - Diagnostic Test FTP Services, usable on LPAR
+ *
+ * Copyright IBM Corp. 2013
+ * Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
+ *
+ */
+
+#define KMSG_COMPONENT "hmcdrv"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/wait.h>
+#include <linux/string.h>
+#include <linux/jiffies.h>
+#include <asm/sysinfo.h>
+#include <asm/ebcdic.h>
+
+#include "sclp.h"
+#include "sclp_diag.h"
+#include "sclp_ftp.h"
+
+static DECLARE_COMPLETION(sclp_ftp_rx_complete);
+static u8 sclp_ftp_ldflg;
+static u64 sclp_ftp_fsize;
+static u64 sclp_ftp_length;
+
+/**
+ * sclp_ftp_txcb() - Diagnostic Test FTP services SCLP command callback
+ */
+static void sclp_ftp_txcb(struct sclp_req *req, void *data)
+{
+ struct completion *completion = data;
+
+#ifdef DEBUG
+ pr_debug("SCLP (ET7) TX-IRQ, SCCB @ 0x%p: %*phN\n",
+ req->sccb, 24, req->sccb);
+#endif
+ complete(completion);
+}
+
+/**
+ * sclp_ftp_rxcb() - Diagnostic Test FTP services receiver event callback
+ */
+static void sclp_ftp_rxcb(struct evbuf_header *evbuf)
+{
+ struct sclp_diag_evbuf *diag = (struct sclp_diag_evbuf *) evbuf;
+
+ /*
+ * Check for Diagnostic Test FTP Service
+ */
+ if (evbuf->type != EVTYP_DIAG_TEST ||
+ diag->route != SCLP_DIAG_FTP_ROUTE ||
+ diag->mdd.ftp.pcx != SCLP_DIAG_FTP_XPCX ||
+ evbuf->length < SCLP_DIAG_FTP_EVBUF_LEN)
+ return;
+
+#ifdef DEBUG
+ pr_debug("SCLP (ET7) RX-IRQ, Event @ 0x%p: %*phN\n",
+ evbuf, 24, evbuf);
+#endif
+
+ /*
+ * Because the event buffer is located in a page which is owned
+ * by the SCLP core, all data of interest must be copied. The
+ * error indication is in 'sclp_ftp_ldflg'.
+ */
+ sclp_ftp_ldflg = diag->mdd.ftp.ldflg;
+ sclp_ftp_fsize = diag->mdd.ftp.fsize;
+ sclp_ftp_length = diag->mdd.ftp.length;
+
+ complete(&sclp_ftp_rx_complete);
+}
+
+/**
+ * sclp_ftp_et7() - start a Diagnostic Test FTP Service SCLP request
+ * @ftp: pointer to FTP descriptor
+ *
+ * Return: 0 on success, else a (negative) error code
+ */
+static int sclp_ftp_et7(const struct hmcdrv_ftp_cmdspec *ftp)
+{
+ struct completion completion;
+ struct sclp_diag_sccb *sccb;
+ struct sclp_req *req;
+ size_t len;
+ int rc;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!req || !sccb) {
+ rc = -ENOMEM;
+ goto out_free;
+ }
+
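+ /* Fill in the SCCB and the ET7 event buffer describing the
+ * requested FTP transfer.
+ */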
+ sccb->hdr.length = SCLP_DIAG_FTP_EVBUF_LEN +
+ sizeof(struct sccb_header);
+ sccb->evbuf.hdr.type = EVTYP_DIAG_TEST;
+ sccb->evbuf.hdr.length = SCLP_DIAG_FTP_EVBUF_LEN;
+ sccb->evbuf.hdr.flags = 0; /* clear processed-buffer */
+ sccb->evbuf.route = SCLP_DIAG_FTP_ROUTE;
+ sccb->evbuf.mdd.ftp.pcx = SCLP_DIAG_FTP_XPCX;
+ sccb->evbuf.mdd.ftp.srcflg = 0;
+ sccb->evbuf.mdd.ftp.pgsize = 0;
+ sccb->evbuf.mdd.ftp.asce = _ASCE_REAL_SPACE;
+ sccb->evbuf.mdd.ftp.ldflg = SCLP_DIAG_FTP_LDFAIL;
+ sccb->evbuf.mdd.ftp.fsize = 0;
+ sccb->evbuf.mdd.ftp.cmd = ftp->id;
+ sccb->evbuf.mdd.ftp.offset = ftp->ofs;
+ sccb->evbuf.mdd.ftp.length = ftp->len;
+ sccb->evbuf.mdd.ftp.bufaddr = virt_to_phys(ftp->buf);
+
+ len = strlcpy(sccb->evbuf.mdd.ftp.fident, ftp->fname,
+ HMCDRV_FTP_FIDENT_MAX);
+ if (len >= HMCDRV_FTP_FIDENT_MAX) {
+ rc = -EINVAL;
+ goto out_free;
+ }
+
+ req->command = SCLP_CMDW_WRITE_EVENT_DATA;
+ req->sccb = sccb;
+ req->status = SCLP_REQ_FILLED;
+ req->callback = sclp_ftp_txcb;
+ req->callback_data = &completion;
+
+ init_completion(&completion);
+
+ rc = sclp_add_request(req);
+ if (rc)
+ goto out_free;
+
+ /* Wait for end of ftp sclp command. */
+ wait_for_completion(&completion);
+
+#ifdef DEBUG
+ pr_debug("status of SCLP (ET7) request is 0x%04x (0x%02x)\n",
+ sccb->hdr.response_code, sccb->evbuf.hdr.flags);
+#endif
+
+ /*
+ * Check if sclp accepted the request. The data transfer runs
+ * asynchronously and the completion is indicated with an
+ * sclp ET7 event.
+ */
+ if (req->status != SCLP_REQ_DONE ||
+ (sccb->evbuf.hdr.flags & 0x80) == 0 || /* processed-buffer */
+ (sccb->hdr.response_code & 0xffU) != 0x20U) {
+ rc = -EIO;
+ }
+
+out_free:
+ free_page((unsigned long) sccb);
+ kfree(req);
+ return rc;
+}
+
+/**
+ * sclp_ftp_cmd() - executes a HMC related SCLP Diagnose (ET7) FTP command
+ * @ftp: pointer to FTP command specification
+ * @fsize: return of file size (or NULL if undesirable)
+ *
+ * Attention: this function is not reentrant, so the caller must
+ * ensure proper locking.
+ *
+ * Return: number of bytes read/written or a (negative) error code
+ */
+ssize_t sclp_ftp_cmd(const struct hmcdrv_ftp_cmdspec *ftp, size_t *fsize)
+{
+ ssize_t len;
+#ifdef DEBUG
+ unsigned long start_jiffies;
+
+ pr_debug("starting SCLP (ET7), cmd %d for '%s' at %lld with %zd bytes\n",
+ ftp->id, ftp->fname, (long long) ftp->ofs, ftp->len);
+ start_jiffies = jiffies;
+#endif
+
+ init_completion(&sclp_ftp_rx_complete);
+
+ /* Start ftp sclp command. */
+ len = sclp_ftp_et7(ftp);
+ if (len)
+ goto out_unlock;
+
+ /*
+ * There is no way to cancel the sclp ET7 request, the code
+ * needs to wait unconditionally until the transfer is complete.
+ */
+ wait_for_completion(&sclp_ftp_rx_complete);
+
+#ifdef DEBUG
+ pr_debug("completed SCLP (ET7) request after %lu ms (all)\n",
+ (jiffies - start_jiffies) * 1000 / HZ);
+ pr_debug("return code of SCLP (ET7) FTP Service is 0x%02x, with %lld/%lld bytes\n",
+ sclp_ftp_ldflg, sclp_ftp_length, sclp_ftp_fsize);
+#endif
+
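+ /* Translate the load flag reported by the ET7 event into a byte
+ * count or a Linux error code.
+ */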
+ switch (sclp_ftp_ldflg) {
+ case SCLP_DIAG_FTP_OK:
+ len = sclp_ftp_length;
+ if (fsize)
+ *fsize = sclp_ftp_fsize;
+ break;
+ case SCLP_DIAG_FTP_LDNPERM:
+ len = -EPERM;
+ break;
+ case SCLP_DIAG_FTP_LDRUNS:
+ len = -EBUSY;
+ break;
+ case SCLP_DIAG_FTP_LDFAIL:
+ len = -ENOENT;
+ break;
+ default:
+ len = -EIO;
+ break;
+ }
+
+out_unlock:
+ return len;
+}
+
+/*
+ * ET7 event listener
+ */
+static struct sclp_register sclp_ftp_event = {
+ .send_mask = EVTYP_DIAG_TEST_MASK, /* want tx events */
+ .receive_mask = EVTYP_DIAG_TEST_MASK, /* want rx events */
+ .receiver_fn = sclp_ftp_rxcb, /* async callback (rx) */
+ .state_change_fn = NULL,
+ .pm_event_fn = NULL,
+};
+
+/**
+ * sclp_ftp_startup() - startup of FTP services, when running on LPAR
+ */
+int sclp_ftp_startup(void)
+{
+#ifdef DEBUG
+ unsigned long info;
+#endif
+ int rc;
+
+ rc = sclp_register(&sclp_ftp_event);
+ if (rc)
+ return rc;
+
+#ifdef DEBUG
+ info = get_zeroed_page(GFP_KERNEL);
+
+ if (info != 0) {
+ struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
+
+ if (!stsi(info222, 2, 2, 2)) { /* get SYSIB 2.2.2 */
+ info222->name[sizeof(info222->name) - 1] = '\0';
+ EBCASC_500(info222->name, sizeof(info222->name) - 1);
+ pr_debug("SCLP (ET7) FTP Service working on LPAR %u (%s)\n",
+ info222->lpar_number, info222->name);
+ }
+
+ free_page(info);
+ }
+#endif /* DEBUG */
+ return 0;
+}
+
+/**
+ * sclp_ftp_shutdown() - shutdown of FTP services, when running on LPAR
+ */
+void sclp_ftp_shutdown(void)
+{
+ sclp_unregister(&sclp_ftp_event);
+}
diff --git a/drivers/s390/char/sclp_ftp.h b/drivers/s390/char/sclp_ftp.h
new file mode 100644
index 000000000000..98ba3183e7d9
--- /dev/null
+++ b/drivers/s390/char/sclp_ftp.h
@@ -0,0 +1,21 @@
+/*
+ * SCLP Event Type (ET) 7 - Diagnostic Test FTP Services, usable on LPAR
+ *
+ * Note that none of the functions exported here are reentrant, so
+ * usage must be serialized by the caller (e.g. with a mutex).
+ *
+ * Copyright IBM Corp. 2013
+ * Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
+ */
+
+#ifndef __SCLP_FTP_H__
+#define __SCLP_FTP_H__
+
+#include "hmcdrv_ftp.h"
+
+int sclp_ftp_startup(void);
+void sclp_ftp_shutdown(void);
+ssize_t sclp_ftp_cmd(const struct hmcdrv_ftp_cmdspec *ftp, size_t *fsize);
+
+#endif /* __SCLP_FTP_H__ */
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c
index 3b13d58fe87b..35a84af875ee 100644
--- a/drivers/s390/char/sclp_rw.c
+++ b/drivers/s390/char/sclp_rw.c
@@ -33,7 +33,7 @@ static void sclp_rw_pm_event(struct sclp_register *reg,
/* Event type structure for write message and write priority message */
static struct sclp_register sclp_rw_event = {
- .send_mask = EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK,
+ .send_mask = EVTYP_MSG_MASK,
.pm_event_fn = sclp_rw_pm_event,
};
@@ -456,14 +456,9 @@ sclp_emit_buffer(struct sclp_buffer *buffer,
return -EIO;
sccb = buffer->sccb;
- if (sclp_rw_event.sclp_receive_mask & EVTYP_MSG_MASK)
- /* Use normal write message */
- sccb->msg_buf.header.type = EVTYP_MSG;
- else if (sclp_rw_event.sclp_receive_mask & EVTYP_PMSGCMD_MASK)
- /* Use write priority message */
- sccb->msg_buf.header.type = EVTYP_PMSGCMD;
- else
- return -EOPNOTSUPP;
+ /* Use normal write message */
+ sccb->msg_buf.header.type = EVTYP_MSG;
+
buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA;
buffer->request.status = SCLP_REQ_FILLED;
buffer->request.callback = sclp_writedata_callback;
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index b9a9f721716d..ae67386c03d3 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -206,10 +206,6 @@ sclp_vt220_callback(struct sclp_req *request, void *data)
static int
__sclp_vt220_emit(struct sclp_vt220_request *request)
{
- if (!(sclp_vt220_register.sclp_receive_mask & EVTYP_VT220MSG_MASK)) {
- request->sclp_req.status = SCLP_REQ_FAILED;
- return -EIO;
- }
request->sclp_req.command = SCLP_CMDW_WRITE_EVENT_DATA;
request->sclp_req.status = SCLP_REQ_FILLED;
request->sclp_req.callback = sclp_vt220_callback;
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index 6dc60725de92..77f9b9c2f701 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -402,7 +402,9 @@ __tapechar_ioctl(struct tape_device *device,
memset(&get, 0, sizeof(get));
get.mt_type = MT_ISUNKNOWN;
get.mt_resid = 0 /* device->devstat.rescnt */;
- get.mt_dsreg = device->tape_state;
+ get.mt_dsreg =
+ ((device->char_data.block_size << MT_ST_BLKSIZE_SHIFT)
+ & MT_ST_BLKSIZE_MASK);
/* FIXME: mt_gstat, mt_erreg, mt_fileno */
get.mt_gstat = 0;
get.mt_erreg = 0;
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index a8848db7b09d..9bb48d70957c 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -338,7 +338,6 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp)
/* set the file options */
filp->private_data = logptr;
- filp->f_op = &vmlogrdr_fops;
/* start recording for this service*/
if (logptr->autorecording) {
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 1884653e4472..efcf48481c5f 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -28,6 +28,7 @@
#include <asm/processor.h>
#include <asm/irqflags.h>
#include <asm/checksum.h>
+#include <asm/switch_to.h>
#include "sclp.h"
#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x)
@@ -149,18 +150,21 @@ static int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count)
static int __init init_cpu_info(enum arch_id arch)
{
- struct save_area *sa;
+ struct save_area_ext *sa_ext;
/* get info for boot cpu from lowcore, stored in the HSA */
- sa = dump_save_area_create(0);
- if (!sa)
+ sa_ext = dump_save_area_create(0);
+ if (!sa_ext)
return -ENOMEM;
- if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) {
+ if (memcpy_hsa_kernel(&sa_ext->sa, sys_info.sa_base,
+ sys_info.sa_size) < 0) {
TRACE("could not copy from HSA\n");
- kfree(sa);
+ kfree(sa_ext);
return -EIO;
}
+ if (MACHINE_HAS_VX)
+ save_vx_regs_safe(sa_ext->vx_regs);
return 0;
}
@@ -258,7 +262,7 @@ static int zcore_add_lc(char __user *buf, unsigned long start, size_t count)
unsigned long sa_start, sa_end; /* save area range */
unsigned long prefix;
unsigned long sa_off, len, buf_off;
- struct save_area *save_area = dump_save_areas.areas[i];
+ struct save_area *save_area = &dump_save_areas.areas[i]->sa;
prefix = save_area->pref_reg;
sa_start = prefix + sys_info.sa_base;
@@ -612,7 +616,7 @@ static void __init zcore_header_init(int arch, struct zcore_header *hdr,
hdr->tod = get_tod_clock();
get_cpu_id(&hdr->cpu_id);
for (i = 0; i < dump_save_areas.count; i++) {
- prefix = dump_save_areas.areas[i]->pref_reg;
+ prefix = dump_save_areas.areas[i]->sa.pref_reg;
hdr->real_cpu_cnt++;
if (!prefix)
continue;
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index 00bfbee0af9e..56eb4ee4deba 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -87,7 +87,7 @@ static irqreturn_t do_airq_interrupt(int irq, void *dummy)
struct airq_struct *airq;
struct hlist_head *head;
- __this_cpu_write(s390_idle.nohz_delay, 1);
+ set_cpu_flag(CIF_NOHZ_DELAY);
tpi_info = (struct tpi_info *) &get_irq_regs()->int_code;
head = &airq_lists[tpi_info->isc];
rcu_read_lock();
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 2905d8b0ec95..d5a6f287d2fe 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -561,7 +561,7 @@ static irqreturn_t do_cio_interrupt(int irq, void *dummy)
struct subchannel *sch;
struct irb *irb;
- __this_cpu_write(s390_idle.nohz_delay, 1);
+ set_cpu_flag(CIF_NOHZ_DELAY);
tpi_info = (struct tpi_info *) &get_irq_regs()->int_code;
irb = &__get_cpu_var(cio_irb);
sch = (struct subchannel *)(unsigned long) tpi_info->intparm;
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 4038437ff033..99485415dcc2 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -664,6 +664,17 @@ static ssize_t ap_hwtype_show(struct device *dev,
}
static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL);
+
+static ssize_t ap_raw_hwtype_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_device *ap_dev = to_ap_dev(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->raw_hwtype);
+}
+
+static DEVICE_ATTR(raw_hwtype, 0444, ap_raw_hwtype_show, NULL);
+
static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -734,6 +745,7 @@ static DEVICE_ATTR(ap_functions, 0444, ap_functions_show, NULL);
static struct attribute *ap_dev_attrs[] = {
&dev_attr_hwtype.attr,
+ &dev_attr_raw_hwtype.attr,
&dev_attr_depth.attr,
&dev_attr_request_count.attr,
&dev_attr_requestq_count.attr,
@@ -1188,6 +1200,10 @@ static int ap_select_domain(void)
ap_qid_t qid;
int rc, i, j;
+ /* If APXA isn't installed, only 16 domains can be defined */
+ if (!ap_configuration->ap_extended && (ap_domain_index > 15))
+ return -EINVAL;
+
/*
* We want to use a single domain. Either the one specified with
* the "domain=" parameter or the domain with the maximum number
@@ -1413,9 +1429,13 @@ static void ap_scan_bus(struct work_struct *unused)
continue;
}
break;
+ case 11:
+ ap_dev->device_type = 10;
+ break;
default:
ap_dev->device_type = device_type;
}
+ ap_dev->raw_hwtype = device_type;
rc = ap_query_functions(qid, &device_functions);
if (!rc)
@@ -1900,9 +1920,15 @@ static void ap_reset_all(void)
{
int i, j;
- for (i = 0; i < AP_DOMAINS; i++)
- for (j = 0; j < AP_DEVICES; j++)
+ for (i = 0; i < AP_DOMAINS; i++) {
+ if (!ap_test_config_domain(i))
+ continue;
+ for (j = 0; j < AP_DEVICES; j++) {
+ if (!ap_test_config_card_id(j))
+ continue;
ap_reset_queue(AP_MKQID(j, i));
+ }
+ }
}
static struct reset_call ap_reset_call = {
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 6405ae24a7a6..055a0f956d17 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -31,7 +31,7 @@
#include <linux/types.h>
#define AP_DEVICES 64 /* Number of AP devices. */
-#define AP_DOMAINS 16 /* Number of AP domains. */
+#define AP_DOMAINS 256 /* Number of AP domains. */
#define AP_MAX_RESET 90 /* Maximum number of resets. */
#define AP_RESET_TIMEOUT (HZ*0.7) /* Time in ticks for reset timeouts. */
#define AP_CONFIG_TIME 30 /* Time in seconds between AP bus rescans. */
@@ -45,9 +45,9 @@ extern int ap_domain_index;
*/
typedef unsigned int ap_qid_t;
-#define AP_MKQID(_device,_queue) (((_device) & 63) << 8 | ((_queue) & 15))
+#define AP_MKQID(_device, _queue) (((_device) & 63) << 8 | ((_queue) & 255))
#define AP_QID_DEVICE(_qid) (((_qid) >> 8) & 63)
-#define AP_QID_QUEUE(_qid) ((_qid) & 15)
+#define AP_QID_QUEUE(_qid) ((_qid) & 255)
/**
* struct ap_queue_status - Holds the AP queue status.
@@ -161,6 +161,7 @@ struct ap_device {
ap_qid_t qid; /* AP queue id. */
int queue_depth; /* AP queue depth.*/
int device_type; /* AP device type. */
+ int raw_hwtype; /* AP raw hardware type. */
unsigned int functions; /* AP device function bitfield. */
int unregistered; /* marks AP device as unregistered */
struct timer_list timeout; /* Timer for request timeouts. */
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 0e18c5dcd91f..08f1830cbfc4 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -343,10 +343,11 @@ struct zcrypt_ops *__ops_lookup(unsigned char *name, int variant)
break;
}
}
+ if (!found || !try_module_get(zops->owner))
+ zops = NULL;
+
spin_unlock_bh(&zcrypt_ops_list_lock);
- if (!found)
- return NULL;
return zops;
}
@@ -359,8 +360,6 @@ struct zcrypt_ops *zcrypt_msgtype_request(unsigned char *name, int variant)
request_module("%s", name);
zops = __ops_lookup(name, variant);
}
- if ((!zops) || (!try_module_get(zops->owner)))
- return NULL;
return zops;
}
EXPORT_SYMBOL(zcrypt_msgtype_request);
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index fbc6701bef30..213e54ee8a66 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -481,7 +481,6 @@ claw_open(struct net_device *dev)
spin_unlock_irqrestore(
get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
schedule();
- set_current_state(TASK_RUNNING);
remove_wait_queue(&privptr->channel[i].wait, &wait);
if(rc != 0)
ccw_check_return_code(privptr->channel[i].cdev, rc);
@@ -828,7 +827,6 @@ claw_release(struct net_device *dev)
spin_unlock_irqrestore(
get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
schedule();
- set_current_state(TASK_RUNNING);
remove_wait_queue(&privptr->channel[i].wait, &wait);
if (rc != 0) {
ccw_check_return_code(privptr->channel[i].cdev, rc);
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index e85e64a07d02..296619b7426c 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -587,6 +587,16 @@ config VMWARE_PVSCSI
To compile this driver as a module, choose M here: the
module will be called vmw_pvscsi.
+config XEN_SCSI_FRONTEND
+ tristate "XEN SCSI frontend driver"
+ depends on SCSI && XEN
+ select XEN_XENBUS_FRONTEND
+ help
+ The XEN SCSI frontend driver allows the kernel to access SCSI devices
+ exported by another guest OS (usually Dom0).
+ It is only needed if the kernel is running in a XEN guest and needs
+ generic SCSI access to a device.
+
config HYPERV_STORAGE
tristate "Microsoft Hyper-V virtual storage driver"
depends on SCSI && HYPERV
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 5f0d299b0093..59f1ce6df2d6 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -141,6 +141,7 @@ obj-$(CONFIG_SCSI_ESAS2R) += esas2r/
obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o
obj-$(CONFIG_SCSI_VIRTIO) += virtio_scsi.o
obj-$(CONFIG_VMWARE_PVSCSI) += vmw_pvscsi.o
+obj-$(CONFIG_XEN_SCSI_FRONTEND) += xen-scsifront.o
obj-$(CONFIG_HYPERV_STORAGE) += hv_storvsc.o
obj-$(CONFIG_ARM) += arm/
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 00ee0ed642aa..4a8ac7d8c76b 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1884,7 +1884,6 @@ retry:
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_bh(&p->fcoe_rx_list.lock);
schedule();
- set_current_state(TASK_RUNNING);
goto retry;
}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index dabd25429c58..db3dbd999cb6 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -4875,7 +4875,6 @@ qla2x00_do_dpc(void *data)
"DPC handler sleeping.\n");
schedule();
- __set_current_state(TASK_RUNNING);
if (!base_vha->flags.init_done || ha->flags.mbox_busy)
goto end_loop;
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
new file mode 100644
index 000000000000..34199d206ba6
--- /dev/null
+++ b/drivers/scsi/xen-scsifront.c
@@ -0,0 +1,1026 @@
+/*
+ * Xen SCSI frontend driver
+ *
+ * Copyright (c) 2008, FUJITSU Limited
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/wait.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/blkdev.h>
+#include <linux/pfn.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+
+#include <xen/xen.h>
+#include <xen/xenbus.h>
+#include <xen/grant_table.h>
+#include <xen/events.h>
+#include <xen/page.h>
+
+#include <xen/interface/grant_table.h>
+#include <xen/interface/io/vscsiif.h>
+#include <xen/interface/io/protocols.h>
+
+#include <asm/xen/hypervisor.h>
+
+
+#define GRANT_INVALID_REF 0
+
+#define VSCSIFRONT_OP_ADD_LUN 1
+#define VSCSIFRONT_OP_DEL_LUN 2
+
+/* Tuning point. */
+#define VSCSIIF_DEFAULT_CMD_PER_LUN 10
+#define VSCSIIF_MAX_TARGET 64
+#define VSCSIIF_MAX_LUN 255
+
+#define VSCSIIF_RING_SIZE __CONST_RING_SIZE(vscsiif, PAGE_SIZE)
+#define VSCSIIF_MAX_REQS VSCSIIF_RING_SIZE
+
+#define vscsiif_grants_sg(_sg) (PFN_UP((_sg) * \
+ sizeof(struct scsiif_request_segment)))
+
+struct vscsifrnt_shadow {
+ /* command between backend and frontend */
+ unsigned char act;
+ uint16_t rqid;
+
+ unsigned int nr_grants; /* number of grants in gref[] */
+ struct scsiif_request_segment *sg; /* scatter/gather elements */
+
+ /* Do reset or abort function. */
+ wait_queue_head_t wq_reset; /* reset work queue */
+ int wait_reset; /* reset work queue condition */
+ int32_t rslt_reset; /* reset response status: */
+ /* SUCCESS or FAILED or: */
+#define RSLT_RESET_WAITING 0
+#define RSLT_RESET_ERR -1
+
+ /* The struct scsi_cmnd passed in by the SCSI midlayer for this request. */
+ struct scsi_cmnd *sc;
+ int gref[vscsiif_grants_sg(SG_ALL) + SG_ALL];
+};
+
+struct vscsifrnt_info {
+ struct xenbus_device *dev;
+
+ struct Scsi_Host *host;
+ int host_active;
+
+ unsigned int evtchn;
+ unsigned int irq;
+
+ grant_ref_t ring_ref;
+ struct vscsiif_front_ring ring;
+ struct vscsiif_response ring_rsp;
+
+ spinlock_t shadow_lock;
+ DECLARE_BITMAP(shadow_free_bitmap, VSCSIIF_MAX_REQS);
+ struct vscsifrnt_shadow *shadow[VSCSIIF_MAX_REQS];
+
+ wait_queue_head_t wq_sync;
+ unsigned int wait_ring_available:1;
+
+ char dev_state_path[64];
+ struct task_struct *curr;
+};
+
+static DEFINE_MUTEX(scsifront_mutex);
+
+static void scsifront_wake_up(struct vscsifrnt_info *info)
+{
+ info->wait_ring_available = 0;
+ wake_up(&info->wq_sync);
+}
+
+static int scsifront_get_rqid(struct vscsifrnt_info *info)
+{
+ unsigned long flags;
+ int free;
+
+ spin_lock_irqsave(&info->shadow_lock, flags);
+
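+ /* find_first_bit() returns VSCSIIF_MAX_REQS if no request slot
+ * is free; callers must check the returned id against that limit.
+ */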
+ free = find_first_bit(info->shadow_free_bitmap, VSCSIIF_MAX_REQS);
+ __clear_bit(free, info->shadow_free_bitmap);
+
+ spin_unlock_irqrestore(&info->shadow_lock, flags);
+
+ return free;
+}
+
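+/*
+ * Free a request id. Returns non-zero if a waiter should be woken up:
+ * either all slots were in use before, or someone waits for ring space.
+ */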
+static int _scsifront_put_rqid(struct vscsifrnt_info *info, uint32_t id)
+{
+ int empty = bitmap_empty(info->shadow_free_bitmap, VSCSIIF_MAX_REQS);
+
+ __set_bit(id, info->shadow_free_bitmap);
+ info->shadow[id] = NULL;
+
+ return empty || info->wait_ring_available;
+}
+
+static void scsifront_put_rqid(struct vscsifrnt_info *info, uint32_t id)
+{
+ unsigned long flags;
+ int kick;
+
+ spin_lock_irqsave(&info->shadow_lock, flags);
+ kick = _scsifront_put_rqid(info, id);
+ spin_unlock_irqrestore(&info->shadow_lock, flags);
+
+ if (kick)
+ scsifront_wake_up(info);
+}
+
+static struct vscsiif_request *scsifront_pre_req(struct vscsifrnt_info *info)
+{
+ struct vscsiif_front_ring *ring = &(info->ring);
+ struct vscsiif_request *ring_req;
+ uint32_t id;
+
+ id = scsifront_get_rqid(info); /* use id in response */
+ if (id >= VSCSIIF_MAX_REQS)
+ return NULL;
+
+ ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);
+
+ ring->req_prod_pvt++;
+
+ ring_req->rqid = (uint16_t)id;
+
+ return ring_req;
+}
+
+static void scsifront_do_request(struct vscsifrnt_info *info)
+{
+ struct vscsiif_front_ring *ring = &(info->ring);
+ int notify;
+
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
+ if (notify)
+ notify_remote_via_irq(info->irq);
+}
+
+static void scsifront_gnttab_done(struct vscsifrnt_info *info, uint32_t id)
+{
+ struct vscsifrnt_shadow *s = info->shadow[id];
+ int i;
+
+ if (s->sc->sc_data_direction == DMA_NONE)
+ return;
+
+ for (i = 0; i < s->nr_grants; i++) {
+ if (unlikely(gnttab_query_foreign_access(s->gref[i]) != 0)) {
+ shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME
+ "grant still in use by backend\n");
+ BUG();
+ }
+ gnttab_end_foreign_access(s->gref[i], 0, 0UL);
+ }
+
+ kfree(s->sg);
+}
+
+static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
+ struct vscsiif_response *ring_rsp)
+{
+ struct scsi_cmnd *sc;
+ uint32_t id;
+ uint8_t sense_len;
+
+ id = ring_rsp->rqid;
+ sc = info->shadow[id]->sc;
+
+ BUG_ON(sc == NULL);
+
+ scsifront_gnttab_done(info, id);
+ scsifront_put_rqid(info, id);
+
+ sc->result = ring_rsp->rslt;
+ scsi_set_resid(sc, ring_rsp->residual_len);
+
+ sense_len = min_t(uint8_t, VSCSIIF_SENSE_BUFFERSIZE,
+ ring_rsp->sense_len);
+
+ if (sense_len)
+ memcpy(sc->sense_buffer, ring_rsp->sense_buffer, sense_len);
+
+ sc->scsi_done(sc);
+}
+
+static void scsifront_sync_cmd_done(struct vscsifrnt_info *info,
+ struct vscsiif_response *ring_rsp)
+{
+ uint16_t id = ring_rsp->rqid;
+ unsigned long flags;
+ struct vscsifrnt_shadow *shadow = info->shadow[id];
+ int kick;
+
+ spin_lock_irqsave(&info->shadow_lock, flags);
+ shadow->wait_reset = 1;
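+ /* RSLT_RESET_ERR means the waiter in scsifront_action_handler()
+ * gave up (was interrupted), so the abandoned shadow is freed here.
+ */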
+ switch (shadow->rslt_reset) {
+ case RSLT_RESET_WAITING:
+ shadow->rslt_reset = ring_rsp->rslt;
+ break;
+ case RSLT_RESET_ERR:
+ kick = _scsifront_put_rqid(info, id);
+ spin_unlock_irqrestore(&info->shadow_lock, flags);
+ kfree(shadow);
+ if (kick)
+ scsifront_wake_up(info);
+ return;
+ default:
+ shost_printk(KERN_ERR, info->host, KBUILD_MODNAME
+ "bad reset state %d, possibly leaking %u\n",
+ shadow->rslt_reset, id);
+ break;
+ }
+ spin_unlock_irqrestore(&info->shadow_lock, flags);
+
+ wake_up(&shadow->wq_reset);
+}
+
+static int scsifront_cmd_done(struct vscsifrnt_info *info)
+{
+ struct vscsiif_response *ring_rsp;
+ RING_IDX i, rp;
+ int more_to_do = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(info->host->host_lock, flags);
+
+ rp = info->ring.sring->rsp_prod;
+ rmb(); /* ordering required respective to dom0 */
+ for (i = info->ring.rsp_cons; i != rp; i++) {
+
+ ring_rsp = RING_GET_RESPONSE(&info->ring, i);
+
+ if (WARN(ring_rsp->rqid >= VSCSIIF_MAX_REQS ||
+ test_bit(ring_rsp->rqid, info->shadow_free_bitmap),
+ "illegal rqid %u returned by backend!\n",
+ ring_rsp->rqid))
+ continue;
+
+ if (info->shadow[ring_rsp->rqid]->act == VSCSIIF_ACT_SCSI_CDB)
+ scsifront_cdb_cmd_done(info, ring_rsp);
+ else
+ scsifront_sync_cmd_done(info, ring_rsp);
+ }
+
+ info->ring.rsp_cons = i;
+
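+ /* If requests are still outstanding, do a final check for
+ * responses that raced in; otherwise just re-arm the response
+ * event index.
+ */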
+ if (i != info->ring.req_prod_pvt)
+ RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
+ else
+ info->ring.sring->rsp_event = i + 1;
+
+ info->wait_ring_available = 0;
+
+ spin_unlock_irqrestore(info->host->host_lock, flags);
+
+ wake_up(&info->wq_sync);
+
+ return more_to_do;
+}
+
+static irqreturn_t scsifront_irq_fn(int irq, void *dev_id)
+{
+ struct vscsifrnt_info *info = dev_id;
+
+ while (scsifront_cmd_done(info))
+ /* Yield point for this unbounded loop. */
+ cond_resched();
+
+ return IRQ_HANDLED;
+}
+
+static int map_data_for_request(struct vscsifrnt_info *info,
+ struct scsi_cmnd *sc,
+ struct vscsiif_request *ring_req,
+ struct vscsifrnt_shadow *shadow)
+{
+ grant_ref_t gref_head;
+ struct page *page;
+ int err, ref, ref_cnt = 0;
+ int grant_ro = (sc->sc_data_direction == DMA_TO_DEVICE);
+ unsigned int i, off, len, bytes;
+ unsigned int data_len = scsi_bufflen(sc);
+ unsigned int data_grants = 0, seg_grants = 0;
+ struct scatterlist *sg;
+ unsigned long mfn;
+ struct scsiif_request_segment *seg;
+
+ ring_req->nr_segments = 0;
+ if (sc->sc_data_direction == DMA_NONE || !data_len)
+ return 0;
+
+ scsi_for_each_sg(sc, sg, scsi_sg_count(sc), i)
+ data_grants += PFN_UP(sg->offset + sg->length);
+
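+ /* If the scatter/gather list needs more segments than fit into
+ * the request itself, allocate a separate segment array that is
+ * granted to the backend as well.
+ */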
+ if (data_grants > VSCSIIF_SG_TABLESIZE) {
+ if (data_grants > info->host->sg_tablesize) {
+ shost_printk(KERN_ERR, info->host, KBUILD_MODNAME
+ "Unable to map request_buffer for command!\n");
+ return -E2BIG;
+ }
+ seg_grants = vscsiif_grants_sg(data_grants);
+ shadow->sg = kcalloc(data_grants,
+ sizeof(struct scsiif_request_segment), GFP_ATOMIC);
+ if (!shadow->sg)
+ return -ENOMEM;
+ }
+ seg = shadow->sg ? : ring_req->seg;
+
+ err = gnttab_alloc_grant_references(seg_grants + data_grants,
+ &gref_head);
+ if (err) {
+ kfree(shadow->sg);
+ shost_printk(KERN_ERR, info->host, KBUILD_MODNAME
+ "gnttab_alloc_grant_references() error\n");
+ return -ENOMEM;
+ }
+
+ if (seg_grants) {
+ page = virt_to_page(seg);
+ off = (unsigned long)seg & ~PAGE_MASK;
+ len = sizeof(struct scsiif_request_segment) * data_grants;
+ while (len > 0) {
+ bytes = min_t(unsigned int, len, PAGE_SIZE - off);
+
+ ref = gnttab_claim_grant_reference(&gref_head);
+ BUG_ON(ref == -ENOSPC);
+
+ mfn = pfn_to_mfn(page_to_pfn(page));
+ gnttab_grant_foreign_access_ref(ref,
+ info->dev->otherend_id, mfn, 1);
+ shadow->gref[ref_cnt] = ref;
+ ring_req->seg[ref_cnt].gref = ref;
+ ring_req->seg[ref_cnt].offset = (uint16_t)off;
+ ring_req->seg[ref_cnt].length = (uint16_t)bytes;
+
+ page++;
+ len -= bytes;
+ off = 0;
+ ref_cnt++;
+ }
+ BUG_ON(seg_grants < ref_cnt);
+ seg_grants = ref_cnt;
+ }
+
+ scsi_for_each_sg(sc, sg, scsi_sg_count(sc), i) {
+ page = sg_page(sg);
+ off = sg->offset;
+ len = sg->length;
+
+ while (len > 0 && data_len > 0) {
+ /*
+ * sg sends a scatterlist that is larger than
+ * the data_len it wants transferred for certain
+ * IO sizes.
+ */
+ bytes = min_t(unsigned int, len, PAGE_SIZE - off);
+ bytes = min(bytes, data_len);
+
+ ref = gnttab_claim_grant_reference(&gref_head);
+ BUG_ON(ref == -ENOSPC);
+
+ mfn = pfn_to_mfn(page_to_pfn(page));
+ gnttab_grant_foreign_access_ref(ref,
+ info->dev->otherend_id, mfn, grant_ro);
+
+ shadow->gref[ref_cnt] = ref;
+ seg->gref = ref;
+ seg->offset = (uint16_t)off;
+ seg->length = (uint16_t)bytes;
+
+ page++;
+ seg++;
+ len -= bytes;
+ data_len -= bytes;
+ off = 0;
+ ref_cnt++;
+ }
+ }
+
+ if (seg_grants)
+ ring_req->nr_segments = VSCSIIF_SG_GRANT | seg_grants;
+ else
+ ring_req->nr_segments = (uint8_t)ref_cnt;
+ shadow->nr_grants = ref_cnt;
+
+ return 0;
+}
+
+static struct vscsiif_request *scsifront_command2ring(
+ struct vscsifrnt_info *info, struct scsi_cmnd *sc,
+ struct vscsifrnt_shadow *shadow)
+{
+ struct vscsiif_request *ring_req;
+
+ memset(shadow, 0, sizeof(*shadow));
+
+ ring_req = scsifront_pre_req(info);
+ if (!ring_req)
+ return NULL;
+
+ info->shadow[ring_req->rqid] = shadow;
+ shadow->rqid = ring_req->rqid;
+
+ ring_req->id = sc->device->id;
+ ring_req->lun = sc->device->lun;
+ ring_req->channel = sc->device->channel;
+ ring_req->cmd_len = sc->cmd_len;
+
+ BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);
+
+ memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
+
+ ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction;
+ ring_req->timeout_per_command = sc->request->timeout / HZ;
+
+ return ring_req;
+}
+
+static int scsifront_queuecommand(struct Scsi_Host *shost,
+ struct scsi_cmnd *sc)
+{
+ struct vscsifrnt_info *info = shost_priv(shost);
+ struct vscsiif_request *ring_req;
+ struct vscsifrnt_shadow *shadow = scsi_cmd_priv(sc);
+ unsigned long flags;
+ int err;
+ uint16_t rqid;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (RING_FULL(&info->ring))
+ goto busy;
+
+ ring_req = scsifront_command2ring(info, sc, shadow);
+ if (!ring_req)
+ goto busy;
+
+ sc->result = 0;
+
+ rqid = ring_req->rqid;
+ ring_req->act = VSCSIIF_ACT_SCSI_CDB;
+
+ shadow->sc = sc;
+ shadow->act = VSCSIIF_ACT_SCSI_CDB;
+
+ err = map_data_for_request(info, sc, ring_req, shadow);
+ if (err < 0) {
+ pr_debug("%s: err %d\n", __func__, err);
+ scsifront_put_rqid(info, rqid);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ if (err == -ENOMEM)
+ return SCSI_MLQUEUE_HOST_BUSY;
+ sc->result = DID_ERROR << 16;
+ sc->scsi_done(sc);
+ return 0;
+ }
+
+ scsifront_do_request(info);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ return 0;
+
+busy:
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ pr_debug("%s: busy\n", __func__);
+ return SCSI_MLQUEUE_HOST_BUSY;
+}
+
+/*
+ * Any exception handling (reset or abort) must be forwarded to the backend.
+ * We have to wait until an answer is returned. This answer contains the
+ * result to be returned to the requestor.
+ */
+static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
+{
+ struct Scsi_Host *host = sc->device->host;
+ struct vscsifrnt_info *info = shost_priv(host);
+ struct vscsifrnt_shadow *shadow, *s = scsi_cmd_priv(sc);
+ struct vscsiif_request *ring_req;
+ int err = 0;
+
+ shadow = kmalloc(sizeof(*shadow), GFP_NOIO);
+ if (!shadow)
+ return FAILED;
+
+ spin_lock_irq(host->host_lock);
+
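+ /* Wait for a free slot in the ring; give up if the wait was
+ * interrupted and a slot still cannot be obtained.
+ */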
+ for (;;) {
+ if (!RING_FULL(&info->ring)) {
+ ring_req = scsifront_command2ring(info, sc, shadow);
+ if (ring_req)
+ break;
+ }
+ if (err) {
+ spin_unlock_irq(host->host_lock);
+ kfree(shadow);
+ return FAILED;
+ }
+ info->wait_ring_available = 1;
+ spin_unlock_irq(host->host_lock);
+ err = wait_event_interruptible(info->wq_sync,
+ !info->wait_ring_available);
+ spin_lock_irq(host->host_lock);
+ }
+
+ ring_req->act = act;
+ ring_req->ref_rqid = s->rqid;
+
+ shadow->act = act;
+ shadow->rslt_reset = RSLT_RESET_WAITING;
+ init_waitqueue_head(&shadow->wq_reset);
+
+ ring_req->nr_segments = 0;
+
+ scsifront_do_request(info);
+
+ spin_unlock_irq(host->host_lock);
+ err = wait_event_interruptible(shadow->wq_reset, shadow->wait_reset);
+ spin_lock_irq(host->host_lock);
+
+ if (!err) {
+ err = shadow->rslt_reset;
+ scsifront_put_rqid(info, shadow->rqid);
+ kfree(shadow);
+ } else {
+ spin_lock(&info->shadow_lock);
+ shadow->rslt_reset = RSLT_RESET_ERR;
+ spin_unlock(&info->shadow_lock);
+ err = FAILED;
+ }
+
+ spin_unlock_irq(host->host_lock);
+ return err;
+}
+
+static int scsifront_eh_abort_handler(struct scsi_cmnd *sc)
+{
+ pr_debug("%s\n", __func__);
+ return scsifront_action_handler(sc, VSCSIIF_ACT_SCSI_ABORT);
+}
+
+static int scsifront_dev_reset_handler(struct scsi_cmnd *sc)
+{
+ pr_debug("%s\n", __func__);
+ return scsifront_action_handler(sc, VSCSIIF_ACT_SCSI_RESET);
+}
+
+static int scsifront_sdev_configure(struct scsi_device *sdev)
+{
+ struct vscsifrnt_info *info = shost_priv(sdev->host);
+
+ if (info && current == info->curr)
+ xenbus_printf(XBT_NIL, info->dev->nodename,
+ info->dev_state_path, "%d", XenbusStateConnected);
+
+ return 0;
+}
+
+static void scsifront_sdev_destroy(struct scsi_device *sdev)
+{
+ struct vscsifrnt_info *info = shost_priv(sdev->host);
+
+ if (info && current == info->curr)
+ xenbus_printf(XBT_NIL, info->dev->nodename,
+ info->dev_state_path, "%d", XenbusStateClosed);
+}
+
+static struct scsi_host_template scsifront_sht = {
+ .module = THIS_MODULE,
+ .name = "Xen SCSI frontend driver",
+ .queuecommand = scsifront_queuecommand,
+ .eh_abort_handler = scsifront_eh_abort_handler,
+ .eh_device_reset_handler = scsifront_dev_reset_handler,
+ .slave_configure = scsifront_sdev_configure,
+ .slave_destroy = scsifront_sdev_destroy,
+ .cmd_per_lun = VSCSIIF_DEFAULT_CMD_PER_LUN,
+ .can_queue = VSCSIIF_MAX_REQS,
+ .this_id = -1,
+ .cmd_size = sizeof(struct vscsifrnt_shadow),
+ .sg_tablesize = VSCSIIF_SG_TABLESIZE,
+ .use_clustering = DISABLE_CLUSTERING,
+ .proc_name = "scsifront",
+};
+
+static int scsifront_alloc_ring(struct vscsifrnt_info *info)
+{
+ struct xenbus_device *dev = info->dev;
+ struct vscsiif_sring *sring;
+ int err = -ENOMEM;
+
+ /***** Frontend to Backend ring start *****/
+ sring = (struct vscsiif_sring *)__get_free_page(GFP_KERNEL);
+ if (!sring) {
+ xenbus_dev_fatal(dev, err,
+ "fail to allocate shared ring (Front to Back)");
+ return err;
+ }
+ SHARED_RING_INIT(sring);
+ FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
+
+ err = xenbus_grant_ring(dev, virt_to_mfn(sring));
+ if (err < 0) {
+ free_page((unsigned long)sring);
+ xenbus_dev_fatal(dev, err,
+ "fail to grant shared ring (Front to Back)");
+ return err;
+ }
+ info->ring_ref = err;
+
+ err = xenbus_alloc_evtchn(dev, &info->evtchn);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "xenbus_alloc_evtchn");
+ goto free_gnttab;
+ }
+
+ err = bind_evtchn_to_irq(info->evtchn);
+ if (err <= 0) {
+ xenbus_dev_fatal(dev, err, "bind_evtchn_to_irq");
+ goto free_gnttab;
+ }
+
+ info->irq = err;
+
+ err = request_threaded_irq(info->irq, NULL, scsifront_irq_fn,
+ IRQF_ONESHOT, "scsifront", info);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "request_threaded_irq");
+ goto free_irq;
+ }
+
+ return 0;
+
+/* free resource */
+free_irq:
+ unbind_from_irqhandler(info->irq, info);
+free_gnttab:
+ gnttab_end_foreign_access(info->ring_ref, 0,
+ (unsigned long)info->ring.sring);
+
+ return err;
+}
+
+static int scsifront_init_ring(struct vscsifrnt_info *info)
+{
+ struct xenbus_device *dev = info->dev;
+ struct xenbus_transaction xbt;
+ int err;
+
+ pr_debug("%s\n", __func__);
+
+ err = scsifront_alloc_ring(info);
+ if (err)
+ return err;
+ pr_debug("%s: %u %u\n", __func__, info->ring_ref, info->evtchn);
+
+again:
+ err = xenbus_transaction_start(&xbt);
+ if (err)
+ xenbus_dev_fatal(dev, err, "starting transaction");
+
+ err = xenbus_printf(xbt, dev->nodename, "ring-ref", "%u",
+ info->ring_ref);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "%s", "writing ring-ref");
+ goto fail;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
+ info->evtchn);
+
+ if (err) {
+ xenbus_dev_fatal(dev, err, "%s", "writing event-channel");
+ goto fail;
+ }
+
+ err = xenbus_transaction_end(xbt, 0);
+ if (err) {
+ if (err == -EAGAIN)
+ goto again;
+ xenbus_dev_fatal(dev, err, "completing transaction");
+ goto free_sring;
+ }
+
+ return 0;
+
+fail:
+ xenbus_transaction_end(xbt, 1);
+free_sring:
+ unbind_from_irqhandler(info->irq, info);
+ gnttab_end_foreign_access(info->ring_ref, 0,
+ (unsigned long)info->ring.sring);
+
+ return err;
+}
+
+
+static int scsifront_probe(struct xenbus_device *dev,
+ const struct xenbus_device_id *id)
+{
+ struct vscsifrnt_info *info;
+ struct Scsi_Host *host;
+ int err = -ENOMEM;
+ char name[TASK_COMM_LEN];
+
+ host = scsi_host_alloc(&scsifront_sht, sizeof(*info));
+ if (!host) {
+ xenbus_dev_fatal(dev, err, "fail to allocate scsi host");
+ return err;
+ }
+ info = (struct vscsifrnt_info *)host->hostdata;
+
+ dev_set_drvdata(&dev->dev, info);
+ info->dev = dev;
+
+ bitmap_fill(info->shadow_free_bitmap, VSCSIIF_MAX_REQS);
+
+ err = scsifront_init_ring(info);
+ if (err) {
+ scsi_host_put(host);
+ return err;
+ }
+
+ init_waitqueue_head(&info->wq_sync);
+ spin_lock_init(&info->shadow_lock);
+
+ snprintf(name, TASK_COMM_LEN, "vscsiif.%d", host->host_no);
+
+ host->max_id = VSCSIIF_MAX_TARGET;
+ host->max_channel = 0;
+ host->max_lun = VSCSIIF_MAX_LUN;
+ host->max_sectors = (host->sg_tablesize - 1) * PAGE_SIZE / 512;
+ host->max_cmd_len = VSCSIIF_MAX_COMMAND_SIZE;
+
+ err = scsi_add_host(host, &dev->dev);
+ if (err) {
+ dev_err(&dev->dev, "fail to add scsi host %d\n", err);
+ goto free_sring;
+ }
+ info->host = host;
+ info->host_active = 1;
+
+ xenbus_switch_state(dev, XenbusStateInitialised);
+
+ return 0;
+
+free_sring:
+ unbind_from_irqhandler(info->irq, info);
+ gnttab_end_foreign_access(info->ring_ref, 0,
+ (unsigned long)info->ring.sring);
+ scsi_host_put(host);
+ return err;
+}
+
+static int scsifront_remove(struct xenbus_device *dev)
+{
+ struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);
+
+ pr_debug("%s: %s removed\n", __func__, dev->nodename);
+
+ mutex_lock(&scsifront_mutex);
+ if (info->host_active) {
+ /* Scsi_host not yet removed */
+ scsi_remove_host(info->host);
+ info->host_active = 0;
+ }
+ mutex_unlock(&scsifront_mutex);
+
+ gnttab_end_foreign_access(info->ring_ref, 0,
+ (unsigned long)info->ring.sring);
+ unbind_from_irqhandler(info->irq, info);
+
+ scsi_host_put(info->host);
+
+ return 0;
+}
+
+static void scsifront_disconnect(struct vscsifrnt_info *info)
+{
+ struct xenbus_device *dev = info->dev;
+ struct Scsi_Host *host = info->host;
+
+ pr_debug("%s: %s disconnect\n", __func__, dev->nodename);
+
+ /*
+ * By the time this function runs, all frontend devices have
+ * already been deleted, so there is no need to block I/O
+ * before removing the host.
+ */
+
+ mutex_lock(&scsifront_mutex);
+ if (info->host_active) {
+ scsi_remove_host(host);
+ info->host_active = 0;
+ }
+ mutex_unlock(&scsifront_mutex);
+
+ xenbus_frontend_closed(dev);
+}
+
+static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
+{
+ struct xenbus_device *dev = info->dev;
+ int i, err = 0;
+ char str[64];
+ char **dir;
+ unsigned int dir_n = 0;
+ unsigned int device_state;
+ unsigned int hst, chn, tgt, lun;
+ struct scsi_device *sdev;
+
+ dir = xenbus_directory(XBT_NIL, dev->otherend, "vscsi-devs", &dir_n);
+ if (IS_ERR(dir))
+ return;
+
+ /* mark current task as the one allowed to modify device states */
+ BUG_ON(info->curr);
+ info->curr = current;
+
+ for (i = 0; i < dir_n; i++) {
+ /* read status */
+ snprintf(str, sizeof(str), "vscsi-devs/%s/state", dir[i]);
+ err = xenbus_scanf(XBT_NIL, dev->otherend, str, "%u",
+ &device_state);
+ if (XENBUS_EXIST_ERR(err))
+ continue;
+
+ /* virtual SCSI device */
+ snprintf(str, sizeof(str), "vscsi-devs/%s/v-dev", dir[i]);
+ err = xenbus_scanf(XBT_NIL, dev->otherend, str,
+ "%u:%u:%u:%u", &hst, &chn, &tgt, &lun);
+ if (XENBUS_EXIST_ERR(err))
+ continue;
+
+ /*
+ * Front device state path, used in slave_configure called
+ * on successful scsi_add_device, and in slave_destroy called
+ * on removal of a device.
+ */
+ snprintf(info->dev_state_path, sizeof(info->dev_state_path),
+ "vscsi-devs/%s/state", dir[i]);
+
+ switch (op) {
+ case VSCSIFRONT_OP_ADD_LUN:
+ if (device_state != XenbusStateInitialised)
+ break;
+
+ if (scsi_add_device(info->host, chn, tgt, lun)) {
+ dev_err(&dev->dev, "scsi_add_device\n");
+ xenbus_printf(XBT_NIL, dev->nodename,
+ info->dev_state_path,
+ "%d", XenbusStateClosed);
+ }
+ break;
+ case VSCSIFRONT_OP_DEL_LUN:
+ if (device_state != XenbusStateClosing)
+ break;
+
+ sdev = scsi_device_lookup(info->host, chn, tgt, lun);
+ if (sdev) {
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ info->curr = NULL;
+
+ kfree(dir);
+}
+
+static void scsifront_read_backend_params(struct xenbus_device *dev,
+ struct vscsifrnt_info *info)
+{
+ unsigned int sg_grant;
+ int ret;
+ struct Scsi_Host *host = info->host;
+
+ ret = xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg-grant", "%u",
+ &sg_grant);
+ if (ret == 1 && sg_grant) {
+ sg_grant = min_t(unsigned int, sg_grant, SG_ALL);
+ sg_grant = max_t(unsigned int, sg_grant, VSCSIIF_SG_TABLESIZE);
+ host->sg_tablesize = min_t(unsigned int, sg_grant,
+ VSCSIIF_SG_TABLESIZE * PAGE_SIZE /
+ sizeof(struct scsiif_request_segment));
+ host->max_sectors = (host->sg_tablesize - 1) * PAGE_SIZE / 512;
+ }
+ dev_info(&dev->dev, "using up to %d SG entries\n", host->sg_tablesize);
+}
+
+static void scsifront_backend_changed(struct xenbus_device *dev,
+ enum xenbus_state backend_state)
+{
+ struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);
+
+ pr_debug("%s: %p %u %u\n", __func__, dev, dev->state, backend_state);
+
+ switch (backend_state) {
+ case XenbusStateUnknown:
+ case XenbusStateInitialising:
+ case XenbusStateInitWait:
+ case XenbusStateInitialised:
+ break;
+
+ case XenbusStateConnected:
+ scsifront_read_backend_params(dev, info);
+ if (xenbus_read_driver_state(dev->nodename) ==
+ XenbusStateInitialised)
+ scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN);
+
+ if (dev->state != XenbusStateConnected)
+ xenbus_switch_state(dev, XenbusStateConnected);
+ break;
+
+ case XenbusStateClosed:
+ if (dev->state == XenbusStateClosed)
+ break;
+ /* Missed the backend's Closing state -- fallthrough */
+ case XenbusStateClosing:
+ scsifront_disconnect(info);
+ break;
+
+ case XenbusStateReconfiguring:
+ scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_DEL_LUN);
+ xenbus_switch_state(dev, XenbusStateReconfiguring);
+ break;
+
+ case XenbusStateReconfigured:
+ scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN);
+ xenbus_switch_state(dev, XenbusStateConnected);
+ break;
+ }
+}
+
+static const struct xenbus_device_id scsifront_ids[] = {
+ { "vscsi" },
+ { "" }
+};
+
+static struct xenbus_driver scsifront_driver = {
+ .ids = scsifront_ids,
+ .probe = scsifront_probe,
+ .remove = scsifront_remove,
+ .otherend_changed = scsifront_backend_changed,
+};
+
+static int __init scsifront_init(void)
+{
+ if (!xen_domain())
+ return -ENODEV;
+
+ return xenbus_register_frontend(&scsifront_driver);
+}
+module_init(scsifront_init);
+
+static void __exit scsifront_exit(void)
+{
+ xenbus_unregister_driver(&scsifront_driver);
+}
+module_exit(scsifront_exit);
+
+MODULE_DESCRIPTION("Xen SCSI frontend driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("xen:vscsi");
+MODULE_AUTHOR("Juergen Gross <jgross@suse.com>");
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 713a97226787..ad4f5790a76f 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -339,7 +339,7 @@ static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
goto out;
}
- ret = asma->file->f_op->llseek(asma->file, offset, origin);
+ ret = vfs_llseek(asma->file, offset, origin);
if (ret < 0)
goto out;
diff --git a/drivers/staging/android/ion/compat_ion.c b/drivers/staging/android/ion/compat_ion.c
index ee3a7380e53b..a402fdaf54ca 100644
--- a/drivers/staging/android/ion/compat_ion.c
+++ b/drivers/staging/android/ion/compat_ion.c
@@ -125,7 +125,7 @@ long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
long ret;
- if (!filp->f_op || !filp->f_op->unlocked_ioctl)
+ if (!filp->f_op->unlocked_ioctl)
return -ENOTTY;
switch (cmd) {
diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c
index 0bf0d24d12d5..28b93d39a94e 100644
--- a/drivers/staging/android/logger.c
+++ b/drivers/staging/android/logger.c
@@ -411,69 +411,18 @@ static void fix_up_readers(struct logger_log *log, size_t len)
}
/*
- * do_write_log - writes 'len' bytes from 'buf' to 'log'
- *
- * The caller needs to hold log->mutex.
- */
-static void do_write_log(struct logger_log *log, const void *buf, size_t count)
-{
- size_t len;
-
- len = min(count, log->size - log->w_off);
- memcpy(log->buffer + log->w_off, buf, len);
-
- if (count != len)
- memcpy(log->buffer, buf + len, count - len);
-
- log->w_off = logger_offset(log, log->w_off + count);
-
-}
-
-/*
- * do_write_log_user - writes 'len' bytes from the user-space buffer 'buf' to
- * the log 'log'
- *
- * The caller needs to hold log->mutex.
- *
- * Returns 'count' on success, negative error code on failure.
- */
-static ssize_t do_write_log_from_user(struct logger_log *log,
- const void __user *buf, size_t count)
-{
- size_t len;
-
- len = min(count, log->size - log->w_off);
- if (len && copy_from_user(log->buffer + log->w_off, buf, len))
- return -EFAULT;
-
- if (count != len)
- if (copy_from_user(log->buffer, buf + len, count - len))
- /*
- * Note that by not updating w_off, this abandons the
- * portion of the new entry that *was* successfully
- * copied, just above. This is intentional to avoid
- * message corruption from missing fragments.
- */
- return -EFAULT;
-
- log->w_off = logger_offset(log, log->w_off + count);
-
- return count;
-}
-
-/*
- * logger_aio_write - our write method, implementing support for write(),
+ * logger_write_iter - our write method, implementing support for write(),
* writev(), and aio_write(). Writes are our fast path, and we try to optimize
* them above all else.
*/
-static ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t ppos)
+static ssize_t logger_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct logger_log *log = file_get_log(iocb->ki_filp);
- size_t orig;
struct logger_entry header;
struct timespec now;
- ssize_t ret = 0;
+ size_t len, count;
+
+ count = min_t(size_t, iocb->ki_nbytes, LOGGER_ENTRY_MAX_PAYLOAD);
now = current_kernel_time();
@@ -482,7 +431,7 @@ static ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
header.sec = now.tv_sec;
header.nsec = now.tv_nsec;
header.euid = current_euid();
- header.len = min_t(size_t, iocb->ki_nbytes, LOGGER_ENTRY_MAX_PAYLOAD);
+ header.len = count;
header.hdr_size = sizeof(struct logger_entry);
/* null writes succeed, return zero */
@@ -491,8 +440,6 @@ static ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
mutex_lock(&log->mutex);
- orig = log->w_off;
-
/*
* Fix up any readers, pulling them forward to the first readable
* entry after (what will be) the new write offset. We do this now
@@ -501,33 +448,35 @@ static ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
*/
fix_up_readers(log, sizeof(struct logger_entry) + header.len);
- do_write_log(log, &header, sizeof(struct logger_entry));
-
- while (nr_segs-- > 0) {
- size_t len;
- ssize_t nr;
+ len = min(sizeof(header), log->size - log->w_off);
+ memcpy(log->buffer + log->w_off, &header, len);
+ memcpy(log->buffer, (char *)&header + len, sizeof(header) - len);
- /* figure out how much of this vector we can keep */
- len = min_t(size_t, iov->iov_len, header.len - ret);
+ len = min(count, log->size - log->w_off);
- /* write out this segment's payload */
- nr = do_write_log_from_user(log, iov->iov_base, len);
- if (unlikely(nr < 0)) {
- log->w_off = orig;
- mutex_unlock(&log->mutex);
- return nr;
- }
+ if (copy_from_iter(log->buffer + log->w_off, len, from) != len) {
+ /*
+ * Note that by not updating w_off, this abandons the
+ * portion of the new entry that *was* successfully
+ * copied, just above. This is intentional to avoid
+ * message corruption from missing fragments.
+ */
+ mutex_unlock(&log->mutex);
+ return -EFAULT;
+ }
- iov++;
- ret += nr;
+ if (copy_from_iter(log->buffer, count - len, from) != count - len) {
+ mutex_unlock(&log->mutex);
+ return -EFAULT;
}
+ log->w_off = logger_offset(log, log->w_off + count);
mutex_unlock(&log->mutex);
/* wake up any blocked readers */
wake_up_interruptible(&log->wq);
- return ret;
+ return len;
}
static struct logger_log *get_log_from_minor(int minor)
@@ -736,7 +685,7 @@ static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
static const struct file_operations logger_fops = {
.owner = THIS_MODULE,
.read = logger_read,
- .aio_write = logger_aio_write,
+ .write_iter = logger_write_iter,
.poll = logger_poll,
.unlocked_ioctl = logger_ioctl,
.compat_ioctl = logger_ioctl,
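
For reference, the rewritten logger_write_iter() above reduces to a circular-buffer copy that wraps at most once. A stand-alone sketch of that wrap-around copy, assuming a buffer of 'size' bytes, a caller-held lock, and hypothetical names:

#include <linux/kernel.h>
#include <linux/uio.h>

/*
 * Sketch: copy 'count' bytes from an iov_iter into a circular buffer,
 * wrapping at most once. Returns 0 or -EFAULT; locking is the caller's job.
 */
static int ring_copy_from_iter(char *buf, size_t size, size_t *w_off,
			       struct iov_iter *from, size_t count)
{
	size_t len = min(count, size - *w_off);

	if (copy_from_iter(buf + *w_off, len, from) != len)
		return -EFAULT;
	if (copy_from_iter(buf, count - len, from) != count - len)
		return -EFAULT;

	*w_off = (*w_off + count) % size;	/* advance, wrapping */
	return 0;
}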
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 86f1a91e896f..14c9c8d18d02 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -3215,7 +3215,6 @@ kiblnd_connd (void *arg)
schedule_timeout(timeout);
- set_current_state(TASK_RUNNING);
remove_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
}
@@ -3432,7 +3431,6 @@ kiblnd_scheduler(void *arg)
busy_loops = 0;
remove_wait_queue(&sched->ibs_waitq, &wait);
- set_current_state(TASK_RUNNING);
spin_lock_irqsave(&sched->ibs_lock, flags);
}
@@ -3507,7 +3505,6 @@ kiblnd_failover_thread(void *arg)
rc = schedule_timeout(long_sleep ? cfs_time_seconds(10) :
cfs_time_seconds(1));
- set_current_state(TASK_RUNNING);
remove_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
write_lock_irqsave(glock, flags);
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index bcfee7c21942..d29f5f134b89 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -2232,7 +2232,6 @@ ksocknal_connd (void *arg)
nloops = 0;
schedule_timeout(timeout);
- set_current_state(TASK_RUNNING);
remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);
spin_lock_bh(connd_lock);
}
diff --git a/drivers/staging/lustre/lustre/libcfs/fail.c b/drivers/staging/lustre/lustre/libcfs/fail.c
index 1bf9c90b4789..e73ca3df9734 100644
--- a/drivers/staging/lustre/lustre/libcfs/fail.c
+++ b/drivers/staging/lustre/lustre/libcfs/fail.c
@@ -131,7 +131,6 @@ int __cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
id, ms);
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(cfs_time_seconds(ms) / 1000);
- set_current_state(TASK_RUNNING);
CERROR("cfs_fail_timeout id %x awake\n", id);
}
return ret;
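
The repeated deletions of set_current_state(TASK_RUNNING) in the hunks above and below rely on the fact that schedule_timeout() always returns with the task back in TASK_RUNNING, so the extra call was redundant. A minimal sketch of the canonical timed-sleep idiom:

#include <linux/sched.h>

/* Sketch: sleep for roughly 'timeout_jiffies' without an explicit wake-up. */
static void example_sleep(long timeout_jiffies)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(timeout_jiffies);
	/* schedule_timeout() returns with the task in TASK_RUNNING. */
}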
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index 3323eb5e77b0..655cf5037b0b 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -19,8 +19,6 @@ menuconfig STAGING_MEDIA
if STAGING_MEDIA
# Please keep them in alphabetic order
-source "drivers/staging/media/as102/Kconfig"
-
source "drivers/staging/media/bcm2048/Kconfig"
source "drivers/staging/media/cxd2099/Kconfig"
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index 7db83f373f63..6dbe578178cd 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -1,4 +1,3 @@
-obj-$(CONFIG_DVB_AS102) += as102/
obj-$(CONFIG_I2C_BCM2048) += bcm2048/
obj-$(CONFIG_DVB_CXD2099) += cxd2099/
obj-$(CONFIG_LIRC_STAGING) += lirc/
diff --git a/drivers/staging/media/as102/as102_fe.c b/drivers/staging/media/as102/as102_fe.c
deleted file mode 100644
index b686b7617cdc..000000000000
--- a/drivers/staging/media/as102/as102_fe.c
+++ /dev/null
@@ -1,571 +0,0 @@
-/*
- * Abilis Systems Single DVB-T Receiver
- * Copyright (C) 2008 Pierrick Hascoet <pierrick.hascoet@abilis.com>
- * Copyright (C) 2010 Devin Heitmueller <dheitmueller@kernellabs.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-#include "as102_drv.h"
-#include "as10x_types.h"
-#include "as10x_cmd.h"
-
-static void as10x_fe_copy_tps_parameters(struct dtv_frontend_properties *dst,
- struct as10x_tps *src);
-
-static void as102_fe_copy_tune_parameters(struct as10x_tune_args *dst,
- struct dtv_frontend_properties *src);
-
-static int as102_fe_set_frontend(struct dvb_frontend *fe)
-{
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
- int ret = 0;
- struct as102_dev_t *dev;
- struct as10x_tune_args tune_args = { 0 };
-
- dev = (struct as102_dev_t *) fe->tuner_priv;
- if (dev == NULL)
- return -ENODEV;
-
- if (mutex_lock_interruptible(&dev->bus_adap.lock))
- return -EBUSY;
-
- as102_fe_copy_tune_parameters(&tune_args, p);
-
- /* send abilis command: SET_TUNE */
- ret = as10x_cmd_set_tune(&dev->bus_adap, &tune_args);
- if (ret != 0)
- dprintk(debug, "as10x_cmd_set_tune failed. (err = %d)\n", ret);
-
- mutex_unlock(&dev->bus_adap.lock);
-
- return (ret < 0) ? -EINVAL : 0;
-}
-
-static int as102_fe_get_frontend(struct dvb_frontend *fe)
-{
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
- int ret = 0;
- struct as102_dev_t *dev;
- struct as10x_tps tps = { 0 };
-
- dev = (struct as102_dev_t *) fe->tuner_priv;
- if (dev == NULL)
- return -EINVAL;
-
- if (mutex_lock_interruptible(&dev->bus_adap.lock))
- return -EBUSY;
-
- /* send abilis command: GET_TPS */
- ret = as10x_cmd_get_tps(&dev->bus_adap, &tps);
-
- if (ret == 0)
- as10x_fe_copy_tps_parameters(p, &tps);
-
- mutex_unlock(&dev->bus_adap.lock);
-
- return (ret < 0) ? -EINVAL : 0;
-}
-
-static int as102_fe_get_tune_settings(struct dvb_frontend *fe,
- struct dvb_frontend_tune_settings *settings) {
-
-#if 0
- dprintk(debug, "step_size = %d\n", settings->step_size);
- dprintk(debug, "max_drift = %d\n", settings->max_drift);
- dprintk(debug, "min_delay_ms = %d -> %d\n", settings->min_delay_ms,
- 1000);
-#endif
-
- settings->min_delay_ms = 1000;
-
- return 0;
-}
-
-
-static int as102_fe_read_status(struct dvb_frontend *fe, fe_status_t *status)
-{
- int ret = 0;
- struct as102_dev_t *dev;
- struct as10x_tune_status tstate = { 0 };
-
- dev = (struct as102_dev_t *) fe->tuner_priv;
- if (dev == NULL)
- return -ENODEV;
-
- if (mutex_lock_interruptible(&dev->bus_adap.lock))
- return -EBUSY;
-
- /* send abilis command: GET_TUNE_STATUS */
- ret = as10x_cmd_get_tune_status(&dev->bus_adap, &tstate);
- if (ret < 0) {
- dprintk(debug, "as10x_cmd_get_tune_status failed (err = %d)\n",
- ret);
- goto out;
- }
-
- dev->signal_strength = tstate.signal_strength;
- dev->ber = tstate.BER;
-
- switch (tstate.tune_state) {
- case TUNE_STATUS_SIGNAL_DVB_OK:
- *status = FE_HAS_SIGNAL | FE_HAS_CARRIER;
- break;
- case TUNE_STATUS_STREAM_DETECTED:
- *status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_SYNC;
- break;
- case TUNE_STATUS_STREAM_TUNED:
- *status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_SYNC |
- FE_HAS_LOCK;
- break;
- default:
- *status = TUNE_STATUS_NOT_TUNED;
- }
-
- dprintk(debug, "tuner status: 0x%02x, strength %d, per: %d, ber: %d\n",
- tstate.tune_state, tstate.signal_strength,
- tstate.PER, tstate.BER);
-
- if (*status & FE_HAS_LOCK) {
- if (as10x_cmd_get_demod_stats(&dev->bus_adap,
- (struct as10x_demod_stats *) &dev->demod_stats) < 0) {
- memset(&dev->demod_stats, 0, sizeof(dev->demod_stats));
- dprintk(debug,
- "as10x_cmd_get_demod_stats failed (probably not tuned)\n");
- } else {
- dprintk(debug,
- "demod status: fc: 0x%08x, bad fc: 0x%08x, "
- "bytes corrected: 0x%08x , MER: 0x%04x\n",
- dev->demod_stats.frame_count,
- dev->demod_stats.bad_frame_count,
- dev->demod_stats.bytes_fixed_by_rs,
- dev->demod_stats.mer);
- }
- } else {
- memset(&dev->demod_stats, 0, sizeof(dev->demod_stats));
- }
-
-out:
- mutex_unlock(&dev->bus_adap.lock);
- return ret;
-}
-
-/*
- * Note:
- * - in AS102 SNR=MER
- * - the SNR will be returned in linear terms, i.e. not in dB
- * - the accuracy equals ±2dB for a SNR range from 4dB to 30dB
- * - the accuracy is >2dB for SNR values outside this range
- */
-static int as102_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
-{
- struct as102_dev_t *dev;
-
- dev = (struct as102_dev_t *) fe->tuner_priv;
- if (dev == NULL)
- return -ENODEV;
-
- *snr = dev->demod_stats.mer;
-
- return 0;
-}
-
-static int as102_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
-{
- struct as102_dev_t *dev;
-
- dev = (struct as102_dev_t *) fe->tuner_priv;
- if (dev == NULL)
- return -ENODEV;
-
- *ber = dev->ber;
-
- return 0;
-}
-
-static int as102_fe_read_signal_strength(struct dvb_frontend *fe,
- u16 *strength)
-{
- struct as102_dev_t *dev;
-
- dev = (struct as102_dev_t *) fe->tuner_priv;
- if (dev == NULL)
- return -ENODEV;
-
- *strength = (((0xffff * 400) * dev->signal_strength + 41000) * 2);
-
- return 0;
-}
-
-static int as102_fe_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
-{
- struct as102_dev_t *dev;
-
- dev = (struct as102_dev_t *) fe->tuner_priv;
- if (dev == NULL)
- return -ENODEV;
-
- if (dev->demod_stats.has_started)
- *ucblocks = dev->demod_stats.bad_frame_count;
- else
- *ucblocks = 0;
-
- return 0;
-}
-
-static int as102_fe_ts_bus_ctrl(struct dvb_frontend *fe, int acquire)
-{
- struct as102_dev_t *dev;
- int ret;
-
- dev = (struct as102_dev_t *) fe->tuner_priv;
- if (dev == NULL)
- return -ENODEV;
-
- if (mutex_lock_interruptible(&dev->bus_adap.lock))
- return -EBUSY;
-
- if (acquire) {
- if (elna_enable)
- as10x_cmd_set_context(&dev->bus_adap,
- CONTEXT_LNA, dev->elna_cfg);
-
- ret = as10x_cmd_turn_on(&dev->bus_adap);
- } else {
- ret = as10x_cmd_turn_off(&dev->bus_adap);
- }
-
- mutex_unlock(&dev->bus_adap.lock);
-
- return ret;
-}
-
-static struct dvb_frontend_ops as102_fe_ops = {
- .delsys = { SYS_DVBT },
- .info = {
- .name = "Unknown AS102 device",
- .frequency_min = 174000000,
- .frequency_max = 862000000,
- .frequency_stepsize = 166667,
- .caps = FE_CAN_INVERSION_AUTO
- | FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4
- | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO
- | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QPSK
- | FE_CAN_QAM_AUTO
- | FE_CAN_TRANSMISSION_MODE_AUTO
- | FE_CAN_GUARD_INTERVAL_AUTO
- | FE_CAN_HIERARCHY_AUTO
- | FE_CAN_RECOVER
- | FE_CAN_MUTE_TS
- },
-
- .set_frontend = as102_fe_set_frontend,
- .get_frontend = as102_fe_get_frontend,
- .get_tune_settings = as102_fe_get_tune_settings,
-
- .read_status = as102_fe_read_status,
- .read_snr = as102_fe_read_snr,
- .read_ber = as102_fe_read_ber,
- .read_signal_strength = as102_fe_read_signal_strength,
- .read_ucblocks = as102_fe_read_ucblocks,
- .ts_bus_ctrl = as102_fe_ts_bus_ctrl,
-};
-
-int as102_dvb_unregister_fe(struct dvb_frontend *fe)
-{
- /* unregister frontend */
- dvb_unregister_frontend(fe);
-
- /* detach frontend */
- dvb_frontend_detach(fe);
-
- return 0;
-}
-
-int as102_dvb_register_fe(struct as102_dev_t *as102_dev,
- struct dvb_frontend *dvb_fe)
-{
- int errno;
- struct dvb_adapter *dvb_adap;
-
- if (as102_dev == NULL)
- return -EINVAL;
-
- /* extract dvb_adapter */
- dvb_adap = &as102_dev->dvb_adap;
-
- /* init frontend callback ops */
- memcpy(&dvb_fe->ops, &as102_fe_ops, sizeof(struct dvb_frontend_ops));
- strncpy(dvb_fe->ops.info.name, as102_dev->name,
- sizeof(dvb_fe->ops.info.name));
-
- /* register dvb frontend */
- errno = dvb_register_frontend(dvb_adap, dvb_fe);
- if (errno == 0)
- dvb_fe->tuner_priv = as102_dev;
-
- return errno;
-}
-
-static void as10x_fe_copy_tps_parameters(struct dtv_frontend_properties *fe_tps,
- struct as10x_tps *as10x_tps)
-{
-
- /* extract constellation */
- switch (as10x_tps->modulation) {
- case CONST_QPSK:
- fe_tps->modulation = QPSK;
- break;
- case CONST_QAM16:
- fe_tps->modulation = QAM_16;
- break;
- case CONST_QAM64:
- fe_tps->modulation = QAM_64;
- break;
- }
-
- /* extract hierarchy */
- switch (as10x_tps->hierarchy) {
- case HIER_NONE:
- fe_tps->hierarchy = HIERARCHY_NONE;
- break;
- case HIER_ALPHA_1:
- fe_tps->hierarchy = HIERARCHY_1;
- break;
- case HIER_ALPHA_2:
- fe_tps->hierarchy = HIERARCHY_2;
- break;
- case HIER_ALPHA_4:
- fe_tps->hierarchy = HIERARCHY_4;
- break;
- }
-
- /* extract code rate HP */
- switch (as10x_tps->code_rate_HP) {
- case CODE_RATE_1_2:
- fe_tps->code_rate_HP = FEC_1_2;
- break;
- case CODE_RATE_2_3:
- fe_tps->code_rate_HP = FEC_2_3;
- break;
- case CODE_RATE_3_4:
- fe_tps->code_rate_HP = FEC_3_4;
- break;
- case CODE_RATE_5_6:
- fe_tps->code_rate_HP = FEC_5_6;
- break;
- case CODE_RATE_7_8:
- fe_tps->code_rate_HP = FEC_7_8;
- break;
- }
-
- /* extract code rate LP */
- switch (as10x_tps->code_rate_LP) {
- case CODE_RATE_1_2:
- fe_tps->code_rate_LP = FEC_1_2;
- break;
- case CODE_RATE_2_3:
- fe_tps->code_rate_LP = FEC_2_3;
- break;
- case CODE_RATE_3_4:
- fe_tps->code_rate_LP = FEC_3_4;
- break;
- case CODE_RATE_5_6:
- fe_tps->code_rate_LP = FEC_5_6;
- break;
- case CODE_RATE_7_8:
- fe_tps->code_rate_LP = FEC_7_8;
- break;
- }
-
- /* extract guard interval */
- switch (as10x_tps->guard_interval) {
- case GUARD_INT_1_32:
- fe_tps->guard_interval = GUARD_INTERVAL_1_32;
- break;
- case GUARD_INT_1_16:
- fe_tps->guard_interval = GUARD_INTERVAL_1_16;
- break;
- case GUARD_INT_1_8:
- fe_tps->guard_interval = GUARD_INTERVAL_1_8;
- break;
- case GUARD_INT_1_4:
- fe_tps->guard_interval = GUARD_INTERVAL_1_4;
- break;
- }
-
- /* extract transmission mode */
- switch (as10x_tps->transmission_mode) {
- case TRANS_MODE_2K:
- fe_tps->transmission_mode = TRANSMISSION_MODE_2K;
- break;
- case TRANS_MODE_8K:
- fe_tps->transmission_mode = TRANSMISSION_MODE_8K;
- break;
- }
-}
-
-static uint8_t as102_fe_get_code_rate(fe_code_rate_t arg)
-{
- uint8_t c;
-
- switch (arg) {
- case FEC_1_2:
- c = CODE_RATE_1_2;
- break;
- case FEC_2_3:
- c = CODE_RATE_2_3;
- break;
- case FEC_3_4:
- c = CODE_RATE_3_4;
- break;
- case FEC_5_6:
- c = CODE_RATE_5_6;
- break;
- case FEC_7_8:
- c = CODE_RATE_7_8;
- break;
- default:
- c = CODE_RATE_UNKNOWN;
- break;
- }
-
- return c;
-}
-
-static void as102_fe_copy_tune_parameters(struct as10x_tune_args *tune_args,
- struct dtv_frontend_properties *params)
-{
-
- /* set frequency */
- tune_args->freq = params->frequency / 1000;
-
- /* fix interleaving_mode */
- tune_args->interleaving_mode = INTLV_NATIVE;
-
- switch (params->bandwidth_hz) {
- case 8000000:
- tune_args->bandwidth = BW_8_MHZ;
- break;
- case 7000000:
- tune_args->bandwidth = BW_7_MHZ;
- break;
- case 6000000:
- tune_args->bandwidth = BW_6_MHZ;
- break;
- default:
- tune_args->bandwidth = BW_8_MHZ;
- }
-
- switch (params->guard_interval) {
- case GUARD_INTERVAL_1_32:
- tune_args->guard_interval = GUARD_INT_1_32;
- break;
- case GUARD_INTERVAL_1_16:
- tune_args->guard_interval = GUARD_INT_1_16;
- break;
- case GUARD_INTERVAL_1_8:
- tune_args->guard_interval = GUARD_INT_1_8;
- break;
- case GUARD_INTERVAL_1_4:
- tune_args->guard_interval = GUARD_INT_1_4;
- break;
- case GUARD_INTERVAL_AUTO:
- default:
- tune_args->guard_interval = GUARD_UNKNOWN;
- break;
- }
-
- switch (params->modulation) {
- case QPSK:
- tune_args->modulation = CONST_QPSK;
- break;
- case QAM_16:
- tune_args->modulation = CONST_QAM16;
- break;
- case QAM_64:
- tune_args->modulation = CONST_QAM64;
- break;
- default:
- tune_args->modulation = CONST_UNKNOWN;
- break;
- }
-
- switch (params->transmission_mode) {
- case TRANSMISSION_MODE_2K:
- tune_args->transmission_mode = TRANS_MODE_2K;
- break;
- case TRANSMISSION_MODE_8K:
- tune_args->transmission_mode = TRANS_MODE_8K;
- break;
- default:
- tune_args->transmission_mode = TRANS_MODE_UNKNOWN;
- }
-
- switch (params->hierarchy) {
- case HIERARCHY_NONE:
- tune_args->hierarchy = HIER_NONE;
- break;
- case HIERARCHY_1:
- tune_args->hierarchy = HIER_ALPHA_1;
- break;
- case HIERARCHY_2:
- tune_args->hierarchy = HIER_ALPHA_2;
- break;
- case HIERARCHY_4:
- tune_args->hierarchy = HIER_ALPHA_4;
- break;
- case HIERARCHY_AUTO:
- tune_args->hierarchy = HIER_UNKNOWN;
- break;
- }
-
- dprintk(debug, "tuner parameters: freq: %d bw: 0x%02x gi: 0x%02x\n",
- params->frequency,
- tune_args->bandwidth,
- tune_args->guard_interval);
-
- /*
- * Detect a hierarchy selection
- * if HP/LP are both set to FEC_NONE, HP will be selected.
- */
- if ((tune_args->hierarchy != HIER_NONE) &&
- ((params->code_rate_LP == FEC_NONE) ||
- (params->code_rate_HP == FEC_NONE))) {
-
- if (params->code_rate_LP == FEC_NONE) {
- tune_args->hier_select = HIER_HIGH_PRIORITY;
- tune_args->code_rate =
- as102_fe_get_code_rate(params->code_rate_HP);
- }
-
- if (params->code_rate_HP == FEC_NONE) {
- tune_args->hier_select = HIER_LOW_PRIORITY;
- tune_args->code_rate =
- as102_fe_get_code_rate(params->code_rate_LP);
- }
-
- dprintk(debug,
- "\thierarchy: 0x%02x selected: %s code_rate_%s: 0x%02x\n",
- tune_args->hierarchy,
- tune_args->hier_select == HIER_HIGH_PRIORITY ?
- "HP" : "LP",
- tune_args->hier_select == HIER_HIGH_PRIORITY ?
- "HP" : "LP",
- tune_args->code_rate);
- } else {
- tune_args->code_rate =
- as102_fe_get_code_rate(params->code_rate_HP);
- }
-}
diff --git a/drivers/staging/media/davinci_vpfe/Kconfig b/drivers/staging/media/davinci_vpfe/Kconfig
index 12f321dd2399..4de2f082491d 100644
--- a/drivers/staging/media/davinci_vpfe/Kconfig
+++ b/drivers/staging/media/davinci_vpfe/Kconfig
@@ -1,6 +1,7 @@
config VIDEO_DM365_VPFE
tristate "DM365 VPFE Media Controller Capture Driver"
depends on VIDEO_V4L2 && ARCH_DAVINCI_DM365 && !VIDEO_DM365_ISIF
+ depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
help
Support for DM365 VPFE based Media Controller Capture driver.
diff --git a/drivers/staging/media/dt3155v4l/Kconfig b/drivers/staging/media/dt3155v4l/Kconfig
index 226a1ca90b3c..2d496001b6e8 100644
--- a/drivers/staging/media/dt3155v4l/Kconfig
+++ b/drivers/staging/media/dt3155v4l/Kconfig
@@ -1,6 +1,7 @@
config VIDEO_DT3155
tristate "DT3155 frame grabber, Video4Linux interface"
depends on PCI && VIDEO_DEV && VIDEO_V4L2
+ depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
default n
---help---
diff --git a/drivers/staging/media/lirc/lirc_imon.c b/drivers/staging/media/lirc/lirc_imon.c
index 726cc3a31856..7aca44f28c5a 100644
--- a/drivers/staging/media/lirc/lirc_imon.c
+++ b/drivers/staging/media/lirc/lirc_imon.c
@@ -414,6 +414,7 @@ static ssize_t vfd_write(struct file *file, const char __user *buf,
data_buf = memdup_user(buf, n_bytes);
if (IS_ERR(data_buf)) {
retval = PTR_ERR(data_buf);
+ data_buf = NULL;
goto exit;
}
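
This lirc fix (and the matching lirc_sasem change below) clears the pointer after memdup_user() fails so that a shared exit path can kfree() it unconditionally; without it, kfree() would be handed an ERR_PTR. A hedged sketch of the pattern in isolation, with an illustrative function name and exit label:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Sketch: copy a user buffer, with an error path that always kfree()s. */
static int example_copy(const char __user *buf, size_t n_bytes)
{
	char *data_buf;
	int retval = 0;

	data_buf = memdup_user(buf, n_bytes);
	if (IS_ERR(data_buf)) {
		retval = PTR_ERR(data_buf);
		data_buf = NULL;	/* so the kfree() below is a no-op */
		goto exit;
	}

	/* ... use data_buf ... */

exit:
	kfree(data_buf);
	return retval;
}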
diff --git a/drivers/staging/media/lirc/lirc_sasem.c b/drivers/staging/media/lirc/lirc_sasem.c
index 86ad811fda24..c20ef56202bf 100644
--- a/drivers/staging/media/lirc/lirc_sasem.c
+++ b/drivers/staging/media/lirc/lirc_sasem.c
@@ -392,6 +392,7 @@ static ssize_t vfd_write(struct file *file, const char __user *buf,
data_buf = memdup_user((void const __user *)buf, n_bytes);
if (IS_ERR(data_buf)) {
retval = PTR_ERR(data_buf);
+ data_buf = NULL;
goto exit;
}
diff --git a/drivers/staging/media/omap4iss/Kconfig b/drivers/staging/media/omap4iss/Kconfig
index 8afc6fee40c5..b78643f907e7 100644
--- a/drivers/staging/media/omap4iss/Kconfig
+++ b/drivers/staging/media/omap4iss/Kconfig
@@ -1,6 +1,7 @@
config VIDEO_OMAP4
bool "OMAP 4 Camera support"
depends on VIDEO_V4L2=y && VIDEO_V4L2_SUBDEV_API && I2C=y && ARCH_OMAP4
+ depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
---help---
Driver for an OMAP 4 ISS controller.
diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
index 73cec14cbf56..8b1f53331433 100644
--- a/drivers/staging/vme/devices/vme_user.c
+++ b/drivers/staging/vme/devices/vme_user.c
@@ -410,41 +410,19 @@ static ssize_t vme_user_write(struct file *file, const char __user *buf,
static loff_t vme_user_llseek(struct file *file, loff_t off, int whence)
{
- loff_t absolute = -1;
unsigned int minor = MINOR(file_inode(file)->i_rdev);
size_t image_size;
+ loff_t res;
if (minor == CONTROL_MINOR)
return -EINVAL;
mutex_lock(&image[minor].mutex);
image_size = vme_get_size(image[minor].resource);
-
- switch (whence) {
- case SEEK_SET:
- absolute = off;
- break;
- case SEEK_CUR:
- absolute = file->f_pos + off;
- break;
- case SEEK_END:
- absolute = image_size + off;
- break;
- default:
- mutex_unlock(&image[minor].mutex);
- return -EINVAL;
- }
-
- if ((absolute < 0) || (absolute >= image_size)) {
- mutex_unlock(&image[minor].mutex);
- return -EINVAL;
- }
-
- file->f_pos = absolute;
-
+ res = fixed_size_llseek(file, off, whence, image_size);
mutex_unlock(&image[minor].mutex);
- return absolute;
+ return res;
}
/*
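
For context, the vme_user change above replaces the open-coded whence handling with fixed_size_llseek(), which validates the requested offset against the size of the region for all whence values. A minimal sketch of the call, with a hypothetical wrapper name:

#include <linux/fs.h>

/* Sketch: restrict seeks to a fixed-size device window. */
static loff_t example_llseek(struct file *file, loff_t off, int whence,
			     loff_t window_size)
{
	return fixed_size_llseek(file, off, whence, window_size);
}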
diff --git a/drivers/tty/bfin_jtag_comm.c b/drivers/tty/bfin_jtag_comm.c
index 8096fcbe2dc1..d7b198c400c7 100644
--- a/drivers/tty/bfin_jtag_comm.c
+++ b/drivers/tty/bfin_jtag_comm.c
@@ -77,7 +77,6 @@ bfin_jc_emudat_manager(void *arg)
pr_debug("waiting for readers\n");
__set_current_state(TASK_UNINTERRUPTIBLE);
schedule();
- __set_current_state(TASK_RUNNING);
continue;
}
diff --git a/drivers/tty/hvc/hvc_vio.c b/drivers/tty/hvc/hvc_vio.c
index 5618b5fc7500..f575a9b5ede7 100644
--- a/drivers/tty/hvc/hvc_vio.c
+++ b/drivers/tty/hvc/hvc_vio.c
@@ -452,7 +452,7 @@ void __init hvc_vio_init_early(void)
return;
#endif
/* Check whether the user has requested a different console. */
- if (!strstr(cmd_line, "console="))
+ if (!strstr(boot_command_line, "console="))
add_preferred_console("hvc", 0, NULL);
hvc_instantiate(0, 0, ops);
}
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index 2967f0388d2c..f1e57425e39f 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -347,8 +347,6 @@ static int xen_console_remove(struct xencons_info *info)
}
#ifdef CONFIG_HVC_XEN_FRONTEND
-static struct xenbus_driver xencons_driver;
-
static int xencons_remove(struct xenbus_device *dev)
{
return xen_console_remove(dev_get_drvdata(&dev->dev));
@@ -499,13 +497,14 @@ static const struct xenbus_device_id xencons_ids[] = {
{ "" }
};
-
-static DEFINE_XENBUS_DRIVER(xencons, "xenconsole",
+static struct xenbus_driver xencons_driver = {
+ .name = "xenconsole",
+ .ids = xencons_ids,
.probe = xencons_probe,
.remove = xencons_remove,
.resume = xencons_resume,
.otherend_changed = xencons_backend_changed,
-);
+};
#endif /* CONFIG_HVC_XEN_FRONTEND */
static int __init xen_hvc_init(void)
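
The hvc_xen hunk above is part of the tree-wide removal of the DEFINE_XENBUS_DRIVER() macro; drivers now fill in struct xenbus_driver explicitly, including .name and .ids. A minimal, hypothetical sketch of the open-coded form with stub callbacks:

#include <xen/xenbus.h>

/* Sketch: an explicitly initialised xenbus frontend, no macro involved. */
static int example_probe(struct xenbus_device *dev,
			 const struct xenbus_device_id *id)
{
	return 0;
}

static int example_remove(struct xenbus_device *dev)
{
	return 0;
}

static const struct xenbus_device_id example_ids[] = {
	{ "example" },
	{ "" }
};

static struct xenbus_driver example_driver = {
	.name	= "example",
	.ids	= example_ids,
	.probe	= example_probe,
	.remove	= example_remove,
};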
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 2f6f9b5e4891..16a2c0237dd6 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -2186,8 +2186,9 @@ static int __tty_fasync(int fd, struct file *filp, int on)
}
get_pid(pid);
spin_unlock_irqrestore(&tty->ctrl_lock, flags);
- retval = __f_setown(filp, pid, type, 0);
+ __f_setown(filp, pid, type, 0);
put_pid(pid);
+ retval = 0;
}
out:
return retval;
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 4ad11e03cf54..7c6771d027a2 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -164,10 +164,9 @@ struct ffs_desc_helper {
static int __must_check ffs_epfiles_create(struct ffs_data *ffs);
static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count);
-static struct inode *__must_check
+static struct dentry *
ffs_sb_create_file(struct super_block *sb, const char *name, void *data,
- const struct file_operations *fops,
- struct dentry **dentry_p);
+ const struct file_operations *fops);
/* Devices management *******************************************************/
@@ -1119,10 +1118,9 @@ ffs_sb_make_inode(struct super_block *sb, void *data,
}
/* Create "regular" file */
-static struct inode *ffs_sb_create_file(struct super_block *sb,
+static struct dentry *ffs_sb_create_file(struct super_block *sb,
const char *name, void *data,
- const struct file_operations *fops,
- struct dentry **dentry_p)
+ const struct file_operations *fops)
{
struct ffs_data *ffs = sb->s_fs_info;
struct dentry *dentry;
@@ -1141,10 +1139,7 @@ static struct inode *ffs_sb_create_file(struct super_block *sb,
}
d_add(dentry, inode);
- if (dentry_p)
- *dentry_p = dentry;
-
- return inode;
+ return dentry;
}
/* Super block */
@@ -1189,7 +1184,7 @@ static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
/* EP0 file */
if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs,
- &ffs_ep0_operations, NULL)))
+ &ffs_ep0_operations)))
return -ENOMEM;
return 0;
@@ -1561,9 +1556,10 @@ static int ffs_epfiles_create(struct ffs_data *ffs)
sprintf(epfiles->name, "ep%02x", ffs->eps_addrmap[i]);
else
sprintf(epfiles->name, "ep%u", i);
- if (!unlikely(ffs_sb_create_file(ffs->sb, epfiles->name, epfile,
- &ffs_epfile_operations,
- &epfile->dentry))) {
+ epfile->dentry = ffs_sb_create_file(ffs->sb, epfiles->name,
+ epfile,
+ &ffs_epfile_operations);
+ if (unlikely(!epfile->dentry)) {
ffs_epfiles_destroy(epfiles, i - 1);
return -ENOMEM;
}
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index edefec2cc584..c744e4975d74 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -198,7 +198,6 @@ struct ep_data {
struct list_head epfiles;
wait_queue_head_t wait;
struct dentry *dentry;
- struct inode *inode;
};
static inline void get_ep (struct ep_data *data)
@@ -1618,10 +1617,9 @@ static void destroy_ep_files (struct dev_data *dev)
}
-static struct inode *
+static struct dentry *
gadgetfs_create_file (struct super_block *sb, char const *name,
- void *data, const struct file_operations *fops,
- struct dentry **dentry_p);
+ void *data, const struct file_operations *fops);
static int activate_ep_files (struct dev_data *dev)
{
@@ -1649,10 +1647,9 @@ static int activate_ep_files (struct dev_data *dev)
if (!data->req)
goto enomem1;
- data->inode = gadgetfs_create_file (dev->sb, data->name,
- data, &ep_config_operations,
- &data->dentry);
- if (!data->inode)
+ data->dentry = gadgetfs_create_file (dev->sb, data->name,
+ data, &ep_config_operations);
+ if (!data->dentry)
goto enomem2;
list_add_tail (&data->epfiles, &dev->epfiles);
}
@@ -2012,10 +2009,9 @@ gadgetfs_make_inode (struct super_block *sb,
/* creates in fs root directory, so non-renamable and non-linkable.
* so inode and dentry are paired, until device reconfig.
*/
-static struct inode *
+static struct dentry *
gadgetfs_create_file (struct super_block *sb, char const *name,
- void *data, const struct file_operations *fops,
- struct dentry **dentry_p)
+ void *data, const struct file_operations *fops)
{
struct dentry *dentry;
struct inode *inode;
@@ -2031,8 +2027,7 @@ gadgetfs_create_file (struct super_block *sb, char const *name,
return NULL;
}
d_add (dentry, inode);
- *dentry_p = dentry;
- return inode;
+ return dentry;
}
static const struct super_operations gadget_fs_operations = {
@@ -2080,9 +2075,8 @@ gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
goto Enomem;
dev->sb = sb;
- if (!gadgetfs_create_file (sb, CHIP,
- dev, &dev_init_operations,
- &dev->dentry)) {
+ dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &dev_init_operations);
+ if (!dev->dentry) {
put_dev(dev);
goto Enomem;
}
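
Both the f_fs and gadgetfs hunks above change the file-creation helper to return the dentry directly instead of returning the inode and passing the dentry back through an out-parameter, since the dentry is what every caller actually keeps. A hedged sketch of the resulting helper shape for a pseudo filesystem; the names and the file mode are illustrative:

#include <linux/dcache.h>
#include <linux/fs.h>

/* Sketch: create one file in the fs root and hand back its dentry. */
static struct dentry *example_create_file(struct super_block *sb,
					  const char *name, void *data,
					  const struct file_operations *fops)
{
	struct dentry *dentry;
	struct inode *inode;

	dentry = d_alloc_name(sb->s_root, name);
	if (!dentry)
		return NULL;

	inode = new_inode(sb);
	if (!inode) {
		dput(dentry);
		return NULL;
	}
	inode->i_mode = S_IFREG | 0600;		/* illustrative mode */
	inode->i_fop = fops;
	inode->i_private = data;

	d_add(dentry, inode);
	return dentry;
}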
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index f7825332a325..9558da3f06a0 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -876,15 +876,11 @@ static void vfio_pci_remove(struct pci_dev *pdev)
{
struct vfio_pci_device *vdev;
- mutex_lock(&driver_lock);
-
vdev = vfio_del_group_dev(&pdev->dev);
if (vdev) {
iommu_group_put(pdev->dev.iommu_group);
kfree(vdev);
}
-
- mutex_unlock(&driver_lock);
}
static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
@@ -927,108 +923,90 @@ static struct pci_driver vfio_pci_driver = {
.err_handler = &vfio_err_handlers,
};
-/*
- * Test whether a reset is necessary and possible. We mark devices as
- * needs_reset when they are released, but don't have a function-local reset
- * available. If any of these exist in the affected devices, we want to do
- * a bus/slot reset. We also need all of the affected devices to be unused,
- * so we abort if any device has a non-zero refcnt. driver_lock prevents a
- * device from being opened during the scan or unbound from vfio-pci.
- */
-static int vfio_pci_test_bus_reset(struct pci_dev *pdev, void *data)
-{
- bool *needs_reset = data;
- struct pci_driver *pci_drv = ACCESS_ONCE(pdev->driver);
- int ret = -EBUSY;
-
- if (pci_drv == &vfio_pci_driver) {
- struct vfio_device *device;
- struct vfio_pci_device *vdev;
-
- device = vfio_device_get_from_dev(&pdev->dev);
- if (!device)
- return ret;
-
- vdev = vfio_device_data(device);
- if (vdev) {
- if (vdev->needs_reset)
- *needs_reset = true;
-
- if (!vdev->refcnt)
- ret = 0;
- }
-
- vfio_device_put(device);
- }
-
- /*
- * TODO: vfio-core considers groups to be viable even if some devices
- * are attached to known drivers, like pci-stub or pcieport. We can't
- * freeze devices from being unbound to those drivers like we can
- * here though, so it would be racy to test for them. We also can't
- * use device_lock() to prevent changes as that would interfere with
- * PCI-core taking device_lock during bus reset. For now, we require
- * devices to be bound to vfio-pci to get a bus/slot reset on release.
- */
-
- return ret;
-}
+struct vfio_devices {
+ struct vfio_device **devices;
+ int cur_index;
+ int max_index;
+};
-/* Clear needs_reset on all affected devices after successful bus/slot reset */
-static int vfio_pci_clear_needs_reset(struct pci_dev *pdev, void *data)
+static int vfio_pci_get_devs(struct pci_dev *pdev, void *data)
{
+ struct vfio_devices *devs = data;
struct pci_driver *pci_drv = ACCESS_ONCE(pdev->driver);
- if (pci_drv == &vfio_pci_driver) {
- struct vfio_device *device;
- struct vfio_pci_device *vdev;
+ if (pci_drv != &vfio_pci_driver)
+ return -EBUSY;
- device = vfio_device_get_from_dev(&pdev->dev);
- if (!device)
- return 0;
+ if (devs->cur_index == devs->max_index)
+ return -ENOSPC;
- vdev = vfio_device_data(device);
- if (vdev)
- vdev->needs_reset = false;
-
- vfio_device_put(device);
- }
+ devs->devices[devs->cur_index] = vfio_device_get_from_dev(&pdev->dev);
+ if (!devs->devices[devs->cur_index])
+ return -EINVAL;
+ devs->cur_index++;
return 0;
}
/*
* Attempt to do a bus/slot reset if there are devices affected by a reset for
* this device that are marked needs_reset and all of the affected devices are unused
- * (!refcnt). Callers of this function are required to hold driver_lock such
- * that devices can not be unbound from vfio-pci or opened by a user while we
- * test for and perform a bus/slot reset.
+ * (!refcnt). Callers are required to hold driver_lock when calling this to
+ * prevent device opens and concurrent bus reset attempts. We prevent device
+ * unbinds by acquiring and holding a reference to the vfio_device.
+ *
+ * NB: vfio-core considers a group to be viable even if some devices are
+ * bound to drivers like pci-stub or pcieport. Here we require all devices
+ * to be bound to vfio_pci since that's the only way we can be sure they
+ * stay put.
*/
static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev)
{
+ struct vfio_devices devs = { .cur_index = 0 };
+ int i = 0, ret = -EINVAL;
bool needs_reset = false, slot = false;
- int ret;
+ struct vfio_pci_device *tmp;
if (!pci_probe_reset_slot(vdev->pdev->slot))
slot = true;
else if (pci_probe_reset_bus(vdev->pdev->bus))
return;
- if (vfio_pci_for_each_slot_or_bus(vdev->pdev,
- vfio_pci_test_bus_reset,
- &needs_reset, slot) || !needs_reset)
+ if (vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs,
+ &i, slot) || !i)
return;
- if (slot)
- ret = pci_try_reset_slot(vdev->pdev->slot);
- else
- ret = pci_try_reset_bus(vdev->pdev->bus);
-
- if (ret)
+ devs.max_index = i;
+ devs.devices = kcalloc(i, sizeof(struct vfio_device *), GFP_KERNEL);
+ if (!devs.devices)
return;
- vfio_pci_for_each_slot_or_bus(vdev->pdev,
- vfio_pci_clear_needs_reset, NULL, slot);
+ if (vfio_pci_for_each_slot_or_bus(vdev->pdev,
+ vfio_pci_get_devs, &devs, slot))
+ goto put_devs;
+
+ for (i = 0; i < devs.cur_index; i++) {
+ tmp = vfio_device_data(devs.devices[i]);
+ if (tmp->needs_reset)
+ needs_reset = true;
+ if (tmp->refcnt)
+ goto put_devs;
+ }
+
+ if (needs_reset)
+ ret = slot ? pci_try_reset_slot(vdev->pdev->slot) :
+ pci_try_reset_bus(vdev->pdev->bus);
+
+put_devs:
+ for (i = 0; i < devs.cur_index; i++) {
+ if (!ret) {
+ tmp = vfio_device_data(devs.devices[i]);
+ tmp->needs_reset = false;
+ }
+ vfio_device_put(devs.devices[i]);
+ }
+
+ kfree(devs.devices);
}
static void __exit vfio_pci_cleanup(void)
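
For reference, the reworked reset path above walks the slot or bus twice: once to count candidate functions, then once with a callback that pins each vfio-bound device so it cannot be unbound while the refcount checks and the reset run. The sketch below mirrors that callback; the struct and function names are hypothetical, while the vfio calls are the ones used in the hunk:

#include <linux/pci.h>
#include <linux/vfio.h>

struct example_devs {
	struct vfio_device **devices;
	int cur_index;
	int max_index;
};

/* Sketch: per-function callback that takes a vfio device reference. */
static int example_get_dev(struct pci_dev *pdev, void *data)
{
	struct example_devs *devs = data;

	if (devs->cur_index == devs->max_index)
		return -ENOSPC;		/* more functions than we counted */

	devs->devices[devs->cur_index] = vfio_device_get_from_dev(&pdev->dev);
	if (!devs->devices[devs->cur_index])
		return -EINVAL;		/* not (or no longer) a vfio device */

	devs->cur_index++;
	return 0;
}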
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 9dd49c9839ac..553212f037c3 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -16,6 +16,7 @@
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/eventfd.h>
+#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/file.h>
#include <linux/poll.h>
@@ -548,6 +549,20 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
return PTR_ERR(trigger);
}
+ /*
+ * The MSIx vector table resides in device memory which may be cleared
+ * via backdoor resets. We don't allow direct access to the vector
+ * table so even if a userspace driver attempts to save/restore around
+ * such a reset it would be unsuccessful. To avoid this, restore the
+ * cached value of the message prior to enabling.
+ */
+ if (msix) {
+ struct msi_msg msg;
+
+ get_cached_msi_msg(irq, &msg);
+ write_msi_msg(irq, &msg);
+ }
+
ret = request_irq(irq, vfio_msihandler, 0,
vdev->ctx[vector].name, trigger);
if (ret) {
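
The MSI-X hunk above rewrites the vector-table entry from the cached message right before request_irq(), since a backdoor reset may have cleared the table in device memory. A minimal sketch of just that restore step:

#include <linux/msi.h>

/* Sketch: rewrite a possibly clobbered MSI-X entry from the cached copy. */
static void example_restore_msi(unsigned int irq)
{
	struct msi_msg msg;

	get_cached_msi_msg(irq, &msg);	/* copy maintained by the MSI core */
	write_msi_msg(irq, &msg);	/* rewrite address/data in the device */
}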
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 0734fbe5b651..583ccdb2c58f 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -57,7 +57,8 @@ struct vfio_iommu {
struct list_head domain_list;
struct mutex lock;
struct rb_root dma_list;
- bool v2;
+ bool v2;
+ bool nesting;
};
struct vfio_domain {
@@ -705,6 +706,15 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
goto out_free;
}
+ if (iommu->nesting) {
+ int attr = 1;
+
+ ret = iommu_domain_set_attr(domain->domain, DOMAIN_ATTR_NESTING,
+ &attr);
+ if (ret)
+ goto out_domain;
+ }
+
ret = iommu_attach_group(domain->domain, iommu_group);
if (ret)
goto out_domain;
@@ -819,17 +829,26 @@ static void *vfio_iommu_type1_open(unsigned long arg)
{
struct vfio_iommu *iommu;
- if (arg != VFIO_TYPE1_IOMMU && arg != VFIO_TYPE1v2_IOMMU)
- return ERR_PTR(-EINVAL);
-
iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
if (!iommu)
return ERR_PTR(-ENOMEM);
+ switch (arg) {
+ case VFIO_TYPE1_IOMMU:
+ break;
+ case VFIO_TYPE1_NESTING_IOMMU:
+ iommu->nesting = true;
+ case VFIO_TYPE1v2_IOMMU:
+ iommu->v2 = true;
+ break;
+ default:
+ kfree(iommu);
+ return ERR_PTR(-EINVAL);
+ }
+
INIT_LIST_HEAD(&iommu->domain_list);
iommu->dma_list = RB_ROOT;
mutex_init(&iommu->lock);
- iommu->v2 = (arg == VFIO_TYPE1v2_IOMMU);
return iommu;
}
@@ -885,6 +904,7 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
switch (arg) {
case VFIO_TYPE1_IOMMU:
case VFIO_TYPE1v2_IOMMU:
+ case VFIO_TYPE1_NESTING_IOMMU:
return 1;
case VFIO_DMA_CC_IOMMU:
if (!iommu)
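
With VFIO_TYPE1_NESTING_IOMMU accepted by both the open path and the extension check above, userspace can probe for nesting support and fall back to plain type1 v2. A hypothetical userspace sketch; container setup and group attachment are omitted:

#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Sketch: prefer the nesting IOMMU type when the host offers it. */
static int example_set_iommu(int container_fd)
{
	if (ioctl(container_fd, VFIO_CHECK_EXTENSION,
		  VFIO_TYPE1_NESTING_IOMMU) > 0)
		return ioctl(container_fd, VFIO_SET_IOMMU,
			     VFIO_TYPE1_NESTING_IOMMU);

	return ioctl(container_fd, VFIO_SET_IOMMU, VFIO_TYPE1v2_IOMMU);
}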
diff --git a/drivers/vfio/vfio_spapr_eeh.c b/drivers/vfio/vfio_spapr_eeh.c
index 86dfceb9201f..5fa42db769ee 100644
--- a/drivers/vfio/vfio_spapr_eeh.c
+++ b/drivers/vfio/vfio_spapr_eeh.c
@@ -92,7 +92,7 @@ long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group,
return ret;
}
-EXPORT_SYMBOL(vfio_spapr_iommu_eeh_ioctl);
+EXPORT_SYMBOL_GPL(vfio_spapr_iommu_eeh_ioctl);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c
index a6f7cc0a0883..9a23698b6fe8 100644
--- a/drivers/video/backlight/88pm860x_bl.c
+++ b/drivers/video/backlight/88pm860x_bl.c
@@ -265,7 +265,6 @@ static int pm860x_backlight_probe(struct platform_device *pdev)
static struct platform_driver pm860x_backlight_driver = {
.driver = {
.name = "88pm860x-backlight",
- .owner = THIS_MODULE,
},
.probe = pm860x_backlight_probe,
};
diff --git a/drivers/video/backlight/aat2870_bl.c b/drivers/video/backlight/aat2870_bl.c
index 86234c31d79c..50774e657700 100644
--- a/drivers/video/backlight/aat2870_bl.c
+++ b/drivers/video/backlight/aat2870_bl.c
@@ -211,7 +211,6 @@ static int aat2870_bl_remove(struct platform_device *pdev)
static struct platform_driver aat2870_bl_driver = {
.driver = {
.name = "aat2870-backlight",
- .owner = THIS_MODULE,
},
.probe = aat2870_bl_probe,
.remove = aat2870_bl_remove,
diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
index f37097a261a2..dd88ba1d71ce 100644
--- a/drivers/video/backlight/adp5520_bl.c
+++ b/drivers/video/backlight/adp5520_bl.c
@@ -67,6 +67,7 @@ static int adp5520_bl_set(struct backlight_device *bl, int brightness)
static int adp5520_bl_update_status(struct backlight_device *bl)
{
int brightness = bl->props.brightness;
+
if (bl->props.power != FB_BLANK_UNBLANK)
brightness = 0;
@@ -374,7 +375,6 @@ static SIMPLE_DEV_PM_OPS(adp5520_bl_pm_ops, adp5520_bl_suspend,
static struct platform_driver adp5520_bl_driver = {
.driver = {
.name = "adp5520-backlight",
- .owner = THIS_MODULE,
.pm = &adp5520_bl_pm_ops,
},
.probe = adp5520_bl_probe,
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
index be8d83deca7d..71147f4461b8 100644
--- a/drivers/video/backlight/adp8860_bl.c
+++ b/drivers/video/backlight/adp8860_bl.c
@@ -181,6 +181,7 @@ static int adp8860_clr_bits(struct i2c_client *client, int reg, uint8_t bit_mask
static void adp8860_led_work(struct work_struct *work)
{
struct adp8860_led *led = container_of(work, struct adp8860_led, work);
+
adp8860_write(led->client, ADP8860_ISC1 - led->id + 1,
led->new_brightness >> 1);
}
@@ -362,6 +363,7 @@ static int adp8860_bl_set(struct backlight_device *bl, int brightness)
static int adp8860_bl_update_status(struct backlight_device *bl)
{
int brightness = bl->props.brightness;
+
if (bl->props.power != FB_BLANK_UNBLANK)
brightness = 0;
@@ -499,6 +501,7 @@ static ssize_t adp8860_bl_l1_daylight_max_store(struct device *dev,
{
struct adp8860_bl *data = dev_get_drvdata(dev);
int ret = kstrtoul(buf, 10, &data->cached_daylight_max);
+
if (ret)
return ret;
diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
index 251af4d38d86..037e43083343 100644
--- a/drivers/video/backlight/adp8870_bl.c
+++ b/drivers/video/backlight/adp8870_bl.c
@@ -144,6 +144,7 @@ static int adp8870_read(struct i2c_client *client, int reg, uint8_t *val)
static int adp8870_write(struct i2c_client *client, u8 reg, u8 val)
{
int ret = i2c_smbus_write_byte_data(client, reg, val);
+
if (ret)
dev_err(&client->dev, "failed to write\n");
@@ -195,6 +196,7 @@ static int adp8870_clr_bits(struct i2c_client *client, int reg, uint8_t bit_mask
static void adp8870_led_work(struct work_struct *work)
{
struct adp8870_led *led = container_of(work, struct adp8870_led, work);
+
adp8870_write(led->client, ADP8870_ISC1 + led->id - 1,
led->new_brightness >> 1);
}
@@ -399,6 +401,7 @@ static int adp8870_bl_set(struct backlight_device *bl, int brightness)
static int adp8870_bl_update_status(struct backlight_device *bl)
{
int brightness = bl->props.brightness;
+
if (bl->props.power != FB_BLANK_UNBLANK)
brightness = 0;
@@ -649,6 +652,7 @@ static ssize_t adp8870_bl_l1_daylight_max_store(struct device *dev,
{
struct adp8870_bl *data = dev_get_drvdata(dev);
int ret = kstrtoul(buf, 10, &data->cached_daylight_max);
+
if (ret)
return ret;
diff --git a/drivers/video/backlight/ams369fg06.c b/drivers/video/backlight/ams369fg06.c
index 4726c8be626f..5f897f99cc9b 100644
--- a/drivers/video/backlight/ams369fg06.c
+++ b/drivers/video/backlight/ams369fg06.c
@@ -325,11 +325,11 @@ static int ams369fg06_power_on(struct ams369fg06 *lcd)
if (!pd->reset) {
dev_err(lcd->dev, "reset is NULL.\n");
return -EINVAL;
- } else {
- pd->reset(lcd->ld);
- msleep(pd->reset_delay);
}
+ pd->reset(lcd->ld);
+ msleep(pd->reset_delay);
+
ret = ams369fg06_ldi_init(lcd);
if (ret) {
dev_err(lcd->dev, "failed to initialize ldi.\n");
diff --git a/drivers/video/backlight/as3711_bl.c b/drivers/video/backlight/as3711_bl.c
index bb1fc45b7549..734a9158946b 100644
--- a/drivers/video/backlight/as3711_bl.c
+++ b/drivers/video/backlight/as3711_bl.c
@@ -467,7 +467,6 @@ static int as3711_backlight_probe(struct platform_device *pdev)
static struct platform_driver as3711_backlight_driver = {
.driver = {
.name = "as3711-backlight",
- .owner = THIS_MODULE,
},
.probe = as3711_backlight_probe,
};
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index 51d18d637e2b..d7c37a8ccd1f 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -143,6 +143,7 @@ static void lcdtg_i2c_send_byte(struct corgi_lcd *lcd,
uint8_t base, uint8_t data)
{
int i;
+
for (i = 0; i < 8; i++) {
if (data & 0x80)
lcdtg_i2c_send_bit(lcd, base | POWER0_COM_DOUT);
diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
index f3fed9ef745f..3e3880fc8c8e 100644
--- a/drivers/video/backlight/cr_bllcd.c
+++ b/drivers/video/backlight/cr_bllcd.c
@@ -235,6 +235,7 @@ static int cr_backlight_probe(struct platform_device *pdev)
static int cr_backlight_remove(struct platform_device *pdev)
{
struct cr_panel *crp = platform_get_drvdata(pdev);
+
crp->cr_backlight_device->props.power = FB_BLANK_POWERDOWN;
crp->cr_backlight_device->props.brightness = 0;
crp->cr_backlight_device->props.max_brightness = 0;
diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
index 12c5d840c590..f793738f06fb 100644
--- a/drivers/video/backlight/da903x_bl.c
+++ b/drivers/video/backlight/da903x_bl.c
@@ -162,7 +162,6 @@ static int da903x_backlight_probe(struct platform_device *pdev)
static struct platform_driver da903x_backlight_driver = {
.driver = {
.name = "da903x-backlight",
- .owner = THIS_MODULE,
},
.probe = da903x_backlight_probe,
};
diff --git a/drivers/video/backlight/da9052_bl.c b/drivers/video/backlight/da9052_bl.c
index 20d55becaa74..d4bd74bd5070 100644
--- a/drivers/video/backlight/da9052_bl.c
+++ b/drivers/video/backlight/da9052_bl.c
@@ -173,7 +173,6 @@ static struct platform_driver da9052_wled_driver = {
.id_table = da9052_wled_ids,
.driver = {
.name = "da9052-wled",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/video/backlight/ep93xx_bl.c b/drivers/video/backlight/ep93xx_bl.c
index 0d1f633c6480..0067931821c6 100644
--- a/drivers/video/backlight/ep93xx_bl.c
+++ b/drivers/video/backlight/ep93xx_bl.c
@@ -128,7 +128,6 @@ static SIMPLE_DEV_PM_OPS(ep93xxbl_pm_ops, ep93xxbl_suspend, ep93xxbl_resume);
static struct platform_driver ep93xxbl_driver = {
.driver = {
.name = "ep93xx-bl",
- .owner = THIS_MODULE,
.pm = &ep93xxbl_pm_ops,
},
.probe = ep93xxbl_probe,
diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
index 5d8d65200db7..67dfb939a514 100644
--- a/drivers/video/backlight/generic_bl.c
+++ b/drivers/video/backlight/generic_bl.c
@@ -52,24 +52,6 @@ static int genericbl_get_intensity(struct backlight_device *bd)
return genericbl_intensity;
}
-/*
- * Called when the battery is low to limit the backlight intensity.
- * If limit==0 clear any limit, otherwise limit the intensity
- */
-void genericbl_limit_intensity(int limit)
-{
- struct backlight_device *bd = generic_backlight_device;
-
- mutex_lock(&bd->ops_lock);
- if (limit)
- bd->props.state |= GENERICBL_BATTLOW;
- else
- bd->props.state &= ~GENERICBL_BATTLOW;
- backlight_update_status(generic_backlight_device);
- mutex_unlock(&bd->ops_lock);
-}
-EXPORT_SYMBOL(genericbl_limit_intensity);
-
static const struct backlight_ops genericbl_ops = {
.options = BL_CORE_SUSPENDRESUME,
.get_brightness = genericbl_get_intensity,
diff --git a/drivers/video/backlight/gpio_backlight.c b/drivers/video/backlight/gpio_backlight.c
index aaead04a2d54..439feb2389a8 100644
--- a/drivers/video/backlight/gpio_backlight.c
+++ b/drivers/video/backlight/gpio_backlight.c
@@ -151,7 +151,6 @@ static struct of_device_id gpio_backlight_of_match[] = {
static struct platform_driver gpio_backlight_driver = {
.driver = {
.name = "gpio-backlight",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(gpio_backlight_of_match),
},
.probe = gpio_backlight_probe,
diff --git a/drivers/video/backlight/ili922x.c b/drivers/video/backlight/ili922x.c
index ea67fe199e34..e7f0890cc211 100644
--- a/drivers/video/backlight/ili922x.c
+++ b/drivers/video/backlight/ili922x.c
@@ -495,17 +495,18 @@ static int ili922x_probe(struct spi_device *spi)
"no LCD found: Chip ID 0x%x, ret %d\n",
reg, ret);
return -ENODEV;
- } else {
- dev_info(&spi->dev, "ILI%x found, SPI freq %d, mode %d\n",
- reg, spi->max_speed_hz, spi->mode);
}
+ dev_info(&spi->dev, "ILI%x found, SPI freq %d, mode %d\n",
+ reg, spi->max_speed_hz, spi->mode);
+
ret = ili922x_read_status(spi, &reg);
if (ret) {
dev_err(&spi->dev, "reading RS failed...\n");
return ret;
- } else
- dev_dbg(&spi->dev, "status: 0x%x\n", reg);
+ }
+
+ dev_dbg(&spi->dev, "status: 0x%x\n", reg);
ili922x_display_init(spi);
diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
index 6ce96b4a8796..7e6ff5346892 100644
--- a/drivers/video/backlight/jornada720_bl.c
+++ b/drivers/video/backlight/jornada720_bl.c
@@ -41,11 +41,11 @@ static int jornada_bl_get_brightness(struct backlight_device *bd)
dev_err(&bd->dev, "get brightness timeout\n");
jornada_ssp_end();
return -ETIMEDOUT;
- } else {
- /* exchange txdummy for value */
- ret = jornada_ssp_byte(TXDUMMY);
}
+ /* exchange txdummy for value */
+ ret = jornada_ssp_byte(TXDUMMY);
+
jornada_ssp_end();
return BL_MAX_BRIGHT - ret;
diff --git a/drivers/video/backlight/jornada720_lcd.c b/drivers/video/backlight/jornada720_lcd.c
index 228bc319de19..dfa0fa0d5c78 100644
--- a/drivers/video/backlight/jornada720_lcd.c
+++ b/drivers/video/backlight/jornada720_lcd.c
@@ -27,11 +27,7 @@
static int jornada_lcd_get_power(struct lcd_device *ld)
{
- /* LDD2 in PPC = LCD POWER */
- if (PPSR & PPC_LDD2)
- return FB_BLANK_UNBLANK; /* PW ON */
- else
- return FB_BLANK_POWERDOWN; /* PW OFF */
+ return PPSR & PPC_LDD2 ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
}
static int jornada_lcd_get_contrast(struct lcd_device *ld)
diff --git a/drivers/video/backlight/ld9040.c b/drivers/video/backlight/ld9040.c
index ccb44e8e4927..f71eaf10c4eb 100644
--- a/drivers/video/backlight/ld9040.c
+++ b/drivers/video/backlight/ld9040.c
@@ -566,11 +566,11 @@ static int ld9040_power_on(struct ld9040 *lcd)
if (!pd->reset) {
dev_err(lcd->dev, "reset is NULL.\n");
return -EINVAL;
- } else {
- pd->reset(lcd->ld);
- msleep(pd->reset_delay);
}
+ pd->reset(lcd->ld);
+ msleep(pd->reset_delay);
+
ret = ld9040_ldi_init(lcd);
if (ret) {
dev_err(lcd->dev, "failed to initialize ldi.\n");
diff --git a/drivers/video/backlight/lm3533_bl.c b/drivers/video/backlight/lm3533_bl.c
index cff1fbe89a1b..0e2337f367b6 100644
--- a/drivers/video/backlight/lm3533_bl.c
+++ b/drivers/video/backlight/lm3533_bl.c
@@ -397,7 +397,6 @@ static void lm3533_bl_shutdown(struct platform_device *pdev)
static struct platform_driver lm3533_bl_driver = {
.driver = {
.name = "lm3533-backlight",
- .owner = THIS_MODULE,
.pm = &lm3533_bl_pm_ops,
},
.probe = lm3533_bl_probe,
diff --git a/drivers/video/backlight/lm3639_bl.c b/drivers/video/backlight/lm3639_bl.c
index 5f36808d214f..cd50df5807ea 100644
--- a/drivers/video/backlight/lm3639_bl.c
+++ b/drivers/video/backlight/lm3639_bl.c
@@ -254,7 +254,6 @@ static void lm3639_torch_brightness_set(struct led_classdev *cdev,
return;
out:
dev_err(pchip->dev, "i2c failed to access register\n");
- return;
}
/* flash */
@@ -293,7 +292,6 @@ static void lm3639_flash_brightness_set(struct led_classdev *cdev,
return;
out:
dev_err(pchip->dev, "i2c failed to access register\n");
- return;
}
static const struct regmap_config lm3639_regmap = {
diff --git a/drivers/video/backlight/lms501kf03.c b/drivers/video/backlight/lms501kf03.c
index 77258b7b04be..7e3810308c3e 100644
--- a/drivers/video/backlight/lms501kf03.c
+++ b/drivers/video/backlight/lms501kf03.c
@@ -232,19 +232,19 @@ static int lms501kf03_power_on(struct lms501kf03 *lcd)
if (!pd->power_on) {
dev_err(lcd->dev, "power_on is NULL.\n");
return -EINVAL;
- } else {
- pd->power_on(lcd->ld, 1);
- msleep(pd->power_on_delay);
}
+ pd->power_on(lcd->ld, 1);
+ msleep(pd->power_on_delay);
+
if (!pd->reset) {
dev_err(lcd->dev, "reset is NULL.\n");
return -EINVAL;
- } else {
- pd->reset(lcd->ld);
- msleep(pd->reset_delay);
}
+ pd->reset(lcd->ld);
+ msleep(pd->reset_delay);
+
ret = lms501kf03_ldi_init(lcd);
if (ret) {
dev_err(lcd->dev, "failed to initialize ldi.\n");
diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c
index dcdd5443efcf..25fb8e3d75b1 100644
--- a/drivers/video/backlight/lp855x_bl.c
+++ b/drivers/video/backlight/lp855x_bl.c
@@ -268,6 +268,7 @@ static int lp855x_bl_update_status(struct backlight_device *bl)
} else if (lp->mode == REGISTER_BASED) {
u8 val = bl->props.brightness;
+
lp855x_write_byte(lp, lp->cfg->reg_brightness, val);
}
@@ -308,6 +309,7 @@ static ssize_t lp855x_get_chip_id(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct lp855x *lp = dev_get_drvdata(dev);
+
return scnprintf(buf, PAGE_SIZE, "%s\n", lp->chipname);
}
diff --git a/drivers/video/backlight/lp8788_bl.c b/drivers/video/backlight/lp8788_bl.c
index d6c4f6a2d43e..e418d5b1aa55 100644
--- a/drivers/video/backlight/lp8788_bl.c
+++ b/drivers/video/backlight/lp8788_bl.c
@@ -315,7 +315,6 @@ static struct platform_driver lp8788_bl_driver = {
.remove = lp8788_backlight_remove,
.driver = {
.name = LP8788_DEV_BACKLIGHT,
- .owner = THIS_MODULE,
},
};
module_platform_driver(lp8788_bl_driver);
diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c
index 66fa08c920d2..7b738d60ecc2 100644
--- a/drivers/video/backlight/max8925_bl.c
+++ b/drivers/video/backlight/max8925_bl.c
@@ -197,7 +197,6 @@ static int max8925_backlight_probe(struct platform_device *pdev)
static struct platform_driver max8925_backlight_driver = {
.driver = {
.name = "max8925-backlight",
- .owner = THIS_MODULE,
},
.probe = max8925_backlight_probe,
};
diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
index a0dcd88ac74f..546d94df21d5 100644
--- a/drivers/video/backlight/omap1_bl.c
+++ b/drivers/video/backlight/omap1_bl.c
@@ -120,6 +120,7 @@ static int omapbl_update_status(struct backlight_device *dev)
static int omapbl_get_intensity(struct backlight_device *dev)
{
struct omap_backlight *bl = bl_get_data(dev);
+
return bl->current_intensity;
}
diff --git a/drivers/video/backlight/ot200_bl.c b/drivers/video/backlight/ot200_bl.c
index f5a5202dd79d..3acdb9f646ed 100644
--- a/drivers/video/backlight/ot200_bl.c
+++ b/drivers/video/backlight/ot200_bl.c
@@ -152,7 +152,6 @@ static int ot200_backlight_remove(struct platform_device *pdev)
static struct platform_driver ot200_backlight_driver = {
.driver = {
.name = "ot200-backlight",
- .owner = THIS_MODULE,
},
.probe = ot200_backlight_probe,
.remove = ot200_backlight_remove,
diff --git a/drivers/video/backlight/pandora_bl.c b/drivers/video/backlight/pandora_bl.c
index 2e3f82063c03..5d8bb8b20183 100644
--- a/drivers/video/backlight/pandora_bl.c
+++ b/drivers/video/backlight/pandora_bl.c
@@ -142,7 +142,6 @@ static int pandora_backlight_probe(struct platform_device *pdev)
static struct platform_driver pandora_backlight_driver = {
.driver = {
.name = "pandora-backlight",
- .owner = THIS_MODULE,
},
.probe = pandora_backlight_probe,
};
diff --git a/drivers/video/backlight/pcf50633-backlight.c b/drivers/video/backlight/pcf50633-backlight.c
index b95d3b0aaffe..85bd573b6d15 100644
--- a/drivers/video/backlight/pcf50633-backlight.c
+++ b/drivers/video/backlight/pcf50633-backlight.c
@@ -90,6 +90,7 @@ static int pcf50633_bl_update_status(struct backlight_device *bl)
static int pcf50633_bl_get_brightness(struct backlight_device *bl)
{
struct pcf50633_bl *pcf_bl = bl_get_data(bl);
+
return pcf_bl->brightness;
}
diff --git a/drivers/video/backlight/platform_lcd.c b/drivers/video/backlight/platform_lcd.c
index c3d2e209fc8f..872a3bf21faf 100644
--- a/drivers/video/backlight/platform_lcd.c
+++ b/drivers/video/backlight/platform_lcd.c
@@ -148,7 +148,6 @@ MODULE_DEVICE_TABLE(of, platform_lcd_of_match);
static struct platform_driver platform_lcd_driver = {
.driver = {
.name = "platform-lcd",
- .owner = THIS_MODULE,
.pm = &platform_lcd_pm_ops,
.of_match_table = of_match_ptr(platform_lcd_of_match),
},
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index b85983e97f0a..cb5ae4c08469 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -390,7 +390,6 @@ static const struct dev_pm_ops pwm_backlight_pm_ops = {
static struct platform_driver pwm_backlight_driver = {
.driver = {
.name = "pwm-backlight",
- .owner = THIS_MODULE,
.pm = &pwm_backlight_pm_ops,
.of_match_table = of_match_ptr(pwm_backlight_of_match),
},
diff --git a/drivers/video/backlight/s6e63m0.c b/drivers/video/backlight/s6e63m0.c
index f3a65c8940ed..28bfa127fee4 100644
--- a/drivers/video/backlight/s6e63m0.c
+++ b/drivers/video/backlight/s6e63m0.c
@@ -507,19 +507,19 @@ static int s6e63m0_power_on(struct s6e63m0 *lcd)
if (!pd->power_on) {
dev_err(lcd->dev, "power_on is NULL.\n");
return -EINVAL;
- } else {
- pd->power_on(lcd->ld, 1);
- msleep(pd->power_on_delay);
}
+ pd->power_on(lcd->ld, 1);
+ msleep(pd->power_on_delay);
+
if (!pd->reset) {
dev_err(lcd->dev, "reset is NULL.\n");
return -EINVAL;
- } else {
- pd->reset(lcd->ld);
- msleep(pd->reset_delay);
}
+ pd->reset(lcd->ld);
+ msleep(pd->reset_delay);
+
ret = s6e63m0_ldi_init(lcd);
if (ret) {
dev_err(lcd->dev, "failed to initialize ldi.\n");
diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c
index 908016fc5829..30afce33ef2a 100644
--- a/drivers/video/backlight/tdo24m.c
+++ b/drivers/video/backlight/tdo24m.c
@@ -300,12 +300,14 @@ static int tdo24m_power(struct tdo24m *lcd, int power)
static int tdo24m_set_power(struct lcd_device *ld, int power)
{
struct tdo24m *lcd = lcd_get_data(ld);
+
return tdo24m_power(lcd, power);
}
static int tdo24m_get_power(struct lcd_device *ld)
{
struct tdo24m *lcd = lcd_get_data(ld);
+
return lcd->power;
}
diff --git a/drivers/video/backlight/tps65217_bl.c b/drivers/video/backlight/tps65217_bl.c
index 2e04d93aa0ef..61d72bffd402 100644
--- a/drivers/video/backlight/tps65217_bl.c
+++ b/drivers/video/backlight/tps65217_bl.c
@@ -323,7 +323,6 @@ static int tps65217_bl_probe(struct platform_device *pdev)
static struct platform_driver tps65217_bl_driver = {
.probe = tps65217_bl_probe,
.driver = {
- .owner = THIS_MODULE,
.name = "tps65217-bl",
},
};
diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
index 8b9455e93069..6eab0d6c262a 100644
--- a/drivers/video/backlight/wm831x_bl.c
+++ b/drivers/video/backlight/wm831x_bl.c
@@ -111,6 +111,7 @@ static int wm831x_backlight_update_status(struct backlight_device *bl)
static int wm831x_backlight_get_brightness(struct backlight_device *bl)
{
struct wm831x_backlight_data *data = bl_get_data(bl);
+
return data->current_brightness;
}
@@ -217,7 +218,6 @@ static int wm831x_backlight_probe(struct platform_device *pdev)
static struct platform_driver wm831x_backlight_driver = {
.driver = {
.name = "wm831x-backlight",
- .owner = THIS_MODULE,
},
.probe = wm831x_backlight_probe,
};
diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c
index 901014bbc821..09dc44736c1a 100644
--- a/drivers/video/fbdev/xen-fbfront.c
+++ b/drivers/video/fbdev/xen-fbfront.c
@@ -684,12 +684,13 @@ static const struct xenbus_device_id xenfb_ids[] = {
{ "" }
};
-static DEFINE_XENBUS_DRIVER(xenfb, ,
+static struct xenbus_driver xenfb_driver = {
+ .ids = xenfb_ids,
.probe = xenfb_probe,
.remove = xenfb_remove,
.resume = xenfb_resume,
.otherend_changed = xenfb_backend_changed,
-);
+};
static int __init xenfb_init(void)
{
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 1d1330a78af3..e3d5bf0a5021 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -95,6 +95,16 @@ config GPIO_WATCHDOG
If you say yes here you get support for watchdog device
controlled through GPIO-line.
+config MENF21BMC_WATCHDOG
+ tristate "MEN 14F021P00 BMC Watchdog"
+ depends on MFD_MENF21BMC
+ select WATCHDOG_CORE
+ help
+ Say Y here to include support for the MEN 14F021P00 BMC Watchdog.
+
+ This driver can also be built as a module. If so, the module
+ will be called menf21bmc_wdt.
+
config WM831X_WATCHDOG
tristate "WM831x watchdog"
depends on MFD_WM831X
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 468c3204c3b1..de1701470c14 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -178,3 +178,4 @@ obj-$(CONFIG_WM831X_WATCHDOG) += wm831x_wdt.o
obj-$(CONFIG_WM8350_WATCHDOG) += wm8350_wdt.o
obj-$(CONFIG_MAX63XX_WATCHDOG) += max63xx_wdt.o
obj-$(CONFIG_SOFT_WATCHDOG) += softdog.o
+obj-$(CONFIG_MENF21BMC_WATCHDOG) += menf21bmc_wdt.o
diff --git a/drivers/watchdog/alim7101_wdt.c b/drivers/watchdog/alim7101_wdt.c
index 996b2f7d330e..665e0e7dfe1e 100644
--- a/drivers/watchdog/alim7101_wdt.c
+++ b/drivers/watchdog/alim7101_wdt.c
@@ -301,6 +301,28 @@ static struct miscdevice wdt_miscdev = {
.fops = &wdt_fops,
};
+static int wdt_restart_handle(struct notifier_block *this, unsigned long mode,
+ void *cmd)
+{
+ /*
+ * Cobalt devices have no way of rebooting themselves other
+ * than getting the watchdog to pull reset, so we restart the
+ * watchdog on reboot with no heartbeat.
+ */
+ wdt_change(WDT_ENABLE);
+
+ /* loop until the watchdog fires */
+ while (true)
+ ;
+
+ return NOTIFY_DONE;
+}
+
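+/* Priority 128 marks this as the platform's default restart method. */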
+static struct notifier_block wdt_restart_handler = {
+ .notifier_call = wdt_restart_handle,
+ .priority = 128,
+};
+
/*
* Notifier for system down
*/
@@ -311,15 +333,6 @@ static int wdt_notify_sys(struct notifier_block *this,
if (code == SYS_DOWN || code == SYS_HALT)
wdt_turnoff();
- if (code == SYS_RESTART) {
- /*
- * Cobalt devices have no way of rebooting themselves other
- * than getting the watchdog to pull reset, so we restart the
- * watchdog on reboot with no heartbeat
- */
- wdt_change(WDT_ENABLE);
- pr_info("Watchdog timer is now enabled with no heartbeat - should reboot in ~1 second\n");
- }
return NOTIFY_DONE;
}
@@ -338,6 +351,7 @@ static void __exit alim7101_wdt_unload(void)
/* Deregister */
misc_deregister(&wdt_miscdev);
unregister_reboot_notifier(&wdt_notifier);
+ unregister_restart_handler(&wdt_restart_handler);
pci_dev_put(alim7101_pmu);
}
@@ -390,11 +404,17 @@ static int __init alim7101_wdt_init(void)
goto err_out;
}
+ rc = register_restart_handler(&wdt_restart_handler);
+ if (rc) {
+ pr_err("cannot register restart handler (err=%d)\n", rc);
+ goto err_out_reboot;
+ }
+
rc = misc_register(&wdt_miscdev);
if (rc) {
pr_err("cannot register miscdev on minor=%d (err=%d)\n",
wdt_miscdev.minor, rc);
- goto err_out_reboot;
+ goto err_out_restart;
}
if (nowayout)
@@ -404,6 +424,8 @@ static int __init alim7101_wdt_init(void)
timeout, nowayout);
return 0;
+err_out_restart:
+ unregister_restart_handler(&wdt_restart_handler);
err_out_reboot:
unregister_reboot_notifier(&wdt_notifier);
err_out:
diff --git a/drivers/watchdog/menf21bmc_wdt.c b/drivers/watchdog/menf21bmc_wdt.c
new file mode 100644
index 000000000000..2042874d5ce3
--- /dev/null
+++ b/drivers/watchdog/menf21bmc_wdt.c
@@ -0,0 +1,203 @@
+/*
+ * MEN 14F021P00 Board Management Controller (BMC) Watchdog Driver.
+ *
+ * Copyright (C) 2014 MEN Mikro Elektronik Nuernberg GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/watchdog.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+
+#define DEVNAME "menf21bmc_wdt"
+
+#define BMC_CMD_WD_ON 0x11
+#define BMC_CMD_WD_OFF 0x12
+#define BMC_CMD_WD_TRIG 0x13
+#define BMC_CMD_WD_TIME 0x14
+#define BMC_CMD_WD_STATE 0x17
+#define BMC_WD_OFF_VAL 0x69
+#define BMC_CMD_RST_RSN 0x92
+
+#define BMC_WD_TIMEOUT_MIN 1 /* in sec */
+#define BMC_WD_TIMEOUT_MAX 6553 /* in sec */
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+struct menf21bmc_wdt {
+ struct watchdog_device wdt;
+ struct i2c_client *i2c_client;
+};
+
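+/* Translate the BMC reset reason code into watchdog bootstatus flags. */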
+static int menf21bmc_wdt_set_bootstatus(struct menf21bmc_wdt *data)
+{
+ int rst_rsn;
+
+ rst_rsn = i2c_smbus_read_byte_data(data->i2c_client, BMC_CMD_RST_RSN);
+ if (rst_rsn < 0)
+ return rst_rsn;
+
+ if (rst_rsn == 0x02)
+ data->wdt.bootstatus |= WDIOF_CARDRESET;
+ else if (rst_rsn == 0x05)
+ data->wdt.bootstatus |= WDIOF_EXTERN1;
+ else if (rst_rsn == 0x06)
+ data->wdt.bootstatus |= WDIOF_EXTERN2;
+ else if (rst_rsn == 0x0A)
+ data->wdt.bootstatus |= WDIOF_POWERUNDER;
+
+ return 0;
+}
+
+static int menf21bmc_wdt_start(struct watchdog_device *wdt)
+{
+ struct menf21bmc_wdt *drv_data = watchdog_get_drvdata(wdt);
+
+ return i2c_smbus_write_byte(drv_data->i2c_client, BMC_CMD_WD_ON);
+}
+
+static int menf21bmc_wdt_stop(struct watchdog_device *wdt)
+{
+ struct menf21bmc_wdt *drv_data = watchdog_get_drvdata(wdt);
+
+ return i2c_smbus_write_byte_data(drv_data->i2c_client,
+ BMC_CMD_WD_OFF, BMC_WD_OFF_VAL);
+}
+
+static int
+menf21bmc_wdt_settimeout(struct watchdog_device *wdt, unsigned int timeout)
+{
+ int ret;
+ struct menf21bmc_wdt *drv_data = watchdog_get_drvdata(wdt);
+
+ /*
+ * The BMC watchdog has a resolution of 100 ms, while the
+ * watchdog API specifies the timeout in seconds, so the
+ * value has to be multiplied by 10.
+ */
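+ /* Example: a 30 second timeout is written to the BMC as 300. */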
+ ret = i2c_smbus_write_word_data(drv_data->i2c_client,
+ BMC_CMD_WD_TIME, timeout * 10);
+ if (ret < 0)
+ return ret;
+
+ wdt->timeout = timeout;
+
+ return 0;
+}
+
+static int menf21bmc_wdt_ping(struct watchdog_device *wdt)
+{
+ struct menf21bmc_wdt *drv_data = watchdog_get_drvdata(wdt);
+
+ return i2c_smbus_write_byte(drv_data->i2c_client, BMC_CMD_WD_TRIG);
+}
+
+static const struct watchdog_info menf21bmc_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+ .identity = DEVNAME,
+};
+
+static const struct watchdog_ops menf21bmc_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = menf21bmc_wdt_start,
+ .stop = menf21bmc_wdt_stop,
+ .ping = menf21bmc_wdt_ping,
+ .set_timeout = menf21bmc_wdt_settimeout,
+};
+
+static int menf21bmc_wdt_probe(struct platform_device *pdev)
+{
+ int ret, bmc_timeout;
+ struct menf21bmc_wdt *drv_data;
+ struct i2c_client *i2c_client = to_i2c_client(pdev->dev.parent);
+
+ drv_data = devm_kzalloc(&pdev->dev,
+ sizeof(struct menf21bmc_wdt), GFP_KERNEL);
+ if (!drv_data)
+ return -ENOMEM;
+
+ drv_data->wdt.ops = &menf21bmc_wdt_ops;
+ drv_data->wdt.info = &menf21bmc_wdt_info;
+ drv_data->wdt.min_timeout = BMC_WD_TIMEOUT_MIN;
+ drv_data->wdt.max_timeout = BMC_WD_TIMEOUT_MAX;
+ drv_data->i2c_client = i2c_client;
+
+ /*
+ * Get the current watchdog timeout value from the BMC, since the
+ * BMC retains the previously configured value across a system restart.
+ */
+ bmc_timeout = i2c_smbus_read_word_data(drv_data->i2c_client,
+ BMC_CMD_WD_TIME);
+ if (bmc_timeout < 0) {
+ dev_err(&pdev->dev, "failed to get current WDT timeout\n");
+ return bmc_timeout;
+ }
+
+ watchdog_init_timeout(&drv_data->wdt, bmc_timeout / 10, &pdev->dev);
+ watchdog_set_nowayout(&drv_data->wdt, nowayout);
+ watchdog_set_drvdata(&drv_data->wdt, drv_data);
+ platform_set_drvdata(pdev, drv_data);
+
+ ret = menf21bmc_wdt_set_bootstatus(drv_data);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to set Watchdog bootstatus\n");
+ return ret;
+ }
+
+ ret = watchdog_register_device(&drv_data->wdt);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register Watchdog device\n");
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "MEN 14F021P00 BMC Watchdog device enabled\n");
+
+ return 0;
+}
+
+static int menf21bmc_wdt_remove(struct platform_device *pdev)
+{
+ struct menf21bmc_wdt *drv_data = platform_get_drvdata(pdev);
+
+ dev_warn(&pdev->dev,
+ "Unregister MEN 14F021P00 BMC Watchdog device, board may reset\n");
+
+ watchdog_unregister_device(&drv_data->wdt);
+
+ return 0;
+}
+
+static void menf21bmc_wdt_shutdown(struct platform_device *pdev)
+{
+ struct menf21bmc_wdt *drv_data = platform_get_drvdata(pdev);
+
+ i2c_smbus_write_word_data(drv_data->i2c_client,
+ BMC_CMD_WD_OFF, BMC_WD_OFF_VAL);
+}
+
+static struct platform_driver menf21bmc_wdt = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DEVNAME,
+ },
+ .probe = menf21bmc_wdt_probe,
+ .remove = menf21bmc_wdt_remove,
+ .shutdown = menf21bmc_wdt_shutdown,
+};
+
+module_platform_driver(menf21bmc_wdt);
+
+MODULE_DESCRIPTION("MEN 14F021P00 BMC Watchdog driver");
+MODULE_AUTHOR("Andreas Werner <andreas.werner@men.de>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:menf21bmc_wdt");
diff --git a/drivers/watchdog/moxart_wdt.c b/drivers/watchdog/moxart_wdt.c
index 4aa3a8a876fe..a64405b82596 100644
--- a/drivers/watchdog/moxart_wdt.c
+++ b/drivers/watchdog/moxart_wdt.c
@@ -15,12 +15,12 @@
#include <linux/module.h>
#include <linux/err.h>
#include <linux/kernel.h>
+#include <linux/notifier.h>
#include <linux/platform_device.h>
+#include <linux/reboot.h>
#include <linux/watchdog.h>
#include <linux/moduleparam.h>
-#include <asm/system_misc.h>
-
#define REG_COUNT 0x4
#define REG_MODE 0x8
#define REG_ENABLE 0xC
@@ -29,17 +29,22 @@ struct moxart_wdt_dev {
struct watchdog_device dev;
void __iomem *base;
unsigned int clock_frequency;
+ struct notifier_block restart_handler;
};
-static struct moxart_wdt_dev *moxart_restart_ctx;
-
static int heartbeat;
-static void moxart_wdt_restart(enum reboot_mode reboot_mode, const char *cmd)
+static int moxart_restart_handle(struct notifier_block *this,
+ unsigned long mode, void *cmd)
{
- writel(1, moxart_restart_ctx->base + REG_COUNT);
- writel(0x5ab9, moxart_restart_ctx->base + REG_MODE);
- writel(0x03, moxart_restart_ctx->base + REG_ENABLE);
+ struct moxart_wdt_dev *moxart_wdt = container_of(this,
+ struct moxart_wdt_dev,
+ restart_handler);
+ writel(1, moxart_wdt->base + REG_COUNT);
+ writel(0x5ab9, moxart_wdt->base + REG_MODE);
+ writel(0x03, moxart_wdt->base + REG_ENABLE);
+
+ return NOTIFY_DONE;
}
static int moxart_wdt_stop(struct watchdog_device *wdt_dev)
@@ -136,8 +141,12 @@ static int moxart_wdt_probe(struct platform_device *pdev)
if (err)
return err;
- moxart_restart_ctx = moxart_wdt;
- arm_pm_restart = moxart_wdt_restart;
+ moxart_wdt->restart_handler.notifier_call = moxart_restart_handle;
+ moxart_wdt->restart_handler.priority = 128;
+ err = register_restart_handler(&moxart_wdt->restart_handler);
+ if (err)
+ dev_err(dev, "cannot register restart notifier (err=%d)\n",
+ err);
dev_dbg(dev, "Watchdog enabled (heartbeat=%d sec, nowayout=%d)\n",
moxart_wdt->dev.timeout, nowayout);
@@ -149,9 +158,8 @@ static int moxart_wdt_remove(struct platform_device *pdev)
{
struct moxart_wdt_dev *moxart_wdt = platform_get_drvdata(pdev);
- arm_pm_restart = NULL;
+ unregister_restart_handler(&moxart_wdt->restart_handler);
moxart_wdt_stop(&moxart_wdt->dev);
- watchdog_unregister_device(&moxart_wdt->dev);
return 0;
}
diff --git a/drivers/watchdog/sunxi_wdt.c b/drivers/watchdog/sunxi_wdt.c
index 60deb9d304c0..480bb557f353 100644
--- a/drivers/watchdog/sunxi_wdt.c
+++ b/drivers/watchdog/sunxi_wdt.c
@@ -21,14 +21,13 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/types.h>
#include <linux/watchdog.h>
-#include <asm/system_misc.h>
-
#define WDT_MAX_TIMEOUT 16
#define WDT_MIN_TIMEOUT 1
#define WDT_MODE_TIMEOUT(n) ((n) << 3)
@@ -50,6 +49,7 @@ static unsigned int timeout = WDT_MAX_TIMEOUT;
struct sunxi_wdt_dev {
struct watchdog_device wdt_dev;
void __iomem *wdt_base;
+ struct notifier_block restart_handler;
};
/*
@@ -74,24 +74,29 @@ static const int wdt_timeout_map[] = {
[16] = 0xB, /* 16s */
};
-static void __iomem *reboot_wdt_base;
-static void sun4i_wdt_restart(enum reboot_mode mode, const char *cmd)
+static int sunxi_restart_handle(struct notifier_block *this, unsigned long mode,
+ void *cmd)
{
+ struct sunxi_wdt_dev *sunxi_wdt = container_of(this,
+ struct sunxi_wdt_dev,
+ restart_handler);
+ void __iomem *wdt_base = sunxi_wdt->wdt_base;
+
/* Enable timer and set reset bit in the watchdog */
- writel(WDT_MODE_EN | WDT_MODE_RST_EN, reboot_wdt_base + WDT_MODE);
+ writel(WDT_MODE_EN | WDT_MODE_RST_EN, wdt_base + WDT_MODE);
/*
* Restart the watchdog. The default (and lowest) interval
* value for the watchdog is 0.5s.
*/
- writel(WDT_CTRL_RELOAD, reboot_wdt_base + WDT_CTRL);
+ writel(WDT_CTRL_RELOAD, wdt_base + WDT_CTRL);
while (1) {
mdelay(5);
- writel(WDT_MODE_EN | WDT_MODE_RST_EN,
- reboot_wdt_base + WDT_MODE);
+ writel(WDT_MODE_EN | WDT_MODE_RST_EN, wdt_base + WDT_MODE);
}
+ return NOTIFY_DONE;
}
static int sunxi_wdt_ping(struct watchdog_device *wdt_dev)
@@ -205,8 +210,12 @@ static int sunxi_wdt_probe(struct platform_device *pdev)
if (unlikely(err))
return err;
- reboot_wdt_base = sunxi_wdt->wdt_base;
- arm_pm_restart = sun4i_wdt_restart;
+ sunxi_wdt->restart_handler.notifier_call = sunxi_restart_handle;
+ sunxi_wdt->restart_handler.priority = 128;
+ err = register_restart_handler(&sunxi_wdt->restart_handler);
+ if (err)
+ dev_err(&pdev->dev,
+ "cannot register restart handler (err=%d)\n", err);
dev_info(&pdev->dev, "Watchdog enabled (timeout=%d sec, nowayout=%d)",
sunxi_wdt->wdt_dev.timeout, nowayout);
@@ -218,7 +227,7 @@ static int sunxi_wdt_remove(struct platform_device *pdev)
{
struct sunxi_wdt_dev *sunxi_wdt = platform_get_drvdata(pdev);
- arm_pm_restart = NULL;
+ unregister_restart_handler(&sunxi_wdt->restart_handler);
watchdog_unregister_device(&sunxi_wdt->wdt_dev);
watchdog_set_drvdata(&sunxi_wdt->wdt_dev, NULL);
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 8bc01838daf9..b812462083fc 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -172,6 +172,15 @@ config XEN_PCIDEV_BACKEND
If in doubt, say m.
+config XEN_SCSI_BACKEND
+ tristate "XEN SCSI backend driver"
+ depends on XEN && XEN_BACKEND && TARGET_CORE
+ help
+ The SCSI backend driver allows the kernel to export its SCSI devices
+ to other guests via a high-performance shared-memory interface.
+ It is only needed on systems running as Xen driver domains (e.g. Dom0)
+ whose guests need generic access to SCSI devices.
+
config XEN_PRIVCMD
tristate
depends on XEN
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 84044b554e33..2140398a2a8c 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_XEN_ACPI_HOTPLUG_MEMORY) += xen-acpi-memhotplug.o
obj-$(CONFIG_XEN_ACPI_HOTPLUG_CPU) += xen-acpi-cpuhotplug.o
obj-$(CONFIG_XEN_ACPI_PROCESSOR) += xen-acpi-processor.o
obj-$(CONFIG_XEN_EFI) += efi.o
+obj-$(CONFIG_XEN_SCSI_BACKEND) += xen-scsiback.o
xen-evtchn-y := evtchn.o
xen-gntdev-y := gntdev.o
xen-gntalloc-y := gntalloc.o
diff --git a/drivers/xen/efi.c b/drivers/xen/efi.c
index 31f618a49661..1f850c97482f 100644
--- a/drivers/xen/efi.c
+++ b/drivers/xen/efi.c
@@ -27,6 +27,8 @@
#include <xen/interface/platform.h>
#include <xen/xen.h>
+#include <asm/page.h>
+
#include <asm/xen/hypercall.h>
#define INIT_EFI_OP(name) \
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 5b5c5ff273fd..b4bca2d4a7e5 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -900,8 +900,8 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
return irq;
}
-static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
- unsigned int remote_port)
+int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
+ unsigned int remote_port)
{
struct evtchn_bind_interdomain bind_interdomain;
int err;
@@ -914,6 +914,7 @@ static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
return err ? : bind_evtchn_to_irq(bind_interdomain.local_port);
}
+EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq);
static int find_virq(unsigned int virq, unsigned int cpu)
{
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index c254ae036f18..7786291ba229 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -592,7 +592,7 @@ static int grow_gnttab_list(unsigned int more_frames)
return 0;
grow_nomem:
- for ( ; i >= nr_glist_frames; i--)
+ while (i-- > nr_glist_frames)
free_page((unsigned long) gnttab_list[i]);
return -ENOMEM;
}
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index c214daab4829..ad8d30c088fe 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -719,11 +719,13 @@ static const struct xenbus_device_id xen_pcibk_ids[] = {
{""},
};
-static DEFINE_XENBUS_DRIVER(xen_pcibk, DRV_NAME,
+static struct xenbus_driver xen_pcibk_driver = {
+ .name = DRV_NAME,
+ .ids = xen_pcibk_ids,
.probe = xen_pcibk_xenbus_probe,
.remove = xen_pcibk_xenbus_remove,
.otherend_changed = xen_pcibk_frontend_changed,
-);
+};
const struct xen_pcibk_backend *__read_mostly xen_pcibk_backend;
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
new file mode 100644
index 000000000000..3e32146472a5
--- /dev/null
+++ b/drivers/xen/xen-scsiback.c
@@ -0,0 +1,2126 @@
+/*
+ * Xen SCSI backend driver
+ *
+ * Copyright (c) 2008, FUJITSU Limited
+ *
+ * Based on the blkback driver code.
+ * Adaptation to the kernel target core infrastructure taken from vhost/scsi.c
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <stdarg.h>
+
+#include <linux/module.h>
+#include <linux/utsname.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/gfp.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/configfs.h>
+
+#include <generated/utsrelease.h>
+
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_configfs.h>
+#include <target/target_core_fabric_configfs.h>
+
+#include <asm/hypervisor.h>
+
+#include <xen/xen.h>
+#include <xen/balloon.h>
+#include <xen/events.h>
+#include <xen/xenbus.h>
+#include <xen/grant_table.h>
+#include <xen/page.h>
+
+#include <xen/interface/grant_table.h>
+#include <xen/interface/io/vscsiif.h>
+
+#define DPRINTK(_f, _a...) \
+ pr_debug("(file=%s, line=%d) " _f, __FILE__ , __LINE__ , ## _a)
+
+#define VSCSI_VERSION "v0.1"
+#define VSCSI_NAMELEN 32
+
+struct ids_tuple {
+ unsigned int hst; /* host */
+ unsigned int chn; /* channel */
+ unsigned int tgt; /* target */
+ unsigned int lun; /* LUN */
+};
+
+struct v2p_entry {
+ struct ids_tuple v; /* translate from */
+ struct scsiback_tpg *tpg; /* translate to */
+ unsigned int lun;
+ struct kref kref;
+ struct list_head l;
+};
+
+struct vscsibk_info {
+ struct xenbus_device *dev;
+
+ domid_t domid;
+ unsigned int irq;
+
+ struct vscsiif_back_ring ring;
+ int ring_error;
+
+ spinlock_t ring_lock;
+ atomic_t nr_unreplied_reqs;
+
+ spinlock_t v2p_lock;
+ struct list_head v2p_entry_lists;
+
+ wait_queue_head_t waiting_to_free;
+};
+
+/* theoretical maximum of grants for one request */
+#define VSCSI_MAX_GRANTS (SG_ALL + VSCSIIF_SG_TABLESIZE)
+
+/*
+ * VSCSI_GRANT_BATCH is the maximum number of grants to be processed in one
+ * call to map/unmap grants. Don't choose it too large, as there are arrays
+ * with VSCSI_GRANT_BATCH elements allocated on the stack.
+ */
+#define VSCSI_GRANT_BATCH 16
+
+struct vscsibk_pend {
+ uint16_t rqid;
+
+ uint8_t cmnd[VSCSIIF_MAX_COMMAND_SIZE];
+ uint8_t cmd_len;
+
+ uint8_t sc_data_direction;
+ uint16_t n_sg; /* real length of SG list */
+ uint16_t n_grants; /* SG pages and potentially SG list */
+ uint32_t data_len;
+ uint32_t result;
+
+ struct vscsibk_info *info;
+ struct v2p_entry *v2p;
+ struct scatterlist *sgl;
+
+ uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE];
+
+ grant_handle_t grant_handles[VSCSI_MAX_GRANTS];
+ struct page *pages[VSCSI_MAX_GRANTS];
+
+ struct se_cmd se_cmd;
+};
+
+struct scsiback_tmr {
+ atomic_t tmr_complete;
+ wait_queue_head_t tmr_wait;
+};
+
+struct scsiback_nexus {
+ /* Pointer to TCM session for I_T Nexus */
+ struct se_session *tvn_se_sess;
+};
+
+struct scsiback_tport {
+ /* SCSI protocol the tport is providing */
+ u8 tport_proto_id;
+ /* Binary World Wide unique Port Name for pvscsi Target port */
+ u64 tport_wwpn;
+ /* ASCII formatted WWPN for pvscsi Target port */
+ char tport_name[VSCSI_NAMELEN];
+ /* Returned by scsiback_make_tport() */
+ struct se_wwn tport_wwn;
+};
+
+struct scsiback_tpg {
+ /* scsiback port target portal group tag for TCM */
+ u16 tport_tpgt;
+ /* track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
+ int tv_tpg_port_count;
+ /* xen-pvscsi references to tpg_nexus, protected by tv_tpg_mutex */
+ int tv_tpg_fe_count;
+ /* list for scsiback_list */
+ struct list_head tv_tpg_list;
+ /* Used to protect access for tpg_nexus */
+ struct mutex tv_tpg_mutex;
+ /* Pointer to the TCM pvscsi I_T Nexus for this TPG endpoint */
+ struct scsiback_nexus *tpg_nexus;
+ /* Pointer back to scsiback_tport */
+ struct scsiback_tport *tport;
+ /* Returned by scsiback_make_tpg() */
+ struct se_portal_group se_tpg;
+ /* alias used in xenstore */
+ char param_alias[VSCSI_NAMELEN];
+ /* list of info structures related to this target portal group */
+ struct list_head info_list;
+};
+
+#define SCSIBACK_INVALID_HANDLE (~0)
+
+static bool log_print_stat;
+module_param(log_print_stat, bool, 0644);
+
+static int scsiback_max_buffer_pages = 1024;
+module_param_named(max_buffer_pages, scsiback_max_buffer_pages, int, 0644);
+MODULE_PARM_DESC(max_buffer_pages,
+"Maximum number of free pages to keep in backend buffer");
+
+static struct kmem_cache *scsiback_cachep;
+static DEFINE_SPINLOCK(free_pages_lock);
+static int free_pages_num;
+static LIST_HEAD(scsiback_free_pages);
+
+/* Global spinlock to protect scsiback TPG list */
+static DEFINE_MUTEX(scsiback_mutex);
+static LIST_HEAD(scsiback_list);
+
+/* Local pointer to allocated TCM configfs fabric module */
+static struct target_fabric_configfs *scsiback_fabric_configfs;
+
+static void scsiback_get(struct vscsibk_info *info)
+{
+ atomic_inc(&info->nr_unreplied_reqs);
+}
+
+static void scsiback_put(struct vscsibk_info *info)
+{
+ if (atomic_dec_and_test(&info->nr_unreplied_reqs))
+ wake_up(&info->waiting_to_free);
+}
+
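+/*
+ * Return pages to the local free-page pool; pages above
+ * scsiback_max_buffer_pages are handed back to the balloon.
+ */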
+static void put_free_pages(struct page **page, int num)
+{
+ unsigned long flags;
+ int i = free_pages_num + num, n = num;
+
+ if (num == 0)
+ return;
+ if (i > scsiback_max_buffer_pages) {
+ n = min(num, i - scsiback_max_buffer_pages);
+ free_xenballooned_pages(n, page + num - n);
+ n = num - n;
+ }
+ spin_lock_irqsave(&free_pages_lock, flags);
+ for (i = 0; i < n; i++)
+ list_add(&page[i]->lru, &scsiback_free_pages);
+ free_pages_num += n;
+ spin_unlock_irqrestore(&free_pages_lock, flags);
+}
+
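+/* Take a page from the free-page pool, ballooning in a new one if empty. */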
+static int get_free_page(struct page **page)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&free_pages_lock, flags);
+ if (list_empty(&scsiback_free_pages)) {
+ spin_unlock_irqrestore(&free_pages_lock, flags);
+ return alloc_xenballooned_pages(1, page, false);
+ }
+ page[0] = list_first_entry(&scsiback_free_pages, struct page, lru);
+ list_del(&page[0]->lru);
+ free_pages_num--;
+ spin_unlock_irqrestore(&free_pages_lock, flags);
+ return 0;
+}
+
+static unsigned long vaddr_page(struct page *page)
+{
+ unsigned long pfn = page_to_pfn(page);
+
+ return (unsigned long)pfn_to_kaddr(pfn);
+}
+
+static unsigned long vaddr(struct vscsibk_pend *req, int seg)
+{
+ return vaddr_page(req->pages[seg]);
+}
+
+static void scsiback_print_status(char *sense_buffer, int errors,
+ struct vscsibk_pend *pending_req)
+{
+ struct scsiback_tpg *tpg = pending_req->v2p->tpg;
+
+ pr_err("xen-pvscsi[%s:%d] cmnd[0]=%02x -> st=%02x msg=%02x host=%02x drv=%02x\n",
+ tpg->tport->tport_name, pending_req->v2p->lun,
+ pending_req->cmnd[0], status_byte(errors), msg_byte(errors),
+ host_byte(errors), driver_byte(errors));
+
+ if (CHECK_CONDITION & status_byte(errors))
+ __scsi_print_sense("xen-pvscsi", sense_buffer,
+ SCSI_SENSE_BUFFERSIZE);
+}
+
+static void scsiback_fast_flush_area(struct vscsibk_pend *req)
+{
+ struct gnttab_unmap_grant_ref unmap[VSCSI_GRANT_BATCH];
+ struct page *pages[VSCSI_GRANT_BATCH];
+ unsigned int i, invcount = 0;
+ grant_handle_t handle;
+ int err;
+
+ kfree(req->sgl);
+ req->sgl = NULL;
+ req->n_sg = 0;
+
+ if (!req->n_grants)
+ return;
+
+ for (i = 0; i < req->n_grants; i++) {
+ handle = req->grant_handles[i];
+ if (handle == SCSIBACK_INVALID_HANDLE)
+ continue;
+ gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
+ GNTMAP_host_map, handle);
+ req->grant_handles[i] = SCSIBACK_INVALID_HANDLE;
+ pages[invcount] = req->pages[i];
+ put_page(pages[invcount]);
+ invcount++;
+ if (invcount < VSCSI_GRANT_BATCH)
+ continue;
+ err = gnttab_unmap_refs(unmap, NULL, pages, invcount);
+ BUG_ON(err);
+ invcount = 0;
+ }
+
+ if (invcount) {
+ err = gnttab_unmap_refs(unmap, NULL, pages, invcount);
+ BUG_ON(err);
+ }
+
+ put_free_pages(req->pages, req->n_grants);
+ req->n_grants = 0;
+}
+
+static void scsiback_free_translation_entry(struct kref *kref)
+{
+ struct v2p_entry *entry = container_of(kref, struct v2p_entry, kref);
+ struct scsiback_tpg *tpg = entry->tpg;
+
+ mutex_lock(&tpg->tv_tpg_mutex);
+ tpg->tv_tpg_fe_count--;
+ mutex_unlock(&tpg->tv_tpg_mutex);
+
+ kfree(entry);
+}
+
+static void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result,
+ uint32_t resid, struct vscsibk_pend *pending_req)
+{
+ struct vscsiif_response *ring_res;
+ struct vscsibk_info *info = pending_req->info;
+ int notify;
+ struct scsi_sense_hdr sshdr;
+ unsigned long flags;
+ unsigned len;
+
+ spin_lock_irqsave(&info->ring_lock, flags);
+
+ ring_res = RING_GET_RESPONSE(&info->ring, info->ring.rsp_prod_pvt);
+ info->ring.rsp_prod_pvt++;
+
+ ring_res->rslt = result;
+ ring_res->rqid = pending_req->rqid;
+
+ if (sense_buffer != NULL &&
+ scsi_normalize_sense(sense_buffer, VSCSIIF_SENSE_BUFFERSIZE,
+ &sshdr)) {
+ len = min_t(unsigned, 8 + sense_buffer[7],
+ VSCSIIF_SENSE_BUFFERSIZE);
+ memcpy(ring_res->sense_buffer, sense_buffer, len);
+ ring_res->sense_len = len;
+ } else {
+ ring_res->sense_len = 0;
+ }
+
+ ring_res->residual_len = resid;
+
+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&info->ring, notify);
+ spin_unlock_irqrestore(&info->ring_lock, flags);
+
+ if (notify)
+ notify_remote_via_irq(info->irq);
+
+ if (pending_req->v2p)
+ kref_put(&pending_req->v2p->kref,
+ scsiback_free_translation_entry);
+}
+
+static void scsiback_cmd_done(struct vscsibk_pend *pending_req)
+{
+ struct vscsibk_info *info = pending_req->info;
+ unsigned char *sense_buffer;
+ unsigned int resid;
+ int errors;
+
+ sense_buffer = pending_req->sense_buffer;
+ resid = pending_req->se_cmd.residual_count;
+ errors = pending_req->result;
+
+ if (errors && log_print_stat)
+ scsiback_print_status(sense_buffer, errors, pending_req);
+
+ scsiback_fast_flush_area(pending_req);
+ scsiback_do_resp_with_sense(sense_buffer, errors, resid, pending_req);
+ scsiback_put(info);
+}
+
+static void scsiback_cmd_exec(struct vscsibk_pend *pending_req)
+{
+ struct se_cmd *se_cmd = &pending_req->se_cmd;
+ struct se_session *sess = pending_req->v2p->tpg->tpg_nexus->tvn_se_sess;
+ int rc;
+
+ memset(pending_req->sense_buffer, 0, VSCSIIF_SENSE_BUFFERSIZE);
+
+ memset(se_cmd, 0, sizeof(*se_cmd));
+
+ scsiback_get(pending_req->info);
+ rc = target_submit_cmd_map_sgls(se_cmd, sess, pending_req->cmnd,
+ pending_req->sense_buffer, pending_req->v2p->lun,
+ pending_req->data_len, 0,
+ pending_req->sc_data_direction, 0,
+ pending_req->sgl, pending_req->n_sg,
+ NULL, 0, NULL, 0);
+ if (rc < 0) {
+ transport_send_check_condition_and_sense(se_cmd,
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
+ transport_generic_free_cmd(se_cmd, 0);
+ }
+}
+
+static int scsiback_gnttab_data_map_batch(struct gnttab_map_grant_ref *map,
+ struct page **pg, grant_handle_t *grant, int cnt)
+{
+ int err, i;
+
+ if (!cnt)
+ return 0;
+
+ err = gnttab_map_refs(map, NULL, pg, cnt);
+ BUG_ON(err);
+ for (i = 0; i < cnt; i++) {
+ if (unlikely(map[i].status != GNTST_okay)) {
+ pr_err("xen-pvscsi: invalid buffer -- could not remap it\n");
+ map[i].handle = SCSIBACK_INVALID_HANDLE;
+ err = -ENOMEM;
+ } else {
+ get_page(pg[i]);
+ }
+ grant[i] = map[i].handle;
+ }
+ return err;
+}
+
+static int scsiback_gnttab_data_map_list(struct vscsibk_pend *pending_req,
+ struct scsiif_request_segment *seg, struct page **pg,
+ grant_handle_t *grant, int cnt, u32 flags)
+{
+ int mapcount = 0, i, err = 0;
+ struct gnttab_map_grant_ref map[VSCSI_GRANT_BATCH];
+ struct vscsibk_info *info = pending_req->info;
+
+ for (i = 0; i < cnt; i++) {
+ if (get_free_page(pg + mapcount)) {
+ put_free_pages(pg, mapcount);
+ pr_err("xen-pvscsi: no grant page\n");
+ return -ENOMEM;
+ }
+ gnttab_set_map_op(&map[mapcount], vaddr_page(pg[mapcount]),
+ flags, seg[i].gref, info->domid);
+ mapcount++;
+ if (mapcount < VSCSI_GRANT_BATCH)
+ continue;
+ err = scsiback_gnttab_data_map_batch(map, pg, grant, mapcount);
+ pg += mapcount;
+ grant += mapcount;
+ pending_req->n_grants += mapcount;
+ if (err)
+ return err;
+ mapcount = 0;
+ }
+ err = scsiback_gnttab_data_map_batch(map, pg, grant, mapcount);
+ pending_req->n_grants += mapcount;
+ return err;
+}
+
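+/*
+ * Map the data grants of a request and build its scatterlist. With
+ * VSCSIIF_SG_GRANT set, the request segments reference pages that in
+ * turn hold the real segment descriptors; those pages are mapped first.
+ */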
+static int scsiback_gnttab_data_map(struct vscsiif_request *ring_req,
+ struct vscsibk_pend *pending_req)
+{
+ u32 flags;
+ int i, err, n_segs, i_seg = 0;
+ struct page **pg;
+ struct scsiif_request_segment *seg;
+ unsigned long end_seg = 0;
+ unsigned int nr_segments = (unsigned int)ring_req->nr_segments;
+ unsigned int nr_sgl = 0;
+ struct scatterlist *sg;
+ grant_handle_t *grant;
+
+ pending_req->n_sg = 0;
+ pending_req->n_grants = 0;
+ pending_req->data_len = 0;
+
+ nr_segments &= ~VSCSIIF_SG_GRANT;
+ if (!nr_segments)
+ return 0;
+
+ if (nr_segments > VSCSIIF_SG_TABLESIZE) {
+ DPRINTK("xen-pvscsi: invalid parameter nr_seg = %d\n",
+ ring_req->nr_segments);
+ return -EINVAL;
+ }
+
+ if (ring_req->nr_segments & VSCSIIF_SG_GRANT) {
+ err = scsiback_gnttab_data_map_list(pending_req, ring_req->seg,
+ pending_req->pages, pending_req->grant_handles,
+ nr_segments, GNTMAP_host_map | GNTMAP_readonly);
+ if (err)
+ return err;
+ nr_sgl = nr_segments;
+ nr_segments = 0;
+ for (i = 0; i < nr_sgl; i++) {
+ n_segs = ring_req->seg[i].length /
+ sizeof(struct scsiif_request_segment);
+ if ((unsigned)ring_req->seg[i].offset +
+ (unsigned)ring_req->seg[i].length > PAGE_SIZE ||
+ n_segs * sizeof(struct scsiif_request_segment) !=
+ ring_req->seg[i].length)
+ return -EINVAL;
+ nr_segments += n_segs;
+ }
+ if (nr_segments > SG_ALL) {
+ DPRINTK("xen-pvscsi: invalid nr_seg = %d\n",
+ nr_segments);
+ return -EINVAL;
+ }
+ }
+
+ /* sgl is freed in scsiback_fast_flush_area() */
+ pending_req->sgl = kmalloc_array(nr_segments,
+ sizeof(struct scatterlist), GFP_KERNEL);
+ if (!pending_req->sgl)
+ return -ENOMEM;
+
+ sg_init_table(pending_req->sgl, nr_segments);
+ pending_req->n_sg = nr_segments;
+
+ flags = GNTMAP_host_map;
+ if (pending_req->sc_data_direction == DMA_TO_DEVICE)
+ flags |= GNTMAP_readonly;
+
+ pg = pending_req->pages + nr_sgl;
+ grant = pending_req->grant_handles + nr_sgl;
+ if (!nr_sgl) {
+ seg = ring_req->seg;
+ err = scsiback_gnttab_data_map_list(pending_req, seg,
+ pg, grant, nr_segments, flags);
+ if (err)
+ return err;
+ } else {
+ for (i = 0; i < nr_sgl; i++) {
+ seg = (struct scsiif_request_segment *)(
+ vaddr(pending_req, i) + ring_req->seg[i].offset);
+ n_segs = ring_req->seg[i].length /
+ sizeof(struct scsiif_request_segment);
+ err = scsiback_gnttab_data_map_list(pending_req, seg,
+ pg, grant, n_segs, flags);
+ if (err)
+ return err;
+ pg += n_segs;
+ grant += n_segs;
+ }
+ end_seg = vaddr(pending_req, 0) + ring_req->seg[0].offset;
+ seg = (struct scsiif_request_segment *)end_seg;
+ end_seg += ring_req->seg[0].length;
+ pg = pending_req->pages + nr_sgl;
+ }
+
+ for_each_sg(pending_req->sgl, sg, nr_segments, i) {
+ sg_set_page(sg, pg[i], seg->length, seg->offset);
+ pending_req->data_len += seg->length;
+ seg++;
+ if (nr_sgl && (unsigned long)seg >= end_seg) {
+ i_seg++;
+ end_seg = vaddr(pending_req, i_seg) +
+ ring_req->seg[i_seg].offset;
+ seg = (struct scsiif_request_segment *)end_seg;
+ end_seg += ring_req->seg[i_seg].length;
+ }
+ if (sg->offset >= PAGE_SIZE ||
+ sg->length > PAGE_SIZE ||
+ sg->offset + sg->length > PAGE_SIZE)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void scsiback_disconnect(struct vscsibk_info *info)
+{
+ wait_event(info->waiting_to_free,
+ atomic_read(&info->nr_unreplied_reqs) == 0);
+
+ unbind_from_irqhandler(info->irq, info);
+ info->irq = 0;
+ xenbus_unmap_ring_vfree(info->dev, info->ring.sring);
+}
+
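+/*
+ * Execute a task management request (abort or LUN reset) through the
+ * target core and report the result back to the frontend.
+ */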
+static void scsiback_device_action(struct vscsibk_pend *pending_req,
+ enum tcm_tmreq_table act, int tag)
+{
+ int rc, err = FAILED;
+ struct scsiback_tpg *tpg = pending_req->v2p->tpg;
+ struct se_cmd *se_cmd = &pending_req->se_cmd;
+ struct scsiback_tmr *tmr;
+
+ tmr = kzalloc(sizeof(struct scsiback_tmr), GFP_KERNEL);
+ if (!tmr)
+ goto out;
+
+ init_waitqueue_head(&tmr->tmr_wait);
+
+ transport_init_se_cmd(se_cmd, tpg->se_tpg.se_tpg_tfo,
+ tpg->tpg_nexus->tvn_se_sess, 0, DMA_NONE, MSG_SIMPLE_TAG,
+ &pending_req->sense_buffer[0]);
+
+ rc = core_tmr_alloc_req(se_cmd, tmr, act, GFP_KERNEL);
+ if (rc < 0)
+ goto out;
+
+ se_cmd->se_tmr_req->ref_task_tag = tag;
+
+ if (transport_lookup_tmr_lun(se_cmd, pending_req->v2p->lun) < 0)
+ goto out;
+
+ transport_generic_handle_tmr(se_cmd);
+ wait_event(tmr->tmr_wait, atomic_read(&tmr->tmr_complete));
+
+ err = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ?
+ SUCCESS : FAILED;
+
+out:
+ if (tmr) {
+ transport_generic_free_cmd(&pending_req->se_cmd, 1);
+ kfree(tmr);
+ }
+
+ scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
+
+ kmem_cache_free(scsiback_cachep, pending_req);
+}
+
+/*
+ * Perform virtual to physical translation.
+ */
+static struct v2p_entry *scsiback_do_translation(struct vscsibk_info *info,
+ struct ids_tuple *v)
+{
+ struct v2p_entry *entry;
+ struct list_head *head = &(info->v2p_entry_lists);
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->v2p_lock, flags);
+ list_for_each_entry(entry, head, l) {
+ if ((entry->v.chn == v->chn) &&
+ (entry->v.tgt == v->tgt) &&
+ (entry->v.lun == v->lun)) {
+ kref_get(&entry->kref);
+ goto out;
+ }
+ }
+ entry = NULL;
+
+out:
+ spin_unlock_irqrestore(&info->v2p_lock, flags);
+ return entry;
+}
+
+static int prepare_pending_reqs(struct vscsibk_info *info,
+ struct vscsiif_request *ring_req,
+ struct vscsibk_pend *pending_req)
+{
+ struct v2p_entry *v2p;
+ struct ids_tuple vir;
+
+ pending_req->rqid = ring_req->rqid;
+ pending_req->info = info;
+
+ vir.chn = ring_req->channel;
+ vir.tgt = ring_req->id;
+ vir.lun = ring_req->lun;
+
+ v2p = scsiback_do_translation(info, &vir);
+ if (!v2p) {
+ pending_req->v2p = NULL;
+ DPRINTK("xen-pvscsi: doesn't exist.\n");
+ return -ENODEV;
+ }
+ pending_req->v2p = v2p;
+
+ /* request range check from frontend */
+ pending_req->sc_data_direction = ring_req->sc_data_direction;
+ if ((pending_req->sc_data_direction != DMA_BIDIRECTIONAL) &&
+ (pending_req->sc_data_direction != DMA_TO_DEVICE) &&
+ (pending_req->sc_data_direction != DMA_FROM_DEVICE) &&
+ (pending_req->sc_data_direction != DMA_NONE)) {
+ DPRINTK("xen-pvscsi: invalid parameter data_dir = %d\n",
+ pending_req->sc_data_direction);
+ return -EINVAL;
+ }
+
+ pending_req->cmd_len = ring_req->cmd_len;
+ if (pending_req->cmd_len > VSCSIIF_MAX_COMMAND_SIZE) {
+ DPRINTK("xen-pvscsi: invalid parameter cmd_len = %d\n",
+ pending_req->cmd_len);
+ return -EINVAL;
+ }
+ memcpy(pending_req->cmnd, ring_req->cmnd, pending_req->cmd_len);
+
+ return 0;
+}
+
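+/*
+ * Consume and dispatch the requests currently on the shared ring.
+ * Returns nonzero if more requests may be pending.
+ */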
+static int scsiback_do_cmd_fn(struct vscsibk_info *info)
+{
+ struct vscsiif_back_ring *ring = &info->ring;
+ struct vscsiif_request *ring_req;
+ struct vscsibk_pend *pending_req;
+ RING_IDX rc, rp;
+ int err, more_to_do;
+ uint32_t result;
+ uint8_t act;
+
+ rc = ring->req_cons;
+ rp = ring->sring->req_prod;
+ rmb(); /* guest system is accessing ring, too */
+
+ if (RING_REQUEST_PROD_OVERFLOW(ring, rp)) {
+ rc = ring->rsp_prod_pvt;
+ pr_warn("xen-pvscsi: Dom%d provided bogus ring requests (%#x - %#x = %u). Halting ring processing\n",
+ info->domid, rp, rc, rp - rc);
+ info->ring_error = 1;
+ return 0;
+ }
+
+ while ((rc != rp)) {
+ if (RING_REQUEST_CONS_OVERFLOW(ring, rc))
+ break;
+ pending_req = kmem_cache_alloc(scsiback_cachep, GFP_KERNEL);
+ if (!pending_req)
+ return 1;
+
+ ring_req = RING_GET_REQUEST(ring, rc);
+ ring->req_cons = ++rc;
+
+ act = ring_req->act;
+ err = prepare_pending_reqs(info, ring_req, pending_req);
+ if (err) {
+ switch (err) {
+ case -ENODEV:
+ result = DID_NO_CONNECT;
+ break;
+ default:
+ result = DRIVER_ERROR;
+ break;
+ }
+ scsiback_do_resp_with_sense(NULL, result << 24, 0,
+ pending_req);
+ kmem_cache_free(scsiback_cachep, pending_req);
+ return 1;
+ }
+
+ switch (act) {
+ case VSCSIIF_ACT_SCSI_CDB:
+ if (scsiback_gnttab_data_map(ring_req, pending_req)) {
+ scsiback_fast_flush_area(pending_req);
+ scsiback_do_resp_with_sense(NULL,
+ DRIVER_ERROR << 24, 0, pending_req);
+ kmem_cache_free(scsiback_cachep, pending_req);
+ } else {
+ scsiback_cmd_exec(pending_req);
+ }
+ break;
+ case VSCSIIF_ACT_SCSI_ABORT:
+ scsiback_device_action(pending_req, TMR_ABORT_TASK,
+ ring_req->ref_rqid);
+ break;
+ case VSCSIIF_ACT_SCSI_RESET:
+ scsiback_device_action(pending_req, TMR_LUN_RESET, 0);
+ break;
+ default:
+ pr_err_ratelimited("xen-pvscsi: invalid request\n");
+ scsiback_do_resp_with_sense(NULL, DRIVER_ERROR << 24,
+ 0, pending_req);
+ kmem_cache_free(scsiback_cachep, pending_req);
+ break;
+ }
+
+ /* Yield point for this unbounded loop. */
+ cond_resched();
+ }
+
+ RING_FINAL_CHECK_FOR_REQUESTS(&info->ring, more_to_do);
+ return more_to_do;
+}
+
+static irqreturn_t scsiback_irq_fn(int irq, void *dev_id)
+{
+ struct vscsibk_info *info = dev_id;
+
+ if (info->ring_error)
+ return IRQ_HANDLED;
+
+ while (scsiback_do_cmd_fn(info))
+ cond_resched();
+
+ return IRQ_HANDLED;
+}
+
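+/*
+ * Map the ring page granted by the frontend and bind its event channel
+ * to a threaded interrupt handler.
+ */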
+static int scsiback_init_sring(struct vscsibk_info *info, grant_ref_t ring_ref,
+ evtchn_port_t evtchn)
+{
+ void *area;
+ struct vscsiif_sring *sring;
+ int err;
+
+ if (info->irq)
+ return -1;
+
+ err = xenbus_map_ring_valloc(info->dev, ring_ref, &area);
+ if (err)
+ return err;
+
+ sring = (struct vscsiif_sring *)area;
+ BACK_RING_INIT(&info->ring, sring, PAGE_SIZE);
+
+ err = bind_interdomain_evtchn_to_irq(info->domid, evtchn);
+ if (err < 0)
+ goto unmap_page;
+
+ info->irq = err;
+
+ err = request_threaded_irq(info->irq, NULL, scsiback_irq_fn,
+ IRQF_ONESHOT, "vscsiif-backend", info);
+ if (err)
+ goto free_irq;
+
+ return 0;
+
+free_irq:
+ unbind_from_irqhandler(info->irq, info);
+ info->irq = 0;
+unmap_page:
+ xenbus_unmap_ring_vfree(info->dev, area);
+
+ return err;
+}
+
+static int scsiback_map(struct vscsibk_info *info)
+{
+ struct xenbus_device *dev = info->dev;
+ unsigned int ring_ref, evtchn;
+ int err;
+
+ err = xenbus_gather(XBT_NIL, dev->otherend,
+ "ring-ref", "%u", &ring_ref,
+ "event-channel", "%u", &evtchn, NULL);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "reading %s ring", dev->otherend);
+ return err;
+ }
+
+ return scsiback_init_sring(info, ring_ref, evtchn);
+}
+
+/*
+ * Add a new translation entry.
+ */
+static int scsiback_add_translation_entry(struct vscsibk_info *info,
+ char *phy, struct ids_tuple *v)
+{
+ int err = 0;
+ struct v2p_entry *entry;
+ struct v2p_entry *new;
+ struct list_head *head = &(info->v2p_entry_lists);
+ unsigned long flags;
+ char *lunp;
+ unsigned int lun;
+ struct scsiback_tpg *tpg_entry, *tpg = NULL;
+ char *error = "doesn't exist";
+
+ lunp = strrchr(phy, ':');
+ if (!lunp) {
+ pr_err("xen-pvscsi: illegal format of physical device %s\n",
+ phy);
+ return -EINVAL;
+ }
+ *lunp = 0;
+ lunp++;
+ if (kstrtouint(lunp, 10, &lun) || lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
+ pr_err("xen-pvscsi: lun number not valid: %s\n", lunp);
+ return -EINVAL;
+ }
+
+ mutex_lock(&scsiback_mutex);
+ list_for_each_entry(tpg_entry, &scsiback_list, tv_tpg_list) {
+ if (!strcmp(phy, tpg_entry->tport->tport_name) ||
+ !strcmp(phy, tpg_entry->param_alias)) {
+ spin_lock(&tpg_entry->se_tpg.tpg_lun_lock);
+ if (tpg_entry->se_tpg.tpg_lun_list[lun]->lun_status ==
+ TRANSPORT_LUN_STATUS_ACTIVE) {
+ if (!tpg_entry->tpg_nexus)
+ error = "nexus undefined";
+ else
+ tpg = tpg_entry;
+ }
+ spin_unlock(&tpg_entry->se_tpg.tpg_lun_lock);
+ break;
+ }
+ }
+ if (tpg) {
+ mutex_lock(&tpg->tv_tpg_mutex);
+ tpg->tv_tpg_fe_count++;
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ }
+ mutex_unlock(&scsiback_mutex);
+
+ if (!tpg) {
+ pr_err("xen-pvscsi: %s:%d %s\n", phy, lun, error);
+ return -ENODEV;
+ }
+
+ new = kmalloc(sizeof(struct v2p_entry), GFP_KERNEL);
+ if (new == NULL) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ spin_lock_irqsave(&info->v2p_lock, flags);
+
+ /* Check double assignment to identical virtual ID */
+ list_for_each_entry(entry, head, l) {
+ if ((entry->v.chn == v->chn) &&
+ (entry->v.tgt == v->tgt) &&
+ (entry->v.lun == v->lun)) {
+ pr_warn("xen-pvscsi: Virtual ID is already used. Assignment was not performed.\n");
+ err = -EEXIST;
+ goto out;
+ }
+
+ }
+
+ /* Create a new translation entry and add to the list */
+ kref_init(&new->kref);
+ new->v = *v;
+ new->tpg = tpg;
+ new->lun = lun;
+ list_add_tail(&new->l, head);
+
+out:
+ spin_unlock_irqrestore(&info->v2p_lock, flags);
+
+out_free:
+ mutex_lock(&tpg->tv_tpg_mutex);
+ tpg->tv_tpg_fe_count--;
+ mutex_unlock(&tpg->tv_tpg_mutex);
+
+ if (err)
+ kfree(new);
+
+ return err;
+}
+
+static void __scsiback_del_translation_entry(struct v2p_entry *entry)
+{
+ list_del(&entry->l);
+ kref_put(&entry->kref, scsiback_free_translation_entry);
+}
+
+/*
+ * Delete the specified translation entry.
+ */
+static int scsiback_del_translation_entry(struct vscsibk_info *info,
+ struct ids_tuple *v)
+{
+ struct v2p_entry *entry;
+ struct list_head *head = &(info->v2p_entry_lists);
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->v2p_lock, flags);
+ /* Find the specified translation entry */
+ list_for_each_entry(entry, head, l) {
+ if ((entry->v.chn == v->chn) &&
+ (entry->v.tgt == v->tgt) &&
+ (entry->v.lun == v->lun)) {
+ goto found;
+ }
+ }
+
+ spin_unlock_irqrestore(&info->v2p_lock, flags);
+ return 1;
+
+found:
+ /* Delete the specified translation entry */
+ __scsiback_del_translation_entry(entry);
+
+ spin_unlock_irqrestore(&info->v2p_lock, flags);
+ return 0;
+}
+
+static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state,
+ char *phy, struct ids_tuple *vir)
+{
+ if (!scsiback_add_translation_entry(info, phy, vir)) {
+ if (xenbus_printf(XBT_NIL, info->dev->nodename, state,
+ "%d", XenbusStateInitialised)) {
+ pr_err("xen-pvscsi: xenbus_printf error %s\n", state);
+ scsiback_del_translation_entry(info, vir);
+ }
+ } else {
+ xenbus_printf(XBT_NIL, info->dev->nodename, state,
+ "%d", XenbusStateClosed);
+ }
+}
+
+static void scsiback_do_del_lun(struct vscsibk_info *info, const char *state,
+ struct ids_tuple *vir)
+{
+ if (!scsiback_del_translation_entry(info, vir)) {
+ if (xenbus_printf(XBT_NIL, info->dev->nodename, state,
+ "%d", XenbusStateClosed))
+ pr_err("xen-pvscsi: xenbus_printf error %s\n", state);
+ }
+}
+
+#define VSCSIBACK_OP_ADD_OR_DEL_LUN 1
+#define VSCSIBACK_OP_UPDATEDEV_STATE 2
+
+static void scsiback_do_1lun_hotplug(struct vscsibk_info *info, int op,
+ char *ent)
+{
+ int err;
+ struct ids_tuple vir;
+ char *val;
+ int device_state;
+ char phy[VSCSI_NAMELEN];
+ char str[64];
+ char state[64];
+ struct xenbus_device *dev = info->dev;
+
+	/* read the device state */
+ snprintf(state, sizeof(state), "vscsi-devs/%s/state", ent);
+ err = xenbus_scanf(XBT_NIL, dev->nodename, state, "%u", &device_state);
+ if (XENBUS_EXIST_ERR(err))
+ return;
+
+ /* physical SCSI device */
+ snprintf(str, sizeof(str), "vscsi-devs/%s/p-dev", ent);
+ val = xenbus_read(XBT_NIL, dev->nodename, str, NULL);
+ if (IS_ERR(val)) {
+ xenbus_printf(XBT_NIL, dev->nodename, state,
+ "%d", XenbusStateClosed);
+ return;
+ }
+ strlcpy(phy, val, VSCSI_NAMELEN);
+ kfree(val);
+
+ /* virtual SCSI device */
+ snprintf(str, sizeof(str), "vscsi-devs/%s/v-dev", ent);
+ err = xenbus_scanf(XBT_NIL, dev->nodename, str, "%u:%u:%u:%u",
+ &vir.hst, &vir.chn, &vir.tgt, &vir.lun);
+ if (XENBUS_EXIST_ERR(err)) {
+ xenbus_printf(XBT_NIL, dev->nodename, state,
+ "%d", XenbusStateClosed);
+ return;
+ }
+
+ switch (op) {
+ case VSCSIBACK_OP_ADD_OR_DEL_LUN:
+ if (device_state == XenbusStateInitialising)
+ scsiback_do_add_lun(info, state, phy, &vir);
+ if (device_state == XenbusStateClosing)
+ scsiback_do_del_lun(info, state, &vir);
+ break;
+
+ case VSCSIBACK_OP_UPDATEDEV_STATE:
+ if (device_state == XenbusStateInitialised) {
+ /* modify vscsi-devs/dev-x/state */
+ if (xenbus_printf(XBT_NIL, dev->nodename, state,
+ "%d", XenbusStateConnected)) {
+ pr_err("xen-pvscsi: xenbus_printf error %s\n",
+ str);
+ scsiback_del_translation_entry(info, &vir);
+ xenbus_printf(XBT_NIL, dev->nodename, state,
+ "%d", XenbusStateClosed);
+ }
+ }
+ break;
+	/* Additional operations can be handled here when needed. */
+ default:
+ break;
+ }
+}
+
+static void scsiback_do_lun_hotplug(struct vscsibk_info *info, int op)
+{
+ int i;
+ char **dir;
+ unsigned int ndir = 0;
+
+ dir = xenbus_directory(XBT_NIL, info->dev->nodename, "vscsi-devs",
+ &ndir);
+ if (IS_ERR(dir))
+ return;
+
+ for (i = 0; i < ndir; i++)
+ scsiback_do_1lun_hotplug(info, op, dir[i]);
+
+ kfree(dir);
+}
+
+static void scsiback_frontend_changed(struct xenbus_device *dev,
+ enum xenbus_state frontend_state)
+{
+ struct vscsibk_info *info = dev_get_drvdata(&dev->dev);
+
+ switch (frontend_state) {
+ case XenbusStateInitialising:
+ break;
+
+ case XenbusStateInitialised:
+ if (scsiback_map(info))
+ break;
+
+ scsiback_do_lun_hotplug(info, VSCSIBACK_OP_ADD_OR_DEL_LUN);
+ xenbus_switch_state(dev, XenbusStateConnected);
+ break;
+
+ case XenbusStateConnected:
+ scsiback_do_lun_hotplug(info, VSCSIBACK_OP_UPDATEDEV_STATE);
+
+ if (dev->state == XenbusStateConnected)
+ break;
+
+ xenbus_switch_state(dev, XenbusStateConnected);
+ break;
+
+ case XenbusStateClosing:
+ if (info->irq)
+ scsiback_disconnect(info);
+
+ xenbus_switch_state(dev, XenbusStateClosing);
+ break;
+
+ case XenbusStateClosed:
+ xenbus_switch_state(dev, XenbusStateClosed);
+ if (xenbus_dev_is_online(dev))
+ break;
+ /* fall through if not online */
+ case XenbusStateUnknown:
+ device_unregister(&dev->dev);
+ break;
+
+ case XenbusStateReconfiguring:
+ scsiback_do_lun_hotplug(info, VSCSIBACK_OP_ADD_OR_DEL_LUN);
+ xenbus_switch_state(dev, XenbusStateReconfigured);
+
+ break;
+
+ default:
+ xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
+ frontend_state);
+ break;
+ }
+}
+
+/*
+  Release all translation entries of the backend instance
+*/
+static void scsiback_release_translation_entry(struct vscsibk_info *info)
+{
+ struct v2p_entry *entry, *tmp;
+ struct list_head *head = &(info->v2p_entry_lists);
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->v2p_lock, flags);
+
+ list_for_each_entry_safe(entry, tmp, head, l)
+ __scsiback_del_translation_entry(entry);
+
+ spin_unlock_irqrestore(&info->v2p_lock, flags);
+}
+
+static int scsiback_remove(struct xenbus_device *dev)
+{
+ struct vscsibk_info *info = dev_get_drvdata(&dev->dev);
+
+ if (info->irq)
+ scsiback_disconnect(info);
+
+ scsiback_release_translation_entry(info);
+
+ dev_set_drvdata(&dev->dev, NULL);
+
+ return 0;
+}
+
+static int scsiback_probe(struct xenbus_device *dev,
+ const struct xenbus_device_id *id)
+{
+ int err;
+
+ struct vscsibk_info *info = kzalloc(sizeof(struct vscsibk_info),
+ GFP_KERNEL);
+
+ DPRINTK("%p %d\n", dev, dev->otherend_id);
+
+ if (!info) {
+ xenbus_dev_fatal(dev, -ENOMEM, "allocating backend structure");
+ return -ENOMEM;
+ }
+ info->dev = dev;
+ dev_set_drvdata(&dev->dev, info);
+
+ info->domid = dev->otherend_id;
+ spin_lock_init(&info->ring_lock);
+ info->ring_error = 0;
+ atomic_set(&info->nr_unreplied_reqs, 0);
+ init_waitqueue_head(&info->waiting_to_free);
+ info->dev = dev;
+ info->irq = 0;
+ INIT_LIST_HEAD(&info->v2p_entry_lists);
+ spin_lock_init(&info->v2p_lock);
+
+ err = xenbus_printf(XBT_NIL, dev->nodename, "feature-sg-grant", "%u",
+ SG_ALL);
+ if (err)
+ xenbus_dev_error(dev, err, "writing feature-sg-grant");
+
+ err = xenbus_switch_state(dev, XenbusStateInitWait);
+ if (err)
+ goto fail;
+
+ return 0;
+
+fail:
+ pr_warn("xen-pvscsi: %s failed\n", __func__);
+ scsiback_remove(dev);
+
+ return err;
+}
+
+static char *scsiback_dump_proto_id(struct scsiback_tport *tport)
+{
+ switch (tport->tport_proto_id) {
+ case SCSI_PROTOCOL_SAS:
+ return "SAS";
+ case SCSI_PROTOCOL_FCP:
+ return "FCP";
+ case SCSI_PROTOCOL_ISCSI:
+ return "iSCSI";
+ default:
+ break;
+ }
+
+ return "Unknown";
+}
+
+static u8 scsiback_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+ struct scsiback_tpg *tpg = container_of(se_tpg,
+ struct scsiback_tpg, se_tpg);
+ struct scsiback_tport *tport = tpg->tport;
+
+ switch (tport->tport_proto_id) {
+ case SCSI_PROTOCOL_SAS:
+ return sas_get_fabric_proto_ident(se_tpg);
+ case SCSI_PROTOCOL_FCP:
+ return fc_get_fabric_proto_ident(se_tpg);
+ case SCSI_PROTOCOL_ISCSI:
+ return iscsi_get_fabric_proto_ident(se_tpg);
+ default:
+ pr_err("Unknown tport_proto_id: 0x%02x, using SAS emulation\n",
+ tport->tport_proto_id);
+ break;
+ }
+
+ return sas_get_fabric_proto_ident(se_tpg);
+}
+
+static char *scsiback_get_fabric_wwn(struct se_portal_group *se_tpg)
+{
+ struct scsiback_tpg *tpg = container_of(se_tpg,
+ struct scsiback_tpg, se_tpg);
+ struct scsiback_tport *tport = tpg->tport;
+
+ return &tport->tport_name[0];
+}
+
+static u16 scsiback_get_tag(struct se_portal_group *se_tpg)
+{
+ struct scsiback_tpg *tpg = container_of(se_tpg,
+ struct scsiback_tpg, se_tpg);
+ return tpg->tport_tpgt;
+}
+
+static u32 scsiback_get_default_depth(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static u32
+scsiback_get_pr_transport_id(struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int *format_code,
+ unsigned char *buf)
+{
+ struct scsiback_tpg *tpg = container_of(se_tpg,
+ struct scsiback_tpg, se_tpg);
+ struct scsiback_tport *tport = tpg->tport;
+
+ switch (tport->tport_proto_id) {
+ case SCSI_PROTOCOL_SAS:
+ return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+ format_code, buf);
+ case SCSI_PROTOCOL_FCP:
+ return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+ format_code, buf);
+ case SCSI_PROTOCOL_ISCSI:
+ return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+ format_code, buf);
+ default:
+ pr_err("Unknown tport_proto_id: 0x%02x, using SAS emulation\n",
+ tport->tport_proto_id);
+ break;
+ }
+
+ return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+ format_code, buf);
+}
+
+static u32
+scsiback_get_pr_transport_id_len(struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int *format_code)
+{
+ struct scsiback_tpg *tpg = container_of(se_tpg,
+ struct scsiback_tpg, se_tpg);
+ struct scsiback_tport *tport = tpg->tport;
+
+ switch (tport->tport_proto_id) {
+ case SCSI_PROTOCOL_SAS:
+ return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+ format_code);
+ case SCSI_PROTOCOL_FCP:
+ return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+ format_code);
+ case SCSI_PROTOCOL_ISCSI:
+ return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+ format_code);
+ default:
+ pr_err("Unknown tport_proto_id: 0x%02x, using SAS emulation\n",
+ tport->tport_proto_id);
+ break;
+ }
+
+ return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+ format_code);
+}
+
+static char *
+scsiback_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
+ const char *buf,
+ u32 *out_tid_len,
+ char **port_nexus_ptr)
+{
+ struct scsiback_tpg *tpg = container_of(se_tpg,
+ struct scsiback_tpg, se_tpg);
+ struct scsiback_tport *tport = tpg->tport;
+
+ switch (tport->tport_proto_id) {
+ case SCSI_PROTOCOL_SAS:
+ return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+ port_nexus_ptr);
+ case SCSI_PROTOCOL_FCP:
+ return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+ port_nexus_ptr);
+ case SCSI_PROTOCOL_ISCSI:
+ return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+ port_nexus_ptr);
+ default:
+ pr_err("Unknown tport_proto_id: 0x%02x, using SAS emulation\n",
+ tport->tport_proto_id);
+ break;
+ }
+
+ return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+ port_nexus_ptr);
+}
+
+static struct se_wwn *
+scsiback_make_tport(struct target_fabric_configfs *tf,
+ struct config_group *group,
+ const char *name)
+{
+ struct scsiback_tport *tport;
+ char *ptr;
+ u64 wwpn = 0;
+ int off = 0;
+
+ tport = kzalloc(sizeof(struct scsiback_tport), GFP_KERNEL);
+ if (!tport)
+ return ERR_PTR(-ENOMEM);
+
+ tport->tport_wwpn = wwpn;
+ /*
+ * Determine the emulated Protocol Identifier and Target Port Name
+ * based on the incoming configfs directory name.
+ */
+ ptr = strstr(name, "naa.");
+ if (ptr) {
+ tport->tport_proto_id = SCSI_PROTOCOL_SAS;
+ goto check_len;
+ }
+ ptr = strstr(name, "fc.");
+ if (ptr) {
+ tport->tport_proto_id = SCSI_PROTOCOL_FCP;
+ off = 3; /* Skip over "fc." */
+ goto check_len;
+ }
+ ptr = strstr(name, "iqn.");
+ if (ptr) {
+ tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
+ goto check_len;
+ }
+
+ pr_err("Unable to locate prefix for emulated Target Port: %s\n", name);
+ kfree(tport);
+ return ERR_PTR(-EINVAL);
+
+check_len:
+ if (strlen(name) >= VSCSI_NAMELEN) {
+		pr_err("Emulated %s Address: %s, exceeds max: %d\n",
+			scsiback_dump_proto_id(tport), name, VSCSI_NAMELEN);
+ kfree(tport);
+ return ERR_PTR(-EINVAL);
+ }
+ snprintf(&tport->tport_name[0], VSCSI_NAMELEN, "%s", &name[off]);
+
+ pr_debug("xen-pvscsi: Allocated emulated Target %s Address: %s\n",
+ scsiback_dump_proto_id(tport), name);
+
+ return &tport->tport_wwn;
+}
+
+static void scsiback_drop_tport(struct se_wwn *wwn)
+{
+ struct scsiback_tport *tport = container_of(wwn,
+ struct scsiback_tport, tport_wwn);
+
+ pr_debug("xen-pvscsi: Deallocating emulated Target %s Address: %s\n",
+ scsiback_dump_proto_id(tport), tport->tport_name);
+
+ kfree(tport);
+}
+
+static struct se_node_acl *
+scsiback_alloc_fabric_acl(struct se_portal_group *se_tpg)
+{
+ return kzalloc(sizeof(struct se_node_acl), GFP_KERNEL);
+}
+
+static void
+scsiback_release_fabric_acl(struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl)
+{
+ kfree(se_nacl);
+}
+
+static u32 scsiback_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static int scsiback_check_stop_free(struct se_cmd *se_cmd)
+{
+ /*
+ * Do not release struct se_cmd's containing a valid TMR
+ * pointer. These will be released directly in scsiback_device_action()
+ * with transport_generic_free_cmd().
+ */
+ if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
+ return 0;
+
+ transport_generic_free_cmd(se_cmd, 0);
+ return 1;
+}
+
+static void scsiback_release_cmd(struct se_cmd *se_cmd)
+{
+ struct vscsibk_pend *pending_req = container_of(se_cmd,
+ struct vscsibk_pend, se_cmd);
+
+ kmem_cache_free(scsiback_cachep, pending_req);
+}
+
+static int scsiback_shutdown_session(struct se_session *se_sess)
+{
+ return 0;
+}
+
+static void scsiback_close_session(struct se_session *se_sess)
+{
+}
+
+static u32 scsiback_sess_get_index(struct se_session *se_sess)
+{
+ return 0;
+}
+
+static int scsiback_write_pending(struct se_cmd *se_cmd)
+{
+ /* Go ahead and process the write immediately */
+ target_execute_cmd(se_cmd);
+
+ return 0;
+}
+
+static int scsiback_write_pending_status(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+static void scsiback_set_default_node_attrs(struct se_node_acl *nacl)
+{
+}
+
+static u32 scsiback_get_task_tag(struct se_cmd *se_cmd)
+{
+ struct vscsibk_pend *pending_req = container_of(se_cmd,
+ struct vscsibk_pend, se_cmd);
+
+ return pending_req->rqid;
+}
+
+static int scsiback_get_cmd_state(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+static int scsiback_queue_data_in(struct se_cmd *se_cmd)
+{
+ struct vscsibk_pend *pending_req = container_of(se_cmd,
+ struct vscsibk_pend, se_cmd);
+
+ pending_req->result = SAM_STAT_GOOD;
+ scsiback_cmd_done(pending_req);
+ return 0;
+}
+
+static int scsiback_queue_status(struct se_cmd *se_cmd)
+{
+ struct vscsibk_pend *pending_req = container_of(se_cmd,
+ struct vscsibk_pend, se_cmd);
+
+ if (se_cmd->sense_buffer &&
+ ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
+ (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE)))
+ pending_req->result = (DRIVER_SENSE << 24) |
+ SAM_STAT_CHECK_CONDITION;
+ else
+ pending_req->result = se_cmd->scsi_status;
+
+ scsiback_cmd_done(pending_req);
+ return 0;
+}
+
+static void scsiback_queue_tm_rsp(struct se_cmd *se_cmd)
+{
+ struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
+ struct scsiback_tmr *tmr = se_tmr->fabric_tmr_ptr;
+
+ atomic_set(&tmr->tmr_complete, 1);
+ wake_up(&tmr->tmr_wait);
+}
+
+static void scsiback_aborted_task(struct se_cmd *se_cmd)
+{
+}
+
+static ssize_t scsiback_tpg_param_show_alias(struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct scsiback_tpg *tpg = container_of(se_tpg, struct scsiback_tpg,
+ se_tpg);
+ ssize_t rb;
+
+ mutex_lock(&tpg->tv_tpg_mutex);
+ rb = snprintf(page, PAGE_SIZE, "%s\n", tpg->param_alias);
+ mutex_unlock(&tpg->tv_tpg_mutex);
+
+ return rb;
+}
+
+static ssize_t scsiback_tpg_param_store_alias(struct se_portal_group *se_tpg,
+ const char *page, size_t count)
+{
+ struct scsiback_tpg *tpg = container_of(se_tpg, struct scsiback_tpg,
+ se_tpg);
+ int len;
+
+ if (strlen(page) >= VSCSI_NAMELEN) {
+ pr_err("param alias: %s, exceeds max: %d\n", page,
+ VSCSI_NAMELEN);
+ return -EINVAL;
+ }
+
+ mutex_lock(&tpg->tv_tpg_mutex);
+ len = snprintf(tpg->param_alias, VSCSI_NAMELEN, "%s", page);
+ if (tpg->param_alias[len - 1] == '\n')
+ tpg->param_alias[len - 1] = '\0';
+ mutex_unlock(&tpg->tv_tpg_mutex);
+
+ return count;
+}
+
+TF_TPG_PARAM_ATTR(scsiback, alias, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *scsiback_param_attrs[] = {
+ &scsiback_tpg_param_alias.attr,
+ NULL,
+};
+
+static int scsiback_make_nexus(struct scsiback_tpg *tpg,
+ const char *name)
+{
+ struct se_portal_group *se_tpg;
+ struct se_session *se_sess;
+ struct scsiback_nexus *tv_nexus;
+
+ mutex_lock(&tpg->tv_tpg_mutex);
+ if (tpg->tpg_nexus) {
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ pr_debug("tpg->tpg_nexus already exists\n");
+ return -EEXIST;
+ }
+ se_tpg = &tpg->se_tpg;
+
+ tv_nexus = kzalloc(sizeof(struct scsiback_nexus), GFP_KERNEL);
+ if (!tv_nexus) {
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ return -ENOMEM;
+ }
+ /*
+ * Initialize the struct se_session pointer
+ */
+ tv_nexus->tvn_se_sess = transport_init_session(TARGET_PROT_NORMAL);
+ if (IS_ERR(tv_nexus->tvn_se_sess)) {
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ kfree(tv_nexus);
+ return -ENOMEM;
+ }
+ se_sess = tv_nexus->tvn_se_sess;
+ /*
+	 * Since we are running in 'demo mode', this call will generate a
+ * struct se_node_acl for the scsiback struct se_portal_group with
+ * the SCSI Initiator port name of the passed configfs group 'name'.
+ */
+ tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
+ se_tpg, (unsigned char *)name);
+ if (!tv_nexus->tvn_se_sess->se_node_acl) {
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ pr_debug("core_tpg_check_initiator_node_acl() failed for %s\n",
+ name);
+ goto out;
+ }
+ /*
+ * Now register the TCM pvscsi virtual I_T Nexus as active with the
+ * call to __transport_register_session()
+ */
+ __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
+ tv_nexus->tvn_se_sess, tv_nexus);
+ tpg->tpg_nexus = tv_nexus;
+
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ return 0;
+
+out:
+ transport_free_session(se_sess);
+ kfree(tv_nexus);
+ return -ENOMEM;
+}
+
+static int scsiback_drop_nexus(struct scsiback_tpg *tpg)
+{
+ struct se_session *se_sess;
+ struct scsiback_nexus *tv_nexus;
+
+ mutex_lock(&tpg->tv_tpg_mutex);
+ tv_nexus = tpg->tpg_nexus;
+ if (!tv_nexus) {
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ return -ENODEV;
+ }
+
+ se_sess = tv_nexus->tvn_se_sess;
+ if (!se_sess) {
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ return -ENODEV;
+ }
+
+ if (tpg->tv_tpg_port_count != 0) {
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ pr_err("Unable to remove xen-pvscsi I_T Nexus with active TPG port count: %d\n",
+ tpg->tv_tpg_port_count);
+ return -EBUSY;
+ }
+
+ if (tpg->tv_tpg_fe_count != 0) {
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ pr_err("Unable to remove xen-pvscsi I_T Nexus with active TPG frontend count: %d\n",
+ tpg->tv_tpg_fe_count);
+ return -EBUSY;
+ }
+
+ pr_debug("xen-pvscsi: Removing I_T Nexus to emulated %s Initiator Port: %s\n",
+ scsiback_dump_proto_id(tpg->tport),
+ tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
+
+ /*
+ * Release the SCSI I_T Nexus to the emulated xen-pvscsi Target Port
+ */
+ transport_deregister_session(tv_nexus->tvn_se_sess);
+ tpg->tpg_nexus = NULL;
+ mutex_unlock(&tpg->tv_tpg_mutex);
+
+ kfree(tv_nexus);
+ return 0;
+}
+
+static ssize_t scsiback_tpg_show_nexus(struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct scsiback_tpg *tpg = container_of(se_tpg,
+ struct scsiback_tpg, se_tpg);
+ struct scsiback_nexus *tv_nexus;
+ ssize_t ret;
+
+ mutex_lock(&tpg->tv_tpg_mutex);
+ tv_nexus = tpg->tpg_nexus;
+ if (!tv_nexus) {
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ return -ENODEV;
+ }
+ ret = snprintf(page, PAGE_SIZE, "%s\n",
+ tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
+ mutex_unlock(&tpg->tv_tpg_mutex);
+
+ return ret;
+}
+
+static ssize_t scsiback_tpg_store_nexus(struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct scsiback_tpg *tpg = container_of(se_tpg,
+ struct scsiback_tpg, se_tpg);
+ struct scsiback_tport *tport_wwn = tpg->tport;
+ unsigned char i_port[VSCSI_NAMELEN], *ptr, *port_ptr;
+ int ret;
+ /*
+	 * Shut down the active I_T nexus if 'NULL' is passed.
+ */
+ if (!strncmp(page, "NULL", 4)) {
+ ret = scsiback_drop_nexus(tpg);
+ return (!ret) ? count : ret;
+ }
+ /*
+ * Otherwise make sure the passed virtual Initiator port WWN matches
+ * the fabric protocol_id set in scsiback_make_tport(), and call
+ * scsiback_make_nexus().
+ */
+ if (strlen(page) >= VSCSI_NAMELEN) {
+		pr_err("Emulated NAA SAS Address: %s, exceeds max: %d\n",
+ page, VSCSI_NAMELEN);
+ return -EINVAL;
+ }
+ snprintf(&i_port[0], VSCSI_NAMELEN, "%s", page);
+
+ ptr = strstr(i_port, "naa.");
+ if (ptr) {
+ if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
+ pr_err("Passed SAS Initiator Port %s does not match target port protoid: %s\n",
+ i_port, scsiback_dump_proto_id(tport_wwn));
+ return -EINVAL;
+ }
+ port_ptr = &i_port[0];
+ goto check_newline;
+ }
+ ptr = strstr(i_port, "fc.");
+ if (ptr) {
+ if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
+ pr_err("Passed FCP Initiator Port %s does not match target port protoid: %s\n",
+ i_port, scsiback_dump_proto_id(tport_wwn));
+ return -EINVAL;
+ }
+ port_ptr = &i_port[3]; /* Skip over "fc." */
+ goto check_newline;
+ }
+ ptr = strstr(i_port, "iqn.");
+ if (ptr) {
+ if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
+ pr_err("Passed iSCSI Initiator Port %s does not match target port protoid: %s\n",
+ i_port, scsiback_dump_proto_id(tport_wwn));
+ return -EINVAL;
+ }
+ port_ptr = &i_port[0];
+ goto check_newline;
+ }
+ pr_err("Unable to locate prefix for emulated Initiator Port: %s\n",
+ i_port);
+ return -EINVAL;
+ /*
+ * Clear any trailing newline for the NAA WWN
+ */
+check_newline:
+ if (i_port[strlen(i_port) - 1] == '\n')
+ i_port[strlen(i_port) - 1] = '\0';
+
+ ret = scsiback_make_nexus(tpg, port_ptr);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+TF_TPG_BASE_ATTR(scsiback, nexus, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *scsiback_tpg_attrs[] = {
+ &scsiback_tpg_nexus.attr,
+ NULL,
+};
+
+static ssize_t
+scsiback_wwn_show_attr_version(struct target_fabric_configfs *tf,
+ char *page)
+{
+ return sprintf(page, "xen-pvscsi fabric module %s on %s/%s on "
+ UTS_RELEASE"\n",
+ VSCSI_VERSION, utsname()->sysname, utsname()->machine);
+}
+
+TF_WWN_ATTR_RO(scsiback, version);
+
+static struct configfs_attribute *scsiback_wwn_attrs[] = {
+ &scsiback_wwn_version.attr,
+ NULL,
+};
+
+static char *scsiback_get_fabric_name(void)
+{
+ return "xen-pvscsi";
+}
+
+static int scsiback_port_link(struct se_portal_group *se_tpg,
+ struct se_lun *lun)
+{
+ struct scsiback_tpg *tpg = container_of(se_tpg,
+ struct scsiback_tpg, se_tpg);
+
+ mutex_lock(&tpg->tv_tpg_mutex);
+ tpg->tv_tpg_port_count++;
+ mutex_unlock(&tpg->tv_tpg_mutex);
+
+ return 0;
+}
+
+static void scsiback_port_unlink(struct se_portal_group *se_tpg,
+ struct se_lun *lun)
+{
+ struct scsiback_tpg *tpg = container_of(se_tpg,
+ struct scsiback_tpg, se_tpg);
+
+ mutex_lock(&tpg->tv_tpg_mutex);
+ tpg->tv_tpg_port_count--;
+ mutex_unlock(&tpg->tv_tpg_mutex);
+}
+
+static struct se_portal_group *
+scsiback_make_tpg(struct se_wwn *wwn,
+ struct config_group *group,
+ const char *name)
+{
+ struct scsiback_tport *tport = container_of(wwn,
+ struct scsiback_tport, tport_wwn);
+
+ struct scsiback_tpg *tpg;
+ u16 tpgt;
+ int ret;
+
+ if (strstr(name, "tpgt_") != name)
+ return ERR_PTR(-EINVAL);
+ ret = kstrtou16(name + 5, 10, &tpgt);
+ if (ret)
+ return ERR_PTR(ret);
+
+ tpg = kzalloc(sizeof(struct scsiback_tpg), GFP_KERNEL);
+ if (!tpg)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&tpg->tv_tpg_mutex);
+ INIT_LIST_HEAD(&tpg->tv_tpg_list);
+ INIT_LIST_HEAD(&tpg->info_list);
+ tpg->tport = tport;
+ tpg->tport_tpgt = tpgt;
+
+ ret = core_tpg_register(&scsiback_fabric_configfs->tf_ops, wwn,
+ &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
+ if (ret < 0) {
+ kfree(tpg);
+ return NULL;
+ }
+ mutex_lock(&scsiback_mutex);
+ list_add_tail(&tpg->tv_tpg_list, &scsiback_list);
+ mutex_unlock(&scsiback_mutex);
+
+ return &tpg->se_tpg;
+}
+
+static void scsiback_drop_tpg(struct se_portal_group *se_tpg)
+{
+ struct scsiback_tpg *tpg = container_of(se_tpg,
+ struct scsiback_tpg, se_tpg);
+
+ mutex_lock(&scsiback_mutex);
+ list_del(&tpg->tv_tpg_list);
+ mutex_unlock(&scsiback_mutex);
+ /*
+ * Release the virtual I_T Nexus for this xen-pvscsi TPG
+ */
+ scsiback_drop_nexus(tpg);
+ /*
+	 * Deregister the se_tpg from TCM.
+ */
+ core_tpg_deregister(se_tpg);
+ kfree(tpg);
+}
+
+static int scsiback_check_true(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static int scsiback_check_false(struct se_portal_group *se_tpg)
+{
+ return 0;
+}
+
+static struct target_core_fabric_ops scsiback_ops = {
+ .get_fabric_name = scsiback_get_fabric_name,
+ .get_fabric_proto_ident = scsiback_get_fabric_proto_ident,
+ .tpg_get_wwn = scsiback_get_fabric_wwn,
+ .tpg_get_tag = scsiback_get_tag,
+ .tpg_get_default_depth = scsiback_get_default_depth,
+ .tpg_get_pr_transport_id = scsiback_get_pr_transport_id,
+ .tpg_get_pr_transport_id_len = scsiback_get_pr_transport_id_len,
+ .tpg_parse_pr_out_transport_id = scsiback_parse_pr_out_transport_id,
+ .tpg_check_demo_mode = scsiback_check_true,
+ .tpg_check_demo_mode_cache = scsiback_check_true,
+ .tpg_check_demo_mode_write_protect = scsiback_check_false,
+ .tpg_check_prod_mode_write_protect = scsiback_check_false,
+ .tpg_alloc_fabric_acl = scsiback_alloc_fabric_acl,
+ .tpg_release_fabric_acl = scsiback_release_fabric_acl,
+ .tpg_get_inst_index = scsiback_tpg_get_inst_index,
+ .check_stop_free = scsiback_check_stop_free,
+ .release_cmd = scsiback_release_cmd,
+ .put_session = NULL,
+ .shutdown_session = scsiback_shutdown_session,
+ .close_session = scsiback_close_session,
+ .sess_get_index = scsiback_sess_get_index,
+ .sess_get_initiator_sid = NULL,
+ .write_pending = scsiback_write_pending,
+ .write_pending_status = scsiback_write_pending_status,
+ .set_default_node_attributes = scsiback_set_default_node_attrs,
+ .get_task_tag = scsiback_get_task_tag,
+ .get_cmd_state = scsiback_get_cmd_state,
+ .queue_data_in = scsiback_queue_data_in,
+ .queue_status = scsiback_queue_status,
+ .queue_tm_rsp = scsiback_queue_tm_rsp,
+ .aborted_task = scsiback_aborted_task,
+ /*
+	 * Setup callbacks for generic logic in target_core_fabric_configfs.c
+ */
+ .fabric_make_wwn = scsiback_make_tport,
+ .fabric_drop_wwn = scsiback_drop_tport,
+ .fabric_make_tpg = scsiback_make_tpg,
+ .fabric_drop_tpg = scsiback_drop_tpg,
+ .fabric_post_link = scsiback_port_link,
+ .fabric_pre_unlink = scsiback_port_unlink,
+ .fabric_make_np = NULL,
+ .fabric_drop_np = NULL,
+#if 0
+ .fabric_make_nodeacl = scsiback_make_nodeacl,
+ .fabric_drop_nodeacl = scsiback_drop_nodeacl,
+#endif
+};
+
+static int scsiback_register_configfs(void)
+{
+ struct target_fabric_configfs *fabric;
+ int ret;
+
+ pr_debug("xen-pvscsi: fabric module %s on %s/%s on "UTS_RELEASE"\n",
+ VSCSI_VERSION, utsname()->sysname, utsname()->machine);
+ /*
+ * Register the top level struct config_item_type with TCM core
+ */
+ fabric = target_fabric_configfs_init(THIS_MODULE, "xen-pvscsi");
+ if (IS_ERR(fabric))
+ return PTR_ERR(fabric);
+
+ /*
+ * Setup fabric->tf_ops from our local scsiback_ops
+ */
+ fabric->tf_ops = scsiback_ops;
+ /*
+ * Setup default attribute lists for various fabric->tf_cit_tmpl
+ */
+ fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = scsiback_wwn_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = scsiback_tpg_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = scsiback_param_attrs;
+ fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+ fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+ /*
+ * Register the fabric for use within TCM
+ */
+ ret = target_fabric_configfs_register(fabric);
+ if (ret < 0) {
+ target_fabric_configfs_free(fabric);
+ return ret;
+ }
+ /*
+ * Setup our local pointer to *fabric
+ */
+ scsiback_fabric_configfs = fabric;
+ pr_debug("xen-pvscsi: Set fabric -> scsiback_fabric_configfs\n");
+ return 0;
+};
+
+static void scsiback_deregister_configfs(void)
+{
+ if (!scsiback_fabric_configfs)
+ return;
+
+ target_fabric_configfs_deregister(scsiback_fabric_configfs);
+ scsiback_fabric_configfs = NULL;
+ pr_debug("xen-pvscsi: Cleared scsiback_fabric_configfs\n");
+};
+
+static const struct xenbus_device_id scsiback_ids[] = {
+ { "vscsi" },
+ { "" }
+};
+
+static struct xenbus_driver scsiback_driver = {
+ .ids = scsiback_ids,
+ .probe = scsiback_probe,
+ .remove = scsiback_remove,
+ .otherend_changed = scsiback_frontend_changed
+};
+
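+/* kmem_cache constructor: every pending request starts with invalid grant handles */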
+static void scsiback_init_pend(void *p)
+{
+ struct vscsibk_pend *pend = p;
+ int i;
+
+ memset(pend, 0, sizeof(*pend));
+ for (i = 0; i < VSCSI_MAX_GRANTS; i++)
+ pend->grant_handles[i] = SCSIBACK_INVALID_HANDLE;
+}
+
+static int __init scsiback_init(void)
+{
+ int ret;
+
+ if (!xen_domain())
+ return -ENODEV;
+
+ scsiback_cachep = kmem_cache_create("vscsiif_cache",
+ sizeof(struct vscsibk_pend), 0, 0, scsiback_init_pend);
+ if (!scsiback_cachep)
+ return -ENOMEM;
+
+ ret = xenbus_register_backend(&scsiback_driver);
+ if (ret)
+ goto out_cache_destroy;
+
+ ret = scsiback_register_configfs();
+ if (ret)
+ goto out_unregister_xenbus;
+
+ return 0;
+
+out_unregister_xenbus:
+ xenbus_unregister_driver(&scsiback_driver);
+out_cache_destroy:
+ kmem_cache_destroy(scsiback_cachep);
+ pr_err("xen-pvscsi: %s: error %d\n", __func__, ret);
+ return ret;
+}
+
+static void __exit scsiback_exit(void)
+{
+ struct page *page;
+
+ while (free_pages_num) {
+ if (get_free_page(&page))
+ BUG();
+ free_xenballooned_pages(1, &page);
+ }
+ scsiback_deregister_configfs();
+ xenbus_unregister_driver(&scsiback_driver);
+ kmem_cache_destroy(scsiback_cachep);
+}
+
+module_init(scsiback_init);
+module_exit(scsiback_exit);
+
+MODULE_DESCRIPTION("Xen SCSI backend driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("xen-backend:vscsi");
+MODULE_AUTHOR("Juergen Gross <jgross@suse.com>");
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 439c9dca9eee..ca744102b666 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -259,7 +259,6 @@ static char *error_path(struct xenbus_device *dev)
static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
const char *fmt, va_list ap)
{
- int ret;
unsigned int len;
char *printf_buffer = NULL;
char *path_buffer = NULL;
@@ -270,9 +269,7 @@ static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
goto fail;
len = sprintf(printf_buffer, "%i ", -err);
- ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap);
-
- BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1);
+ vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap);
dev_err(&dev->dev, "%s\n", printf_buffer);
@@ -361,8 +358,8 @@ static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
* @ring_mfn: mfn of ring to grant
* Grant access to the given @ring_mfn to the peer of the given device. Return
- * 0 on success, or -errno on error. On error, the device will switch to
- * XenbusStateClosing, and the error will be saved in the store.
+ * a grant reference on success, or -errno on error. On error, the device will
+ * switch to XenbusStateClosing, and the error will be saved in the store.
*/
int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn)
{
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 3c0a74b3e9b1..564b31584860 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -297,9 +297,13 @@ void xenbus_dev_shutdown(struct device *_dev)
EXPORT_SYMBOL_GPL(xenbus_dev_shutdown);
int xenbus_register_driver_common(struct xenbus_driver *drv,
- struct xen_bus_type *bus)
+ struct xen_bus_type *bus,
+ struct module *owner, const char *mod_name)
{
+ drv->driver.name = drv->name ? drv->name : drv->ids[0].devicetype;
drv->driver.bus = &bus->bus;
+ drv->driver.owner = owner;
+ drv->driver.mod_name = mod_name;
return driver_register(&drv->driver);
}
diff --git a/drivers/xen/xenbus/xenbus_probe.h b/drivers/xen/xenbus/xenbus_probe.h
index 1085ec294a19..c9ec7ca1f7ab 100644
--- a/drivers/xen/xenbus/xenbus_probe.h
+++ b/drivers/xen/xenbus/xenbus_probe.h
@@ -60,7 +60,9 @@ extern int xenbus_match(struct device *_dev, struct device_driver *_drv);
extern int xenbus_dev_probe(struct device *_dev);
extern int xenbus_dev_remove(struct device *_dev);
extern int xenbus_register_driver_common(struct xenbus_driver *drv,
- struct xen_bus_type *bus);
+ struct xen_bus_type *bus,
+ struct module *owner,
+ const char *mod_name);
extern int xenbus_probe_node(struct xen_bus_type *bus,
const char *type,
const char *nodename);
diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c
index 5125dce11a60..04f7f85a5edf 100644
--- a/drivers/xen/xenbus/xenbus_probe_backend.c
+++ b/drivers/xen/xenbus/xenbus_probe_backend.c
@@ -234,13 +234,15 @@ int xenbus_dev_is_online(struct xenbus_device *dev)
}
EXPORT_SYMBOL_GPL(xenbus_dev_is_online);
-int xenbus_register_backend(struct xenbus_driver *drv)
+int __xenbus_register_backend(struct xenbus_driver *drv, struct module *owner,
+ const char *mod_name)
{
drv->read_otherend_details = read_frontend_details;
- return xenbus_register_driver_common(drv, &xenbus_backend);
+ return xenbus_register_driver_common(drv, &xenbus_backend,
+ owner, mod_name);
}
-EXPORT_SYMBOL_GPL(xenbus_register_backend);
+EXPORT_SYMBOL_GPL(__xenbus_register_backend);
static int backend_probe_and_watch(struct notifier_block *notifier,
unsigned long event,
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index cb385c10d2b1..bcb53bdc469c 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -317,13 +317,15 @@ static void wait_for_devices(struct xenbus_driver *xendrv)
print_device_status);
}
-int xenbus_register_frontend(struct xenbus_driver *drv)
+int __xenbus_register_frontend(struct xenbus_driver *drv, struct module *owner,
+ const char *mod_name)
{
int ret;
drv->read_otherend_details = read_backend_details;
- ret = xenbus_register_driver_common(drv, &xenbus_frontend);
+ ret = xenbus_register_driver_common(drv, &xenbus_frontend,
+ owner, mod_name);
if (ret)
return ret;
@@ -332,7 +334,7 @@ int xenbus_register_frontend(struct xenbus_driver *drv)
return 0;
}
-EXPORT_SYMBOL_GPL(xenbus_register_frontend);
+EXPORT_SYMBOL_GPL(__xenbus_register_frontend);
static DECLARE_WAIT_QUEUE_HEAD(backend_state_wq);
static int backend_state;
diff --git a/fs/9p/fid.c b/fs/9p/fid.c
index d51ec9fafcc8..47db55aee7f2 100644
--- a/fs/9p/fid.c
+++ b/fs/9p/fid.c
@@ -65,8 +65,8 @@ static struct p9_fid *v9fs_fid_find(struct dentry *dentry, kuid_t uid, int any)
{
struct p9_fid *fid, *ret;
- p9_debug(P9_DEBUG_VFS, " dentry: %s (%p) uid %d any %d\n",
- dentry->d_name.name, dentry, from_kuid(&init_user_ns, uid),
+ p9_debug(P9_DEBUG_VFS, " dentry: %pd (%p) uid %d any %d\n",
+ dentry, dentry, from_kuid(&init_user_ns, uid),
any);
ret = NULL;
/* we'll recheck under lock if there's anything to look in */
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index cc1cfae726b3..eb14e055ea83 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -266,8 +266,8 @@ v9fs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
* Now that we do caching with cache mode enabled, We need
* to support direct IO
*/
- p9_debug(P9_DEBUG_VFS, "v9fs_direct_IO: v9fs_direct_IO (%s) off/no(%lld/%lu) EINVAL\n",
- iocb->ki_filp->f_path.dentry->d_name.name,
+ p9_debug(P9_DEBUG_VFS, "v9fs_direct_IO: v9fs_direct_IO (%pD) off/no(%lld/%lu) EINVAL\n",
+ iocb->ki_filp,
(long long)pos, iter->nr_segs);
return -EINVAL;
diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c
index b03dd23feda8..a345b2d659cc 100644
--- a/fs/9p/vfs_dentry.c
+++ b/fs/9p/vfs_dentry.c
@@ -49,8 +49,8 @@
*/
static int v9fs_cached_dentry_delete(const struct dentry *dentry)
{
- p9_debug(P9_DEBUG_VFS, " dentry: %s (%p)\n",
- dentry->d_name.name, dentry);
+ p9_debug(P9_DEBUG_VFS, " dentry: %pd (%p)\n",
+ dentry, dentry);
/* Don't cache negative dentries */
if (!dentry->d_inode)
@@ -67,8 +67,8 @@ static int v9fs_cached_dentry_delete(const struct dentry *dentry)
static void v9fs_dentry_release(struct dentry *dentry)
{
struct hlist_node *p, *n;
- p9_debug(P9_DEBUG_VFS, " dentry: %s (%p)\n",
- dentry->d_name.name, dentry);
+ p9_debug(P9_DEBUG_VFS, " dentry: %pd (%p)\n",
+ dentry, dentry);
hlist_for_each_safe(p, n, (struct hlist_head *)&dentry->d_fsdata)
p9_client_clunk(hlist_entry(p, struct p9_fid, dlist));
dentry->d_fsdata = NULL;
diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c
index 0b3bfa303dda..4f1151088ebe 100644
--- a/fs/9p/vfs_dir.c
+++ b/fs/9p/vfs_dir.c
@@ -116,7 +116,7 @@ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx)
int reclen = 0;
struct p9_rdir *rdir;
- p9_debug(P9_DEBUG_VFS, "name %s\n", file->f_path.dentry->d_name.name);
+ p9_debug(P9_DEBUG_VFS, "name %pD\n", file);
fid = file->private_data;
buflen = fid->clnt->msize - P9_IOHDRSZ;
@@ -172,7 +172,7 @@ static int v9fs_dir_readdir_dotl(struct file *file, struct dir_context *ctx)
struct p9_rdir *rdir;
struct p9_dirent curdirent;
- p9_debug(P9_DEBUG_VFS, "name %s\n", file->f_path.dentry->d_name.name);
+ p9_debug(P9_DEBUG_VFS, "name %pD\n", file);
fid = file->private_data;
buflen = fid->clnt->msize - P9_READDIRHDRSZ;
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 520c11c2dcca..5594505e6e73 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -301,8 +301,8 @@ static int v9fs_file_lock_dotl(struct file *filp, int cmd, struct file_lock *fl)
struct inode *inode = file_inode(filp);
int ret = -ENOLCK;
- p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %s\n",
- filp, cmd, fl, filp->f_path.dentry->d_name.name);
+ p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n",
+ filp, cmd, fl, filp);
/* No mandatory locks */
if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
@@ -337,8 +337,8 @@ static int v9fs_file_flock_dotl(struct file *filp, int cmd,
struct inode *inode = file_inode(filp);
int ret = -ENOLCK;
- p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %s\n",
- filp, cmd, fl, filp->f_path.dentry->d_name.name);
+ p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n",
+ filp, cmd, fl, filp);
/* No mandatory locks */
if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 7fa4f7a7653d..296482fc77a9 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -648,7 +648,7 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir,
struct p9_fid *dfid, *ofid, *fid;
struct inode *inode;
- p9_debug(P9_DEBUG_VFS, "name %s\n", dentry->d_name.name);
+ p9_debug(P9_DEBUG_VFS, "name %pd\n", dentry);
err = 0;
ofid = NULL;
@@ -755,7 +755,7 @@ static int v9fs_vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
struct p9_fid *fid;
struct v9fs_session_info *v9ses;
- p9_debug(P9_DEBUG_VFS, "name %s\n", dentry->d_name.name);
+ p9_debug(P9_DEBUG_VFS, "name %pd\n", dentry);
err = 0;
v9ses = v9fs_inode2v9ses(dir);
perm = unixmode2p9mode(v9ses, mode | S_IFDIR);
@@ -791,8 +791,8 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
struct inode *inode;
char *name;
- p9_debug(P9_DEBUG_VFS, "dir: %p dentry: (%s) %p flags: %x\n",
- dir, dentry->d_name.name, dentry, flags);
+ p9_debug(P9_DEBUG_VFS, "dir: %p dentry: (%pd) %p flags: %x\n",
+ dir, dentry, dentry, flags);
if (dentry->d_name.len > NAME_MAX)
return ERR_PTR(-ENAMETOOLONG);
@@ -1239,7 +1239,7 @@ static int v9fs_readlink(struct dentry *dentry, char *buffer, int buflen)
struct p9_fid *fid;
struct p9_wstat *st;
- p9_debug(P9_DEBUG_VFS, " %s\n", dentry->d_name.name);
+ p9_debug(P9_DEBUG_VFS, " %pd\n", dentry);
retval = -EPERM;
v9ses = v9fs_dentry2v9ses(dentry);
fid = v9fs_fid_lookup(dentry);
@@ -1262,8 +1262,8 @@ static int v9fs_readlink(struct dentry *dentry, char *buffer, int buflen)
retval = min(strlen(st->extension)+1, (size_t)buflen);
memcpy(buffer, st->extension, retval);
- p9_debug(P9_DEBUG_VFS, "%s -> %s (%.*s)\n",
- dentry->d_name.name, st->extension, buflen, buffer);
+ p9_debug(P9_DEBUG_VFS, "%pd -> %s (%.*s)\n",
+ dentry, st->extension, buflen, buffer);
done:
p9stat_free(st);
@@ -1283,7 +1283,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
int len = 0;
char *link = __getname();
- p9_debug(P9_DEBUG_VFS, "%s\n", dentry->d_name.name);
+ p9_debug(P9_DEBUG_VFS, "%pd\n", dentry);
if (!link)
link = ERR_PTR(-ENOMEM);
@@ -1314,8 +1314,8 @@ v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
{
char *s = nd_get_link(nd);
- p9_debug(P9_DEBUG_VFS, " %s %s\n",
- dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
+ p9_debug(P9_DEBUG_VFS, " %pd %s\n",
+ dentry, IS_ERR(s) ? "<error>" : s);
if (!IS_ERR(s))
__putname(s);
}
@@ -1364,8 +1364,8 @@ static int v9fs_vfs_mkspecial(struct inode *dir, struct dentry *dentry,
static int
v9fs_vfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
- p9_debug(P9_DEBUG_VFS, " %lu,%s,%s\n",
- dir->i_ino, dentry->d_name.name, symname);
+ p9_debug(P9_DEBUG_VFS, " %lu,%pd,%s\n",
+ dir->i_ino, dentry, symname);
return v9fs_vfs_mkspecial(dir, dentry, P9_DMSYMLINK, symname);
}
@@ -1386,8 +1386,8 @@ v9fs_vfs_link(struct dentry *old_dentry, struct inode *dir,
char *name;
struct p9_fid *oldfid;
- p9_debug(P9_DEBUG_VFS, " %lu,%s,%s\n",
- dir->i_ino, dentry->d_name.name, old_dentry->d_name.name);
+ p9_debug(P9_DEBUG_VFS, " %lu,%pd,%pd\n",
+ dir->i_ino, dentry, old_dentry);
oldfid = v9fs_fid_clone(old_dentry);
if (IS_ERR(oldfid))
@@ -1428,8 +1428,8 @@ v9fs_vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rde
char *name;
u32 perm;
- p9_debug(P9_DEBUG_VFS, " %lu,%s mode: %hx MAJOR: %u MINOR: %u\n",
- dir->i_ino, dentry->d_name.name, mode,
+ p9_debug(P9_DEBUG_VFS, " %lu,%pd mode: %hx MAJOR: %u MINOR: %u\n",
+ dir->i_ino, dentry, mode,
MAJOR(rdev), MINOR(rdev));
if (!new_valid_dev(rdev))
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index 1fa85aae24df..02b64f4e576a 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -393,7 +393,7 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
struct dentry *dir_dentry;
struct posix_acl *dacl = NULL, *pacl = NULL;
- p9_debug(P9_DEBUG_VFS, "name %s\n", dentry->d_name.name);
+ p9_debug(P9_DEBUG_VFS, "name %pd\n", dentry);
err = 0;
v9ses = v9fs_inode2v9ses(dir);
@@ -767,8 +767,8 @@ v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
struct p9_fid *dfid, *oldfid;
struct v9fs_session_info *v9ses;
- p9_debug(P9_DEBUG_VFS, "dir ino: %lu, old_name: %s, new_name: %s\n",
- dir->i_ino, old_dentry->d_name.name, dentry->d_name.name);
+ p9_debug(P9_DEBUG_VFS, "dir ino: %lu, old_name: %pd, new_name: %pd\n",
+ dir->i_ino, old_dentry, dentry);
v9ses = v9fs_inode2v9ses(dir);
dir_dentry = dentry->d_parent;
@@ -917,7 +917,7 @@ v9fs_vfs_follow_link_dotl(struct dentry *dentry, struct nameidata *nd)
char *link = __getname();
char *target;
- p9_debug(P9_DEBUG_VFS, "%s\n", dentry->d_name.name);
+ p9_debug(P9_DEBUG_VFS, "%pd\n", dentry);
if (!link) {
link = ERR_PTR(-ENOMEM);
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 529300327f45..a1645b88fe8a 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -669,7 +669,6 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
out_valid:
dentry->d_fsdata = dir_version;
-out_skip:
dput(parent);
key_put(key);
_leave(" = 1 [valid]");
@@ -682,10 +681,6 @@ not_found:
spin_unlock(&dentry->d_lock);
out_bad:
- /* don't unhash if we have submounts */
- if (check_submounts_and_drop(dentry) != 0)
- goto out_skip;
-
_debug("dropping dentry %s/%s",
parent->d_name.name, dentry->d_name.name);
dput(parent);
diff --git a/fs/afs/vlocation.c b/fs/afs/vlocation.c
index b6df2e83809f..52976785a32c 100644
--- a/fs/afs/vlocation.c
+++ b/fs/afs/vlocation.c
@@ -130,7 +130,6 @@ static int afs_vlocation_access_vl_by_id(struct afs_vlocation *vl,
/* second+ BUSY - sleep a little bit */
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(1);
- __set_current_state(TASK_RUNNING);
}
continue;
}
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index a7be57e39be7..8fa3895cda02 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -255,12 +255,6 @@ static int autofs4_tree_busy(struct vfsmount *mnt,
struct autofs_info *ino = autofs4_dentry_ino(p);
unsigned int ino_count = atomic_read(&ino->count);
- /*
- * Clean stale dentries below that have not been
- * invalidated after a mount fail during lookup
- */
- d_invalidate(p);
-
/* allow for dget above and top is already dgot */
if (p == top)
ino_count += 2;
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
index ca0ba15a7306..929dec08c348 100644
--- a/fs/binfmt_aout.c
+++ b/fs/binfmt_aout.c
@@ -256,11 +256,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
(current->mm->start_brk = N_BSSADDR(ex));
retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
- if (retval < 0) {
- /* Someone check-me: is this error path enough? */
- send_sig(SIGKILL, current, 0);
+ if (retval < 0)
return retval;
- }
install_exec_creds(bprm);
@@ -278,17 +275,13 @@ static int load_aout_binary(struct linux_binprm * bprm)
map_size = ex.a_text+ex.a_data;
#endif
error = vm_brk(text_addr & PAGE_MASK, map_size);
- if (error != (text_addr & PAGE_MASK)) {
- send_sig(SIGKILL, current, 0);
+ if (error != (text_addr & PAGE_MASK))
return error;
- }
error = read_code(bprm->file, text_addr, pos,
ex.a_text+ex.a_data);
- if ((signed long)error < 0) {
- send_sig(SIGKILL, current, 0);
+ if ((signed long)error < 0)
return error;
- }
} else {
if ((ex.a_text & 0xfff || ex.a_data & 0xfff) &&
(N_MAGIC(ex) != NMAGIC) && printk_ratelimit())
@@ -315,28 +308,22 @@ static int load_aout_binary(struct linux_binprm * bprm)
MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
fd_offset);
- if (error != N_TXTADDR(ex)) {
- send_sig(SIGKILL, current, 0);
+ if (error != N_TXTADDR(ex))
return error;
- }
error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
fd_offset + ex.a_text);
- if (error != N_DATADDR(ex)) {
- send_sig(SIGKILL, current, 0);
+ if (error != N_DATADDR(ex))
return error;
- }
}
beyond_if:
set_binfmt(&aout_format);
retval = set_brk(current->mm->start_brk, current->mm->brk);
- if (retval < 0) {
- send_sig(SIGKILL, current, 0);
+ if (retval < 0)
return retval;
- }
current->mm->start_stack =
(unsigned long) create_aout_tables((char __user *) bprm->p, bprm);
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 3892c1a23241..d8fc0605b9d2 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -738,10 +738,8 @@ static int load_elf_binary(struct linux_binprm *bprm)
change some of these later */
retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
executable_stack);
- if (retval < 0) {
- send_sig(SIGKILL, current, 0);
+ if (retval < 0)
goto out_free_dentry;
- }
current->mm->start_stack = bprm->p;
@@ -763,10 +761,8 @@ static int load_elf_binary(struct linux_binprm *bprm)
and clear the area. */
retval = set_brk(elf_bss + load_bias,
elf_brk + load_bias);
- if (retval) {
- send_sig(SIGKILL, current, 0);
+ if (retval)
goto out_free_dentry;
- }
nbyte = ELF_PAGEOFFSET(elf_bss);
if (nbyte) {
nbyte = ELF_MIN_ALIGN - nbyte;
@@ -820,7 +816,6 @@ static int load_elf_binary(struct linux_binprm *bprm)
error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
elf_prot, elf_flags, 0);
if (BAD_ADDR(error)) {
- send_sig(SIGKILL, current, 0);
retval = IS_ERR((void *)error) ?
PTR_ERR((void*)error) : -EINVAL;
goto out_free_dentry;
@@ -851,7 +846,6 @@ static int load_elf_binary(struct linux_binprm *bprm)
elf_ppnt->p_memsz > TASK_SIZE ||
TASK_SIZE - elf_ppnt->p_memsz < k) {
/* set_brk can never work. Avoid overflows. */
- send_sig(SIGKILL, current, 0);
retval = -EINVAL;
goto out_free_dentry;
}
@@ -883,12 +877,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
* up getting placed where the bss needs to go.
*/
retval = set_brk(elf_bss, elf_brk);
- if (retval) {
- send_sig(SIGKILL, current, 0);
+ if (retval)
goto out_free_dentry;
- }
if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
- send_sig(SIGSEGV, current, 0);
retval = -EFAULT; /* Nobody gets to see this, but.. */
goto out_free_dentry;
}
@@ -909,7 +900,6 @@ static int load_elf_binary(struct linux_binprm *bprm)
elf_entry += loc->interp_elf_ex.e_entry;
}
if (BAD_ADDR(elf_entry)) {
- force_sig(SIGSEGV, current);
retval = IS_ERR((void *)elf_entry) ?
(int)elf_entry : -EINVAL;
goto out_free_dentry;
@@ -922,7 +912,6 @@ static int load_elf_binary(struct linux_binprm *bprm)
} else {
elf_entry = loc->elf_ex.e_entry;
if (BAD_ADDR(elf_entry)) {
- force_sig(SIGSEGV, current);
retval = -EINVAL;
goto out_free_dentry;
}
@@ -934,19 +923,15 @@ static int load_elf_binary(struct linux_binprm *bprm)
#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
retval = arch_setup_additional_pages(bprm, !!elf_interpreter);
- if (retval < 0) {
- send_sig(SIGKILL, current, 0);
+ if (retval < 0)
goto out;
- }
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
install_exec_creds(bprm);
retval = create_elf_tables(bprm, &loc->elf_ex,
load_addr, interp_load_addr);
- if (retval < 0) {
- send_sig(SIGKILL, current, 0);
+ if (retval < 0)
goto out;
- }
/* N.B. passed_fileno might not be initialized? */
current->mm->end_code = end_code;
current->mm->start_code = start_code;
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index fe2a643ee005..d3634bfb7fe1 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -317,8 +317,8 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm)
goto error;
/* there's now no turning back... the old userspace image is dead,
- * defunct, deceased, etc. after this point we have to exit via
- * error_kill */
+ * defunct, deceased, etc.
+ */
set_personality(PER_LINUX_FDPIC);
if (elf_read_implies_exec(&exec_params.hdr, executable_stack))
current->personality |= READ_IMPLIES_EXEC;
@@ -343,24 +343,22 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm)
retval = setup_arg_pages(bprm, current->mm->start_stack,
executable_stack);
- if (retval < 0) {
- send_sig(SIGKILL, current, 0);
- goto error_kill;
- }
+ if (retval < 0)
+ goto error;
#endif
/* load the executable and interpreter into memory */
retval = elf_fdpic_map_file(&exec_params, bprm->file, current->mm,
"executable");
if (retval < 0)
- goto error_kill;
+ goto error;
if (interpreter_name) {
retval = elf_fdpic_map_file(&interp_params, interpreter,
current->mm, "interpreter");
if (retval < 0) {
printk(KERN_ERR "Unable to load interpreter\n");
- goto error_kill;
+ goto error;
}
allow_write_access(interpreter);
@@ -397,7 +395,7 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm)
if (IS_ERR_VALUE(current->mm->start_brk)) {
retval = current->mm->start_brk;
current->mm->start_brk = 0;
- goto error_kill;
+ goto error;
}
current->mm->brk = current->mm->start_brk;
@@ -410,7 +408,7 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm)
install_exec_creds(bprm);
if (create_elf_fdpic_tables(bprm, current->mm,
&exec_params, &interp_params) < 0)
- goto error_kill;
+ goto error;
kdebug("- start_code %lx", current->mm->start_code);
kdebug("- end_code %lx", current->mm->end_code);
@@ -449,12 +447,6 @@ error:
kfree(interp_params.phdrs);
kfree(interp_params.loadmap);
return retval;
-
- /* unrecoverable error - kill the process */
-error_kill:
- send_sig(SIGSEGV, current, 0);
- goto error;
-
}
/*****************************************************************************/
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index fbd76ded9a34..4dabeb893b7c 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -74,6 +74,7 @@ BTRFS_WORK_HELPER(endio_helper);
BTRFS_WORK_HELPER(endio_meta_helper);
BTRFS_WORK_HELPER(endio_meta_write_helper);
BTRFS_WORK_HELPER(endio_raid56_helper);
+BTRFS_WORK_HELPER(endio_repair_helper);
BTRFS_WORK_HELPER(rmw_helper);
BTRFS_WORK_HELPER(endio_write_helper);
BTRFS_WORK_HELPER(freespace_write_helper);
@@ -91,7 +92,7 @@ __btrfs_alloc_workqueue(const char *name, int flags, int max_active,
{
struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);
- if (unlikely(!ret))
+ if (!ret)
return NULL;
ret->max_active = max_active;
@@ -115,7 +116,7 @@ __btrfs_alloc_workqueue(const char *name, int flags, int max_active,
ret->normal_wq = alloc_workqueue("%s-%s", flags,
ret->max_active, "btrfs",
name);
- if (unlikely(!ret->normal_wq)) {
+ if (!ret->normal_wq) {
kfree(ret);
return NULL;
}
@@ -137,12 +138,12 @@ struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
{
struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);
- if (unlikely(!ret))
+ if (!ret)
return NULL;
ret->normal = __btrfs_alloc_workqueue(name, flags & ~WQ_HIGHPRI,
max_active, thresh);
- if (unlikely(!ret->normal)) {
+ if (!ret->normal) {
kfree(ret);
return NULL;
}
@@ -150,7 +151,7 @@ struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
if (flags & WQ_HIGHPRI) {
ret->high = __btrfs_alloc_workqueue(name, flags, max_active,
thresh);
- if (unlikely(!ret->high)) {
+ if (!ret->high) {
__btrfs_destroy_workqueue(ret->normal);
kfree(ret);
return NULL;
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
index e9e31c94758f..e386c29ef1f6 100644
--- a/fs/btrfs/async-thread.h
+++ b/fs/btrfs/async-thread.h
@@ -53,6 +53,7 @@ BTRFS_WORK_HELPER_PROTO(endio_helper);
BTRFS_WORK_HELPER_PROTO(endio_meta_helper);
BTRFS_WORK_HELPER_PROTO(endio_meta_write_helper);
BTRFS_WORK_HELPER_PROTO(endio_raid56_helper);
+BTRFS_WORK_HELPER_PROTO(endio_repair_helper);
BTRFS_WORK_HELPER_PROTO(rmw_helper);
BTRFS_WORK_HELPER_PROTO(endio_write_helper);
BTRFS_WORK_HELPER_PROTO(freespace_write_helper);
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 54a201dac7f9..2d3e32ebfd15 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -25,6 +25,9 @@
#include "delayed-ref.h"
#include "locking.h"
+/* Arbitrary non-errno value used to signal that a shared backref was found */
+#define BACKREF_FOUND_SHARED 6
+
struct extent_inode_elem {
u64 inum;
u64 offset;
@@ -377,7 +380,8 @@ out:
static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
struct btrfs_path *path, u64 time_seq,
struct list_head *head,
- const u64 *extent_item_pos, u64 total_refs)
+ const u64 *extent_item_pos, u64 total_refs,
+ u64 root_objectid)
{
int err;
int ret = 0;
@@ -402,6 +406,10 @@ static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
continue;
if (ref->count == 0)
continue;
+ if (root_objectid && ref->root_id != root_objectid) {
+ ret = BACKREF_FOUND_SHARED;
+ goto out;
+ }
err = __resolve_indirect_ref(fs_info, path, time_seq, ref,
parents, extent_item_pos,
total_refs);
@@ -482,7 +490,7 @@ static int __add_missing_keys(struct btrfs_fs_info *fs_info,
continue;
BUG_ON(!ref->wanted_disk_byte);
eb = read_tree_block(fs_info->tree_root, ref->wanted_disk_byte,
- fs_info->tree_root->leafsize, 0);
+ 0);
if (!eb || !extent_buffer_uptodate(eb)) {
free_extent_buffer(eb);
return -EIO;
@@ -561,7 +569,8 @@ static void __merge_refs(struct list_head *head, int mode)
* smaller or equal that seq to the list
*/
static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
- struct list_head *prefs, u64 *total_refs)
+ struct list_head *prefs, u64 *total_refs,
+ u64 inum)
{
struct btrfs_delayed_extent_op *extent_op = head->extent_op;
struct rb_node *n = &head->node.rb_node;
@@ -625,6 +634,16 @@ static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
key.objectid = ref->objectid;
key.type = BTRFS_EXTENT_DATA_KEY;
key.offset = ref->offset;
+
+ /*
+			 * Found an inum that doesn't match our known inum, so
+			 * we know the extent is shared.
+ */
+ if (inum && ref->objectid != inum) {
+ ret = BACKREF_FOUND_SHARED;
+ break;
+ }
+
ret = __add_prelim_ref(prefs, ref->root, &key, 0, 0,
node->bytenr,
node->ref_mod * sgn, GFP_ATOMIC);
@@ -659,7 +678,7 @@ static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
static int __add_inline_refs(struct btrfs_fs_info *fs_info,
struct btrfs_path *path, u64 bytenr,
int *info_level, struct list_head *prefs,
- u64 *total_refs)
+ u64 *total_refs, u64 inum)
{
int ret = 0;
int slot;
@@ -744,6 +763,12 @@ static int __add_inline_refs(struct btrfs_fs_info *fs_info,
dref);
key.type = BTRFS_EXTENT_DATA_KEY;
key.offset = btrfs_extent_data_ref_offset(leaf, dref);
+
+ if (inum && key.objectid != inum) {
+ ret = BACKREF_FOUND_SHARED;
+ break;
+ }
+
root = btrfs_extent_data_ref_root(leaf, dref);
ret = __add_prelim_ref(prefs, root, &key, 0, 0,
bytenr, count, GFP_NOFS);
@@ -765,7 +790,7 @@ static int __add_inline_refs(struct btrfs_fs_info *fs_info,
*/
static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
struct btrfs_path *path, u64 bytenr,
- int info_level, struct list_head *prefs)
+ int info_level, struct list_head *prefs, u64 inum)
{
struct btrfs_root *extent_root = fs_info->extent_root;
int ret;
@@ -827,6 +852,12 @@ static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
dref);
key.type = BTRFS_EXTENT_DATA_KEY;
key.offset = btrfs_extent_data_ref_offset(leaf, dref);
+
+ if (inum && key.objectid != inum) {
+ ret = BACKREF_FOUND_SHARED;
+ break;
+ }
+
root = btrfs_extent_data_ref_root(leaf, dref);
ret = __add_prelim_ref(prefs, root, &key, 0, 0,
bytenr, count, GFP_NOFS);
@@ -854,7 +885,8 @@ static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
static int find_parent_nodes(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 bytenr,
u64 time_seq, struct ulist *refs,
- struct ulist *roots, const u64 *extent_item_pos)
+ struct ulist *roots, const u64 *extent_item_pos,
+ u64 root_objectid, u64 inum)
{
struct btrfs_key key;
struct btrfs_path *path;
@@ -929,7 +961,8 @@ again:
}
spin_unlock(&delayed_refs->lock);
ret = __add_delayed_refs(head, time_seq,
- &prefs_delayed, &total_refs);
+ &prefs_delayed, &total_refs,
+ inum);
mutex_unlock(&head->mutex);
if (ret)
goto out;
@@ -951,11 +984,11 @@ again:
key.type == BTRFS_METADATA_ITEM_KEY)) {
ret = __add_inline_refs(fs_info, path, bytenr,
&info_level, &prefs,
- &total_refs);
+ &total_refs, inum);
if (ret)
goto out;
ret = __add_keyed_refs(fs_info, path, bytenr,
- info_level, &prefs);
+ info_level, &prefs, inum);
if (ret)
goto out;
}
@@ -971,7 +1004,8 @@ again:
__merge_refs(&prefs, 1);
ret = __resolve_indirect_refs(fs_info, path, time_seq, &prefs,
- extent_item_pos, total_refs);
+ extent_item_pos, total_refs,
+ root_objectid);
if (ret)
goto out;
@@ -981,6 +1015,11 @@ again:
ref = list_first_entry(&prefs, struct __prelim_ref, list);
WARN_ON(ref->count < 0);
if (roots && ref->count && ref->root_id && ref->parent == 0) {
+ if (root_objectid && ref->root_id != root_objectid) {
+ ret = BACKREF_FOUND_SHARED;
+ goto out;
+ }
+
/* no parent == root of tree */
ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
if (ret < 0)
@@ -989,12 +1028,10 @@ again:
if (ref->count && ref->parent) {
if (extent_item_pos && !ref->inode_list &&
ref->level == 0) {
- u32 bsz;
struct extent_buffer *eb;
- bsz = btrfs_level_size(fs_info->extent_root,
- ref->level);
+
eb = read_tree_block(fs_info->extent_root,
- ref->parent, bsz, 0);
+ ref->parent, 0);
if (!eb || !extent_buffer_uptodate(eb)) {
free_extent_buffer(eb);
ret = -EIO;
@@ -1087,7 +1124,7 @@ static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
return -ENOMEM;
ret = find_parent_nodes(trans, fs_info, bytenr,
- time_seq, *leafs, NULL, extent_item_pos);
+ time_seq, *leafs, NULL, extent_item_pos, 0, 0);
if (ret < 0 && ret != -ENOENT) {
free_leaf_list(*leafs);
return ret;
@@ -1130,7 +1167,7 @@ static int __btrfs_find_all_roots(struct btrfs_trans_handle *trans,
ULIST_ITER_INIT(&uiter);
while (1) {
ret = find_parent_nodes(trans, fs_info, bytenr,
- time_seq, tmp, *roots, NULL);
+ time_seq, tmp, *roots, NULL, 0, 0);
if (ret < 0 && ret != -ENOENT) {
ulist_free(tmp);
ulist_free(*roots);
@@ -1161,6 +1198,54 @@ int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
return ret;
}
+int btrfs_check_shared(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info, u64 root_objectid,
+ u64 inum, u64 bytenr)
+{
+ struct ulist *tmp = NULL;
+ struct ulist *roots = NULL;
+ struct ulist_iterator uiter;
+ struct ulist_node *node;
+ struct seq_list elem = {};
+ int ret = 0;
+
+ tmp = ulist_alloc(GFP_NOFS);
+ roots = ulist_alloc(GFP_NOFS);
+ if (!tmp || !roots) {
+ ulist_free(tmp);
+ ulist_free(roots);
+ return -ENOMEM;
+ }
+
+ if (trans)
+ btrfs_get_tree_mod_seq(fs_info, &elem);
+ else
+ down_read(&fs_info->commit_root_sem);
+ ULIST_ITER_INIT(&uiter);
+ while (1) {
+ ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
+ roots, NULL, root_objectid, inum);
+ if (ret == BACKREF_FOUND_SHARED) {
+ ret = 1;
+ break;
+ }
+ if (ret < 0 && ret != -ENOENT)
+ break;
+ node = ulist_next(tmp, &uiter);
+ if (!node)
+ break;
+ bytenr = node->val;
+ cond_resched();
+ }
+ if (trans)
+ btrfs_put_tree_mod_seq(fs_info, &elem);
+ else
+ up_read(&fs_info->commit_root_sem);
+ ulist_free(tmp);
+ ulist_free(roots);
+ return ret;
+}
+
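A hedged usage sketch (hypothetical caller, not part of this patch): a fiemap-style path that wants to know whether a file extent is shared with another inode or snapshot could wrap the new helper as below, treating 1 as "shared", 0 as "exclusive" and negative values as errors.

	static int extent_is_shared(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root, u64 inum,
				    u64 disk_bytenr)
	{
		return btrfs_check_shared(trans, root->fs_info,
					  root->root_key.objectid,
					  inum, disk_bytenr);
	}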
/*
* this makes the path point to (inum INODE_ITEM ioff)
*/
@@ -1193,7 +1278,7 @@ int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
unsigned long ptr;
key.objectid = inode_objectid;
- btrfs_set_key_type(&key, BTRFS_INODE_EXTREF_KEY);
+ key.type = BTRFS_INODE_EXTREF_KEY;
key.offset = start_off;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
@@ -1233,7 +1318,7 @@ int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
ret = -ENOENT;
if (found_key.objectid != inode_objectid)
break;
- if (btrfs_key_type(&found_key) != BTRFS_INODE_EXTREF_KEY)
+ if (found_key.type != BTRFS_INODE_EXTREF_KEY)
break;
ret = 0;
@@ -1366,7 +1451,7 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
}
btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
if (found_key->type == BTRFS_METADATA_ITEM_KEY)
- size = fs_info->extent_root->leafsize;
+ size = fs_info->extent_root->nodesize;
else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
size = found_key->offset;
diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
index 86fc20fec282..2a1ac6bfc724 100644
--- a/fs/btrfs/backref.h
+++ b/fs/btrfs/backref.h
@@ -71,6 +71,9 @@ int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
u64 start_off, struct btrfs_path *path,
struct btrfs_inode_extref **ret_extref,
u64 *found_off);
+int btrfs_check_shared(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info, u64 root_objectid,
+ u64 inum, u64 bytenr);
int __init btrfs_prelim_ref_init(void);
void btrfs_prelim_ref_exit(void);
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 56b8522d5767..4aadadcfab20 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -44,6 +44,17 @@
#define BTRFS_INODE_IN_DELALLOC_LIST 9
#define BTRFS_INODE_READDIO_NEED_LOCK 10
#define BTRFS_INODE_HAS_PROPS 11
+/*
+ * The following 3 bits are meant only for the btree inode.
+ * When any of them is set, it means an error happened while writing an
+ * extent buffer belonging to:
+ * 1) a non-log btree
+ * 2) a log btree, during the first log sub-transaction
+ * 3) a log btree, during the second log sub-transaction
+ */
+#define BTRFS_INODE_BTREE_ERR 12
+#define BTRFS_INODE_BTREE_LOG1_ERR 13
+#define BTRFS_INODE_BTREE_LOG2_ERR 14
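A rough sketch of how a sync path might consult these bits; the helper name, the which selector and the use of runtime_flags as the backing field are assumptions for illustration, not taken from this hunk:

	/* which: 0 = non-log btree, 1/2 = first/second log sub-transaction */
	static bool btree_write_error_pending(struct btrfs_inode *btree_inode,
					      int which)
	{
		int bit;

		switch (which) {
		case 1:
			bit = BTRFS_INODE_BTREE_LOG1_ERR;
			break;
		case 2:
			bit = BTRFS_INODE_BTREE_LOG2_ERR;
			break;
		default:
			bit = BTRFS_INODE_BTREE_ERR;
			break;
		}
		return test_and_clear_bit(bit, &btree_inode->runtime_flags);
	}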
/* in memory btrfs inode */
struct btrfs_inode {
@@ -121,6 +132,12 @@ struct btrfs_inode {
u64 delalloc_bytes;
/*
+ * total number of bytes pending defrag, used by stat to check whether
+ * it needs COW.
+ */
+ u64 defrag_bytes;
+
+ /*
* the size of the file stored in the metadata on disk. data=ordered
* means the in-memory i_size might be larger than the size on disk
* because not all the blocks are written yet.
@@ -248,8 +265,11 @@ static inline int btrfs_inode_in_log(struct inode *inode, u64 generation)
return 0;
}
+#define BTRFS_DIO_ORIG_BIO_SUBMITTED 0x1
+
struct btrfs_dio_private {
struct inode *inode;
+ unsigned long flags;
u64 logical_offset;
u64 disk_bytenr;
u64 bytes;
@@ -266,7 +286,12 @@ struct btrfs_dio_private {
/* dio_bio came from fs/direct-io.c */
struct bio *dio_bio;
- u8 csum[0];
+
+ /*
+ * The original bio may be split into several sub-bios; this is
+ * done during endio of the sub-bios
+ */
+ int (*subio_endio)(struct inode *, struct btrfs_io_bio *, int);
};
/*
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index ce92ae30250f..cb7f3fe9c9f6 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -807,7 +807,7 @@ static int btrfsic_process_superblock_dev_mirror(
/* super block bytenr is always the unmapped device bytenr */
dev_bytenr = btrfs_sb_offset(superblock_mirror_num);
- if (dev_bytenr + BTRFS_SUPER_INFO_SIZE > device->total_bytes)
+ if (dev_bytenr + BTRFS_SUPER_INFO_SIZE > device->commit_total_bytes)
return -1;
bh = __bread(superblock_bdev, dev_bytenr / 4096,
BTRFS_SUPER_INFO_SIZE);
@@ -820,7 +820,6 @@ static int btrfsic_process_superblock_dev_mirror(
btrfs_super_magic(super_tmp) != BTRFS_MAGIC ||
memcmp(device->uuid, super_tmp->dev_item.uuid, BTRFS_UUID_SIZE) ||
btrfs_super_nodesize(super_tmp) != state->metablock_size ||
- btrfs_super_leafsize(super_tmp) != state->metablock_size ||
btrfs_super_sectorsize(super_tmp) != state->datablock_size) {
brelse(bh);
return 0;
@@ -1252,8 +1251,7 @@ static void btrfsic_read_from_block_data(
while (len > 0) {
cur = min(len, ((size_t)PAGE_CACHE_SIZE - offset_in_page));
- BUG_ON(i >= (block_ctx->len + PAGE_CACHE_SIZE - 1) >>
- PAGE_CACHE_SHIFT);
+ BUG_ON(i >= DIV_ROUND_UP(block_ctx->len, PAGE_CACHE_SIZE));
kaddr = block_ctx->datav[i];
memcpy(dst, kaddr + offset_in_page, cur);
@@ -3120,24 +3118,12 @@ int btrfsic_mount(struct btrfs_root *root,
struct list_head *dev_head = &fs_devices->devices;
struct btrfs_device *device;
- if (root->nodesize != root->leafsize) {
- printk(KERN_INFO
- "btrfsic: cannot handle nodesize %d != leafsize %d!\n",
- root->nodesize, root->leafsize);
- return -1;
- }
if (root->nodesize & ((u64)PAGE_CACHE_SIZE - 1)) {
printk(KERN_INFO
"btrfsic: cannot handle nodesize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n",
root->nodesize, PAGE_CACHE_SIZE);
return -1;
}
- if (root->leafsize & ((u64)PAGE_CACHE_SIZE - 1)) {
- printk(KERN_INFO
- "btrfsic: cannot handle leafsize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n",
- root->leafsize, PAGE_CACHE_SIZE);
- return -1;
- }
if (root->sectorsize & ((u64)PAGE_CACHE_SIZE - 1)) {
printk(KERN_INFO
"btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n",
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 1daea0b47187..d3220d31d3cb 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -91,8 +91,7 @@ static inline int compressed_bio_size(struct btrfs_root *root,
u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
return sizeof(struct compressed_bio) +
- ((disk_size + root->sectorsize - 1) / root->sectorsize) *
- csum_size;
+ (DIV_ROUND_UP(disk_size, root->sectorsize)) * csum_size;
}
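A quick worked check of the rounding conversions in this file (illustrative numbers only):

	/*
	 * With sectorsize = 4096 and disk_size = 10000:
	 *   (10000 + 4096 - 1) / 4096 = 14095 / 4096 = 3
	 *   DIV_ROUND_UP(10000, 4096)               = 3
	 * Three checksum slots are reserved either way; DIV_ROUND_UP just
	 * states the intent directly.
	 */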
static struct bio *compressed_bio_alloc(struct block_device *bdev,
@@ -389,7 +388,8 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
* freed before we're done setting it up
*/
atomic_inc(&cb->pending_bios);
- ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
+ ret = btrfs_bio_wq_end_io(root->fs_info, bio,
+ BTRFS_WQ_ENDIO_DATA);
BUG_ON(ret); /* -ENOMEM */
if (!skip_sum) {
@@ -420,7 +420,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
}
bio_get(bio);
- ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
+ ret = btrfs_bio_wq_end_io(root->fs_info, bio, BTRFS_WQ_ENDIO_DATA);
BUG_ON(ret); /* -ENOMEM */
if (!skip_sum) {
@@ -615,8 +615,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
cb->compress_type = extent_compress_type(bio_flags);
cb->orig_bio = bio;
- nr_pages = (compressed_len + PAGE_CACHE_SIZE - 1) /
- PAGE_CACHE_SIZE;
+ nr_pages = DIV_ROUND_UP(compressed_len, PAGE_CACHE_SIZE);
cb->compressed_pages = kzalloc(sizeof(struct page *) * nr_pages,
GFP_NOFS);
if (!cb->compressed_pages)
@@ -670,7 +669,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
PAGE_CACHE_SIZE) {
bio_get(comp_bio);
- ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
+ ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio,
+ BTRFS_WQ_ENDIO_DATA);
BUG_ON(ret); /* -ENOMEM */
/*
@@ -686,8 +686,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
comp_bio, sums);
BUG_ON(ret); /* -ENOMEM */
}
- sums += (comp_bio->bi_iter.bi_size +
- root->sectorsize - 1) / root->sectorsize;
+ sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
+ root->sectorsize);
ret = btrfs_map_bio(root, READ, comp_bio,
mirror_num, 0);
@@ -708,7 +708,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
}
bio_get(comp_bio);
- ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
+ ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio,
+ BTRFS_WQ_ENDIO_DATA);
BUG_ON(ret); /* -ENOMEM */
if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 44ee5d2e52a4..19bc6162fb8e 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -258,9 +258,8 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
else
btrfs_node_key(buf, &disk_key, 0);
- cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
- new_root_objectid, &disk_key, level,
- buf->start, 0);
+ cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
+ &disk_key, level, buf->start, 0);
if (IS_ERR(cow))
return PTR_ERR(cow);
@@ -1133,9 +1132,9 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
} else
parent_start = 0;
- cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
- root->root_key.objectid, &disk_key,
- level, search_start, empty_size);
+ cow = btrfs_alloc_tree_block(trans, root, parent_start,
+ root->root_key.objectid, &disk_key, level,
+ search_start, empty_size);
if (IS_ERR(cow))
return PTR_ERR(cow);
@@ -1425,7 +1424,6 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
struct tree_mod_root *old_root = NULL;
u64 old_generation = 0;
u64 logical;
- u32 blocksize;
eb_root = btrfs_read_lock_root_node(root);
tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
@@ -1444,8 +1442,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
btrfs_tree_read_unlock(eb_root);
free_extent_buffer(eb_root);
- blocksize = btrfs_level_size(root, old_root->level);
- old = read_tree_block(root, logical, blocksize, 0);
+ old = read_tree_block(root, logical, 0);
if (WARN_ON(!old || !extent_buffer_uptodate(old))) {
free_extent_buffer(old);
btrfs_warn(root->fs_info,
@@ -1506,10 +1503,9 @@ static inline int should_cow_block(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *buf)
{
-#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
- if (unlikely(test_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state)))
+ if (btrfs_test_is_dummy_root(root))
return 0;
-#endif
+
/* ensure we can see the force_cow */
smp_rmb();
@@ -1651,7 +1647,7 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
WARN_ON(trans->transid != root->fs_info->generation);
parent_nritems = btrfs_header_nritems(parent);
- blocksize = btrfs_level_size(root, parent_level - 1);
+ blocksize = root->nodesize;
end_slot = parent_nritems;
if (parent_nritems == 1)
@@ -1685,15 +1681,14 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
continue;
}
- cur = btrfs_find_tree_block(root, blocknr, blocksize);
+ cur = btrfs_find_tree_block(root, blocknr);
if (cur)
uptodate = btrfs_buffer_uptodate(cur, gen, 0);
else
uptodate = 0;
if (!cur || !uptodate) {
if (!cur) {
- cur = read_tree_block(root, blocknr,
- blocksize, gen);
+ cur = read_tree_block(root, blocknr, gen);
if (!cur || !extent_buffer_uptodate(cur)) {
free_extent_buffer(cur);
return -EIO;
@@ -1872,7 +1867,6 @@ static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
BUG_ON(level == 0);
eb = read_tree_block(root, btrfs_node_blockptr(parent, slot),
- btrfs_level_size(root, level - 1),
btrfs_node_ptr_generation(parent, slot));
if (eb && !extent_buffer_uptodate(eb)) {
free_extent_buffer(eb);
@@ -2267,8 +2261,8 @@ static void reada_for_search(struct btrfs_root *root,
node = path->nodes[level];
search = btrfs_node_blockptr(node, slot);
- blocksize = btrfs_level_size(root, level - 1);
- eb = btrfs_find_tree_block(root, search, blocksize);
+ blocksize = root->nodesize;
+ eb = btrfs_find_tree_block(root, search);
if (eb) {
free_extent_buffer(eb);
return;
@@ -2298,7 +2292,7 @@ static void reada_for_search(struct btrfs_root *root,
if ((search <= target && target - search <= 65536) ||
(search > target && search - target <= 65536)) {
gen = btrfs_node_ptr_generation(node, nr);
- readahead_tree_block(root, search, blocksize, gen);
+ readahead_tree_block(root, search, blocksize);
nread += blocksize;
}
nscan++;
@@ -2325,12 +2319,12 @@ static noinline void reada_for_balance(struct btrfs_root *root,
nritems = btrfs_header_nritems(parent);
slot = path->slots[level + 1];
- blocksize = btrfs_level_size(root, level);
+ blocksize = root->nodesize;
if (slot > 0) {
block1 = btrfs_node_blockptr(parent, slot - 1);
gen = btrfs_node_ptr_generation(parent, slot - 1);
- eb = btrfs_find_tree_block(root, block1, blocksize);
+ eb = btrfs_find_tree_block(root, block1);
/*
* if we get -eagain from btrfs_buffer_uptodate, we
* don't want to return eagain here. That will loop
@@ -2343,16 +2337,16 @@ static noinline void reada_for_balance(struct btrfs_root *root,
if (slot + 1 < nritems) {
block2 = btrfs_node_blockptr(parent, slot + 1);
gen = btrfs_node_ptr_generation(parent, slot + 1);
- eb = btrfs_find_tree_block(root, block2, blocksize);
+ eb = btrfs_find_tree_block(root, block2);
if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
block2 = 0;
free_extent_buffer(eb);
}
if (block1)
- readahead_tree_block(root, block1, blocksize, 0);
+ readahead_tree_block(root, block1, blocksize);
if (block2)
- readahead_tree_block(root, block2, blocksize, 0);
+ readahead_tree_block(root, block2, blocksize);
}
@@ -2454,16 +2448,14 @@ read_block_for_search(struct btrfs_trans_handle *trans,
{
u64 blocknr;
u64 gen;
- u32 blocksize;
struct extent_buffer *b = *eb_ret;
struct extent_buffer *tmp;
int ret;
blocknr = btrfs_node_blockptr(b, slot);
gen = btrfs_node_ptr_generation(b, slot);
- blocksize = btrfs_level_size(root, level - 1);
- tmp = btrfs_find_tree_block(root, blocknr, blocksize);
+ tmp = btrfs_find_tree_block(root, blocknr);
if (tmp) {
/* first we do an atomic uptodate check */
if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
@@ -2507,7 +2499,7 @@ read_block_for_search(struct btrfs_trans_handle *trans,
btrfs_release_path(p);
ret = -EAGAIN;
- tmp = read_tree_block(root, blocknr, blocksize, 0);
+ tmp = read_tree_block(root, blocknr, 0);
if (tmp) {
/*
* If the read above didn't mark this buffer up to date,
@@ -2792,8 +2784,6 @@ again:
if (!should_cow_block(trans, root, b))
goto cow_done;
- btrfs_set_path_blocking(p);
-
/*
* must have write locks on this node and the
* parent
@@ -2807,6 +2797,7 @@ again:
goto again;
}
+ btrfs_set_path_blocking(p);
err = btrfs_cow_block(trans, root, b,
p->nodes[level + 1],
p->slots[level + 1], &b);
@@ -3362,9 +3353,8 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
else
btrfs_node_key(lower, &lower_key, 0);
- c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
- root->root_key.objectid, &lower_key,
- level, root->node->start, 0);
+ c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
+ &lower_key, level, root->node->start, 0);
if (IS_ERR(c))
return PTR_ERR(c);
@@ -3502,9 +3492,8 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
mid = (c_nritems + 1) / 2;
btrfs_node_key(c, &disk_key, mid);
- split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
- root->root_key.objectid,
- &disk_key, level, c->start, 0);
+ split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
+ &disk_key, level, c->start, 0);
if (IS_ERR(split))
return PTR_ERR(split);
@@ -4282,13 +4271,12 @@ again:
else
btrfs_item_key(l, &disk_key, mid);
- right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
- root->root_key.objectid,
- &disk_key, 0, l->start, 0);
+ right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
+ &disk_key, 0, l->start, 0);
if (IS_ERR(right))
return PTR_ERR(right);
- root_add_used(root, root->leafsize);
+ root_add_used(root, root->nodesize);
memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
btrfs_set_header_bytenr(right, right->start);
@@ -4626,8 +4614,7 @@ void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
ptr = btrfs_item_ptr_offset(leaf, slot);
memmove_extent_buffer(leaf, ptr,
(unsigned long)fi,
- offsetof(struct btrfs_file_extent_item,
- disk_bytenr));
+ BTRFS_FILE_EXTENT_INLINE_DATA_START);
}
}
@@ -4738,6 +4725,12 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
int slot;
struct btrfs_map_token token;
+ if (path->slots[0] == 0) {
+ btrfs_cpu_key_to_disk(&disk_key, cpu_key);
+ fixup_low_keys(root, path, &disk_key, 1);
+ }
+ btrfs_unlock_up_safe(path, 1);
+
btrfs_init_map_token(&token);
leaf = path->nodes[0];
@@ -4798,12 +4791,6 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
}
btrfs_set_header_nritems(leaf, nritems + nr);
-
- if (slot == 0) {
- btrfs_cpu_key_to_disk(&disk_key, cpu_key);
- fixup_low_keys(root, path, &disk_key, 1);
- }
- btrfs_unlock_up_safe(path, 1);
btrfs_mark_buffer_dirty(leaf);
if (btrfs_leaf_free_space(root, leaf) < 0) {
@@ -5145,8 +5132,9 @@ int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
u32 nritems;
int level;
int ret = 1;
+ int keep_locks = path->keep_locks;
- WARN_ON(!path->keep_locks);
+ path->keep_locks = 1;
again:
cur = btrfs_read_lock_root_node(root);
level = btrfs_header_level(cur);
@@ -5210,7 +5198,6 @@ find_next_key:
path->slots[level] = slot;
if (level == path->lowest_level) {
ret = 0;
- unlock_up(path, level, 1, 0, NULL);
goto out;
}
btrfs_set_path_blocking(path);
@@ -5225,9 +5212,12 @@ find_next_key:
btrfs_clear_path_blocking(path, NULL, 0);
}
out:
- if (ret == 0)
+ path->keep_locks = keep_locks;
+ if (ret == 0) {
+ btrfs_unlock_up_safe(path, path->lowest_level + 1);
+ btrfs_set_path_blocking(path);
memcpy(min_key, &found_key, sizeof(found_key));
- btrfs_set_path_blocking(path);
+ }
return ret;
}
@@ -5375,7 +5365,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
goto out;
}
- tmp_buf = kmalloc(left_root->leafsize, GFP_NOFS);
+ tmp_buf = kmalloc(left_root->nodesize, GFP_NOFS);
if (!tmp_buf) {
ret = -ENOMEM;
goto out;
@@ -5520,18 +5510,18 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
goto out;
advance_right = ADVANCE;
} else {
- enum btrfs_compare_tree_result cmp;
+ enum btrfs_compare_tree_result result;
WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
ret = tree_compare_item(left_root, left_path,
right_path, tmp_buf);
if (ret)
- cmp = BTRFS_COMPARE_TREE_CHANGED;
+ result = BTRFS_COMPARE_TREE_CHANGED;
else
- cmp = BTRFS_COMPARE_TREE_SAME;
+ result = BTRFS_COMPARE_TREE_SAME;
ret = changed_cb(left_root, right_root,
left_path, right_path,
- &left_key, cmp, ctx);
+ &left_key, result, ctx);
if (ret < 0)
goto out;
advance_left = ADVANCE;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 8e29b614fe93..d557264ee974 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -34,6 +34,7 @@
#include <linux/pagemap.h>
#include <linux/btrfs.h>
#include <linux/workqueue.h>
+#include <linux/security.h>
#include "extent_io.h"
#include "extent_map.h"
#include "async-thread.h"
@@ -62,13 +63,6 @@ struct btrfs_ordered_sum;
#define BTRFS_COMPAT_EXTENT_TREE_V0
-/*
- * files bigger than this get some pre-flushing when they are added
- * to the ordered operations list. That way we limit the total
- * work done by the commit
- */
-#define BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT (8 * 1024 * 1024)
-
/* holds pointers to all of the tree roots */
#define BTRFS_ROOT_TREE_OBJECTID 1ULL
@@ -391,10 +385,12 @@ struct btrfs_header {
sizeof(struct btrfs_header)) / \
sizeof(struct btrfs_key_ptr))
#define __BTRFS_LEAF_DATA_SIZE(bs) ((bs) - sizeof(struct btrfs_header))
-#define BTRFS_LEAF_DATA_SIZE(r) (__BTRFS_LEAF_DATA_SIZE(r->leafsize))
+#define BTRFS_LEAF_DATA_SIZE(r) (__BTRFS_LEAF_DATA_SIZE(r->nodesize))
+#define BTRFS_FILE_EXTENT_INLINE_DATA_START \
+ (offsetof(struct btrfs_file_extent_item, disk_bytenr))
#define BTRFS_MAX_INLINE_DATA_SIZE(r) (BTRFS_LEAF_DATA_SIZE(r) - \
sizeof(struct btrfs_item) - \
- sizeof(struct btrfs_file_extent_item))
+ BTRFS_FILE_EXTENT_INLINE_DATA_START)
#define BTRFS_MAX_XATTR_SIZE(r) (BTRFS_LEAF_DATA_SIZE(r) - \
sizeof(struct btrfs_item) -\
sizeof(struct btrfs_dir_item))
@@ -474,7 +470,7 @@ struct btrfs_super_block {
__le64 num_devices;
__le32 sectorsize;
__le32 nodesize;
- __le32 leafsize;
+ __le32 __unused_leafsize;
__le32 stripesize;
__le32 sys_chunk_array_size;
__le64 chunk_root_generation;
@@ -903,6 +899,8 @@ struct btrfs_file_extent_item {
/*
* disk space consumed by the extent, checksum blocks are included
* in these numbers
+ *
+ * At this offset in the structure, the inline extent data starts.
*/
__le64 disk_bytenr;
__le64 disk_num_bytes;
@@ -1305,8 +1303,8 @@ struct btrfs_block_group_cache {
*/
struct list_head cluster_list;
- /* For delayed block group creation */
- struct list_head new_bg_list;
+ /* For delayed block group creation or deletion of empty block groups */
+ struct list_head bg_list;
};
/* delayed seq elem */
@@ -1545,6 +1543,7 @@ struct btrfs_fs_info {
struct btrfs_workqueue *endio_workers;
struct btrfs_workqueue *endio_meta_workers;
struct btrfs_workqueue *endio_raid56_workers;
+ struct btrfs_workqueue *endio_repair_workers;
struct btrfs_workqueue *rmw_workers;
struct btrfs_workqueue *endio_meta_write_workers;
struct btrfs_workqueue *endio_write_workers;
@@ -1574,6 +1573,7 @@ struct btrfs_fs_info {
int do_barriers;
int closing;
int log_root_recovering;
+ int open;
u64 total_pinned;
@@ -1723,6 +1723,12 @@ struct btrfs_fs_info {
/* Used to reclaim the metadata space in the background. */
struct work_struct async_reclaim_work;
+
+ spinlock_t unused_bgs_lock;
+ struct list_head unused_bgs;
+
+ /* For btrfs to record security options */
+ struct security_mnt_opts security_opts;
};
struct btrfs_subvolume_writers {
@@ -1776,12 +1782,12 @@ struct btrfs_root {
/* free ino cache stuff */
struct btrfs_free_space_ctl *free_ino_ctl;
- enum btrfs_caching_type cached;
- spinlock_t cache_lock;
- wait_queue_head_t cache_wait;
+ enum btrfs_caching_type ino_cache_state;
+ spinlock_t ino_cache_lock;
+ wait_queue_head_t ino_cache_wait;
struct btrfs_free_space_ctl *free_ino_pinned;
- u64 cache_progress;
- struct inode *cache_inode;
+ u64 ino_cache_progress;
+ struct inode *ino_cache_inode;
struct mutex log_mutex;
wait_queue_head_t log_writer_wait;
@@ -1806,18 +1812,14 @@ struct btrfs_root {
/* node allocations are done in nodesize units */
u32 nodesize;
- /* leaf allocations are done in leafsize units */
- u32 leafsize;
-
u32 stripesize;
u32 type;
u64 highest_objectid;
-#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+ /* only used when CONFIG_BTRFS_FS_RUN_SANITY_TESTS is enabled */
u64 alloc_bytenr;
-#endif
u64 defrag_trans_start;
struct btrfs_key defrag_progress;
@@ -2094,6 +2096,7 @@ struct btrfs_ioctl_defrag_range_args {
#define BTRFS_MOUNT_CHANGE_INODE_CACHE (1 << 24)
#define BTRFS_DEFAULT_COMMIT_INTERVAL (30)
+#define BTRFS_DEFAULT_MAX_INLINE (8192)
#define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt)
#define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt)
@@ -2995,8 +2998,6 @@ BTRFS_SETGET_STACK_FUNCS(super_sectorsize, struct btrfs_super_block,
sectorsize, 32);
BTRFS_SETGET_STACK_FUNCS(super_nodesize, struct btrfs_super_block,
nodesize, 32);
-BTRFS_SETGET_STACK_FUNCS(super_leafsize, struct btrfs_super_block,
- leafsize, 32);
BTRFS_SETGET_STACK_FUNCS(super_stripesize, struct btrfs_super_block,
stripesize, 32);
BTRFS_SETGET_STACK_FUNCS(super_root_dir, struct btrfs_super_block,
@@ -3049,14 +3050,12 @@ BTRFS_SETGET_STACK_FUNCS(stack_file_extent_compression,
static inline unsigned long
btrfs_file_extent_inline_start(struct btrfs_file_extent_item *e)
{
- unsigned long offset = (unsigned long)e;
- offset += offsetof(struct btrfs_file_extent_item, disk_bytenr);
- return offset;
+ return (unsigned long)e + BTRFS_FILE_EXTENT_INLINE_DATA_START;
}
static inline u32 btrfs_file_extent_calc_inline_size(u32 datasize)
{
- return offsetof(struct btrfs_file_extent_item, disk_bytenr) + datasize;
+ return BTRFS_FILE_EXTENT_INLINE_DATA_START + datasize;
}
BTRFS_SETGET_FUNCS(file_extent_disk_bytenr, struct btrfs_file_extent_item,
@@ -3086,9 +3085,7 @@ BTRFS_SETGET_FUNCS(file_extent_other_encoding, struct btrfs_file_extent_item,
static inline u32 btrfs_file_extent_inline_item_len(struct extent_buffer *eb,
struct btrfs_item *e)
{
- unsigned long offset;
- offset = offsetof(struct btrfs_file_extent_item, disk_bytenr);
- return btrfs_item_size(eb, e) - offset;
+ return btrfs_item_size(eb, e) - BTRFS_FILE_EXTENT_INLINE_DATA_START;
}
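Taken together, the macro makes the inline-extent arithmetic read directly off the on-disk layout. A worked example with illustrative sizes (the 21-byte offset assumes the current packed layout of btrfs_file_extent_item):

	/*
	 * For an inline file extent item occupying 121 bytes in the leaf, the
	 * fixed fields before disk_bytenr take
	 * BTRFS_FILE_EXTENT_INLINE_DATA_START (21) bytes, so:
	 *   payload length = 121 - 21 = 100 bytes
	 *   payload start  = (unsigned long)fi + 21
	 */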
/* this returns the number of file bytes represented by the inline item.
@@ -3232,13 +3229,6 @@ static inline struct btrfs_fs_info *btrfs_sb(struct super_block *sb)
return sb->s_fs_info;
}
-static inline u32 btrfs_level_size(struct btrfs_root *root, int level)
-{
- if (level == 0)
- return root->leafsize;
- return root->nodesize;
-}
-
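With a single metadata block size the removed helper would return root->nodesize for every level, which is why callers throughout this patch now read the field directly:

	/*
	 * old: blocksize = btrfs_level_size(root, level);
	 * new: blocksize = root->nodesize;   (same value for leaves and nodes)
	 */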
/* helper function to cast into the data area of the leaf. */
#define btrfs_item_ptr(leaf, slot, type) \
((type *)(btrfs_leaf_data(leaf) + \
@@ -3263,7 +3253,7 @@ static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping)
static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_root *root,
unsigned num_items)
{
- return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) *
+ return (root->nodesize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) *
2 * num_items;
}
@@ -3274,8 +3264,7 @@ static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_root *root,
static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_root *root,
unsigned num_items)
{
- return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) *
- num_items;
+ return root->nodesize * BTRFS_MAX_LEVEL * num_items;
}
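Since leafsize always equalled nodesize, the simplification is a pure algebraic identity:

	/*
	 * (leafsize + nodesize * (BTRFS_MAX_LEVEL - 1)) * num_items
	 *   = (nodesize + nodesize * (BTRFS_MAX_LEVEL - 1)) * num_items
	 *   = nodesize * BTRFS_MAX_LEVEL * num_items
	 */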
int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
@@ -3305,9 +3294,9 @@ struct btrfs_block_group_cache *btrfs_lookup_block_group(
u64 bytenr);
void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
int get_block_group_index(struct btrfs_block_group_cache *cache);
-struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, u32 blocksize,
- u64 parent, u64 root_objectid,
+struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, u64 parent,
+ u64 root_objectid,
struct btrfs_disk_key *key, int level,
u64 hint, u64 empty_size);
void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
@@ -3363,6 +3352,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
u64 size);
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 group_start);
+void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info);
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data);
@@ -3604,6 +3594,7 @@ static inline void free_fs_info(struct btrfs_fs_info *fs_info)
kfree(fs_info->uuid_root);
kfree(fs_info->super_copy);
kfree(fs_info->super_for_commit);
+ security_free_mnt_opts(&fs_info->security_opts);
kfree(fs_info);
}
@@ -3739,8 +3730,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
struct bio *bio, u32 *dst);
int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
- struct btrfs_dio_private *dip, struct bio *bio,
- u64 logical_offset);
+ struct bio *bio, u64 logical_offset);
int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 objectid, u64 pos,
@@ -4141,8 +4131,15 @@ static inline int btrfs_defrag_cancelled(struct btrfs_fs_info *fs_info)
/* Sanity test specific functions */
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
void btrfs_test_destroy_inode(struct inode *inode);
-int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
- u64 rfer, u64 excl);
#endif
+static inline int btrfs_test_is_dummy_root(struct btrfs_root *root)
+{
+#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+ if (unlikely(test_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state)))
+ return 1;
+#endif
+ return 0;
+}
+
#endif
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index a2e90f855d7d..054577bddaf2 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -1042,7 +1042,7 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
int ret;
key.objectid = node->inode_id;
- btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
+ key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
@@ -1099,7 +1099,7 @@ err_out:
search:
btrfs_release_path(path);
- btrfs_set_key_type(&key, BTRFS_INODE_EXTREF_KEY);
+ key.type = BTRFS_INODE_EXTREF_KEY;
key.offset = -1;
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret < 0)
@@ -1473,7 +1473,7 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
}
delayed_item->key.objectid = btrfs_ino(dir);
- btrfs_set_key_type(&delayed_item->key, BTRFS_DIR_INDEX_KEY);
+ delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
delayed_item->key.offset = index;
dir_item = (struct btrfs_dir_item *)delayed_item->data;
@@ -1542,7 +1542,7 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
return PTR_ERR(node);
item_key.objectid = btrfs_ino(dir);
- btrfs_set_key_type(&item_key, BTRFS_DIR_INDEX_KEY);
+ item_key.type = BTRFS_DIR_INDEX_KEY;
item_key.offset = index;
ret = btrfs_delete_delayed_insertion_item(root, node, &item_key);
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index eea26e1b2fda..6f662b34ba0e 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -168,8 +168,12 @@ no_valid_dev_replace_entry_found:
dev_replace->srcdev->total_bytes;
dev_replace->tgtdev->disk_total_bytes =
dev_replace->srcdev->disk_total_bytes;
+ dev_replace->tgtdev->commit_total_bytes =
+ dev_replace->srcdev->commit_total_bytes;
dev_replace->tgtdev->bytes_used =
dev_replace->srcdev->bytes_used;
+ dev_replace->tgtdev->commit_bytes_used =
+ dev_replace->srcdev->commit_bytes_used;
}
dev_replace->tgtdev->is_tgtdev_for_dev_replace = 1;
btrfs_init_dev_replace_tgtdev_for_resume(fs_info,
@@ -329,30 +333,34 @@ int btrfs_dev_replace_start(struct btrfs_root *root,
args->start.tgtdev_name[0] == '\0')
return -EINVAL;
- mutex_lock(&fs_info->volume_mutex);
- ret = btrfs_init_dev_replace_tgtdev(root, args->start.tgtdev_name,
- &tgt_device);
- if (ret) {
- btrfs_err(fs_info, "target device %s is invalid!",
- args->start.tgtdev_name);
- mutex_unlock(&fs_info->volume_mutex);
- return -EINVAL;
+ /*
+ * Here we commit the transaction to make sure commit_total_bytes
+ * of all the devices are updated.
+ */
+ trans = btrfs_attach_transaction(root);
+ if (!IS_ERR(trans)) {
+ ret = btrfs_commit_transaction(trans, root);
+ if (ret)
+ return ret;
+ } else if (PTR_ERR(trans) != -ENOENT) {
+ return PTR_ERR(trans);
}
+ /* the disk copy procedure reuses the scrub code */
+ mutex_lock(&fs_info->volume_mutex);
ret = btrfs_dev_replace_find_srcdev(root, args->start.srcdevid,
args->start.srcdev_name,
&src_device);
- mutex_unlock(&fs_info->volume_mutex);
if (ret) {
- ret = -EINVAL;
- goto leave_no_lock;
+ mutex_unlock(&fs_info->volume_mutex);
+ return ret;
}
- if (tgt_device->total_bytes < src_device->total_bytes) {
- btrfs_err(fs_info, "target device is smaller than source device!");
- ret = -EINVAL;
- goto leave_no_lock;
- }
+ ret = btrfs_init_dev_replace_tgtdev(root, args->start.tgtdev_name,
+ src_device, &tgt_device);
+ mutex_unlock(&fs_info->volume_mutex);
+ if (ret)
+ return ret;
btrfs_dev_replace_lock(dev_replace);
switch (dev_replace->replace_state) {
@@ -380,10 +388,6 @@ int btrfs_dev_replace_start(struct btrfs_root *root,
src_device->devid,
rcu_str_deref(tgt_device->name));
- tgt_device->total_bytes = src_device->total_bytes;
- tgt_device->disk_total_bytes = src_device->disk_total_bytes;
- tgt_device->bytes_used = src_device->bytes_used;
-
/*
* from now on, the writes to the srcdev are all duplicated to
* go to the tgtdev as well (refer to btrfs_map_block()).
@@ -414,7 +418,7 @@ int btrfs_dev_replace_start(struct btrfs_root *root,
/* the disk copy procedure reuses the scrub code */
ret = btrfs_scrub_dev(fs_info, src_device->devid, 0,
- src_device->total_bytes,
+ btrfs_device_get_total_bytes(src_device),
&dev_replace->scrub_progress, 0, 1);
ret = btrfs_dev_replace_finishing(root->fs_info, ret);
@@ -426,9 +430,7 @@ leave:
dev_replace->srcdev = NULL;
dev_replace->tgtdev = NULL;
btrfs_dev_replace_unlock(dev_replace);
-leave_no_lock:
- if (tgt_device)
- btrfs_destroy_dev_replace_tgtdev(fs_info, tgt_device);
+ btrfs_destroy_dev_replace_tgtdev(fs_info, tgt_device);
return ret;
}
@@ -507,9 +509,10 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
ret = btrfs_commit_transaction(trans, root);
WARN_ON(ret);
+ mutex_lock(&uuid_mutex);
/* keep away write_all_supers() during the finishing procedure */
- mutex_lock(&root->fs_info->chunk_mutex);
mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
+ mutex_lock(&root->fs_info->chunk_mutex);
btrfs_dev_replace_lock(dev_replace);
dev_replace->replace_state =
scrub_ret ? BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED
@@ -532,8 +535,9 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
src_device->devid,
rcu_str_deref(tgt_device->name), scrub_ret);
btrfs_dev_replace_unlock(dev_replace);
- mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
mutex_unlock(&root->fs_info->chunk_mutex);
+ mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+ mutex_unlock(&uuid_mutex);
if (tgt_device)
btrfs_destroy_dev_replace_tgtdev(fs_info, tgt_device);
mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
@@ -542,7 +546,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
}
printk_in_rcu(KERN_INFO
- "BTRFS: dev_replace from %s (devid %llu) to %s) finished\n",
+ "BTRFS: dev_replace from %s (devid %llu) to %s finished\n",
src_device->missing ? "<missing disk>" :
rcu_str_deref(src_device->name),
src_device->devid,
@@ -550,23 +554,29 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
tgt_device->is_tgtdev_for_dev_replace = 0;
tgt_device->devid = src_device->devid;
src_device->devid = BTRFS_DEV_REPLACE_DEVID;
- tgt_device->bytes_used = src_device->bytes_used;
memcpy(uuid_tmp, tgt_device->uuid, sizeof(uuid_tmp));
memcpy(tgt_device->uuid, src_device->uuid, sizeof(tgt_device->uuid));
memcpy(src_device->uuid, uuid_tmp, sizeof(src_device->uuid));
- tgt_device->total_bytes = src_device->total_bytes;
- tgt_device->disk_total_bytes = src_device->disk_total_bytes;
- tgt_device->bytes_used = src_device->bytes_used;
+ btrfs_device_set_total_bytes(tgt_device, src_device->total_bytes);
+ btrfs_device_set_disk_total_bytes(tgt_device,
+ src_device->disk_total_bytes);
+ btrfs_device_set_bytes_used(tgt_device, src_device->bytes_used);
+ ASSERT(list_empty(&src_device->resized_list));
+ tgt_device->commit_total_bytes = src_device->commit_total_bytes;
+ tgt_device->commit_bytes_used = src_device->bytes_used;
if (fs_info->sb->s_bdev == src_device->bdev)
fs_info->sb->s_bdev = tgt_device->bdev;
if (fs_info->fs_devices->latest_bdev == src_device->bdev)
fs_info->fs_devices->latest_bdev = tgt_device->bdev;
list_add(&tgt_device->dev_alloc_list, &fs_info->fs_devices->alloc_list);
+ fs_info->fs_devices->rw_devices++;
/* replace the sysfs entry */
btrfs_kobj_rm_device(fs_info, src_device);
btrfs_kobj_add_device(fs_info, tgt_device);
+ btrfs_dev_replace_unlock(dev_replace);
+
btrfs_rm_dev_replace_blocked(fs_info);
btrfs_rm_dev_replace_srcdev(fs_info, src_device);
@@ -580,9 +590,9 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
* superblock is scratched out so that it is no longer marked to
* belong to this filesystem.
*/
- btrfs_dev_replace_unlock(dev_replace);
- mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
mutex_unlock(&root->fs_info->chunk_mutex);
+ mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+ mutex_unlock(&uuid_mutex);
/* write back the superblocks */
trans = btrfs_start_transaction(root, 0);
@@ -643,6 +653,7 @@ void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info,
struct btrfs_ioctl_dev_replace_args *args)
{
struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
+ struct btrfs_device *srcdev;
btrfs_dev_replace_lock(dev_replace);
/* even if !dev_replace_is_valid, the values are good enough for
@@ -665,8 +676,9 @@ void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info,
break;
case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
+ srcdev = dev_replace->srcdev;
args->status.progress_1000 = div64_u64(dev_replace->cursor_left,
- div64_u64(dev_replace->srcdev->total_bytes, 1000));
+ div64_u64(btrfs_device_get_total_bytes(srcdev), 1000));
break;
}
btrfs_dev_replace_unlock(dev_replace);
@@ -825,7 +837,7 @@ static int btrfs_dev_replace_continue_on_mount(struct btrfs_fs_info *fs_info)
ret = btrfs_scrub_dev(fs_info, dev_replace->srcdev->devid,
dev_replace->committed_cursor_left,
- dev_replace->srcdev->total_bytes,
+ btrfs_device_get_total_bytes(dev_replace->srcdev),
&dev_replace->scrub_progress, 0, 1);
ret = btrfs_dev_replace_finishing(fs_info, ret);
WARN_ON(ret);
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index a0691df5dcea..fc8df866e919 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -86,7 +86,7 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
BUG_ON(name_len + data_len > BTRFS_MAX_XATTR_SIZE(root));
key.objectid = objectid;
- btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY);
+ key.type = BTRFS_XATTR_ITEM_KEY;
key.offset = btrfs_name_hash(name, name_len);
data_size = sizeof(*dir_item) + name_len + data_len;
@@ -137,7 +137,7 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
u32 data_size;
key.objectid = btrfs_ino(dir);
- btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY);
+ key.type = BTRFS_DIR_ITEM_KEY;
key.offset = btrfs_name_hash(name, name_len);
path = btrfs_alloc_path();
@@ -204,7 +204,7 @@ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
int cow = mod != 0;
key.objectid = dir;
- btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY);
+ key.type = BTRFS_DIR_ITEM_KEY;
key.offset = btrfs_name_hash(name, name_len);
@@ -234,7 +234,7 @@ int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
return -ENOMEM;
key.objectid = dir;
- btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY);
+ key.type = BTRFS_DIR_ITEM_KEY;
key.offset = btrfs_name_hash(name, name_len);
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
@@ -297,7 +297,7 @@ btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
int cow = mod != 0;
key.objectid = dir;
- btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
+ key.type = BTRFS_DIR_INDEX_KEY;
key.offset = objectid;
ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
@@ -367,7 +367,7 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
int cow = mod != 0;
key.objectid = dir;
- btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY);
+ key.type = BTRFS_XATTR_ITEM_KEY;
key.offset = btrfs_name_hash(name, name_len);
ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
if (ret < 0)
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index d0d78dc07792..fa45e3cae40d 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -72,21 +72,41 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root);
static void btrfs_error_commit_super(struct btrfs_root *root);
/*
- * end_io_wq structs are used to do processing in task context when an IO is
- * complete. This is used during reads to verify checksums, and it is used
+ * btrfs_end_io_wq structs are used to do processing in task context when an IO
+ * is complete. This is used during reads to verify checksums, and it is used
* by writes to insert metadata for new file extents after IO is complete.
*/
-struct end_io_wq {
+struct btrfs_end_io_wq {
struct bio *bio;
bio_end_io_t *end_io;
void *private;
struct btrfs_fs_info *info;
int error;
- int metadata;
+ enum btrfs_wq_endio_type metadata;
struct list_head list;
struct btrfs_work work;
};
+static struct kmem_cache *btrfs_end_io_wq_cache;
+
+int __init btrfs_end_io_wq_init(void)
+{
+ btrfs_end_io_wq_cache = kmem_cache_create("btrfs_end_io_wq",
+ sizeof(struct btrfs_end_io_wq),
+ 0,
+ SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
+ NULL);
+ if (!btrfs_end_io_wq_cache)
+ return -ENOMEM;
+ return 0;
+}
+
+void btrfs_end_io_wq_exit(void)
+{
+ if (btrfs_end_io_wq_cache)
+ kmem_cache_destroy(btrfs_end_io_wq_cache);
+}
+
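A hypothetical registration sketch (the real hookup lives in the module init path, which is not part of this hunk; the example_ names are invented for illustration):

	static int __init example_btrfs_init(void)
	{
		int ret;

		ret = btrfs_end_io_wq_init();
		if (ret)
			return ret;
		/* ... register other caches and the filesystem type ... */
		return 0;
	}

	static void __exit example_btrfs_exit(void)
	{
		btrfs_end_io_wq_exit();
	}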
/*
* async submit bios are used to offload expensive checksumming
* onto the worker threads. They checksum file and metadata bios
@@ -327,8 +347,7 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
{
struct extent_state *cached_state = NULL;
int ret;
- bool need_lock = (current->journal_info ==
- (void *)BTRFS_SEND_TRANS_STUB);
+ bool need_lock = (current->journal_info == BTRFS_SEND_TRANS_STUB);
if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
return 0;
@@ -348,9 +367,9 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
ret = 0;
goto out;
}
- printk_ratelimited("parent transid verify failed on %llu wanted %llu "
- "found %llu\n",
- eb->start, parent_transid, btrfs_header_generation(eb));
+ printk_ratelimited(KERN_INFO "BTRFS (device %s): parent transid verify failed on %llu wanted %llu found %llu\n",
+ eb->fs_info->sb->s_id, eb->start,
+ parent_transid, btrfs_header_generation(eb));
ret = 1;
/*
@@ -607,22 +626,22 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
goto err;
eb->read_mirror = mirror;
- if (test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
+ if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) {
ret = -EIO;
goto err;
}
found_start = btrfs_header_bytenr(eb);
if (found_start != eb->start) {
- printk_ratelimited(KERN_INFO "BTRFS: bad tree block start "
+ printk_ratelimited(KERN_INFO "BTRFS (device %s): bad tree block start "
"%llu %llu\n",
- found_start, eb->start);
+ eb->fs_info->sb->s_id, found_start, eb->start);
ret = -EIO;
goto err;
}
if (check_tree_block_fsid(root, eb)) {
- printk_ratelimited(KERN_INFO "BTRFS: bad fsid on block %llu\n",
- eb->start);
+ printk_ratelimited(KERN_INFO "BTRFS (device %s): bad fsid on block %llu\n",
+ eb->fs_info->sb->s_id, eb->start);
ret = -EIO;
goto err;
}
@@ -680,7 +699,7 @@ static int btree_io_failed_hook(struct page *page, int failed_mirror)
struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
eb = (struct extent_buffer *)page->private;
- set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
+ set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
eb->read_mirror = failed_mirror;
atomic_dec(&eb->io_pages);
if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
@@ -690,7 +709,7 @@ static int btree_io_failed_hook(struct page *page, int failed_mirror)
static void end_workqueue_bio(struct bio *bio, int err)
{
- struct end_io_wq *end_io_wq = bio->bi_private;
+ struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
struct btrfs_fs_info *fs_info;
struct btrfs_workqueue *wq;
btrfs_work_func_t func;
@@ -713,7 +732,11 @@ static void end_workqueue_bio(struct bio *bio, int err)
func = btrfs_endio_write_helper;
}
} else {
- if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
+ if (unlikely(end_io_wq->metadata ==
+ BTRFS_WQ_ENDIO_DIO_REPAIR)) {
+ wq = fs_info->endio_repair_workers;
+ func = btrfs_endio_repair_helper;
+ } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) {
wq = fs_info->endio_raid56_workers;
func = btrfs_endio_raid56_helper;
} else if (end_io_wq->metadata) {
@@ -729,19 +752,12 @@ static void end_workqueue_bio(struct bio *bio, int err)
btrfs_queue_work(wq, &end_io_wq->work);
}
-/*
- * For the metadata arg you want
- *
- * 0 - if data
- * 1 - if normal metadta
- * 2 - if writing to the free space cache area
- * 3 - raid parity work
- */
int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
- int metadata)
+ enum btrfs_wq_endio_type metadata)
{
- struct end_io_wq *end_io_wq;
- end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
+ struct btrfs_end_io_wq *end_io_wq;
+
+ end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
if (!end_io_wq)
return -ENOMEM;
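The magic 0/1/2/3 values documented in the deleted comment are replaced by the btrfs_wq_endio_type enum declared in disk-io.h (not shown in this hunk); based on the callers in this patch, a plausible shape is:

	enum btrfs_wq_endio_type {
		BTRFS_WQ_ENDIO_DATA,
		BTRFS_WQ_ENDIO_METADATA,
		BTRFS_WQ_ENDIO_RAID56,
		BTRFS_WQ_ENDIO_DIO_REPAIR,
		/* possibly further members, e.g. for free-space-cache IO */
	};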
@@ -925,7 +941,7 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
* can happen in the async kernel threads
*/
ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
- bio, 1);
+ bio, BTRFS_WQ_ENDIO_METADATA);
if (ret)
goto out_w_error;
ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
@@ -1057,20 +1073,17 @@ static const struct address_space_operations btree_aops = {
.set_page_dirty = btree_set_page_dirty,
};
-int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
- u64 parent_transid)
+void readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize)
{
struct extent_buffer *buf = NULL;
struct inode *btree_inode = root->fs_info->btree_inode;
- int ret = 0;
buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
if (!buf)
- return 0;
+ return;
read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
buf, 0, WAIT_NONE, btree_get_extent, 0);
free_extent_buffer(buf);
- return ret;
}
int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
@@ -1106,7 +1119,7 @@ int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
}
struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
- u64 bytenr, u32 blocksize)
+ u64 bytenr)
{
return find_extent_buffer(root->fs_info, bytenr);
}
@@ -1114,11 +1127,9 @@ struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
u64 bytenr, u32 blocksize)
{
-#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
- if (unlikely(test_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state)))
+ if (btrfs_test_is_dummy_root(root))
return alloc_test_extent_buffer(root->fs_info, bytenr,
blocksize);
-#endif
return alloc_extent_buffer(root->fs_info, bytenr, blocksize);
}
@@ -1136,12 +1147,12 @@ int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
}
struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
- u32 blocksize, u64 parent_transid)
+ u64 parent_transid)
{
struct extent_buffer *buf = NULL;
int ret;
- buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
+ buf = btrfs_find_create_tree_block(root, bytenr, root->nodesize);
if (!buf)
return NULL;
@@ -1200,16 +1211,14 @@ btrfs_free_subvolume_writers(struct btrfs_subvolume_writers *writers)
kfree(writers);
}
-static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
- u32 stripesize, struct btrfs_root *root,
- struct btrfs_fs_info *fs_info,
+static void __setup_root(u32 nodesize, u32 sectorsize, u32 stripesize,
+ struct btrfs_root *root, struct btrfs_fs_info *fs_info,
u64 objectid)
{
root->node = NULL;
root->commit_root = NULL;
root->sectorsize = sectorsize;
root->nodesize = nodesize;
- root->leafsize = leafsize;
root->stripesize = stripesize;
root->state = 0;
root->orphan_cleanup_state = 0;
@@ -1295,7 +1304,7 @@ struct btrfs_root *btrfs_alloc_dummy_root(void)
root = btrfs_alloc_root(NULL);
if (!root)
return ERR_PTR(-ENOMEM);
- __setup_root(4096, 4096, 4096, 4096, root, NULL, 1);
+ __setup_root(4096, 4096, 4096, root, NULL, 1);
set_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state);
root->alloc_bytenr = 0;
@@ -1318,15 +1327,13 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
if (!root)
return ERR_PTR(-ENOMEM);
- __setup_root(tree_root->nodesize, tree_root->leafsize,
- tree_root->sectorsize, tree_root->stripesize,
- root, fs_info, objectid);
+ __setup_root(tree_root->nodesize, tree_root->sectorsize,
+ tree_root->stripesize, root, fs_info, objectid);
root->root_key.objectid = objectid;
root->root_key.type = BTRFS_ROOT_ITEM_KEY;
root->root_key.offset = 0;
- leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
- 0, objectid, NULL, 0, 0, 0);
+ leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0);
if (IS_ERR(leaf)) {
ret = PTR_ERR(leaf);
leaf = NULL;
@@ -1396,9 +1403,9 @@ static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
if (!root)
return ERR_PTR(-ENOMEM);
- __setup_root(tree_root->nodesize, tree_root->leafsize,
- tree_root->sectorsize, tree_root->stripesize,
- root, fs_info, BTRFS_TREE_LOG_OBJECTID);
+ __setup_root(tree_root->nodesize, tree_root->sectorsize,
+ tree_root->stripesize, root, fs_info,
+ BTRFS_TREE_LOG_OBJECTID);
root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
root->root_key.type = BTRFS_ROOT_ITEM_KEY;
@@ -1413,9 +1420,8 @@ static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
* updated (along with back refs to the log tree).
*/
- leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
- BTRFS_TREE_LOG_OBJECTID, NULL,
- 0, 0, 0);
+ leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
+ NULL, 0, 0, 0);
if (IS_ERR(leaf)) {
kfree(root);
return ERR_CAST(leaf);
@@ -1465,7 +1471,7 @@ int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
btrfs_set_stack_inode_generation(inode_item, 1);
btrfs_set_stack_inode_size(inode_item, 3);
btrfs_set_stack_inode_nlink(inode_item, 1);
- btrfs_set_stack_inode_nbytes(inode_item, root->leafsize);
+ btrfs_set_stack_inode_nbytes(inode_item, root->nodesize);
btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);
btrfs_set_root_node(&log_root->root_item, log_root->node);
@@ -1485,7 +1491,6 @@ static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
struct btrfs_fs_info *fs_info = tree_root->fs_info;
struct btrfs_path *path;
u64 generation;
- u32 blocksize;
int ret;
path = btrfs_alloc_path();
@@ -1498,9 +1503,8 @@ static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
goto alloc_fail;
}
- __setup_root(tree_root->nodesize, tree_root->leafsize,
- tree_root->sectorsize, tree_root->stripesize,
- root, fs_info, key->objectid);
+ __setup_root(tree_root->nodesize, tree_root->sectorsize,
+ tree_root->stripesize, root, fs_info, key->objectid);
ret = btrfs_find_root(tree_root, key, path,
&root->root_item, &root->root_key);
@@ -1511,9 +1515,8 @@ static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
}
generation = btrfs_root_generation(&root->root_item);
- blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
- blocksize, generation);
+ generation);
if (!root->node) {
ret = -ENOMEM;
goto find_fail;
@@ -1573,8 +1576,8 @@ int btrfs_init_fs_root(struct btrfs_root *root)
root->subv_writers = writers;
btrfs_init_free_ino_ctl(root);
- spin_lock_init(&root->cache_lock);
- init_waitqueue_head(&root->cache_wait);
+ spin_lock_init(&root->ino_cache_lock);
+ init_waitqueue_head(&root->ino_cache_wait);
ret = get_anon_bdev(&root->anon_dev);
if (ret)
@@ -1708,10 +1711,6 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)
return ret;
}
-/*
- * If this fails, caller must call bdi_destroy() to get rid of the
- * bdi again.
- */
static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
{
int err;
@@ -1734,16 +1733,16 @@ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
static void end_workqueue_fn(struct btrfs_work *work)
{
struct bio *bio;
- struct end_io_wq *end_io_wq;
+ struct btrfs_end_io_wq *end_io_wq;
int error;
- end_io_wq = container_of(work, struct end_io_wq, work);
+ end_io_wq = container_of(work, struct btrfs_end_io_wq, work);
bio = end_io_wq->bio;
error = end_io_wq->error;
bio->bi_private = end_io_wq->private;
bio->bi_end_io = end_io_wq->end_io;
- kfree(end_io_wq);
+ kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
bio_endio_nodec(bio, error);
}
@@ -1772,6 +1771,7 @@ static int cleaner_kthread(void *arg)
}
btrfs_run_delayed_iputs(root);
+ btrfs_delete_unused_bgs(root->fs_info);
again = btrfs_clean_one_deleted_snapshot(root);
mutex_unlock(&root->fs_info->cleaner_mutex);
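A sketch of the draining pattern the cleaner presumably relies on (an assumption; the body of btrfs_delete_unused_bgs() is not shown in this excerpt): pop block groups off fs_info->unused_bgs under unused_bgs_lock and dispose of each one outside the lock.

	spin_lock(&fs_info->unused_bgs_lock);
	while (!list_empty(&fs_info->unused_bgs)) {
		struct btrfs_block_group_cache *bg;

		bg = list_first_entry(&fs_info->unused_bgs,
				      struct btrfs_block_group_cache, bg_list);
		list_del_init(&bg->bg_list);
		spin_unlock(&fs_info->unused_bgs_lock);

		/* ... verify the group is still empty and remove it ... */

		spin_lock(&fs_info->unused_bgs_lock);
	}
	spin_unlock(&fs_info->unused_bgs_lock);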
@@ -2063,6 +2063,7 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
btrfs_destroy_workqueue(fs_info->endio_workers);
btrfs_destroy_workqueue(fs_info->endio_meta_workers);
btrfs_destroy_workqueue(fs_info->endio_raid56_workers);
+ btrfs_destroy_workqueue(fs_info->endio_repair_workers);
btrfs_destroy_workqueue(fs_info->rmw_workers);
btrfs_destroy_workqueue(fs_info->endio_meta_write_workers);
btrfs_destroy_workqueue(fs_info->endio_write_workers);
@@ -2143,8 +2144,6 @@ int open_ctree(struct super_block *sb,
{
u32 sectorsize;
u32 nodesize;
- u32 leafsize;
- u32 blocksize;
u32 stripesize;
u64 generation;
u64 features;
@@ -2233,6 +2232,7 @@ int open_ctree(struct super_block *sb,
spin_lock_init(&fs_info->super_lock);
spin_lock_init(&fs_info->qgroup_op_lock);
spin_lock_init(&fs_info->buffer_lock);
+ spin_lock_init(&fs_info->unused_bgs_lock);
rwlock_init(&fs_info->tree_mod_log_lock);
mutex_init(&fs_info->reloc_mutex);
mutex_init(&fs_info->delalloc_root_mutex);
@@ -2242,6 +2242,7 @@ int open_ctree(struct super_block *sb,
INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
INIT_LIST_HEAD(&fs_info->space_info);
INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
+ INIT_LIST_HEAD(&fs_info->unused_bgs);
btrfs_mapping_init(&fs_info->mapping_tree);
btrfs_init_block_rsv(&fs_info->global_block_rsv,
BTRFS_BLOCK_RSV_GLOBAL);
@@ -2260,7 +2261,7 @@ int open_ctree(struct super_block *sb,
atomic_set(&fs_info->qgroup_op_seq, 0);
atomic64_set(&fs_info->tree_mod_seq, 0);
fs_info->sb = sb;
- fs_info->max_inline = 8192 * 1024;
+ fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
fs_info->metadata_ratio = 0;
fs_info->defrag_inodes = RB_ROOT;
fs_info->free_chunk_space = 0;
@@ -2389,7 +2390,7 @@ int open_ctree(struct super_block *sb,
goto fail_alloc;
}
- __setup_root(4096, 4096, 4096, 4096, tree_root,
+ __setup_root(4096, 4096, 4096, tree_root,
fs_info, BTRFS_ROOT_TREE_OBJECTID);
invalidate_bdev(fs_devices->latest_bdev);
@@ -2469,19 +2470,22 @@ int open_ctree(struct super_block *sb,
goto fail_alloc;
}
- if (btrfs_super_leafsize(disk_super) !=
+ /*
+ * Leafsize and nodesize were always equal; this is only a sanity check.
+ */
+ if (le32_to_cpu(disk_super->__unused_leafsize) !=
btrfs_super_nodesize(disk_super)) {
printk(KERN_ERR "BTRFS: couldn't mount because metadata "
"blocksizes don't match. node %d leaf %d\n",
btrfs_super_nodesize(disk_super),
- btrfs_super_leafsize(disk_super));
+ le32_to_cpu(disk_super->__unused_leafsize));
err = -EINVAL;
goto fail_alloc;
}
- if (btrfs_super_leafsize(disk_super) > BTRFS_MAX_METADATA_BLOCKSIZE) {
+ if (btrfs_super_nodesize(disk_super) > BTRFS_MAX_METADATA_BLOCKSIZE) {
printk(KERN_ERR "BTRFS: couldn't mount because metadata "
"blocksize (%d) was too large\n",
- btrfs_super_leafsize(disk_super));
+ btrfs_super_nodesize(disk_super));
err = -EINVAL;
goto fail_alloc;
}
@@ -2498,17 +2502,16 @@ int open_ctree(struct super_block *sb,
* flag our filesystem as having big metadata blocks if
* they are bigger than the page size
*/
- if (btrfs_super_leafsize(disk_super) > PAGE_CACHE_SIZE) {
+ if (btrfs_super_nodesize(disk_super) > PAGE_CACHE_SIZE) {
if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
printk(KERN_INFO "BTRFS: flagging fs with big metadata feature\n");
features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
}
nodesize = btrfs_super_nodesize(disk_super);
- leafsize = btrfs_super_leafsize(disk_super);
sectorsize = btrfs_super_sectorsize(disk_super);
stripesize = btrfs_super_stripesize(disk_super);
- fs_info->dirty_metadata_batch = leafsize * (1 + ilog2(nr_cpu_ids));
+ fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
/*
@@ -2516,7 +2519,7 @@ int open_ctree(struct super_block *sb,
* extent buffers for the same range. It leads to corruptions
*/
if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
- (sectorsize != leafsize)) {
+ (sectorsize != nodesize)) {
printk(KERN_WARNING "BTRFS: unequal leaf/node/sector sizes "
"are not allowed for mixed block groups on %s\n",
sb->s_id);
@@ -2579,6 +2582,8 @@ int open_ctree(struct super_block *sb,
btrfs_alloc_workqueue("endio-meta-write", flags, max_active, 2);
fs_info->endio_raid56_workers =
btrfs_alloc_workqueue("endio-raid56", flags, max_active, 4);
+ fs_info->endio_repair_workers =
+ btrfs_alloc_workqueue("endio-repair", flags, 1, 0);
fs_info->rmw_workers =
btrfs_alloc_workqueue("rmw", flags, max_active, 2);
fs_info->endio_write_workers =
@@ -2600,11 +2605,12 @@ int open_ctree(struct super_block *sb,
fs_info->submit_workers && fs_info->flush_workers &&
fs_info->endio_workers && fs_info->endio_meta_workers &&
fs_info->endio_meta_write_workers &&
+ fs_info->endio_repair_workers &&
fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
fs_info->endio_freespace_worker && fs_info->rmw_workers &&
fs_info->caching_workers && fs_info->readahead_workers &&
fs_info->fixup_workers && fs_info->delayed_workers &&
- fs_info->fixup_workers && fs_info->extent_workers &&
+ fs_info->extent_workers &&
fs_info->qgroup_rescan_workers)) {
err = -ENOMEM;
goto fail_sb_buffer;
@@ -2615,7 +2621,6 @@ int open_ctree(struct super_block *sb,
4 * 1024 * 1024 / PAGE_CACHE_SIZE);
tree_root->nodesize = nodesize;
- tree_root->leafsize = leafsize;
tree_root->sectorsize = sectorsize;
tree_root->stripesize = stripesize;
@@ -2642,16 +2647,14 @@ int open_ctree(struct super_block *sb,
goto fail_sb_buffer;
}
- blocksize = btrfs_level_size(tree_root,
- btrfs_super_chunk_root_level(disk_super));
generation = btrfs_super_chunk_root_generation(disk_super);
- __setup_root(nodesize, leafsize, sectorsize, stripesize,
- chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
+ __setup_root(nodesize, sectorsize, stripesize, chunk_root,
+ fs_info, BTRFS_CHUNK_TREE_OBJECTID);
chunk_root->node = read_tree_block(chunk_root,
btrfs_super_chunk_root(disk_super),
- blocksize, generation);
+ generation);
if (!chunk_root->node ||
!test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
printk(KERN_WARNING "BTRFS: failed to read chunk root on %s\n",
@@ -2684,13 +2687,11 @@ int open_ctree(struct super_block *sb,
}
retry_root_backup:
- blocksize = btrfs_level_size(tree_root,
- btrfs_super_root_level(disk_super));
generation = btrfs_super_generation(disk_super);
tree_root->node = read_tree_block(tree_root,
btrfs_super_root(disk_super),
- blocksize, generation);
+ generation);
if (!tree_root->node ||
!test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) {
printk(KERN_WARNING "BTRFS: failed to read tree root on %s\n",
@@ -2859,9 +2860,6 @@ retry_root_backup:
err = -EIO;
goto fail_qgroup;
}
- blocksize =
- btrfs_level_size(tree_root,
- btrfs_super_log_root_level(disk_super));
log_tree_root = btrfs_alloc_root(fs_info);
if (!log_tree_root) {
@@ -2869,11 +2867,10 @@ retry_root_backup:
goto fail_qgroup;
}
- __setup_root(nodesize, leafsize, sectorsize, stripesize,
+ __setup_root(nodesize, sectorsize, stripesize,
log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
log_tree_root->node = read_tree_block(tree_root, bytenr,
- blocksize,
generation + 1);
if (!log_tree_root->node ||
!extent_buffer_uptodate(log_tree_root->node)) {
@@ -2980,6 +2977,8 @@ retry_root_backup:
fs_info->update_uuid_tree_gen = 1;
}
+ fs_info->open = 1;
+
return 0;
fail_qgroup:
@@ -3139,7 +3138,8 @@ static int write_dev_supers(struct btrfs_device *device,
for (i = 0; i < max_mirrors; i++) {
bytenr = btrfs_sb_offset(i);
- if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
+ if (bytenr + BTRFS_SUPER_INFO_SIZE >=
+ device->commit_total_bytes)
break;
if (wait) {
@@ -3456,8 +3456,9 @@ static int write_all_supers(struct btrfs_root *root, int max_mirrors)
btrfs_set_stack_device_type(dev_item, dev->type);
btrfs_set_stack_device_id(dev_item, dev->devid);
btrfs_set_stack_device_total_bytes(dev_item,
- dev->disk_total_bytes);
- btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
+ dev->commit_total_bytes);
+ btrfs_set_stack_device_bytes_used(dev_item,
+ dev->commit_bytes_used);
btrfs_set_stack_device_io_align(dev_item, dev->io_align);
btrfs_set_stack_device_io_width(dev_item, dev->io_width);
btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
@@ -3532,7 +3533,7 @@ void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
static void free_fs_root(struct btrfs_root *root)
{
- iput(root->cache_inode);
+ iput(root->ino_cache_inode);
WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
btrfs_free_block_rsv(root, root->orphan_block_rsv);
root->orphan_block_rsv = NULL;
@@ -3623,7 +3624,7 @@ int btrfs_commit_super(struct btrfs_root *root)
return btrfs_commit_transaction(trans, root);
}
-int close_ctree(struct btrfs_root *root)
+void close_ctree(struct btrfs_root *root)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
@@ -3689,6 +3690,7 @@ int close_ctree(struct btrfs_root *root)
invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
btrfs_stop_all_workers(fs_info);
+ fs_info->open = 0;
free_root_pointers(fs_info, 1);
iput(fs_info->btree_inode);
@@ -3711,8 +3713,6 @@ int close_ctree(struct btrfs_root *root)
btrfs_free_block_rsv(root, root->orphan_block_rsv);
root->orphan_block_rsv = NULL;
-
- return 0;
}
int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
@@ -3814,10 +3814,73 @@ int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
int read_only)
{
+ struct btrfs_super_block *sb = fs_info->super_copy;
+ int ret = 0;
+
+ if (sb->root_level > BTRFS_MAX_LEVEL) {
+ printk(KERN_ERR "BTRFS: tree_root level too big: %d > %d\n",
+ sb->root_level, BTRFS_MAX_LEVEL);
+ ret = -EINVAL;
+ }
+ if (sb->chunk_root_level > BTRFS_MAX_LEVEL) {
+ printk(KERN_ERR "BTRFS: chunk_root level too big: %d > %d\n",
+ sb->chunk_root_level, BTRFS_MAX_LEVEL);
+ ret = -EINVAL;
+ }
+ if (sb->log_root_level > BTRFS_MAX_LEVEL) {
+ printk(KERN_ERR "BTRFS: log_root level too big: %d > %d\n",
+ sb->log_root_level, BTRFS_MAX_LEVEL);
+ ret = -EINVAL;
+ }
+
/*
- * Placeholder for checks
+ * The common minimum. We don't know whether we can trust the nodesize/sectorsize
+ * items yet; they'll be verified later. Issue just a warning.
*/
- return 0;
+ if (!IS_ALIGNED(sb->root, 4096))
+ printk(KERN_WARNING "BTRFS: tree_root block unaligned: %llu\n",
+ sb->root);
+ if (!IS_ALIGNED(sb->chunk_root, 4096))
+ printk(KERN_WARNING "BTRFS: chunk_root block unaligned: %llu\n",
+ sb->chunk_root);
+ if (!IS_ALIGNED(sb->log_root, 4096))
+ printk(KERN_WARNING "BTRFS: log_root block unaligned: %llu\n",
+ sb->log_root);
+
+ if (memcmp(fs_info->fsid, sb->dev_item.fsid, BTRFS_UUID_SIZE) != 0) {
+ printk(KERN_ERR "BTRFS: dev_item UUID does not match fsid: %pU != %pU\n",
+ fs_info->fsid, sb->dev_item.fsid);
+ ret = -EINVAL;
+ }
+
+ /*
+ * A hint to catch really bogus numbers (bitflips or so); more exact checks
+ * are done later
+ */
+ if (sb->num_devices > (1UL << 31))
+ printk(KERN_WARNING "BTRFS: suspicious number of devices: %llu\n",
+ sb->num_devices);
+
+ if (sb->bytenr != BTRFS_SUPER_INFO_OFFSET) {
+ printk(KERN_ERR "BTRFS: super offset mismatch %llu != %u\n",
+ sb->bytenr, BTRFS_SUPER_INFO_OFFSET);
+ ret = -EINVAL;
+ }
+
+ /*
+ * The generation is a global counter; we'll trust it more than the others,
+ * but it's still possible that it's the one that's wrong.
+ */
+ if (sb->generation < sb->chunk_root_generation)
+ printk(KERN_WARNING
+ "BTRFS: suspicious: generation < chunk_root_generation: %llu < %llu\n",
+ sb->generation, sb->chunk_root_generation);
+ if (sb->generation < sb->cache_generation && sb->cache_generation != (u64)-1)
+ printk(KERN_WARNING
+ "BTRFS: suspicious: generation < cache_generation: %llu < %llu\n",
+ sb->generation, sb->cache_generation);
+
+ return ret;
}
static void btrfs_error_commit_super(struct btrfs_root *root)
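The btrfs_check_super_valid() body added above follows a collect-everything pattern: hard inconsistencies set ret = -EINVAL but the function keeps going so every problem is reported in one pass, while merely suspicious values (unaligned roots, an implausible device count, generation ordering) only warn because the authoritative checks happen later. A minimal user-space sketch of that pattern, not btrfs code (IS_ALIGNED is redefined locally and all the values are invented):

	#include <stdint.h>
	#include <stdio.h>

	#define IS_ALIGNED(x, a)	(((x) & ((uint64_t)(a) - 1)) == 0)
	#define MAX_LEVEL		8

	struct fake_super {
		uint64_t root;		/* tree root bytenr */
		uint64_t chunk_root;	/* chunk root bytenr */
		uint8_t  root_level;
		uint64_t num_devices;
	};

	/* Report every problem, return an error only for hard failures. */
	static int check_super(const struct fake_super *sb)
	{
		int ret = 0;

		if (sb->root_level > MAX_LEVEL) {
			fprintf(stderr, "root level too big: %d > %d\n",
				sb->root_level, MAX_LEVEL);
			ret = -1;	/* hard error, but keep checking */
		}
		if (!IS_ALIGNED(sb->root, 4096))	/* warning only */
			fprintf(stderr, "root block unaligned: %llu\n",
				(unsigned long long)sb->root);
		if (!IS_ALIGNED(sb->chunk_root, 4096))
			fprintf(stderr, "chunk root block unaligned: %llu\n",
				(unsigned long long)sb->chunk_root);
		if (sb->num_devices > (1ULL << 31))	/* warning only */
			fprintf(stderr, "suspicious number of devices: %llu\n",
				(unsigned long long)sb->num_devices);
		return ret;
	}

	int main(void)
	{
		struct fake_super sb = { .root = 4096 * 3 + 17, .chunk_root = 65536,
					 .root_level = 9, .num_devices = 1 };
		return check_super(&sb) ? 1 : 0;
	}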
@@ -4009,9 +4072,8 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root,
clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
while (start <= end) {
- eb = btrfs_find_tree_block(root, start,
- root->leafsize);
- start += root->leafsize;
+ eb = btrfs_find_tree_block(root, start);
+ start += root->nodesize;
if (!eb)
continue;
wait_on_extent_buffer_writeback(eb);
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index 23ce3ceba0a9..414651821fb3 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -25,11 +25,12 @@
#define BTRFS_SUPER_MIRROR_MAX 3
#define BTRFS_SUPER_MIRROR_SHIFT 12
-enum {
+enum btrfs_wq_endio_type {
BTRFS_WQ_ENDIO_DATA = 0,
BTRFS_WQ_ENDIO_METADATA = 1,
BTRFS_WQ_ENDIO_FREE_SPACE = 2,
BTRFS_WQ_ENDIO_RAID56 = 3,
+ BTRFS_WQ_ENDIO_DIO_REPAIR = 4,
};
static inline u64 btrfs_sb_offset(int mirror)
@@ -44,9 +45,8 @@ struct btrfs_device;
struct btrfs_fs_devices;
struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
- u32 blocksize, u64 parent_transid);
-int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
- u64 parent_transid);
+ u64 parent_transid);
+void readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize);
int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
int mirror_num, struct extent_buffer **eb);
struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
@@ -56,13 +56,13 @@ void clean_tree_block(struct btrfs_trans_handle *trans,
int open_ctree(struct super_block *sb,
struct btrfs_fs_devices *fs_devices,
char *options);
-int close_ctree(struct btrfs_root *root);
+void close_ctree(struct btrfs_root *root);
int write_ctree_super(struct btrfs_trans_handle *trans,
struct btrfs_root *root, int max_mirrors);
struct buffer_head *btrfs_read_dev_super(struct block_device *bdev);
int btrfs_commit_super(struct btrfs_root *root);
struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
- u64 bytenr, u32 blocksize);
+ u64 bytenr);
struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
struct btrfs_key *location);
int btrfs_init_fs_root(struct btrfs_root *root);
@@ -119,7 +119,7 @@ int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid);
u32 btrfs_csum_data(char *data, u32 seed, size_t len);
void btrfs_csum_final(u32 crc, char *result);
int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
- int metadata);
+ enum btrfs_wq_endio_type metadata);
int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
int rw, struct bio *bio, int mirror_num,
unsigned long bio_flags, u64 bio_offset,
@@ -141,6 +141,8 @@ int btree_lock_page_hook(struct page *page, void *data,
void (*flush_fn)(void *));
int btrfs_calc_num_tolerated_disk_barrier_failures(
struct btrfs_fs_info *fs_info);
+int __init btrfs_end_io_wq_init(void);
+void btrfs_end_io_wq_exit(void);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void btrfs_init_lockdep(void);
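The btrfs_end_io_wq_init()/btrfs_end_io_wq_exit() declarations added above pair with the kmem_cache_free(btrfs_end_io_wq_cache, ...) call introduced in end_workqueue_fn() earlier in this diff. Their bodies live in disk-io.c and are not shown in this excerpt; a plausible sketch of the usual slab-cache setup they would perform looks roughly like this (the cache name and SLAB flag below are assumptions, not taken from the excerpt):

	/* Sketch only -- the real definitions belong in fs/btrfs/disk-io.c. */
	static struct kmem_cache *btrfs_end_io_wq_cache;

	int __init btrfs_end_io_wq_init(void)
	{
		btrfs_end_io_wq_cache = kmem_cache_create("btrfs_end_io_wq",
						sizeof(struct btrfs_end_io_wq), 0,
						SLAB_MEM_SPREAD, NULL);
		if (!btrfs_end_io_wq_cache)
			return -ENOMEM;
		return 0;
	}

	void btrfs_end_io_wq_exit(void)
	{
		kmem_cache_destroy(btrfs_end_io_wq_cache);
	}

These would presumably be called from the module init/exit path alongside the other btrfs cache constructors.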
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index 41422a3de8ed..37d164540c3a 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -70,7 +70,7 @@ static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
return ERR_PTR(-ESTALE);
key.objectid = root_objectid;
- btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
+ key.type = BTRFS_ROOT_ITEM_KEY;
key.offset = (u64)-1;
index = srcu_read_lock(&fs_info->subvol_srcu);
@@ -82,7 +82,7 @@ static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
}
key.objectid = objectid;
- btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
+ key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
inode = btrfs_iget(sb, &key, root, NULL);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index caaf015d6e4b..d56589571012 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -491,7 +491,7 @@ next:
key.objectid);
if (key.type == BTRFS_METADATA_ITEM_KEY)
last = key.objectid +
- fs_info->tree_root->leafsize;
+ fs_info->tree_root->nodesize;
else
last = key.objectid + key.offset;
@@ -765,7 +765,7 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
* different
*/
if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
- offset = root->leafsize;
+ offset = root->nodesize;
metadata = 0;
}
@@ -799,13 +799,13 @@ again:
path->slots[0]);
if (key.objectid == bytenr &&
key.type == BTRFS_EXTENT_ITEM_KEY &&
- key.offset == root->leafsize)
+ key.offset == root->nodesize)
ret = 0;
}
if (ret) {
key.objectid = bytenr;
key.type = BTRFS_EXTENT_ITEM_KEY;
- key.offset = root->leafsize;
+ key.offset = root->nodesize;
btrfs_release_path(path);
goto again;
}
@@ -2651,7 +2651,7 @@ int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
num_bytes = btrfs_calc_trans_metadata_size(root, 1);
num_heads = heads_to_leaves(root, num_heads);
if (num_heads > 1)
- num_bytes += (num_heads - 1) * root->leafsize;
+ num_bytes += (num_heads - 1) * root->nodesize;
num_bytes <<= 1;
global_rsv = &root->fs_info->global_block_rsv;
@@ -3073,10 +3073,10 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
u64, u64, u64, u64, u64, u64, int);
-#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
- if (unlikely(test_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state)))
+
+ if (btrfs_test_is_dummy_root(root))
return 0;
-#endif
+
ref_root = btrfs_header_owner(buf);
nritems = btrfs_header_nritems(buf);
level = btrfs_header_level(buf);
@@ -3097,7 +3097,7 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
for (i = 0; i < nritems; i++) {
if (level == 0) {
btrfs_item_key_to_cpu(buf, &key, i);
- if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
+ if (key.type != BTRFS_EXTENT_DATA_KEY)
continue;
fi = btrfs_item_ptr(buf, i,
struct btrfs_file_extent_item);
@@ -3117,7 +3117,7 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
goto fail;
} else {
bytenr = btrfs_node_blockptr(buf, i);
- num_bytes = btrfs_level_size(root, level - 1);
+ num_bytes = root->nodesize;
ret = process_func(trans, root, bytenr, num_bytes,
parent, ref_root, level - 1, 0,
1);
@@ -4343,11 +4343,21 @@ static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
}
static int btrfs_need_do_async_reclaim(struct btrfs_space_info *space_info,
- struct btrfs_fs_info *fs_info)
+ struct btrfs_fs_info *fs_info,
+ int flush_state)
{
u64 used;
spin_lock(&space_info->lock);
+ /*
+ * We have run out of space and flush_space has not freed anything, so
+ * don't bother doing async reclaim.
+ */
+ if (flush_state > COMMIT_TRANS && space_info->full) {
+ spin_unlock(&space_info->lock);
+ return 0;
+ }
+
used = space_info->bytes_used + space_info->bytes_reserved +
space_info->bytes_pinned + space_info->bytes_readonly +
space_info->bytes_may_use;
@@ -4380,11 +4390,12 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
flush_space(fs_info->fs_root, space_info, to_reclaim,
to_reclaim, flush_state);
flush_state++;
- if (!btrfs_need_do_async_reclaim(space_info, fs_info))
+ if (!btrfs_need_do_async_reclaim(space_info, fs_info,
+ flush_state))
return;
} while (flush_state <= COMMIT_TRANS);
- if (btrfs_need_do_async_reclaim(space_info, fs_info))
+ if (btrfs_need_do_async_reclaim(space_info, fs_info, flush_state))
queue_work(system_unbound_wq, work);
}
@@ -4502,7 +4513,13 @@ again:
space_info->flush = 1;
} else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
used += orig_bytes;
- if (need_do_async_reclaim(space_info, root->fs_info, used) &&
+ /*
+ * We will do the space reservation dance during log replay,
+ * which means we won't have fs_info->fs_root set, so don't do
+ * the async reclaim as we will panic.
+ */
+ if (!root->fs_info->log_root_recovering &&
+ need_do_async_reclaim(space_info, root->fs_info, used) &&
!work_busy(&root->fs_info->async_reclaim_work))
queue_work(system_unbound_wq,
&root->fs_info->async_reclaim_work);
@@ -4839,7 +4856,7 @@ static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
if (num_bytes * 3 > meta_used)
num_bytes = div64_u64(meta_used, 3);
- return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
+ return ALIGN(num_bytes, fs_info->extent_root->nodesize << 10);
}
static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
@@ -4988,7 +5005,7 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
if (root->fs_info->quota_enabled) {
/* One for parent inode, two for dir entries */
- num_bytes = 3 * root->leafsize;
+ num_bytes = 3 * root->nodesize;
ret = btrfs_qgroup_reserve(root, num_bytes);
if (ret)
return ret;
@@ -5176,7 +5193,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
if (root->fs_info->quota_enabled) {
ret = btrfs_qgroup_reserve(root, num_bytes +
- nr_extents * root->leafsize);
+ nr_extents * root->nodesize);
if (ret)
goto out_fail;
}
@@ -5185,7 +5202,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
if (unlikely(ret)) {
if (root->fs_info->quota_enabled)
btrfs_qgroup_free(root, num_bytes +
- nr_extents * root->leafsize);
+ nr_extents * root->nodesize);
goto out_fail;
}
@@ -5301,7 +5318,7 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
btrfs_ino(inode), to_free, 0);
if (root->fs_info->quota_enabled) {
btrfs_qgroup_free(root, num_bytes +
- dropped * root->leafsize);
+ dropped * root->nodesize);
}
btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
@@ -5422,6 +5439,20 @@ static int update_block_group(struct btrfs_root *root,
spin_unlock(&cache->space_info->lock);
} else {
old_val -= num_bytes;
+
+ /*
+ * No longer have used bytes in this block group, queue
+ * it for deletion.
+ */
+ if (old_val == 0) {
+ spin_lock(&info->unused_bgs_lock);
+ if (list_empty(&cache->bg_list)) {
+ btrfs_get_block_group(cache);
+ list_add_tail(&cache->bg_list,
+ &info->unused_bgs);
+ }
+ spin_unlock(&info->unused_bgs_lock);
+ }
btrfs_set_block_group_used(&cache->item, old_val);
cache->pinned += num_bytes;
cache->space_info->bytes_pinned += num_bytes;
@@ -6233,10 +6264,9 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
int ret;
struct btrfs_fs_info *fs_info = root->fs_info;
-#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
- if (unlikely(test_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state)))
+ if (btrfs_test_is_dummy_root(root))
return 0;
-#endif
+
add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
/*
@@ -6263,14 +6293,6 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
return ret;
}
-static u64 stripe_align(struct btrfs_root *root,
- struct btrfs_block_group_cache *cache,
- u64 val, u64 num_bytes)
-{
- u64 ret = ALIGN(val, root->stripesize);
- return ret;
-}
-
/*
* when we wait for progress in the block group caching, its because
* our allocation attempt failed at least once. So, we must sleep
@@ -6464,7 +6486,7 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
bool have_caching_bg = false;
WARN_ON(num_bytes < root->sectorsize);
- btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
+ ins->type = BTRFS_EXTENT_ITEM_KEY;
ins->objectid = 0;
ins->offset = 0;
@@ -6751,8 +6773,7 @@ unclustered_alloc:
goto loop;
}
checks:
- search_start = stripe_align(root, block_group,
- offset, num_bytes);
+ search_start = ALIGN(offset, root->stripesize);
/* move on to the next group */
if (search_start + num_bytes >
@@ -7077,7 +7098,7 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
path = btrfs_alloc_path();
if (!path) {
btrfs_free_and_pin_reserved_extent(root, ins->objectid,
- root->leafsize);
+ root->nodesize);
return -ENOMEM;
}
@@ -7086,7 +7107,7 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
ins, size);
if (ret) {
btrfs_free_and_pin_reserved_extent(root, ins->objectid,
- root->leafsize);
+ root->nodesize);
btrfs_free_path(path);
return ret;
}
@@ -7101,7 +7122,7 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
if (skinny_metadata) {
iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
- num_bytes = root->leafsize;
+ num_bytes = root->nodesize;
} else {
block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
btrfs_set_tree_block_key(leaf, block_info, key);
@@ -7131,14 +7152,14 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
return ret;
}
- ret = update_block_group(root, ins->objectid, root->leafsize, 1);
+ ret = update_block_group(root, ins->objectid, root->nodesize, 1);
if (ret) { /* -ENOENT, logic error */
btrfs_err(fs_info, "update block group failed for %llu %llu",
ins->objectid, ins->offset);
BUG();
}
- trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->leafsize);
+ trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->nodesize);
return ret;
}
@@ -7213,17 +7234,19 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
btrfs_set_buffer_uptodate(buf);
if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
+ buf->log_index = root->log_transid % 2;
/*
* we allow two log transactions at a time, use different
* EXTENT bit to differentiate dirty pages.
*/
- if (root->log_transid % 2 == 0)
+ if (buf->log_index == 0)
set_extent_dirty(&root->dirty_log_pages, buf->start,
buf->start + buf->len - 1, GFP_NOFS);
else
set_extent_new(&root->dirty_log_pages, buf->start,
buf->start + buf->len - 1, GFP_NOFS);
} else {
+ buf->log_index = -1;
set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
buf->start + buf->len - 1, GFP_NOFS);
}
@@ -7300,8 +7323,8 @@ static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
*
* returns the tree buffer or NULL.
*/
-struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, u32 blocksize,
+struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
u64 parent, u64 root_objectid,
struct btrfs_disk_key *key, int level,
u64 hint, u64 empty_size)
@@ -7311,18 +7334,18 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
struct extent_buffer *buf;
u64 flags = 0;
int ret;
+ u32 blocksize = root->nodesize;
bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
SKINNY_METADATA);
-#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
- if (unlikely(test_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state))) {
+ if (btrfs_test_is_dummy_root(root)) {
buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
blocksize, level);
if (!IS_ERR(buf))
root->alloc_bytenr += blocksize;
return buf;
}
-#endif
+
block_rsv = use_block_rsv(trans, root, blocksize);
if (IS_ERR(block_rsv))
return ERR_CAST(block_rsv);
@@ -7417,7 +7440,7 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
eb = path->nodes[wc->level];
nritems = btrfs_header_nritems(eb);
- blocksize = btrfs_level_size(root, wc->level - 1);
+ blocksize = root->nodesize;
for (slot = path->slots[wc->level]; slot < nritems; slot++) {
if (nread >= wc->reada_count)
@@ -7464,10 +7487,7 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
continue;
}
reada:
- ret = readahead_tree_block(root, bytenr, blocksize,
- generation);
- if (ret)
- break;
+ readahead_tree_block(root, bytenr, blocksize);
nread++;
}
wc->reada_slot = slot;
@@ -7626,7 +7646,6 @@ walk_down:
level = root_level;
while (level >= 0) {
if (path->nodes[level] == NULL) {
- int child_bsize = root->nodesize;
int parent_slot;
u64 child_gen;
u64 child_bytenr;
@@ -7638,8 +7657,7 @@ walk_down:
child_bytenr = btrfs_node_blockptr(eb, parent_slot);
child_gen = btrfs_node_ptr_generation(eb, parent_slot);
- eb = read_tree_block(root, child_bytenr, child_bsize,
- child_gen);
+ eb = read_tree_block(root, child_bytenr, child_gen);
if (!eb || !extent_buffer_uptodate(eb)) {
ret = -EIO;
goto out;
@@ -7655,7 +7673,7 @@ walk_down:
ret = btrfs_qgroup_record_ref(trans, root->fs_info,
root->objectid,
child_bytenr,
- child_bsize,
+ root->nodesize,
BTRFS_QGROUP_OPER_SUB_SUBTREE,
0);
if (ret)
@@ -7806,9 +7824,9 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
}
bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
- blocksize = btrfs_level_size(root, level - 1);
+ blocksize = root->nodesize;
- next = btrfs_find_tree_block(root, bytenr, blocksize);
+ next = btrfs_find_tree_block(root, bytenr);
if (!next) {
next = btrfs_find_create_tree_block(root, bytenr, blocksize);
if (!next)
@@ -7870,7 +7888,7 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
if (!next) {
if (reada && level == 1)
reada_walk_down(trans, root, wc, path);
- next = read_tree_block(root, bytenr, blocksize, generation);
+ next = read_tree_block(root, bytenr, generation);
if (!next || !extent_buffer_uptodate(next)) {
free_extent_buffer(next);
return -EIO;
@@ -8853,6 +8871,16 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
}
up_write(&info->commit_root_sem);
+ spin_lock(&info->unused_bgs_lock);
+ while (!list_empty(&info->unused_bgs)) {
+ block_group = list_first_entry(&info->unused_bgs,
+ struct btrfs_block_group_cache,
+ bg_list);
+ list_del_init(&block_group->bg_list);
+ btrfs_put_block_group(block_group);
+ }
+ spin_unlock(&info->unused_bgs_lock);
+
spin_lock(&info->block_group_cache_lock);
while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
block_group = rb_entry(n, struct btrfs_block_group_cache,
@@ -8987,7 +9015,7 @@ btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
init_rwsem(&cache->data_rwsem);
INIT_LIST_HEAD(&cache->list);
INIT_LIST_HEAD(&cache->cluster_list);
- INIT_LIST_HEAD(&cache->new_bg_list);
+ INIT_LIST_HEAD(&cache->bg_list);
btrfs_init_free_space_ctl(cache);
return cache;
@@ -9009,7 +9037,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
root = info->extent_root;
key.objectid = 0;
key.offset = 0;
- btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
+ key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
@@ -9128,8 +9156,18 @@ int btrfs_read_block_groups(struct btrfs_root *root)
__link_block_group(space_info, cache);
set_avail_alloc_bits(root->fs_info, cache->flags);
- if (btrfs_chunk_readonly(root, cache->key.objectid))
+ if (btrfs_chunk_readonly(root, cache->key.objectid)) {
set_block_group_ro(cache, 1);
+ } else if (btrfs_block_group_used(&cache->item) == 0) {
+ spin_lock(&info->unused_bgs_lock);
+ /* Should always be true but just in case. */
+ if (list_empty(&cache->bg_list)) {
+ btrfs_get_block_group(cache);
+ list_add_tail(&cache->bg_list,
+ &info->unused_bgs);
+ }
+ spin_unlock(&info->unused_bgs_lock);
+ }
}
list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
@@ -9170,10 +9208,8 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
struct btrfs_key key;
int ret = 0;
- list_for_each_entry_safe(block_group, tmp, &trans->new_bgs,
- new_bg_list) {
- list_del_init(&block_group->new_bg_list);
-
+ list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
+ list_del_init(&block_group->bg_list);
if (ret)
continue;
@@ -9259,7 +9295,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
__link_block_group(cache->space_info, cache);
- list_add_tail(&cache->new_bg_list, &trans->new_bgs);
+ list_add_tail(&cache->bg_list, &trans->new_bgs);
set_avail_alloc_bits(extent_root->fs_info, type);
@@ -9413,8 +9449,6 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
memcpy(&key, &block_group->key, sizeof(key));
- btrfs_clear_space_info_full(root->fs_info);
-
btrfs_put_block_group(block_group);
btrfs_put_block_group(block_group);
@@ -9430,6 +9464,101 @@ out:
return ret;
}
+/*
+ * Process the unused_bgs list and remove any that don't have any allocated
+ * space inside of them.
+ */
+void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_block_group_cache *block_group;
+ struct btrfs_space_info *space_info;
+ struct btrfs_root *root = fs_info->extent_root;
+ struct btrfs_trans_handle *trans;
+ int ret = 0;
+
+ if (!fs_info->open)
+ return;
+
+ spin_lock(&fs_info->unused_bgs_lock);
+ while (!list_empty(&fs_info->unused_bgs)) {
+ u64 start, end;
+
+ block_group = list_first_entry(&fs_info->unused_bgs,
+ struct btrfs_block_group_cache,
+ bg_list);
+ space_info = block_group->space_info;
+ list_del_init(&block_group->bg_list);
+ if (ret || btrfs_mixed_space_info(space_info)) {
+ btrfs_put_block_group(block_group);
+ continue;
+ }
+ spin_unlock(&fs_info->unused_bgs_lock);
+
+ /* Don't want to race with allocators so take the groups_sem */
+ down_write(&space_info->groups_sem);
+ spin_lock(&block_group->lock);
+ if (block_group->reserved ||
+ btrfs_block_group_used(&block_group->item) ||
+ block_group->ro) {
+ /*
+ * We want to bail if we made new allocations or have
+ * outstanding allocations in this block group. We do
+ * the ro check in case balance is currently acting on
+ * this block group.
+ */
+ spin_unlock(&block_group->lock);
+ up_write(&space_info->groups_sem);
+ goto next;
+ }
+ spin_unlock(&block_group->lock);
+
+ /* We don't want to force the issue, only flip if it's ok. */
+ ret = set_block_group_ro(block_group, 0);
+ up_write(&space_info->groups_sem);
+ if (ret < 0) {
+ ret = 0;
+ goto next;
+ }
+
+ /*
+ * Want to do this before we do anything else so we can recover
+ * properly if we fail to join the transaction.
+ */
+ trans = btrfs_join_transaction(root);
+ if (IS_ERR(trans)) {
+ btrfs_set_block_group_rw(root, block_group);
+ ret = PTR_ERR(trans);
+ goto next;
+ }
+
+ /*
+ * We could have pending pinned extents for this block group;
+ * just delete them, we don't care about them anymore.
+ */
+ start = block_group->key.objectid;
+ end = start + block_group->key.offset - 1;
+ clear_extent_bits(&fs_info->freed_extents[0], start, end,
+ EXTENT_DIRTY, GFP_NOFS);
+ clear_extent_bits(&fs_info->freed_extents[1], start, end,
+ EXTENT_DIRTY, GFP_NOFS);
+
+ /* Reset pinned so btrfs_put_block_group doesn't complain */
+ block_group->pinned = 0;
+
+ /*
+ * Btrfs_remove_chunk will abort the transaction if things go
+ * horribly wrong.
+ */
+ ret = btrfs_remove_chunk(trans, root,
+ block_group->key.objectid);
+ btrfs_end_transaction(trans, root);
+next:
+ btrfs_put_block_group(block_group);
+ spin_lock(&fs_info->unused_bgs_lock);
+ }
+ spin_unlock(&fs_info->unused_bgs_lock);
+}
+
int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
struct btrfs_space_info *space_info;
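btrfs_delete_unused_bgs() above uses a common consumer pattern for unused_bgs: the spinlock protects only the list itself, one entry is detached while the lock is held, the lock is dropped before anything that can sleep (the groups_sem rwsem, joining a transaction), and it is reacquired before looking at the list again. A stripped-down user-space analogue of that loop, with a pthread mutex standing in for the spinlock (nothing here is btrfs API):

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct item {
		struct item *next;
		int id;
	};

	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct item *unused_head;	/* plays the role of fs_info->unused_bgs */

	/* May block; must never be called with list_lock held. */
	static void slow_process(struct item *it)
	{
		printf("deleting unused group %d\n", it->id);
	}

	static void delete_unused(void)
	{
		pthread_mutex_lock(&list_lock);
		while (unused_head) {
			/* Detach one entry while the lock is held... */
			struct item *it = unused_head;
			unused_head = it->next;
			/* ...then drop the lock before doing anything slow. */
			pthread_mutex_unlock(&list_lock);

			slow_process(it);
			free(it);

			/* Reacquire before touching the list again. */
			pthread_mutex_lock(&list_lock);
		}
		pthread_mutex_unlock(&list_lock);
	}

	int main(void)
	{
		for (int i = 0; i < 3; i++) {
			struct item *it = malloc(sizeof(*it));
			if (!it)
				break;
			it->id = i;
			it->next = unused_head;
			unused_head = it;
		}
		delete_unused();
		return 0;
	}

Dropping the lock before the slow work is what lets btrfs_delete_unused_bgs() take sleeping locks and join a transaction without holding unused_bgs_lock, at the cost of re-checking the list head on every iteration.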
@@ -9561,7 +9690,7 @@ void btrfs_end_nocow_write(struct btrfs_root *root)
int btrfs_start_nocow_write(struct btrfs_root *root)
{
- if (unlikely(atomic_read(&root->will_be_snapshoted)))
+ if (atomic_read(&root->will_be_snapshoted))
return 0;
percpu_counter_inc(&root->subv_writers->counter);
@@ -9569,7 +9698,7 @@ int btrfs_start_nocow_write(struct btrfs_root *root)
* Make sure counter is updated before we check for snapshot creation.
*/
smp_mb();
- if (unlikely(atomic_read(&root->will_be_snapshoted))) {
+ if (atomic_read(&root->will_be_snapshoted)) {
btrfs_end_nocow_write(root);
return 0;
}
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index af0359dcf337..bf3f424e0013 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -25,6 +25,11 @@ static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;
static struct bio_set *btrfs_bioset;
+static inline bool extent_state_in_tree(const struct extent_state *state)
+{
+ return !RB_EMPTY_NODE(&state->rb_node);
+}
+
#ifdef CONFIG_BTRFS_DEBUG
static LIST_HEAD(buffers);
static LIST_HEAD(states);
@@ -59,9 +64,9 @@ void btrfs_leak_debug_check(void)
while (!list_empty(&states)) {
state = list_entry(states.next, struct extent_state, leak_list);
- printk(KERN_ERR "BTRFS: state leak: start %llu end %llu "
- "state %lu in tree %p refs %d\n",
- state->start, state->end, state->state, state->tree,
+ pr_err("BTRFS: state leak: start %llu end %llu state %lu in tree %d refs %d\n",
+ state->start, state->end, state->state,
+ extent_state_in_tree(state),
atomic_read(&state->refs));
list_del(&state->leak_list);
kmem_cache_free(extent_state_cache, state);
@@ -209,7 +214,7 @@ static struct extent_state *alloc_extent_state(gfp_t mask)
return state;
state->state = 0;
state->private = 0;
- state->tree = NULL;
+ RB_CLEAR_NODE(&state->rb_node);
btrfs_leak_debug_add(&state->leak_list, &states);
atomic_set(&state->refs, 1);
init_waitqueue_head(&state->wq);
@@ -222,7 +227,7 @@ void free_extent_state(struct extent_state *state)
if (!state)
return;
if (atomic_dec_and_test(&state->refs)) {
- WARN_ON(state->tree);
+ WARN_ON(extent_state_in_tree(state));
btrfs_leak_debug_del(&state->leak_list);
trace_free_extent_state(state, _RET_IP_);
kmem_cache_free(extent_state_cache, state);
@@ -371,8 +376,8 @@ static void merge_state(struct extent_io_tree *tree,
other->state == state->state) {
merge_cb(tree, state, other);
state->start = other->start;
- other->tree = NULL;
rb_erase(&other->rb_node, &tree->state);
+ RB_CLEAR_NODE(&other->rb_node);
free_extent_state(other);
}
}
@@ -383,8 +388,8 @@ static void merge_state(struct extent_io_tree *tree,
other->state == state->state) {
merge_cb(tree, state, other);
state->end = other->end;
- other->tree = NULL;
rb_erase(&other->rb_node, &tree->state);
+ RB_CLEAR_NODE(&other->rb_node);
free_extent_state(other);
}
}
@@ -442,7 +447,6 @@ static int insert_state(struct extent_io_tree *tree,
found->start, found->end, start, end);
return -EEXIST;
}
- state->tree = tree;
merge_state(tree, state);
return 0;
}
@@ -486,7 +490,6 @@ static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
free_extent_state(prealloc);
return -EEXIST;
}
- prealloc->tree = tree;
return 0;
}
@@ -524,9 +527,9 @@ static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
wake_up(&state->wq);
if (state->state == 0) {
next = next_state(state);
- if (state->tree) {
+ if (extent_state_in_tree(state)) {
rb_erase(&state->rb_node, &tree->state);
- state->tree = NULL;
+ RB_CLEAR_NODE(&state->rb_node);
free_extent_state(state);
} else {
WARN_ON(1);
@@ -606,8 +609,8 @@ again:
cached_state = NULL;
}
- if (cached && cached->tree && cached->start <= start &&
- cached->end > start) {
+ if (cached && extent_state_in_tree(cached) &&
+ cached->start <= start && cached->end > start) {
if (clear)
atomic_dec(&cached->refs);
state = cached;
@@ -843,7 +846,7 @@ again:
if (cached_state && *cached_state) {
state = *cached_state;
if (state->start <= start && state->end > start &&
- state->tree) {
+ extent_state_in_tree(state)) {
node = &state->rb_node;
goto hit_next;
}
@@ -1069,7 +1072,7 @@ again:
if (cached_state && *cached_state) {
state = *cached_state;
if (state->start <= start && state->end > start &&
- state->tree) {
+ extent_state_in_tree(state)) {
node = &state->rb_node;
goto hit_next;
}
@@ -1459,7 +1462,7 @@ int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
spin_lock(&tree->lock);
if (cached_state && *cached_state) {
state = *cached_state;
- if (state->end == start - 1 && state->tree) {
+ if (state->end == start - 1 && extent_state_in_tree(state)) {
n = rb_next(&state->rb_node);
while (n) {
state = rb_entry(n, struct extent_state,
@@ -1905,7 +1908,7 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
int bitset = 0;
spin_lock(&tree->lock);
- if (cached && cached->tree && cached->start <= start &&
+ if (cached && extent_state_in_tree(cached) && cached->start <= start &&
cached->end > start)
node = &cached->rb_node;
else
@@ -1959,27 +1962,7 @@ static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
SetPageUptodate(page);
}
-/*
- * When IO fails, either with EIO or csum verification fails, we
- * try other mirrors that might have a good copy of the data. This
- * io_failure_record is used to record state as we go through all the
- * mirrors. If another mirror has good data, the page is set up to date
- * and things continue. If a good mirror can't be found, the original
- * bio end_io callback is called to indicate things have failed.
- */
-struct io_failure_record {
- struct page *page;
- u64 start;
- u64 len;
- u64 logical;
- unsigned long bio_flags;
- int this_mirror;
- int failed_mirror;
- int in_validation;
-};
-
-static int free_io_failure(struct inode *inode, struct io_failure_record *rec,
- int did_repair)
+int free_io_failure(struct inode *inode, struct io_failure_record *rec)
{
int ret;
int err = 0;
@@ -2012,10 +1995,10 @@ static int free_io_failure(struct inode *inode, struct io_failure_record *rec,
* currently, there can be no more than two copies of every data bit. thus,
* exactly one rewrite is required.
*/
-int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
- u64 length, u64 logical, struct page *page,
- int mirror_num)
+int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
+ struct page *page, unsigned int pg_offset, int mirror_num)
{
+ struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
struct bio *bio;
struct btrfs_device *dev;
u64 map_length = 0;
@@ -2053,7 +2036,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
return -EIO;
}
bio->bi_bdev = dev->bdev;
- bio_add_page(bio, page, length, start - page_offset(page));
+ bio_add_page(bio, page, length, pg_offset);
if (btrfsic_submit_bio_wait(WRITE_SYNC, bio)) {
/* try to remap that extent elsewhere? */
@@ -2063,10 +2046,9 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
}
printk_ratelimited_in_rcu(KERN_INFO
- "BTRFS: read error corrected: ino %lu off %llu "
- "(dev %s sector %llu)\n", page->mapping->host->i_ino,
- start, rcu_str_deref(dev->name), sector);
-
+ "BTRFS: read error corrected: ino %llu off %llu (dev %s sector %llu)\n",
+ btrfs_ino(inode), start,
+ rcu_str_deref(dev->name), sector);
bio_put(bio);
return 0;
}
@@ -2082,9 +2064,11 @@ int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
return -EROFS;
for (i = 0; i < num_pages; i++) {
- struct page *p = extent_buffer_page(eb, i);
- ret = repair_io_failure(root->fs_info, start, PAGE_CACHE_SIZE,
- start, p, mirror_num);
+ struct page *p = eb->pages[i];
+
+ ret = repair_io_failure(root->fs_info->btree_inode, start,
+ PAGE_CACHE_SIZE, start, p,
+ start - page_offset(p), mirror_num);
if (ret)
break;
start += PAGE_CACHE_SIZE;
@@ -2097,16 +2081,15 @@ int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
* each time an IO finishes, we do a fast check in the IO failure tree
* to see if we need to process or clean up an io_failure_record
*/
-static int clean_io_failure(u64 start, struct page *page)
+int clean_io_failure(struct inode *inode, u64 start, struct page *page,
+ unsigned int pg_offset)
{
u64 private;
u64 private_failure;
struct io_failure_record *failrec;
- struct inode *inode = page->mapping->host;
struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
struct extent_state *state;
int num_copies;
- int did_repair = 0;
int ret;
private = 0;
@@ -2127,7 +2110,6 @@ static int clean_io_failure(u64 start, struct page *page)
/* there was no real error, just free the record */
pr_debug("clean_io_failure: freeing dummy error at %llu\n",
failrec->start);
- did_repair = 1;
goto out;
}
if (fs_info->sb->s_flags & MS_RDONLY)
@@ -2144,55 +2126,70 @@ static int clean_io_failure(u64 start, struct page *page)
num_copies = btrfs_num_copies(fs_info, failrec->logical,
failrec->len);
if (num_copies > 1) {
- ret = repair_io_failure(fs_info, start, failrec->len,
- failrec->logical, page,
- failrec->failed_mirror);
- did_repair = !ret;
+ repair_io_failure(inode, start, failrec->len,
+ failrec->logical, page,
+ pg_offset, failrec->failed_mirror);
}
- ret = 0;
}
out:
- if (!ret)
- ret = free_io_failure(inode, failrec, did_repair);
+ free_io_failure(inode, failrec);
- return ret;
+ return 0;
}
/*
- * this is a generic handler for readpage errors (default
- * readpage_io_failed_hook). if other copies exist, read those and write back
- * good data to the failed position. does not investigate in remapping the
- * failed extent elsewhere, hoping the device will be smart enough to do this as
- * needed
+ * Can be called while
+ * - holding the extent lock
+ * - under an ordered extent
+ * - the inode is being freed
*/
+void btrfs_free_io_failure_record(struct inode *inode, u64 start, u64 end)
+{
+ struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
+ struct io_failure_record *failrec;
+ struct extent_state *state, *next;
-static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
- struct page *page, u64 start, u64 end,
- int failed_mirror)
+ if (RB_EMPTY_ROOT(&failure_tree->state))
+ return;
+
+ spin_lock(&failure_tree->lock);
+ state = find_first_extent_bit_state(failure_tree, start, EXTENT_DIRTY);
+ while (state) {
+ if (state->start > end)
+ break;
+
+ ASSERT(state->end <= end);
+
+ next = next_state(state);
+
+ failrec = (struct io_failure_record *)state->private;
+ free_extent_state(state);
+ kfree(failrec);
+
+ state = next;
+ }
+ spin_unlock(&failure_tree->lock);
+}
+
+int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
+ struct io_failure_record **failrec_ret)
{
- struct io_failure_record *failrec = NULL;
+ struct io_failure_record *failrec;
u64 private;
struct extent_map *em;
- struct inode *inode = page->mapping->host;
struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
- struct bio *bio;
- struct btrfs_io_bio *btrfs_failed_bio;
- struct btrfs_io_bio *btrfs_bio;
- int num_copies;
int ret;
- int read_mode;
u64 logical;
- BUG_ON(failed_bio->bi_rw & REQ_WRITE);
-
ret = get_state_private(failure_tree, start, &private);
if (ret) {
failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
if (!failrec)
return -ENOMEM;
+
failrec->start = start;
failrec->len = end - start + 1;
failrec->this_mirror = 0;
@@ -2212,11 +2209,11 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
em = NULL;
}
read_unlock(&em_tree->lock);
-
if (!em) {
kfree(failrec);
return -EIO;
}
+
logical = start - em->start;
logical = em->block_start + logical;
if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
@@ -2225,8 +2222,10 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
extent_set_compress_type(&failrec->bio_flags,
em->compress_type);
}
- pr_debug("bio_readpage_error: (new) logical=%llu, start=%llu, "
- "len=%llu\n", logical, start, failrec->len);
+
+ pr_debug("Get IO Failure Record: (new) logical=%llu, start=%llu, len=%llu\n",
+ logical, start, failrec->len);
+
failrec->logical = logical;
free_extent_map(em);
@@ -2246,8 +2245,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
}
} else {
failrec = (struct io_failure_record *)(unsigned long)private;
- pr_debug("bio_readpage_error: (found) logical=%llu, "
- "start=%llu, len=%llu, validation=%d\n",
+ pr_debug("Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu, validation=%d\n",
failrec->logical, failrec->start, failrec->len,
failrec->in_validation);
/*
@@ -2256,6 +2254,17 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
* clean_io_failure() clean all those errors at once.
*/
}
+
+ *failrec_ret = failrec;
+
+ return 0;
+}
+
+int btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
+ struct io_failure_record *failrec, int failed_mirror)
+{
+ int num_copies;
+
num_copies = btrfs_num_copies(BTRFS_I(inode)->root->fs_info,
failrec->logical, failrec->len);
if (num_copies == 1) {
@@ -2264,10 +2273,9 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
* all the retry and error correction code that follows. no
* matter what the error is, it is very likely to persist.
*/
- pr_debug("bio_readpage_error: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d\n",
+ pr_debug("Check Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d\n",
num_copies, failrec->this_mirror, failed_mirror);
- free_io_failure(inode, failrec, 0);
- return -EIO;
+ return 0;
}
/*
@@ -2287,7 +2295,6 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
BUG_ON(failrec->in_validation);
failrec->in_validation = 1;
failrec->this_mirror = failed_mirror;
- read_mode = READ_SYNC | REQ_FAILFAST_DEV;
} else {
/*
* we're ready to fulfill a) and b) alongside. get a good copy
@@ -2303,25 +2310,36 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
failrec->this_mirror++;
if (failrec->this_mirror == failed_mirror)
failrec->this_mirror++;
- read_mode = READ_SYNC;
}
if (failrec->this_mirror > num_copies) {
- pr_debug("bio_readpage_error: (fail) num_copies=%d, next_mirror %d, failed_mirror %d\n",
+ pr_debug("Check Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d\n",
num_copies, failrec->this_mirror, failed_mirror);
- free_io_failure(inode, failrec, 0);
- return -EIO;
+ return 0;
}
+ return 1;
+}
+
+
+struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
+ struct io_failure_record *failrec,
+ struct page *page, int pg_offset, int icsum,
+ bio_end_io_t *endio_func, void *data)
+{
+ struct bio *bio;
+ struct btrfs_io_bio *btrfs_failed_bio;
+ struct btrfs_io_bio *btrfs_bio;
+
bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
- if (!bio) {
- free_io_failure(inode, failrec, 0);
- return -EIO;
- }
- bio->bi_end_io = failed_bio->bi_end_io;
+ if (!bio)
+ return NULL;
+
+ bio->bi_end_io = endio_func;
bio->bi_iter.bi_sector = failrec->logical >> 9;
bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
bio->bi_iter.bi_size = 0;
+ bio->bi_private = data;
btrfs_failed_bio = btrfs_io_bio(failed_bio);
if (btrfs_failed_bio->csum) {
@@ -2330,21 +2348,73 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
btrfs_bio = btrfs_io_bio(bio);
btrfs_bio->csum = btrfs_bio->csum_inline;
- phy_offset >>= inode->i_sb->s_blocksize_bits;
- phy_offset *= csum_size;
- memcpy(btrfs_bio->csum, btrfs_failed_bio->csum + phy_offset,
+ icsum *= csum_size;
+ memcpy(btrfs_bio->csum, btrfs_failed_bio->csum + icsum,
csum_size);
}
- bio_add_page(bio, page, failrec->len, start - page_offset(page));
+ bio_add_page(bio, page, failrec->len, pg_offset);
+
+ return bio;
+}
+
+/*
+ * this is a generic handler for readpage errors (default
+ * readpage_io_failed_hook). if other copies exist, read those and write back
+ * good data to the failed position. does not investigate in remapping the
+ * failed extent elsewhere, hoping the device will be smart enough to do this as
+ * needed
+ */
+
+static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
+ struct page *page, u64 start, u64 end,
+ int failed_mirror)
+{
+ struct io_failure_record *failrec;
+ struct inode *inode = page->mapping->host;
+ struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
+ struct bio *bio;
+ int read_mode;
+ int ret;
+
+ BUG_ON(failed_bio->bi_rw & REQ_WRITE);
+
+ ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
+ if (ret)
+ return ret;
+
+ ret = btrfs_check_repairable(inode, failed_bio, failrec, failed_mirror);
+ if (!ret) {
+ free_io_failure(inode, failrec);
+ return -EIO;
+ }
+
+ if (failed_bio->bi_vcnt > 1)
+ read_mode = READ_SYNC | REQ_FAILFAST_DEV;
+ else
+ read_mode = READ_SYNC;
+
+ phy_offset >>= inode->i_sb->s_blocksize_bits;
+ bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
+ start - page_offset(page),
+ (int)phy_offset, failed_bio->bi_end_io,
+ NULL);
+ if (!bio) {
+ free_io_failure(inode, failrec);
+ return -EIO;
+ }
- pr_debug("bio_readpage_error: submitting new read[%#x] to "
- "this_mirror=%d, num_copies=%d, in_validation=%d\n", read_mode,
- failrec->this_mirror, num_copies, failrec->in_validation);
+ pr_debug("Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d\n",
+ read_mode, failrec->this_mirror, failrec->in_validation);
ret = tree->ops->submit_bio_hook(inode, read_mode, bio,
failrec->this_mirror,
failrec->bio_flags, 0);
+ if (ret) {
+ free_io_failure(inode, failrec);
+ bio_put(bio);
+ }
+
return ret;
}
@@ -2469,7 +2539,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
struct inode *inode = page->mapping->host;
pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, "
- "mirror=%lu\n", (u64)bio->bi_iter.bi_sector, err,
+ "mirror=%u\n", (u64)bio->bi_iter.bi_sector, err,
io_bio->mirror_num);
tree = &BTRFS_I(inode)->io_tree;
@@ -2503,7 +2573,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
if (ret)
uptodate = 0;
else
- clean_io_failure(start, page);
+ clean_io_failure(inode, start, page, 0);
}
if (likely(uptodate))
@@ -2540,12 +2610,12 @@ readpage_ok:
if (likely(uptodate)) {
loff_t i_size = i_size_read(inode);
pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
- unsigned offset;
+ unsigned off;
/* Zero out the end if this page straddles i_size */
- offset = i_size & (PAGE_CACHE_SIZE-1);
- if (page->index == end_index && offset)
- zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+ off = i_size & (PAGE_CACHE_SIZE-1);
+ if (page->index == end_index && off)
+ zero_user_segment(page, off, PAGE_CACHE_SIZE);
SetPageUptodate(page);
} else {
ClearPageUptodate(page);
@@ -2618,9 +2688,18 @@ btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask)
{
- return bio_clone_bioset(bio, gfp_mask, btrfs_bioset);
-}
+ struct btrfs_io_bio *btrfs_bio;
+ struct bio *new;
+ new = bio_clone_bioset(bio, gfp_mask, btrfs_bioset);
+ if (new) {
+ btrfs_bio = btrfs_io_bio(new);
+ btrfs_bio->csum = NULL;
+ btrfs_bio->csum_allocated = NULL;
+ btrfs_bio->end_io = NULL;
+ }
+ return new;
+}
/* this also allocates from the btrfs_bioset */
struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
@@ -3501,7 +3580,7 @@ lock_extent_buffer_for_io(struct extent_buffer *eb,
num_pages = num_extent_pages(eb->start, eb->len);
for (i = 0; i < num_pages; i++) {
- struct page *p = extent_buffer_page(eb, i);
+ struct page *p = eb->pages[i];
if (!trylock_page(p)) {
if (!flush) {
@@ -3522,6 +3601,68 @@ static void end_extent_buffer_writeback(struct extent_buffer *eb)
wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
}
+static void set_btree_ioerr(struct page *page)
+{
+ struct extent_buffer *eb = (struct extent_buffer *)page->private;
+ struct btrfs_inode *btree_ino = BTRFS_I(eb->fs_info->btree_inode);
+
+ SetPageError(page);
+ if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
+ return;
+
+ /*
+ * If writeback for a btree extent that doesn't belong to a log tree
+ * failed, increment the counter transaction->eb_write_errors.
+ * We do this because while the transaction is running and before it's
+ * committing (when we call filemap_fdata[write|wait]_range against
+ * the btree inode), we might have
+ * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
+ * returns an error or an error happens during writeback, when we're
+ * committing the transaction we wouldn't know about it, since the pages
+ * may no longer be dirty nor marked for writeback (if a
+ * subsequent modification to the extent buffer didn't happen before the
+ * transaction commit), which makes filemap_fdata[write|wait]_range not
+ * able to find the pages tagged with SetPageError at transaction
+ * commit time. So if this happens we must abort the transaction,
+ * otherwise we commit a super block with btree roots that point to
+ * btree nodes/leafs whose content on disk is invalid - either garbage
+ * or the content of some node/leaf from a past generation that got
+ * cowed or deleted and is no longer valid.
+ *
+ * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
+ * not be enough - we need to distinguish between log tree extents vs
+ * non-log tree extents, and the next filemap_fdatawait_range() call
+ * will catch and clear such errors in the mapping - and that call might
+ * be from a log sync and not from a transaction commit. Also, checking
+ * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
+ * not done and would not be reliable - the eb might have been released
+ * from memory and reading it back again means that flag would not be
+ * set (since it's a runtime flag, not persisted on disk).
+ *
+ * Using the flags below in the btree inode also covers the case where
+ * writepages() returns success after starting writeback for all dirty
+ * pages, but that writeback finishes with errors before
+ * filemap_fdatawait_range() is called: because we were not using
+ * AS_EIO/AS_ENOSPC, filemap_fdatawait_range() would return success, as
+ * it could not know that writeback errors happened (the pages were no
+ * longer tagged for writeback).
+ */
+ switch (eb->log_index) {
+ case -1:
+ set_bit(BTRFS_INODE_BTREE_ERR, &btree_ino->runtime_flags);
+ break;
+ case 0:
+ set_bit(BTRFS_INODE_BTREE_LOG1_ERR, &btree_ino->runtime_flags);
+ break;
+ case 1:
+ set_bit(BTRFS_INODE_BTREE_LOG2_ERR, &btree_ino->runtime_flags);
+ break;
+ default:
+ BUG(); /* unexpected, logic error */
+ }
+}
+
static void end_bio_extent_buffer_writepage(struct bio *bio, int err)
{
struct bio_vec *bvec;
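set_btree_ioerr() above only records the failure in the btree inode's runtime_flags; the code that consumes those bits is outside this excerpt. The expected commit-time pattern is to test-and-clear the relevant bit and fail the commit rather than write a superblock pointing at bad nodes, roughly as in the sketch below (the function name and its placement are assumptions, not part of this hunk):

	/*
	 * Sketch of the commit-side consumer, assumed to run in the
	 * transaction commit path: a recorded btree writeback error on a
	 * non-log tree turns into a failed commit.
	 */
	static int check_btree_write_errors(struct btrfs_fs_info *fs_info)
	{
		struct btrfs_inode *btree_ino = BTRFS_I(fs_info->btree_inode);

		if (test_and_clear_bit(BTRFS_INODE_BTREE_ERR,
				       &btree_ino->runtime_flags))
			return -EIO;
		return 0;
	}

The log-tree bits (BTRFS_INODE_BTREE_LOG1_ERR/LOG2_ERR) would be checked analogously from the log sync path, matching the log_index recorded when the buffer was allocated.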
@@ -3535,10 +3676,9 @@ static void end_bio_extent_buffer_writepage(struct bio *bio, int err)
BUG_ON(!eb);
done = atomic_dec_and_test(&eb->io_pages);
- if (err || test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
- set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
+ if (err || test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
ClearPageUptodate(page);
- SetPageError(page);
+ set_btree_ioerr(page);
}
end_page_writeback(page);
@@ -3565,14 +3705,14 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
int rw = (epd->sync_io ? WRITE_SYNC : WRITE) | REQ_META;
int ret = 0;
- clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
+ clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
num_pages = num_extent_pages(eb->start, eb->len);
atomic_set(&eb->io_pages, num_pages);
if (btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID)
bio_flags = EXTENT_BIO_TREE_LOG;
for (i = 0; i < num_pages; i++) {
- struct page *p = extent_buffer_page(eb, i);
+ struct page *p = eb->pages[i];
clear_page_dirty_for_io(p);
set_page_writeback(p);
@@ -3582,8 +3722,8 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
0, epd->bio_flags, bio_flags);
epd->bio_flags = bio_flags;
if (ret) {
- set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
- SetPageError(p);
+ set_btree_ioerr(p);
+ end_page_writeback(p);
if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
end_extent_buffer_writeback(eb);
ret = -EIO;
@@ -3596,7 +3736,8 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
if (unlikely(ret)) {
for (; i < num_pages; i++) {
- struct page *p = extent_buffer_page(eb, i);
+ struct page *p = eb->pages[i];
+ clear_page_dirty_for_io(p);
unlock_page(p);
}
}
@@ -4166,19 +4307,6 @@ static struct extent_map *get_extent_skip_holes(struct inode *inode,
return NULL;
}
-static noinline int count_ext_ref(u64 inum, u64 offset, u64 root_id, void *ctx)
-{
- unsigned long cnt = *((unsigned long *)ctx);
-
- cnt++;
- *((unsigned long *)ctx) = cnt;
-
- /* Now we're sure that the extent is shared. */
- if (cnt > 1)
- return 1;
- return 0;
-}
-
int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len, get_extent_t *get_extent)
{
@@ -4195,6 +4323,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
struct extent_map *em = NULL;
struct extent_state *cached_state = NULL;
struct btrfs_path *path;
+ struct btrfs_root *root = BTRFS_I(inode)->root;
int end = 0;
u64 em_start = 0;
u64 em_len = 0;
@@ -4215,8 +4344,8 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
* lookup the last file extent. We're not using i_size here
* because there might be preallocation past i_size
*/
- ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
- path, btrfs_ino(inode), -1, 0);
+ ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode), -1,
+ 0);
if (ret < 0) {
btrfs_free_path(path);
return ret;
@@ -4224,7 +4353,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
WARN_ON(!ret);
path->slots[0]--;
btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
- found_type = btrfs_key_type(&found_key);
+ found_type = found_key.type;
/* No extents, but there might be delalloc bits */
if (found_key.objectid != btrfs_ino(inode) ||
@@ -4309,25 +4438,27 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
} else if (em->block_start == EXTENT_MAP_DELALLOC) {
flags |= (FIEMAP_EXTENT_DELALLOC |
FIEMAP_EXTENT_UNKNOWN);
- } else {
- unsigned long ref_cnt = 0;
+ } else if (fieinfo->fi_extents_max) {
+ u64 bytenr = em->block_start -
+ (em->start - em->orig_start);
disko = em->block_start + offset_in_extent;
/*
* As btrfs supports shared space, this information
* can be exported to userspace tools via
- * flag FIEMAP_EXTENT_SHARED.
+ * flag FIEMAP_EXTENT_SHARED. If fi_extents_max == 0
+ * then we're just getting a count and we can skip the
+ * lookup stuff.
*/
- ret = iterate_inodes_from_logical(
- em->block_start,
- BTRFS_I(inode)->root->fs_info,
- path, count_ext_ref, &ref_cnt);
- if (ret < 0 && ret != -ENOENT)
+ ret = btrfs_check_shared(NULL, root->fs_info,
+ root->objectid,
+ btrfs_ino(inode), bytenr);
+ if (ret < 0)
goto out_free;
-
- if (ref_cnt > 1)
+ if (ret)
flags |= FIEMAP_EXTENT_SHARED;
+ ret = 0;
}
if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
flags |= FIEMAP_EXTENT_ENCODED;
@@ -4381,24 +4512,21 @@ int extent_buffer_under_io(struct extent_buffer *eb)
/*
* Helper for releasing extent buffer page.
*/
-static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
- unsigned long start_idx)
+static void btrfs_release_extent_buffer_page(struct extent_buffer *eb)
{
unsigned long index;
- unsigned long num_pages;
struct page *page;
int mapped = !test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
BUG_ON(extent_buffer_under_io(eb));
- num_pages = num_extent_pages(eb->start, eb->len);
- index = start_idx + num_pages;
- if (start_idx >= index)
+ index = num_extent_pages(eb->start, eb->len);
+ if (index == 0)
return;
do {
index--;
- page = extent_buffer_page(eb, index);
+ page = eb->pages[index];
if (page && mapped) {
spin_lock(&page->mapping->private_lock);
/*
@@ -4429,7 +4557,7 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
/* One for when we alloced the page */
page_cache_release(page);
}
- } while (index != start_idx);
+ } while (index != 0);
}
/*
@@ -4437,7 +4565,7 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
*/
static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
{
- btrfs_release_extent_buffer_page(eb, 0);
+ btrfs_release_extent_buffer_page(eb);
__free_extent_buffer(eb);
}
@@ -4580,7 +4708,8 @@ static void mark_extent_buffer_accessed(struct extent_buffer *eb,
num_pages = num_extent_pages(eb->start, eb->len);
for (i = 0; i < num_pages; i++) {
- struct page *p = extent_buffer_page(eb, i);
+ struct page *p = eb->pages[i];
+
if (p != accessed)
mark_page_accessed(p);
}
@@ -4749,7 +4878,7 @@ again:
*/
SetPageChecked(eb->pages[0]);
for (i = 1; i < num_pages; i++) {
- p = extent_buffer_page(eb, i);
+ p = eb->pages[i];
ClearPageChecked(p);
unlock_page(p);
}
@@ -4794,7 +4923,7 @@ static int release_extent_buffer(struct extent_buffer *eb)
}
/* Should be safe to release our pages at this point */
- btrfs_release_extent_buffer_page(eb, 0);
+ btrfs_release_extent_buffer_page(eb);
call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
return 1;
}
@@ -4860,7 +4989,7 @@ void clear_extent_buffer_dirty(struct extent_buffer *eb)
num_pages = num_extent_pages(eb->start, eb->len);
for (i = 0; i < num_pages; i++) {
- page = extent_buffer_page(eb, i);
+ page = eb->pages[i];
if (!PageDirty(page))
continue;
@@ -4896,7 +5025,7 @@ int set_extent_buffer_dirty(struct extent_buffer *eb)
WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
for (i = 0; i < num_pages; i++)
- set_page_dirty(extent_buffer_page(eb, i));
+ set_page_dirty(eb->pages[i]);
return was_dirty;
}
@@ -4909,7 +5038,7 @@ int clear_extent_buffer_uptodate(struct extent_buffer *eb)
clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
num_pages = num_extent_pages(eb->start, eb->len);
for (i = 0; i < num_pages; i++) {
- page = extent_buffer_page(eb, i);
+ page = eb->pages[i];
if (page)
ClearPageUptodate(page);
}
@@ -4925,7 +5054,7 @@ int set_extent_buffer_uptodate(struct extent_buffer *eb)
set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
num_pages = num_extent_pages(eb->start, eb->len);
for (i = 0; i < num_pages; i++) {
- page = extent_buffer_page(eb, i);
+ page = eb->pages[i];
SetPageUptodate(page);
}
return 0;
@@ -4965,7 +5094,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
num_pages = num_extent_pages(eb->start, eb->len);
for (i = start_i; i < num_pages; i++) {
- page = extent_buffer_page(eb, i);
+ page = eb->pages[i];
if (wait == WAIT_NONE) {
if (!trylock_page(page))
goto unlock_exit;
@@ -4984,11 +5113,11 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
goto unlock_exit;
}
- clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
+ clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
eb->read_mirror = 0;
atomic_set(&eb->io_pages, num_reads);
for (i = start_i; i < num_pages; i++) {
- page = extent_buffer_page(eb, i);
+ page = eb->pages[i];
if (!PageUptodate(page)) {
ClearPageError(page);
err = __extent_read_full_page(tree, page,
@@ -5013,7 +5142,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
return ret;
for (i = start_i; i < num_pages; i++) {
- page = extent_buffer_page(eb, i);
+ page = eb->pages[i];
wait_on_page_locked(page);
if (!PageUptodate(page))
ret = -EIO;
@@ -5024,7 +5153,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
unlock_exit:
i = start_i;
while (locked_pages > 0) {
- page = extent_buffer_page(eb, i);
+ page = eb->pages[i];
i++;
unlock_page(page);
locked_pages--;
@@ -5050,7 +5179,7 @@ void read_extent_buffer(struct extent_buffer *eb, void *dstv,
offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
while (len > 0) {
- page = extent_buffer_page(eb, i);
+ page = eb->pages[i];
cur = min(len, (PAGE_CACHE_SIZE - offset));
kaddr = page_address(page);
@@ -5082,7 +5211,7 @@ int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dstv,
offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
while (len > 0) {
- page = extent_buffer_page(eb, i);
+ page = eb->pages[i];
cur = min(len, (PAGE_CACHE_SIZE - offset));
kaddr = page_address(page);
@@ -5131,7 +5260,7 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
return -EINVAL;
}
- p = extent_buffer_page(eb, i);
+ p = eb->pages[i];
kaddr = page_address(p);
*map = kaddr + offset;
*map_len = PAGE_CACHE_SIZE - offset;
@@ -5157,7 +5286,7 @@ int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
while (len > 0) {
- page = extent_buffer_page(eb, i);
+ page = eb->pages[i];
cur = min(len, (PAGE_CACHE_SIZE - offset));
@@ -5191,7 +5320,7 @@ void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
while (len > 0) {
- page = extent_buffer_page(eb, i);
+ page = eb->pages[i];
WARN_ON(!PageUptodate(page));
cur = min(len, PAGE_CACHE_SIZE - offset);
@@ -5221,7 +5350,7 @@ void memset_extent_buffer(struct extent_buffer *eb, char c,
offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
while (len > 0) {
- page = extent_buffer_page(eb, i);
+ page = eb->pages[i];
WARN_ON(!PageUptodate(page));
cur = min(len, PAGE_CACHE_SIZE - offset);
@@ -5252,7 +5381,7 @@ void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
(PAGE_CACHE_SIZE - 1);
while (len > 0) {
- page = extent_buffer_page(dst, i);
+ page = dst->pages[i];
WARN_ON(!PageUptodate(page));
cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
@@ -5330,8 +5459,7 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
cur = min_t(unsigned long, cur,
(unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
- copy_pages(extent_buffer_page(dst, dst_i),
- extent_buffer_page(dst, src_i),
+ copy_pages(dst->pages[dst_i], dst->pages[src_i],
dst_off_in_page, src_off_in_page, cur);
src_offset += cur;
@@ -5377,8 +5505,7 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
cur = min_t(unsigned long, len, src_off_in_page + 1);
cur = min(cur, dst_off_in_page + 1);
- copy_pages(extent_buffer_page(dst, dst_i),
- extent_buffer_page(dst, src_i),
+ copy_pages(dst->pages[dst_i], dst->pages[src_i],
dst_off_in_page - cur + 1,
src_off_in_page - cur + 1, cur);
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index ccc264e7bde1..6d4b938be986 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -11,8 +11,6 @@
#define EXTENT_NEW (1 << 4)
#define EXTENT_DELALLOC (1 << 5)
#define EXTENT_DEFRAG (1 << 6)
-#define EXTENT_DEFRAG_DONE (1 << 7)
-#define EXTENT_BUFFER_FILLED (1 << 8)
#define EXTENT_BOUNDARY (1 << 9)
#define EXTENT_NODATASUM (1 << 10)
#define EXTENT_DO_ACCOUNTING (1 << 11)
@@ -34,16 +32,16 @@
/* these are bit numbers for test/set bit */
#define EXTENT_BUFFER_UPTODATE 0
-#define EXTENT_BUFFER_BLOCKING 1
#define EXTENT_BUFFER_DIRTY 2
#define EXTENT_BUFFER_CORRUPT 3
#define EXTENT_BUFFER_READAHEAD 4 /* this got triggered by readahead */
#define EXTENT_BUFFER_TREE_REF 5
#define EXTENT_BUFFER_STALE 6
#define EXTENT_BUFFER_WRITEBACK 7
-#define EXTENT_BUFFER_IOERR 8
+#define EXTENT_BUFFER_READ_ERR 8 /* read IO error */
#define EXTENT_BUFFER_DUMMY 9
#define EXTENT_BUFFER_IN_TREE 10
+#define EXTENT_BUFFER_WRITE_ERR 11 /* write IO error */
/* these are flags for extent_clear_unlock_delalloc */
#define PAGE_UNLOCK (1 << 0)
@@ -57,7 +55,6 @@
* map has page->private set to one.
*/
#define EXTENT_PAGE_PRIVATE 1
-#define EXTENT_PAGE_PRIVATE_FIRST_PAGE 3
struct extent_state;
struct btrfs_root;
@@ -108,7 +105,6 @@ struct extent_state {
struct rb_node rb_node;
/* ADD NEW ELEMENTS AFTER THIS */
- struct extent_io_tree *tree;
wait_queue_head_t wq;
atomic_t refs;
unsigned long state;
@@ -126,8 +122,6 @@ struct extent_state {
struct extent_buffer {
u64 start;
unsigned long len;
- unsigned long map_start;
- unsigned long map_len;
unsigned long bflags;
struct btrfs_fs_info *fs_info;
spinlock_t refs_lock;
@@ -144,7 +138,9 @@ struct extent_buffer {
atomic_t blocking_readers;
atomic_t spinning_readers;
atomic_t spinning_writers;
- int lock_nested;
+ short lock_nested;
+ /* >= 0 if eb belongs to a log tree, -1 otherwise */
+ short log_index;
/* protects write locks */
rwlock_t lock;
@@ -286,12 +282,6 @@ static inline unsigned long num_extent_pages(u64 start, u64 len)
(start >> PAGE_CACHE_SHIFT);
}
-static inline struct page *extent_buffer_page(struct extent_buffer *eb,
- unsigned long i)
-{
- return eb->pages[i];
-}
-
static inline void extent_buffer_get(struct extent_buffer *eb)
{
atomic_inc(&eb->refs);
@@ -341,18 +331,50 @@ struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask);
struct btrfs_fs_info;
-int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
- u64 length, u64 logical, struct page *page,
- int mirror_num);
+int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
+ struct page *page, unsigned int pg_offset,
+ int mirror_num);
+int clean_io_failure(struct inode *inode, u64 start, struct page *page,
+ unsigned int pg_offset);
int end_extent_writepage(struct page *page, int err, u64 start, u64 end);
int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
int mirror_num);
+
+/*
+ * When IO fails, either with EIO or because csum verification fails, we
+ * try other mirrors that might have a good copy of the data. This
+ * io_failure_record is used to record state as we go through all the
+ * mirrors. If another mirror has good data, the page is set up to date
+ * and things continue. If a good mirror can't be found, the original
+ * bio end_io callback is called to indicate things have failed.
+ */
+struct io_failure_record {
+ struct page *page;
+ u64 start;
+ u64 len;
+ u64 logical;
+ unsigned long bio_flags;
+ int this_mirror;
+ int failed_mirror;
+ int in_validation;
+};
+
+void btrfs_free_io_failure_record(struct inode *inode, u64 start, u64 end);
+int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
+ struct io_failure_record **failrec_ret);
+int btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
+ struct io_failure_record *failrec, int fail_mirror);
+struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
+ struct io_failure_record *failrec,
+ struct page *page, int pg_offset, int icsum,
+ bio_end_io_t *endio_func, void *data);
+int free_io_failure(struct inode *inode, struct io_failure_record *rec);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
noinline u64 find_lock_delalloc_range(struct inode *inode,
struct extent_io_tree *tree,
struct page *locked_page, u64 *start,
u64 *end, u64 max_bytes);
+#endif
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start, unsigned long len);
#endif
-#endif
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 54c84daec9b5..783a94355efd 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -55,7 +55,7 @@ int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
return -ENOMEM;
file_key.objectid = objectid;
file_key.offset = pos;
- btrfs_set_key_type(&file_key, BTRFS_EXTENT_DATA_KEY);
+ file_key.type = BTRFS_EXTENT_DATA_KEY;
path->leave_spinning = 1;
ret = btrfs_insert_empty_item(trans, root, path, &file_key,
@@ -100,7 +100,7 @@ btrfs_lookup_csum(struct btrfs_trans_handle *trans,
file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
file_key.offset = bytenr;
- btrfs_set_key_type(&file_key, BTRFS_EXTENT_CSUM_KEY);
+ file_key.type = BTRFS_EXTENT_CSUM_KEY;
ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow);
if (ret < 0)
goto fail;
@@ -111,7 +111,7 @@ btrfs_lookup_csum(struct btrfs_trans_handle *trans,
goto fail;
path->slots[0]--;
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
- if (btrfs_key_type(&found_key) != BTRFS_EXTENT_CSUM_KEY)
+ if (found_key.type != BTRFS_EXTENT_CSUM_KEY)
goto fail;
csum_offset = (bytenr - found_key.offset) >>
@@ -148,7 +148,7 @@ int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
file_key.objectid = objectid;
file_key.offset = offset;
- btrfs_set_key_type(&file_key, BTRFS_EXTENT_DATA_KEY);
+ file_key.type = BTRFS_EXTENT_DATA_KEY;
ret = btrfs_search_slot(trans, root, &file_key, path, ins_len, cow);
return ret;
}
@@ -299,19 +299,9 @@ int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
}
int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
- struct btrfs_dio_private *dip, struct bio *bio,
- u64 offset)
+ struct bio *bio, u64 offset)
{
- int len = (bio->bi_iter.bi_sector << 9) - dip->disk_bytenr;
- u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
- int ret;
-
- len >>= inode->i_sb->s_blocksize_bits;
- len *= csum_size;
-
- ret = __btrfs_lookup_bio_sums(root, inode, bio, offset,
- (u32 *)(dip->csum + len), 1);
- return ret;
+ return __btrfs_lookup_bio_sums(root, inode, bio, offset, NULL, 1);
}
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
@@ -329,8 +319,8 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
u64 csum_end;
u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
- ASSERT(start == ALIGN(start, root->sectorsize) &&
- (end + 1) == ALIGN(end + 1, root->sectorsize));
+ ASSERT(IS_ALIGNED(start, root->sectorsize) &&
+ IS_ALIGNED(end + 1, root->sectorsize));
path = btrfs_alloc_path();
if (!path)
@@ -720,7 +710,7 @@ again:
bytenr = sums->bytenr + total_bytes;
file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
file_key.offset = bytenr;
- btrfs_set_key_type(&file_key, BTRFS_EXTENT_CSUM_KEY);
+ file_key.type = BTRFS_EXTENT_CSUM_KEY;
item = btrfs_lookup_csum(trans, root, path, bytenr, 1);
if (!IS_ERR(item)) {
@@ -790,7 +780,7 @@ again:
csum_offset = (bytenr - found_key.offset) >>
root->fs_info->sb->s_blocksize_bits;
- if (btrfs_key_type(&found_key) != BTRFS_EXTENT_CSUM_KEY ||
+ if (found_key.type != BTRFS_EXTENT_CSUM_KEY ||
found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
csum_offset >= MAX_CSUM_ITEMS(root, csum_size)) {
goto insert;
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index ff1cc0399b9a..a18ceabd99a8 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -299,7 +299,7 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
/* get the inode */
key.objectid = defrag->root;
- btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
+ key.type = BTRFS_ROOT_ITEM_KEY;
key.offset = (u64)-1;
index = srcu_read_lock(&fs_info->subvol_srcu);
@@ -311,7 +311,7 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
}
key.objectid = defrag->ino;
- btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
+ key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
if (IS_ERR(inode)) {
@@ -452,7 +452,7 @@ static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
if (unlikely(copied == 0))
break;
- if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
+ if (copied < PAGE_CACHE_SIZE - offset) {
offset += copied;
} else {
pg++;
@@ -1481,9 +1481,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
bool force_page_uptodate = false;
bool need_unlock;
- nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
- PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
- (sizeof(struct page *)));
+ nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_CACHE_SIZE),
+ PAGE_CACHE_SIZE / (sizeof(struct page *)));
nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
nrptrs = max(nrptrs, 8);
pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
@@ -1497,8 +1496,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
size_t write_bytes = min(iov_iter_count(i),
nrptrs * (size_t)PAGE_CACHE_SIZE -
offset);
- size_t num_pages = (write_bytes + offset +
- PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ size_t num_pages = DIV_ROUND_UP(write_bytes + offset,
+ PAGE_CACHE_SIZE);
size_t reserve_bytes;
size_t dirty_pages;
size_t copied;
@@ -1526,9 +1525,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
* our prealloc extent may be smaller than
* write_bytes, so scale down.
*/
- num_pages = (write_bytes + offset +
- PAGE_CACHE_SIZE - 1) >>
- PAGE_CACHE_SHIFT;
+ num_pages = DIV_ROUND_UP(write_bytes + offset,
+ PAGE_CACHE_SIZE);
reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
ret = 0;
} else {
@@ -1590,9 +1588,8 @@ again:
dirty_pages = 0;
} else {
force_page_uptodate = false;
- dirty_pages = (copied + offset +
- PAGE_CACHE_SIZE - 1) >>
- PAGE_CACHE_SHIFT;
+ dirty_pages = DIV_ROUND_UP(copied + offset,
+ PAGE_CACHE_SIZE);
}
/*
@@ -1653,7 +1650,7 @@ again:
cond_resched();
balance_dirty_pages_ratelimited(inode->i_mapping);
- if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
+ if (dirty_pages < (root->nodesize >> PAGE_CACHE_SHIFT) + 1)
btrfs_btree_balance_dirty(root);
pos += copied;
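Several hunks in this file swap the open-coded `(x + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT` pattern for DIV_ROUND_UP(). A small standalone check of the equivalence, assuming a 4096-byte page purely for illustration:

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE_SKETCH  4096UL	/* illustrative; not PAGE_CACHE_SIZE */
#define PAGE_SHIFT_SKETCH 12
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long n;

	for (n = 0; n <= 3 * PAGE_SIZE_SKETCH; n += 511) {
		/* Old form: add (page size - 1), then shift down. */
		unsigned long old_pages =
			(n + PAGE_SIZE_SKETCH - 1) >> PAGE_SHIFT_SKETCH;
		/* New form: generic round-up division. */
		unsigned long new_pages = DIV_ROUND_UP(n, PAGE_SIZE_SKETCH);

		assert(old_pages == new_pages);
	}
	printf("rounding forms agree\n");
	return 0;
}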
@@ -1795,7 +1792,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
if (sync)
atomic_inc(&BTRFS_I(inode)->sync_writers);
- if (unlikely(file->f_flags & O_DIRECT)) {
+ if (file->f_flags & O_DIRECT) {
num_written = __btrfs_direct_write(iocb, from, pos);
} else {
num_written = __btrfs_buffered_write(file, from, pos);
@@ -1852,6 +1849,20 @@ int btrfs_release_file(struct inode *inode, struct file *filp)
return 0;
}
+static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
+{
+ int ret;
+
+ atomic_inc(&BTRFS_I(inode)->sync_writers);
+ ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
+ if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
+ &BTRFS_I(inode)->runtime_flags))
+ ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
+ atomic_dec(&BTRFS_I(inode)->sync_writers);
+
+ return ret;
+}
+
/*
* fsync call for both files and directories. This logs the inode into
* the tree log instead of forcing full commits whenever possible.
@@ -1881,30 +1892,64 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 	 * multi-task, and improve performance. See
* btrfs_wait_ordered_range for an explanation of the ASYNC check.
*/
- atomic_inc(&BTRFS_I(inode)->sync_writers);
- ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
- if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
- &BTRFS_I(inode)->runtime_flags))
- ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
- atomic_dec(&BTRFS_I(inode)->sync_writers);
+ ret = start_ordered_ops(inode, start, end);
if (ret)
return ret;
mutex_lock(&inode->i_mutex);
-
- /*
- * We flush the dirty pages again to avoid some dirty pages in the
- * range being left.
- */
atomic_inc(&root->log_batch);
full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
&BTRFS_I(inode)->runtime_flags);
+ /*
+	 * We might have had more pages made dirty after calling
+ * start_ordered_ops and before acquiring the inode's i_mutex.
+ */
if (full_sync) {
+ /*
+ * For a full sync, we need to make sure any ordered operations
+ * start and finish before we start logging the inode, so that
+ * all extents are persisted and the respective file extent
+ * items are in the fs/subvol btree.
+ */
ret = btrfs_wait_ordered_range(inode, start, end - start + 1);
- if (ret) {
- mutex_unlock(&inode->i_mutex);
- goto out;
- }
+ } else {
+ /*
+ * Start any new ordered operations before starting to log the
+ * inode. We will wait for them to finish in btrfs_sync_log().
+ *
+ * Right before acquiring the inode's mutex, we might have new
+ * writes dirtying pages, which won't immediately start the
+ * respective ordered operations - that is done through the
+ * fill_delalloc callbacks invoked from the writepage and
+ * writepages address space operations. So make sure we start
+ * all ordered operations before starting to log our inode. Not
+ * doing this means that while logging the inode, writeback
+ * could start and invoke writepage/writepages, which would call
+ * the fill_delalloc callbacks (cow_file_range,
+ * submit_compressed_extents). These callbacks add first an
+ * extent map to the modified list of extents and then create
+ * the respective ordered operation, which means in
+ * tree-log.c:btrfs_log_inode() we might capture all existing
+ * ordered operations (with btrfs_get_logged_extents()) before
+ * the fill_delalloc callback adds its ordered operation, and by
+ * the time we visit the modified list of extent maps (with
+ * btrfs_log_changed_extents()), we see and process the extent
+ * map they created. We then use the extent map to construct a
+ * file extent item for logging without waiting for the
+ * respective ordered operation to finish - this file extent
+ * item points to a disk location that might not have yet been
+ * written to, containing random data - so after a crash a log
+ * replay will make our inode have file extent items that point
+ * to disk locations containing invalid data, as we returned
+ * success to userspace without waiting for the respective
+ * ordered operation to finish, because it wasn't captured by
+ * btrfs_get_logged_extents().
+ */
+ ret = start_ordered_ops(inode, start, end);
+ }
+ if (ret) {
+ mutex_unlock(&inode->i_mutex);
+ goto out;
}
atomic_inc(&root->log_batch);
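The comment above reduces to an ordering rule: flush once before taking the lock, then flush again under the lock so that writes which dirtied pages in between have their ordered work started before the inode is logged. A toy sequential sketch of that order, with stub functions standing in for real writeback and logging:

#include <stdio.h>

static int flush_dirty_range(const char *when)
{
	printf("start ordered work (%s)\n", when);
	return 0;
}
static void lock_inode(void)   { printf("lock inode\n"); }
static void unlock_inode(void) { printf("unlock inode\n"); }
static int  log_inode(void)    { printf("log inode\n"); return 0; }

/* Fast-fsync order: anything that dirtied pages between the first flush
 * and taking the lock is picked up by the second flush, before logging. */
static int fsync_sketch(void)
{
	int ret = flush_dirty_range("before lock");

	if (ret)
		return ret;
	lock_inode();
	ret = flush_dirty_range("after lock");	/* catch late dirtiers */
	if (!ret)
		ret = log_inode();
	unlock_inode();
	return ret;
}

int main(void) { return fsync_sketch(); }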
@@ -1984,6 +2029,25 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
*/
mutex_unlock(&inode->i_mutex);
+ /*
+ * If any of the ordered extents had an error, just return it to user
+ * space, so that the application knows some writes didn't succeed and
+	 * can take proper action (e.g. retry). Blindly committing the
+	 * transaction in this case would mislead userspace into thinking everything was
+ * successful. And we also want to make sure our log doesn't contain
+ * file extent items pointing to extents that weren't fully written to -
+ * just like in the non fast fsync path, where we check for the ordered
+ * operation's error flag before writing to the log tree and return -EIO
+ * if any of them had this flag set (btrfs_wait_ordered_range) -
+ * therefore we need to check for errors in the ordered operations,
+ * which are indicated by ctx.io_err.
+ */
+ if (ctx.io_err) {
+ btrfs_end_transaction(trans, root);
+ ret = ctx.io_err;
+ goto out;
+ }
+
if (ret != BTRFS_NO_LOG_SYNC) {
if (!ret) {
ret = btrfs_sync_log(trans, root, &ctx);
@@ -2621,23 +2685,28 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_map *em = NULL;
struct extent_state *cached_state = NULL;
- u64 lockstart = *offset;
- u64 lockend = i_size_read(inode);
- u64 start = *offset;
- u64 len = i_size_read(inode);
+ u64 lockstart;
+ u64 lockend;
+ u64 start;
+ u64 len;
int ret = 0;
- lockend = max_t(u64, root->sectorsize, lockend);
+ if (inode->i_size == 0)
+ return -ENXIO;
+
+ /*
+	 * *offset can be negative; in that case we start finding DATA/HOLE from
+ * the very start of the file.
+ */
+ start = max_t(loff_t, 0, *offset);
+
+ lockstart = round_down(start, root->sectorsize);
+ lockend = round_up(i_size_read(inode), root->sectorsize);
if (lockend <= lockstart)
lockend = lockstart + root->sectorsize;
-
lockend--;
len = lockend - lockstart + 1;
- len = max_t(u64, len, root->sectorsize);
- if (inode->i_size == 0)
- return -ENXIO;
-
lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
&cached_state);
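find_desired_extent() now clamps a possibly negative *offset to zero and aligns the locked range to the sector size. The arithmetic in isolation, assuming a 4096-byte sector for the example:

#include <stdio.h>

#define SECTORSIZE 4096ULL	/* stand-in for root->sectorsize */
/* Power-of-two alignment helpers, same shape as the kernel macros. */
#define ROUND_DOWN(x, a) ((x) & ~((a) - 1))
#define ROUND_UP(x, a)   (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	long long offset = -100;		/* lseek SEEK_DATA/SEEK_HOLE offset */
	unsigned long long isize = 10000;	/* i_size_read(inode)               */

	unsigned long long start = offset < 0 ? 0 : (unsigned long long)offset;
	unsigned long long lockstart = ROUND_DOWN(start, SECTORSIZE);
	unsigned long long lockend = ROUND_UP(isize, SECTORSIZE);

	if (lockend <= lockstart)
		lockend = lockstart + SECTORSIZE;
	lockend--;

	printf("lock range: [%llu, %llu]\n", lockstart, lockend);
	return 0;
}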
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 2b0a627cb5f9..33848196550e 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -279,8 +279,7 @@ static int io_ctl_init(struct io_ctl *io_ctl, struct inode *inode,
int num_pages;
int check_crcs = 0;
- num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
- PAGE_CACHE_SHIFT;
+ num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE);
if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID)
check_crcs = 1;
@@ -1998,6 +1997,128 @@ static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
return merged;
}
+static bool steal_from_bitmap_to_end(struct btrfs_free_space_ctl *ctl,
+ struct btrfs_free_space *info,
+ bool update_stat)
+{
+ struct btrfs_free_space *bitmap;
+ unsigned long i;
+ unsigned long j;
+ const u64 end = info->offset + info->bytes;
+ const u64 bitmap_offset = offset_to_bitmap(ctl, end);
+ u64 bytes;
+
+ bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
+ if (!bitmap)
+ return false;
+
+ i = offset_to_bit(bitmap->offset, ctl->unit, end);
+ j = find_next_zero_bit(bitmap->bitmap, BITS_PER_BITMAP, i);
+ if (j == i)
+ return false;
+ bytes = (j - i) * ctl->unit;
+ info->bytes += bytes;
+
+ if (update_stat)
+ bitmap_clear_bits(ctl, bitmap, end, bytes);
+ else
+ __bitmap_clear_bits(ctl, bitmap, end, bytes);
+
+ if (!bitmap->bytes)
+ free_bitmap(ctl, bitmap);
+
+ return true;
+}
+
+static bool steal_from_bitmap_to_front(struct btrfs_free_space_ctl *ctl,
+ struct btrfs_free_space *info,
+ bool update_stat)
+{
+ struct btrfs_free_space *bitmap;
+ u64 bitmap_offset;
+ unsigned long i;
+ unsigned long j;
+ unsigned long prev_j;
+ u64 bytes;
+
+ bitmap_offset = offset_to_bitmap(ctl, info->offset);
+ /* If we're on a boundary, try the previous logical bitmap. */
+ if (bitmap_offset == info->offset) {
+ if (info->offset == 0)
+ return false;
+ bitmap_offset = offset_to_bitmap(ctl, info->offset - 1);
+ }
+
+ bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
+ if (!bitmap)
+ return false;
+
+ i = offset_to_bit(bitmap->offset, ctl->unit, info->offset) - 1;
+ j = 0;
+ prev_j = (unsigned long)-1;
+ for_each_clear_bit_from(j, bitmap->bitmap, BITS_PER_BITMAP) {
+ if (j > i)
+ break;
+ prev_j = j;
+ }
+ if (prev_j == i)
+ return false;
+
+ if (prev_j == (unsigned long)-1)
+ bytes = (i + 1) * ctl->unit;
+ else
+ bytes = (i - prev_j) * ctl->unit;
+
+ info->offset -= bytes;
+ info->bytes += bytes;
+
+ if (update_stat)
+ bitmap_clear_bits(ctl, bitmap, info->offset, bytes);
+ else
+ __bitmap_clear_bits(ctl, bitmap, info->offset, bytes);
+
+ if (!bitmap->bytes)
+ free_bitmap(ctl, bitmap);
+
+ return true;
+}
+
+/*
+ * We always prefer to allocate from extent entries, both for clustered and
+ * non-clustered allocation requests. So when attempting to add a new extent
+ * entry, try to see if there's adjacent free space in bitmap entries, and if
+ * there is, migrate that space from the bitmaps to the extent.
+ * This way we get better chances of satisfying space allocation requests
+ * because we attempt to satisfy them based on a single cache entry, and never
+ * on 2 or more entries - even if the entries represent a contiguous free space
+ * region (e.g. 1 extent entry + 1 bitmap entry starting where the extent entry
+ * ends).
+ */
+static void steal_from_bitmap(struct btrfs_free_space_ctl *ctl,
+ struct btrfs_free_space *info,
+ bool update_stat)
+{
+ /*
+	 * Only work with disconnected entries, as we can change their offset,
+	 * and they must be extent entries.
+ */
+ ASSERT(!info->bitmap);
+ ASSERT(RB_EMPTY_NODE(&info->offset_index));
+
+ if (ctl->total_bitmaps > 0) {
+ bool stole_end;
+ bool stole_front = false;
+
+ stole_end = steal_from_bitmap_to_end(ctl, info, update_stat);
+ if (ctl->total_bitmaps > 0)
+ stole_front = steal_from_bitmap_to_front(ctl, info,
+ update_stat);
+
+ if (stole_end || stole_front)
+ try_merge_free_space(ctl, info, update_stat);
+ }
+}
+
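A toy model of the bitmap-steal idea: one free-space extent followed by a bitmap of free units; contiguous set bits starting where the extent ends are migrated into the extent (the "steal to end" direction). The unit size, bitmap length and names are invented for the example:

#include <stdio.h>

#define NBITS 16
#define UNIT  4096UL		/* bytes represented by one bitmap bit */

struct free_extent {
	unsigned long offset;
	unsigned long bytes;
};

/* Extend the extent forward over set (free) bits that start where it ends,
 * clearing them from the bitmap as they are absorbed. */
static void steal_to_end(struct free_extent *e, unsigned char *bitmap,
			 unsigned long bitmap_offset)
{
	unsigned long end = e->offset + e->bytes;
	unsigned long i = (end - bitmap_offset) / UNIT;

	while (i < NBITS && (bitmap[i / 8] & (1u << (i % 8)))) {
		bitmap[i / 8] &= ~(1u << (i % 8));
		e->bytes += UNIT;
		i++;
	}
}

int main(void)
{
	struct free_extent e = { .offset = 0, .bytes = 2 * UNIT };
	unsigned char bitmap[NBITS / 8] = { 0 };
	unsigned long bitmap_offset = 2 * UNIT;	/* bitmap begins where e ends */

	bitmap[0] |= 0x07;	/* first three units after the extent are free */

	steal_to_end(&e, bitmap, bitmap_offset);
	printf("extent now covers %lu bytes\n", e.bytes);	/* 5 * UNIT */
	return 0;
}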
int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
u64 offset, u64 bytes)
{
@@ -2010,6 +2131,7 @@ int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
info->offset = offset;
info->bytes = bytes;
+ RB_CLEAR_NODE(&info->offset_index);
spin_lock(&ctl->tree_lock);
@@ -2029,6 +2151,14 @@ int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
goto out;
}
link:
+ /*
+ * Only steal free space from adjacent bitmaps if we're sure we're not
+ * going to add the new free space to existing bitmap entries - because
+ * that would mean unnecessary work that would be reverted. Therefore
+ * attempt to steal space from bitmaps if we're adding an extent entry.
+ */
+ steal_from_bitmap(ctl, info, true);
+
ret = link_free_space(ctl, info);
if (ret)
kmem_cache_free(btrfs_free_space_cachep, info);
@@ -2205,10 +2335,13 @@ __btrfs_return_cluster_to_free_space(
entry = rb_entry(node, struct btrfs_free_space, offset_index);
node = rb_next(&entry->offset_index);
rb_erase(&entry->offset_index, &cluster->root);
+ RB_CLEAR_NODE(&entry->offset_index);
bitmap = (entry->bitmap != NULL);
- if (!bitmap)
+ if (!bitmap) {
try_merge_free_space(ctl, entry, false);
+ steal_from_bitmap(ctl, entry, false);
+ }
tree_insert_offset(&ctl->free_space_offset,
entry->offset, &entry->offset_index, bitmap);
}
@@ -3033,10 +3166,10 @@ struct inode *lookup_free_ino_inode(struct btrfs_root *root,
{
struct inode *inode = NULL;
- spin_lock(&root->cache_lock);
- if (root->cache_inode)
- inode = igrab(root->cache_inode);
- spin_unlock(&root->cache_lock);
+ spin_lock(&root->ino_cache_lock);
+ if (root->ino_cache_inode)
+ inode = igrab(root->ino_cache_inode);
+ spin_unlock(&root->ino_cache_lock);
if (inode)
return inode;
@@ -3044,10 +3177,10 @@ struct inode *lookup_free_ino_inode(struct btrfs_root *root,
if (IS_ERR(inode))
return inode;
- spin_lock(&root->cache_lock);
+ spin_lock(&root->ino_cache_lock);
if (!btrfs_fs_closing(root->fs_info))
- root->cache_inode = igrab(inode);
- spin_unlock(&root->cache_lock);
+ root->ino_cache_inode = igrab(inode);
+ spin_unlock(&root->ino_cache_lock);
return inode;
}
@@ -3176,6 +3309,7 @@ again:
map = NULL;
add_new_bitmap(ctl, info, offset);
bitmap_info = info;
+ info = NULL;
}
bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
@@ -3186,6 +3320,8 @@ again:
if (bytes)
goto again;
+ if (info)
+ kmem_cache_free(btrfs_free_space_cachep, info);
if (map)
kfree(map);
return 0;
@@ -3260,6 +3396,7 @@ have_info:
goto have_info;
}
+ ret = 0;
goto out;
}
diff --git a/fs/btrfs/hash.c b/fs/btrfs/hash.c
index 85889aa82c62..64f15bb30a81 100644
--- a/fs/btrfs/hash.c
+++ b/fs/btrfs/hash.c
@@ -20,10 +20,8 @@ static struct crypto_shash *tfm;
int __init btrfs_hash_init(void)
{
tfm = crypto_alloc_shash("crc32c", 0, 0);
- if (IS_ERR(tfm))
- return PTR_ERR(tfm);
- return 0;
+ return PTR_ERR_OR_ZERO(tfm);
}
void btrfs_hash_exit(void)
diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c
index 2be38df703c9..8ffa4783cbf4 100644
--- a/fs/btrfs/inode-item.c
+++ b/fs/btrfs/inode-item.c
@@ -135,7 +135,7 @@ static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
u32 item_size;
key.objectid = inode_objectid;
- btrfs_set_key_type(&key, BTRFS_INODE_EXTREF_KEY);
+ key.type = BTRFS_INODE_EXTREF_KEY;
key.offset = btrfs_extref_hash(ref_objectid, name, name_len);
path = btrfs_alloc_path();
@@ -209,7 +209,7 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
key.objectid = inode_objectid;
key.offset = ref_objectid;
- btrfs_set_key_type(&key, BTRFS_INODE_REF_KEY);
+ key.type = BTRFS_INODE_REF_KEY;
path = btrfs_alloc_path();
if (!path)
@@ -337,7 +337,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
key.objectid = inode_objectid;
key.offset = ref_objectid;
- btrfs_set_key_type(&key, BTRFS_INODE_REF_KEY);
+ key.type = BTRFS_INODE_REF_KEY;
path = btrfs_alloc_path();
if (!path)
@@ -400,7 +400,7 @@ int btrfs_insert_empty_inode(struct btrfs_trans_handle *trans,
struct btrfs_key key;
int ret;
key.objectid = objectid;
- btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
+ key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
ret = btrfs_insert_empty_item(trans, root, path, &key,
@@ -420,13 +420,13 @@ int btrfs_lookup_inode(struct btrfs_trans_handle *trans, struct btrfs_root
struct btrfs_key found_key;
ret = btrfs_search_slot(trans, root, location, path, ins_len, cow);
- if (ret > 0 && btrfs_key_type(location) == BTRFS_ROOT_ITEM_KEY &&
+ if (ret > 0 && location->type == BTRFS_ROOT_ITEM_KEY &&
location->offset == (u64)-1 && path->slots[0] != 0) {
slot = path->slots[0] - 1;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key, slot);
if (found_key.objectid == location->objectid &&
- btrfs_key_type(&found_key) == btrfs_key_type(location)) {
+ found_key.type == location->type) {
path->slots[0]--;
return 0;
}
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index 888fbe19079f..83d646bd2e4b 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -87,7 +87,7 @@ again:
*/
btrfs_item_key_to_cpu(leaf, &key, 0);
btrfs_release_path(path);
- root->cache_progress = last;
+ root->ino_cache_progress = last;
up_read(&fs_info->commit_root_sem);
schedule_timeout(1);
goto again;
@@ -106,7 +106,7 @@ again:
if (last != (u64)-1 && last + 1 != key.objectid) {
__btrfs_add_free_space(ctl, last + 1,
key.objectid - last - 1);
- wake_up(&root->cache_wait);
+ wake_up(&root->ino_cache_wait);
}
last = key.objectid;
@@ -119,14 +119,14 @@ next:
root->highest_objectid - last - 1);
}
- spin_lock(&root->cache_lock);
- root->cached = BTRFS_CACHE_FINISHED;
- spin_unlock(&root->cache_lock);
+ spin_lock(&root->ino_cache_lock);
+ root->ino_cache_state = BTRFS_CACHE_FINISHED;
+ spin_unlock(&root->ino_cache_lock);
- root->cache_progress = (u64)-1;
+ root->ino_cache_progress = (u64)-1;
btrfs_unpin_free_ino(root);
out:
- wake_up(&root->cache_wait);
+ wake_up(&root->ino_cache_wait);
up_read(&fs_info->commit_root_sem);
btrfs_free_path(path);
@@ -144,20 +144,20 @@ static void start_caching(struct btrfs_root *root)
if (!btrfs_test_opt(root, INODE_MAP_CACHE))
return;
- spin_lock(&root->cache_lock);
- if (root->cached != BTRFS_CACHE_NO) {
- spin_unlock(&root->cache_lock);
+ spin_lock(&root->ino_cache_lock);
+ if (root->ino_cache_state != BTRFS_CACHE_NO) {
+ spin_unlock(&root->ino_cache_lock);
return;
}
- root->cached = BTRFS_CACHE_STARTED;
- spin_unlock(&root->cache_lock);
+ root->ino_cache_state = BTRFS_CACHE_STARTED;
+ spin_unlock(&root->ino_cache_lock);
ret = load_free_ino_cache(root->fs_info, root);
if (ret == 1) {
- spin_lock(&root->cache_lock);
- root->cached = BTRFS_CACHE_FINISHED;
- spin_unlock(&root->cache_lock);
+ spin_lock(&root->ino_cache_lock);
+ root->ino_cache_state = BTRFS_CACHE_FINISHED;
+ spin_unlock(&root->ino_cache_lock);
return;
}
@@ -196,11 +196,11 @@ again:
start_caching(root);
- wait_event(root->cache_wait,
- root->cached == BTRFS_CACHE_FINISHED ||
+ wait_event(root->ino_cache_wait,
+ root->ino_cache_state == BTRFS_CACHE_FINISHED ||
root->free_ino_ctl->free_space > 0);
- if (root->cached == BTRFS_CACHE_FINISHED &&
+ if (root->ino_cache_state == BTRFS_CACHE_FINISHED &&
root->free_ino_ctl->free_space == 0)
return -ENOSPC;
else
@@ -214,17 +214,17 @@ void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
if (!btrfs_test_opt(root, INODE_MAP_CACHE))
return;
again:
- if (root->cached == BTRFS_CACHE_FINISHED) {
+ if (root->ino_cache_state == BTRFS_CACHE_FINISHED) {
__btrfs_add_free_space(pinned, objectid, 1);
} else {
down_write(&root->fs_info->commit_root_sem);
- spin_lock(&root->cache_lock);
- if (root->cached == BTRFS_CACHE_FINISHED) {
- spin_unlock(&root->cache_lock);
+ spin_lock(&root->ino_cache_lock);
+ if (root->ino_cache_state == BTRFS_CACHE_FINISHED) {
+ spin_unlock(&root->ino_cache_lock);
up_write(&root->fs_info->commit_root_sem);
goto again;
}
- spin_unlock(&root->cache_lock);
+ spin_unlock(&root->ino_cache_lock);
start_caching(root);
@@ -235,10 +235,10 @@ again:
}
/*
- * When a transaction is committed, we'll move those inode numbers which
- * are smaller than root->cache_progress from pinned tree to free_ino tree,
- * and others will just be dropped, because the commit root we were
- * searching has changed.
+ * When a transaction is committed, we'll move those inode numbers which are
+ * smaller than root->ino_cache_progress from pinned tree to free_ino tree, and
+ * others will just be dropped, because the commit root we were searching has
+ * changed.
*
* Must be called with root->fs_info->commit_root_sem held
*/
@@ -261,10 +261,10 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
info = rb_entry(n, struct btrfs_free_space, offset_index);
BUG_ON(info->bitmap); /* Logic error */
- if (info->offset > root->cache_progress)
+ if (info->offset > root->ino_cache_progress)
goto free;
- else if (info->offset + info->bytes > root->cache_progress)
- count = root->cache_progress - info->offset + 1;
+ else if (info->offset + info->bytes > root->ino_cache_progress)
+ count = root->ino_cache_progress - info->offset + 1;
else
count = info->bytes;
@@ -462,13 +462,13 @@ again:
}
}
- spin_lock(&root->cache_lock);
- if (root->cached != BTRFS_CACHE_FINISHED) {
+ spin_lock(&root->ino_cache_lock);
+ if (root->ino_cache_state != BTRFS_CACHE_FINISHED) {
ret = -1;
- spin_unlock(&root->cache_lock);
+ spin_unlock(&root->ino_cache_lock);
goto out_put;
}
- spin_unlock(&root->cache_lock);
+ spin_unlock(&root->ino_cache_lock);
spin_lock(&ctl->tree_lock);
prealloc = sizeof(struct btrfs_free_space) * ctl->free_extents;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 016c403bfe7e..fc9c0439caa3 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -153,7 +153,7 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
key.objectid = btrfs_ino(inode);
key.offset = start;
- btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
+ key.type = BTRFS_EXTENT_DATA_KEY;
datasize = btrfs_file_extent_calc_inline_size(cur_size);
path->leave_spinning = 1;
@@ -249,8 +249,8 @@ static noinline int cow_file_range_inline(struct btrfs_root *root,
data_len = compressed_size;
if (start > 0 ||
- actual_end >= PAGE_CACHE_SIZE ||
- data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
+ actual_end > PAGE_CACHE_SIZE ||
+ data_len > BTRFS_MAX_INLINE_DATA_SIZE(root) ||
(!compressed_size &&
(actual_end & (root->sectorsize - 1)) == 0) ||
end + 1 < isize ||
@@ -348,6 +348,23 @@ static noinline int add_async_extent(struct async_cow *cow,
return 0;
}
+static inline int inode_need_compress(struct inode *inode)
+{
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+
+ /* force compress */
+ if (btrfs_test_opt(root, FORCE_COMPRESS))
+ return 1;
+ /* bad compression ratios */
+ if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
+ return 0;
+ if (btrfs_test_opt(root, COMPRESS) ||
+ BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS ||
+ BTRFS_I(inode)->force_compress)
+ return 1;
+ return 0;
+}
+
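The decision order in inode_need_compress() - mount-wide force wins, then the per-inode "bad ratio" veto, then the opt-in flags - can be modelled in a few lines; the field names below are stand-ins, not the kernel's flag definitions:

#include <stdbool.h>
#include <stdio.h>

struct compress_opts {
	bool mount_force_compress;	/* -o compress-force            */
	bool mount_compress;		/* -o compress                  */
	bool inode_nocompress;		/* bad ratios seen on this file */
	bool inode_compress;		/* per-inode opt-in             */
	bool force_compress;		/* defrag requested compression */
};

static bool need_compress(const struct compress_opts *o)
{
	if (o->mount_force_compress)	/* force beats everything    */
		return true;
	if (o->inode_nocompress)	/* per-inode veto comes next */
		return false;
	return o->mount_compress || o->inode_compress || o->force_compress;
}

int main(void)
{
	struct compress_opts o = { .inode_nocompress = true,
				   .mount_force_compress = true };

	printf("compress? %d\n", need_compress(&o));	/* 1: force wins */
	return 0;
}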
/*
* we create compressed extents in two phases. The first
* phase compresses a range of pages that have already been
@@ -444,10 +461,7 @@ again:
* inode has not been flagged as nocompress. This flag can
* change at any time if we discover bad compression ratios.
*/
- if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
- (btrfs_test_opt(root, COMPRESS) ||
- (BTRFS_I(inode)->force_compress) ||
- (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))) {
+ if (inode_need_compress(inode)) {
WARN_ON(pages);
pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
if (!pages) {
@@ -1094,7 +1108,8 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
async_cow->locked_page = locked_page;
async_cow->start = start;
- if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
+ if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS &&
+ !btrfs_test_opt(root, FORCE_COMPRESS))
cur_end = end;
else
cur_end = min(end, start + 512 * 1024 - 1);
@@ -1445,6 +1460,26 @@ error:
return ret;
}
+static inline int need_force_cow(struct inode *inode, u64 start, u64 end)
+{
+
+ if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
+ !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC))
+ return 0;
+
+ /*
+	 * @defrag_bytes is a hint value; no spinlock is held here.
+	 * If it is not zero, it means the file is being defragged.
+	 * Force COW if the given extent needs to be defragged.
+ */
+ if (BTRFS_I(inode)->defrag_bytes &&
+ test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
+ EXTENT_DEFRAG, 0, NULL))
+ return 1;
+
+ return 0;
+}
+
/*
* extent_io.c call back to do delayed allocation processing
*/
@@ -1453,17 +1488,15 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page,
unsigned long *nr_written)
{
int ret;
- struct btrfs_root *root = BTRFS_I(inode)->root;
+ int force_cow = need_force_cow(inode, start, end);
- if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) {
+ if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW && !force_cow) {
ret = run_delalloc_nocow(inode, locked_page, start, end,
page_started, 1, nr_written);
- } else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC) {
+ } else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC && !force_cow) {
ret = run_delalloc_nocow(inode, locked_page, start, end,
page_started, 0, nr_written);
- } else if (!btrfs_test_opt(root, COMPRESS) &&
- !(BTRFS_I(inode)->force_compress) &&
- !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS)) {
+ } else if (!inode_need_compress(inode)) {
ret = cow_file_range(inode, locked_page, start, end,
page_started, nr_written, 1);
} else {
@@ -1555,6 +1588,8 @@ static void btrfs_set_bit_hook(struct inode *inode,
struct extent_state *state, unsigned long *bits)
{
+ if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC))
+ WARN_ON(1);
/*
* set_bit and clear bit hooks normally require _irqsave/restore
* but in this case, we are only testing for the DELALLOC
@@ -1577,6 +1612,8 @@ static void btrfs_set_bit_hook(struct inode *inode,
root->fs_info->delalloc_batch);
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->delalloc_bytes += len;
+ if (*bits & EXTENT_DEFRAG)
+ BTRFS_I(inode)->defrag_bytes += len;
if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
&BTRFS_I(inode)->runtime_flags))
btrfs_add_delalloc_inodes(root, inode);
@@ -1591,6 +1628,13 @@ static void btrfs_clear_bit_hook(struct inode *inode,
struct extent_state *state,
unsigned long *bits)
{
+ u64 len = state->end + 1 - state->start;
+
+ spin_lock(&BTRFS_I(inode)->lock);
+ if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG))
+ BTRFS_I(inode)->defrag_bytes -= len;
+ spin_unlock(&BTRFS_I(inode)->lock);
+
/*
* set_bit and clear bit hooks normally require _irqsave/restore
* but in this case, we are only testing for the DELALLOC
@@ -1598,7 +1642,6 @@ static void btrfs_clear_bit_hook(struct inode *inode,
*/
if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
struct btrfs_root *root = BTRFS_I(inode)->root;
- u64 len = state->end + 1 - state->start;
bool do_list = !btrfs_is_free_space_inode(inode);
if (*bits & EXTENT_FIRST_DELALLOC) {
@@ -2660,6 +2703,10 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
goto out;
}
+ btrfs_free_io_failure_record(inode, ordered_extent->file_offset,
+ ordered_extent->file_offset +
+ ordered_extent->len - 1);
+
if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
truncated = true;
logical_len = ordered_extent->truncated_len;
@@ -2856,6 +2903,40 @@ static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
return 0;
}
+static int __readpage_endio_check(struct inode *inode,
+ struct btrfs_io_bio *io_bio,
+ int icsum, struct page *page,
+ int pgoff, u64 start, size_t len)
+{
+ char *kaddr;
+ u32 csum_expected;
+ u32 csum = ~(u32)0;
+ static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+
+ csum_expected = *(((u32 *)io_bio->csum) + icsum);
+
+ kaddr = kmap_atomic(page);
+ csum = btrfs_csum_data(kaddr + pgoff, csum, len);
+ btrfs_csum_final(csum, (char *)&csum);
+ if (csum != csum_expected)
+ goto zeroit;
+
+ kunmap_atomic(kaddr);
+ return 0;
+zeroit:
+ if (__ratelimit(&_rs))
+ btrfs_info(BTRFS_I(inode)->root->fs_info,
+ "csum failed ino %llu off %llu csum %u expected csum %u",
+ btrfs_ino(inode), start, csum, csum_expected);
+ memset(kaddr + pgoff, 1, len);
+ flush_dcache_page(page);
+ kunmap_atomic(kaddr);
+ if (csum_expected == 0)
+ return 0;
+ return -EIO;
+}
+
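__readpage_endio_check() recomputes a checksum over the page range and compares it with the stored value. The sketch below does the same with a plain bitwise CRC32C (the kernel goes through its crypto API and uses a different init/finalize convention) and reacts to a mismatch the same way: report it, then treat the block as bad:

#include <stdint.h>
#include <stdio.h>

/* Bitwise CRC32C (Castagnoli), reflected polynomial 0x82F63B78. */
static uint32_t crc32c(uint32_t crc, const void *buf, size_t len)
{
	const unsigned char *p = buf;

	crc = ~crc;
	while (len--) {
		crc ^= *p++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0x82F63B78 & (0u - (crc & 1)));
	}
	return ~crc;
}

/* Verify one "block": recompute and compare against the stored value. */
static int check_block(const unsigned char *data, size_t len, uint32_t expected)
{
	uint32_t csum = crc32c(0, data, len);

	if (csum != expected) {
		fprintf(stderr, "csum failed: got %u expected %u\n",
			csum, expected);
		return -5;	/* -EIO style result */
	}
	return 0;
}

int main(void)
{
	unsigned char block[64] = "some file data";
	uint32_t stored = crc32c(0, block, sizeof(block));	/* "write" time */

	printf("clean read:     %d\n", check_block(block, sizeof(block), stored));
	block[3] ^= 0xff;					/* corrupt it */
	printf("corrupted read: %d\n", check_block(block, sizeof(block), stored));
	return 0;
}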
/*
* when reads are done, we need to check csums to verify the data is correct
* if there's a match, we allow the bio to finish. If not, the code in
@@ -2868,20 +2949,15 @@ static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
size_t offset = start - page_offset(page);
struct inode *inode = page->mapping->host;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
- char *kaddr;
struct btrfs_root *root = BTRFS_I(inode)->root;
- u32 csum_expected;
- u32 csum = ~(u32)0;
- static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
- DEFAULT_RATELIMIT_BURST);
if (PageChecked(page)) {
ClearPageChecked(page);
- goto good;
+ return 0;
}
if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
- goto good;
+ return 0;
if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
@@ -2891,28 +2967,8 @@ static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
}
phy_offset >>= inode->i_sb->s_blocksize_bits;
- csum_expected = *(((u32 *)io_bio->csum) + phy_offset);
-
- kaddr = kmap_atomic(page);
- csum = btrfs_csum_data(kaddr + offset, csum, end - start + 1);
- btrfs_csum_final(csum, (char *)&csum);
- if (csum != csum_expected)
- goto zeroit;
-
- kunmap_atomic(kaddr);
-good:
- return 0;
-
-zeroit:
- if (__ratelimit(&_rs))
- btrfs_info(root->fs_info, "csum failed ino %llu off %llu csum %u expected csum %u",
- btrfs_ino(page->mapping->host), start, csum, csum_expected);
- memset(kaddr + offset, 1, end - start + 1);
- flush_dcache_page(page);
- kunmap_atomic(kaddr);
- if (csum_expected == 0)
- return 0;
- return -EIO;
+ return __readpage_endio_check(inode, io_bio, phy_offset, page, offset,
+ start, (size_t)(end - start + 1));
}
struct delayed_iput {
@@ -3159,7 +3215,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
path->reada = -1;
key.objectid = BTRFS_ORPHAN_OBJECTID;
- btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
+ key.type = BTRFS_ORPHAN_ITEM_KEY;
key.offset = (u64)-1;
while (1) {
@@ -3186,7 +3242,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
/* make sure the item matches what we want */
if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
break;
- if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
+ if (found_key.type != BTRFS_ORPHAN_ITEM_KEY)
break;
/* release the path since we're done with it */
@@ -3662,7 +3718,8 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
* without delay
*/
if (!btrfs_is_free_space_inode(inode)
- && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
+ && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
+ && !root->fs_info->log_root_recovering) {
btrfs_update_root_times(trans, root);
ret = btrfs_delayed_update_inode(trans, root, inode);
@@ -4085,7 +4142,7 @@ search_again:
fi = NULL;
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
- found_type = btrfs_key_type(&found_key);
+ found_type = found_key.type;
if (found_key.objectid != ino)
break;
@@ -4747,6 +4804,8 @@ void btrfs_evict_inode(struct inode *inode)
/* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
btrfs_wait_ordered_range(inode, 0, (u64)-1);
+ btrfs_free_io_failure_record(inode, 0, (u64)-1);
+
if (root->fs_info->log_root_recovering) {
BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
&BTRFS_I(inode)->runtime_flags));
@@ -5331,7 +5390,7 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
btrfs_get_delayed_items(inode, &ins_list, &del_list);
}
- btrfs_set_key_type(&key, key_type);
+ key.type = key_type;
key.offset = ctx->pos;
key.objectid = btrfs_ino(inode);
@@ -5356,7 +5415,7 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
if (found_key.objectid != key.objectid)
break;
- if (btrfs_key_type(&found_key) != key_type)
+ if (found_key.type != key_type)
break;
if (found_key.offset < ctx->pos)
goto next;
@@ -5568,7 +5627,7 @@ static int btrfs_set_inode_index_count(struct inode *inode)
int ret;
key.objectid = btrfs_ino(inode);
- btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
+ key.type = BTRFS_DIR_INDEX_KEY;
key.offset = (u64)-1;
path = btrfs_alloc_path();
@@ -5600,7 +5659,7 @@ static int btrfs_set_inode_index_count(struct inode *inode)
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
if (found_key.objectid != btrfs_ino(inode) ||
- btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
+ found_key.type != BTRFS_DIR_INDEX_KEY) {
BTRFS_I(inode)->index_cnt = 2;
goto out;
}
@@ -5718,7 +5777,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
key[0].objectid = objectid;
- btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
+ key[0].type = BTRFS_INODE_ITEM_KEY;
key[0].offset = 0;
sizes[0] = sizeof(struct btrfs_inode_item);
@@ -5731,7 +5790,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
* add more hard links than can fit in the ref item.
*/
key[1].objectid = objectid;
- btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
+ key[1].type = BTRFS_INODE_REF_KEY;
key[1].offset = ref_objectid;
sizes[1] = name_len + sizeof(*ref);
@@ -5740,7 +5799,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
location = &BTRFS_I(inode)->location;
location->objectid = objectid;
location->offset = 0;
- btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
+ location->type = BTRFS_INODE_ITEM_KEY;
ret = btrfs_insert_inode_locked(inode);
if (ret < 0)
@@ -5832,7 +5891,7 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
} else {
key.objectid = ino;
- btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
+ key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
}
@@ -6191,21 +6250,60 @@ out_fail_inode:
goto out_fail;
}
+/* Find the next extent map after a given one; the caller must hold the required locks */
+static struct extent_map *next_extent_map(struct extent_map *em)
+{
+ struct rb_node *next;
+
+ next = rb_next(&em->rb_node);
+ if (!next)
+ return NULL;
+ return container_of(next, struct extent_map, rb_node);
+}
+
+static struct extent_map *prev_extent_map(struct extent_map *em)
+{
+ struct rb_node *prev;
+
+ prev = rb_prev(&em->rb_node);
+ if (!prev)
+ return NULL;
+ return container_of(prev, struct extent_map, rb_node);
+}
+
/* helper for btfs_get_extent. Given an existing extent in the tree,
+ * the existing extent is the nearest extent to map_start,
* and an extent that you want to insert, deal with overlap and insert
- * the new extent into the tree.
+ * the best-fitting new extent into the tree.
*/
static int merge_extent_mapping(struct extent_map_tree *em_tree,
struct extent_map *existing,
struct extent_map *em,
u64 map_start)
{
+ struct extent_map *prev;
+ struct extent_map *next;
+ u64 start;
+ u64 end;
u64 start_diff;
BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
- start_diff = map_start - em->start;
- em->start = map_start;
- em->len = existing->start - em->start;
+
+ if (existing->start > map_start) {
+ next = existing;
+ prev = prev_extent_map(next);
+ } else {
+ prev = existing;
+ next = next_extent_map(prev);
+ }
+
+ start = prev ? extent_map_end(prev) : em->start;
+ start = max_t(u64, start, em->start);
+ end = next ? next->start : extent_map_end(em);
+ end = min_t(u64, end, extent_map_end(em));
+ start_diff = start - em->start;
+ em->start = start;
+ em->len = end - start;
if (em->block_start < EXTENT_MAP_LAST_BYTE &&
!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
em->block_start += start_diff;
@@ -6333,7 +6431,7 @@ again:
struct btrfs_file_extent_item);
/* are we inside the extent that was found? */
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
- found_type = btrfs_key_type(&found_key);
+ found_type = found_key.type;
if (found_key.objectid != objectid ||
found_type != BTRFS_EXTENT_DATA_KEY) {
/*
@@ -6482,25 +6580,21 @@ insert:
ret = 0;
- existing = lookup_extent_mapping(em_tree, start, len);
- if (existing && (existing->start > start ||
- existing->start + existing->len <= start)) {
+ existing = search_extent_mapping(em_tree, start, len);
+ /*
+	 * existing will always be non-NULL, since there must be an
+	 * extent causing the -EEXIST.
+ */
+ if (start >= extent_map_end(existing) ||
+ start <= existing->start) {
+ /*
+ * The existing extent map is the one nearest to
+		 * the [start, start + len) range that overlaps it.
+ */
+ err = merge_extent_mapping(em_tree, existing,
+ em, start);
free_extent_map(existing);
- existing = NULL;
- }
- if (!existing) {
- existing = lookup_extent_mapping(em_tree, em->start,
- em->len);
- if (existing) {
- err = merge_extent_mapping(em_tree, existing,
- em, start);
- free_extent_map(existing);
- if (err) {
- free_extent_map(em);
- em = NULL;
- }
- } else {
- err = -EIO;
+ if (err) {
free_extent_map(em);
em = NULL;
}
@@ -7112,8 +7206,10 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
block_start, len,
orig_block_len,
ram_bytes, type);
- if (IS_ERR(em))
+ if (IS_ERR(em)) {
+ ret = PTR_ERR(em);
goto unlock_err;
+ }
}
ret = btrfs_add_ordered_extent_dio(inode, start,
@@ -7188,45 +7284,277 @@ unlock_err:
return ret;
}
-static void btrfs_endio_direct_read(struct bio *bio, int err)
+static inline int submit_dio_repair_bio(struct inode *inode, struct bio *bio,
+ int rw, int mirror_num)
{
- struct btrfs_dio_private *dip = bio->bi_private;
- struct bio_vec *bvec;
- struct inode *inode = dip->inode;
struct btrfs_root *root = BTRFS_I(inode)->root;
- struct bio *dio_bio;
- u32 *csums = (u32 *)dip->csum;
+ int ret;
+
+ BUG_ON(rw & REQ_WRITE);
+
+ bio_get(bio);
+
+ ret = btrfs_bio_wq_end_io(root->fs_info, bio,
+ BTRFS_WQ_ENDIO_DIO_REPAIR);
+ if (ret)
+ goto err;
+
+ ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);
+err:
+ bio_put(bio);
+ return ret;
+}
+
+static int btrfs_check_dio_repairable(struct inode *inode,
+ struct bio *failed_bio,
+ struct io_failure_record *failrec,
+ int failed_mirror)
+{
+ int num_copies;
+
+ num_copies = btrfs_num_copies(BTRFS_I(inode)->root->fs_info,
+ failrec->logical, failrec->len);
+ if (num_copies == 1) {
+ /*
+ * we only have a single copy of the data, so don't bother with
+ * all the retry and error correction code that follows. no
+ * matter what the error is, it is very likely to persist.
+ */
+ pr_debug("Check DIO Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d\n",
+ num_copies, failrec->this_mirror, failed_mirror);
+ return 0;
+ }
+
+ failrec->failed_mirror = failed_mirror;
+ failrec->this_mirror++;
+ if (failrec->this_mirror == failed_mirror)
+ failrec->this_mirror++;
+
+ if (failrec->this_mirror > num_copies) {
+ pr_debug("Check DIO Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d\n",
+ num_copies, failrec->this_mirror, failed_mirror);
+ return 0;
+ }
+
+ return 1;
+}
+
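btrfs_check_dio_repairable() picks the next mirror to try, skipping the one that just failed and giving up once every copy has been attempted. That selection logic on its own, with illustrative names:

#include <stdio.h>

/*
 * Return the next mirror to read from, or 0 when no retry makes sense:
 * either there is only one copy, or all copies have been tried already.
 */
static int next_mirror(int this_mirror, int failed_mirror, int num_copies)
{
	if (num_copies == 1)
		return 0;	/* a single copy: the error will persist */

	this_mirror++;
	if (this_mirror == failed_mirror)
		this_mirror++;	/* don't re-read the mirror that failed */

	if (this_mirror > num_copies)
		return 0;	/* every copy tried, give up */

	return this_mirror;
}

int main(void)
{
	int m = 0, failed = 1, copies = 2;

	while ((m = next_mirror(m, failed, copies)) != 0)
		printf("retry read from mirror %d\n", m);	/* mirror 2 */
	return 0;
}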
+static int dio_read_error(struct inode *inode, struct bio *failed_bio,
+ struct page *page, u64 start, u64 end,
+ int failed_mirror, bio_end_io_t *repair_endio,
+ void *repair_arg)
+{
+ struct io_failure_record *failrec;
+ struct bio *bio;
+ int isector;
+ int read_mode;
+ int ret;
+
+ BUG_ON(failed_bio->bi_rw & REQ_WRITE);
+
+ ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
+ if (ret)
+ return ret;
+
+ ret = btrfs_check_dio_repairable(inode, failed_bio, failrec,
+ failed_mirror);
+ if (!ret) {
+ free_io_failure(inode, failrec);
+ return -EIO;
+ }
+
+ if (failed_bio->bi_vcnt > 1)
+ read_mode = READ_SYNC | REQ_FAILFAST_DEV;
+ else
+ read_mode = READ_SYNC;
+
+ isector = start - btrfs_io_bio(failed_bio)->logical;
+ isector >>= inode->i_sb->s_blocksize_bits;
+ bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
+ 0, isector, repair_endio, repair_arg);
+ if (!bio) {
+ free_io_failure(inode, failrec);
+ return -EIO;
+ }
+
+ btrfs_debug(BTRFS_I(inode)->root->fs_info,
+ "Repair DIO Read Error: submitting new dio read[%#x] to this_mirror=%d, in_validation=%d\n",
+ read_mode, failrec->this_mirror, failrec->in_validation);
+
+ ret = submit_dio_repair_bio(inode, bio, read_mode,
+ failrec->this_mirror);
+ if (ret) {
+ free_io_failure(inode, failrec);
+ bio_put(bio);
+ }
+
+ return ret;
+}
+
+struct btrfs_retry_complete {
+ struct completion done;
+ struct inode *inode;
+ u64 start;
+ int uptodate;
+};
+
+static void btrfs_retry_endio_nocsum(struct bio *bio, int err)
+{
+ struct btrfs_retry_complete *done = bio->bi_private;
+ struct bio_vec *bvec;
+ int i;
+
+ if (err)
+ goto end;
+
+ done->uptodate = 1;
+ bio_for_each_segment_all(bvec, bio, i)
+ clean_io_failure(done->inode, done->start, bvec->bv_page, 0);
+end:
+ complete(&done->done);
+ bio_put(bio);
+}
+
+static int __btrfs_correct_data_nocsum(struct inode *inode,
+ struct btrfs_io_bio *io_bio)
+{
+ struct bio_vec *bvec;
+ struct btrfs_retry_complete done;
u64 start;
int i;
+ int ret;
+
+ start = io_bio->logical;
+ done.inode = inode;
+
+ bio_for_each_segment_all(bvec, &io_bio->bio, i) {
+try_again:
+ done.uptodate = 0;
+ done.start = start;
+ init_completion(&done.done);
+
+ ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, start,
+ start + bvec->bv_len - 1,
+ io_bio->mirror_num,
+ btrfs_retry_endio_nocsum, &done);
+ if (ret)
+ return ret;
+
+ wait_for_completion(&done.done);
+
+ if (!done.uptodate) {
+ /* We might have another mirror, so try again */
+ goto try_again;
+ }
+
+ start += bvec->bv_len;
+ }
+
+ return 0;
+}
+
+static void btrfs_retry_endio(struct bio *bio, int err)
+{
+ struct btrfs_retry_complete *done = bio->bi_private;
+ struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
+ struct bio_vec *bvec;
+ int uptodate;
+ int ret;
+ int i;
+
+ if (err)
+ goto end;
- start = dip->logical_offset;
+ uptodate = 1;
bio_for_each_segment_all(bvec, bio, i) {
- if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
- struct page *page = bvec->bv_page;
- char *kaddr;
- u32 csum = ~(u32)0;
- unsigned long flags;
-
- local_irq_save(flags);
- kaddr = kmap_atomic(page);
- csum = btrfs_csum_data(kaddr + bvec->bv_offset,
- csum, bvec->bv_len);
- btrfs_csum_final(csum, (char *)&csum);
- kunmap_atomic(kaddr);
- local_irq_restore(flags);
-
- flush_dcache_page(bvec->bv_page);
- if (csum != csums[i]) {
- btrfs_err(root->fs_info, "csum failed ino %llu off %llu csum %u expected csum %u",
- btrfs_ino(inode), start, csum,
- csums[i]);
- err = -EIO;
- }
+ ret = __readpage_endio_check(done->inode, io_bio, i,
+ bvec->bv_page, 0,
+ done->start, bvec->bv_len);
+ if (!ret)
+ clean_io_failure(done->inode, done->start,
+ bvec->bv_page, 0);
+ else
+ uptodate = 0;
+ }
+
+ done->uptodate = uptodate;
+end:
+ complete(&done->done);
+ bio_put(bio);
+}
+
+static int __btrfs_subio_endio_read(struct inode *inode,
+ struct btrfs_io_bio *io_bio, int err)
+{
+ struct bio_vec *bvec;
+ struct btrfs_retry_complete done;
+ u64 start;
+ u64 offset = 0;
+ int i;
+ int ret;
+
+ err = 0;
+ start = io_bio->logical;
+ done.inode = inode;
+
+ bio_for_each_segment_all(bvec, &io_bio->bio, i) {
+ ret = __readpage_endio_check(inode, io_bio, i, bvec->bv_page,
+ 0, start, bvec->bv_len);
+ if (likely(!ret))
+ goto next;
+try_again:
+ done.uptodate = 0;
+ done.start = start;
+ init_completion(&done.done);
+
+ ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, start,
+ start + bvec->bv_len - 1,
+ io_bio->mirror_num,
+ btrfs_retry_endio, &done);
+ if (ret) {
+ err = ret;
+ goto next;
}
+ wait_for_completion(&done.done);
+
+ if (!done.uptodate) {
+ /* We might have another mirror, so try again */
+ goto try_again;
+ }
+next:
+ offset += bvec->bv_len;
start += bvec->bv_len;
}
+ return err;
+}
+
+static int btrfs_subio_endio_read(struct inode *inode,
+ struct btrfs_io_bio *io_bio, int err)
+{
+ bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
+
+ if (skip_csum) {
+ if (unlikely(err))
+ return __btrfs_correct_data_nocsum(inode, io_bio);
+ else
+ return 0;
+ } else {
+ return __btrfs_subio_endio_read(inode, io_bio, err);
+ }
+}
+
+static void btrfs_endio_direct_read(struct bio *bio, int err)
+{
+ struct btrfs_dio_private *dip = bio->bi_private;
+ struct inode *inode = dip->inode;
+ struct bio *dio_bio;
+ struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
+
+ if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED)
+ err = btrfs_subio_endio_read(inode, io_bio, err);
+
unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
dip->logical_offset + dip->bytes - 1);
dio_bio = dip->dio_bio;
@@ -7237,6 +7565,9 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
if (err)
clear_bit(BIO_UPTODATE, &dio_bio->bi_flags);
dio_end_io(dio_bio, err);
+
+ if (io_bio->end_io)
+ io_bio->end_io(io_bio, err);
bio_put(bio);
}
@@ -7302,12 +7633,17 @@ static void btrfs_end_dio_bio(struct bio *bio, int err)
{
struct btrfs_dio_private *dip = bio->bi_private;
+ if (err)
+ btrfs_warn(BTRFS_I(dip->inode)->root->fs_info,
+ "direct IO failed ino %llu rw %lu sector %#Lx len %u err no %d",
+ btrfs_ino(dip->inode), bio->bi_rw,
+ (unsigned long long)bio->bi_iter.bi_sector,
+ bio->bi_iter.bi_size, err);
+
+ if (dip->subio_endio)
+ err = dip->subio_endio(dip->inode, btrfs_io_bio(bio), err);
+
if (err) {
- btrfs_err(BTRFS_I(dip->inode)->root->fs_info,
- "direct IO failed ino %llu rw %lu sector %#Lx len %u err no %d",
- btrfs_ino(dip->inode), bio->bi_rw,
- (unsigned long long)bio->bi_iter.bi_sector,
- bio->bi_iter.bi_size, err);
dip->errors = 1;
/*
@@ -7338,6 +7674,38 @@ static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
return btrfs_bio_alloc(bdev, first_sector, nr_vecs, gfp_flags);
}
+static inline int btrfs_lookup_and_bind_dio_csum(struct btrfs_root *root,
+ struct inode *inode,
+ struct btrfs_dio_private *dip,
+ struct bio *bio,
+ u64 file_offset)
+{
+ struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
+ struct btrfs_io_bio *orig_io_bio = btrfs_io_bio(dip->orig_bio);
+ int ret;
+
+ /*
+ * We load all the csum data we need when we submit
+ * the first bio to reduce the csum tree search and
+ * contention.
+ */
+ if (dip->logical_offset == file_offset) {
+ ret = btrfs_lookup_bio_sums_dio(root, inode, dip->orig_bio,
+ file_offset);
+ if (ret)
+ return ret;
+ }
+
+ if (bio == dip->orig_bio)
+ return 0;
+
+ file_offset -= dip->logical_offset;
+ file_offset >>= inode->i_sb->s_blocksize_bits;
+ io_bio->csum = (u8 *)(((u32 *)orig_io_bio->csum) + file_offset);
+
+ return 0;
+}
+
static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
int rw, u64 file_offset, int skip_sum,
int async_submit)
@@ -7353,7 +7721,8 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
bio_get(bio);
if (!write) {
- ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
+ ret = btrfs_bio_wq_end_io(root->fs_info, bio,
+ BTRFS_WQ_ENDIO_DATA);
if (ret)
goto err;
}
@@ -7376,13 +7745,12 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
if (ret)
goto err;
- } else if (!skip_sum) {
- ret = btrfs_lookup_bio_sums_dio(root, inode, dip, bio,
- file_offset);
+ } else {
+ ret = btrfs_lookup_and_bind_dio_csum(root, inode, dip, bio,
+ file_offset);
if (ret)
goto err;
}
-
map:
ret = btrfs_map_bio(root, rw, bio, 0, async_submit);
err:
@@ -7403,7 +7771,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
u64 submit_len = 0;
u64 map_length;
int nr_pages = 0;
- int ret = 0;
+ int ret;
int async_submit = 0;
map_length = orig_bio->bi_iter.bi_size;
@@ -7414,6 +7782,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
if (map_length >= orig_bio->bi_iter.bi_size) {
bio = orig_bio;
+ dip->flags |= BTRFS_DIO_ORIG_BIO_SUBMITTED;
goto submit;
}
@@ -7430,12 +7799,13 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
bio->bi_private = dip;
bio->bi_end_io = btrfs_end_dio_bio;
+ btrfs_io_bio(bio)->logical = file_offset;
atomic_inc(&dip->pending_bios);
while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
- if (unlikely(map_length < submit_len + bvec->bv_len ||
+ if (map_length < submit_len + bvec->bv_len ||
bio_add_page(bio, bvec->bv_page, bvec->bv_len,
- bvec->bv_offset) < bvec->bv_len)) {
+ bvec->bv_offset) < bvec->bv_len) {
/*
* inc the count before we submit the bio so
* we know the end IO handler won't happen before
@@ -7464,6 +7834,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
goto out_err;
bio->bi_private = dip;
bio->bi_end_io = btrfs_end_dio_bio;
+ btrfs_io_bio(bio)->logical = file_offset;
map_length = orig_bio->bi_iter.bi_size;
ret = btrfs_map_block(root->fs_info, rw,
@@ -7507,11 +7878,10 @@ static void btrfs_submit_direct(int rw, struct bio *dio_bio,
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_dio_private *dip;
struct bio *io_bio;
+ struct btrfs_io_bio *btrfs_bio;
int skip_sum;
- int sum_len;
int write = rw & REQ_WRITE;
int ret = 0;
- u16 csum_size;
skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
@@ -7521,16 +7891,7 @@ static void btrfs_submit_direct(int rw, struct bio *dio_bio,
goto free_ordered;
}
- if (!skip_sum && !write) {
- csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
- sum_len = dio_bio->bi_iter.bi_size >>
- inode->i_sb->s_blocksize_bits;
- sum_len *= csum_size;
- } else {
- sum_len = 0;
- }
-
- dip = kmalloc(sizeof(*dip) + sum_len, GFP_NOFS);
+ dip = kzalloc(sizeof(*dip), GFP_NOFS);
if (!dip) {
ret = -ENOMEM;
goto free_io_bio;
@@ -7542,20 +7903,25 @@ static void btrfs_submit_direct(int rw, struct bio *dio_bio,
dip->bytes = dio_bio->bi_iter.bi_size;
dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9;
io_bio->bi_private = dip;
- dip->errors = 0;
dip->orig_bio = io_bio;
dip->dio_bio = dio_bio;
atomic_set(&dip->pending_bios, 0);
+ btrfs_bio = btrfs_io_bio(io_bio);
+ btrfs_bio->logical = file_offset;
- if (write)
+ if (write) {
io_bio->bi_end_io = btrfs_endio_direct_write;
- else
+ } else {
io_bio->bi_end_io = btrfs_endio_direct_read;
+ dip->subio_endio = btrfs_subio_endio_read;
+ }
ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
if (!ret)
return;
+ if (btrfs_bio->end_io)
+ btrfs_bio->end_io(btrfs_bio, ret);
free_io_bio:
bio_put(io_bio);
@@ -7652,8 +8018,8 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
ret = btrfs_delalloc_reserve_space(inode, count);
if (ret)
goto out;
- } else if (unlikely(test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
- &BTRFS_I(inode)->runtime_flags))) {
+ } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
+ &BTRFS_I(inode)->runtime_flags)) {
inode_dio_done(inode);
flags = DIO_LOCKING | DIO_SKIP_HOLES;
wakeup = false;
@@ -8173,6 +8539,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
ei->last_sub_trans = 0;
ei->logged_trans = 0;
ei->delalloc_bytes = 0;
+ ei->defrag_bytes = 0;
ei->disk_i_size = 0;
ei->flags = 0;
ei->csum_bytes = 0;
@@ -8231,6 +8598,7 @@ void btrfs_destroy_inode(struct inode *inode)
WARN_ON(BTRFS_I(inode)->reserved_extents);
WARN_ON(BTRFS_I(inode)->delalloc_bytes);
WARN_ON(BTRFS_I(inode)->csum_bytes);
+ WARN_ON(BTRFS_I(inode)->defrag_bytes);
/*
* This can happen where we create an inode, but somebody else also
@@ -8646,7 +9014,7 @@ static int __start_delalloc_inodes(struct btrfs_root *root, int delay_iput,
spin_unlock(&root->delalloc_lock);
work = btrfs_alloc_delalloc_work(inode, 0, delay_iput);
- if (unlikely(!work)) {
+ if (!work) {
if (delay_iput)
btrfs_add_delayed_iput(inode);
else
@@ -8832,7 +9200,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
}
key.objectid = btrfs_ino(inode);
key.offset = 0;
- btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
+ key.type = BTRFS_EXTENT_DATA_KEY;
datasize = btrfs_file_extent_calc_inline_size(name_len);
err = btrfs_insert_empty_item(trans, root, path, &key,
datasize);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 8a8e29878c34..0fe1aa047f15 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -332,6 +332,9 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
goto out_drop;
} else {
+ ret = btrfs_set_prop(inode, "btrfs.compression", NULL, 0, 0);
+ if (ret && ret != -ENODATA)
+ goto out_drop;
ip->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS);
}
@@ -477,8 +480,7 @@ static noinline int create_subvol(struct inode *dir,
if (ret)
goto fail;
- leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
- 0, objectid, NULL, 0, 0, 0);
+ leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0);
if (IS_ERR(leaf)) {
ret = PTR_ERR(leaf);
goto fail;
@@ -503,7 +505,7 @@ static noinline int create_subvol(struct inode *dir,
btrfs_set_stack_inode_generation(inode_item, 1);
btrfs_set_stack_inode_size(inode_item, 3);
btrfs_set_stack_inode_nlink(inode_item, 1);
- btrfs_set_stack_inode_nbytes(inode_item, root->leafsize);
+ btrfs_set_stack_inode_nbytes(inode_item, root->nodesize);
btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);
btrfs_set_root_flags(&root_item, 0);
@@ -535,7 +537,7 @@ static noinline int create_subvol(struct inode *dir,
key.objectid = objectid;
key.offset = 0;
- btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
+ key.type = BTRFS_ROOT_ITEM_KEY;
ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
&root_item);
if (ret)
@@ -882,7 +884,7 @@ out_unlock:
* file you want to defrag, we return 0 to let you know to skip this
* part of the file
*/
-static int check_defrag_in_cache(struct inode *inode, u64 offset, int thresh)
+static int check_defrag_in_cache(struct inode *inode, u64 offset, u32 thresh)
{
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct extent_map *em = NULL;
@@ -917,7 +919,7 @@ static int check_defrag_in_cache(struct inode *inode, u64 offset, int thresh)
*/
static int find_new_extents(struct btrfs_root *root,
struct inode *inode, u64 newer_than,
- u64 *off, int thresh)
+ u64 *off, u32 thresh)
{
struct btrfs_path *path;
struct btrfs_key min_key;
@@ -936,12 +938,9 @@ static int find_new_extents(struct btrfs_root *root,
min_key.offset = *off;
while (1) {
- path->keep_locks = 1;
ret = btrfs_search_forward(root, &min_key, path, newer_than);
if (ret != 0)
goto none;
- path->keep_locks = 0;
- btrfs_unlock_up_safe(path, 1);
process_slot:
if (min_key.objectid != ino)
goto none;
@@ -1029,7 +1028,7 @@ static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em)
return ret;
}
-static int should_defrag_range(struct inode *inode, u64 start, int thresh,
+static int should_defrag_range(struct inode *inode, u64 start, u32 thresh,
u64 *last_len, u64 *skip, u64 *defrag_end,
int compress)
{
@@ -1259,7 +1258,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
int ret;
int defrag_count = 0;
int compress_type = BTRFS_COMPRESS_ZLIB;
- int extent_thresh = range->extent_thresh;
+ u32 extent_thresh = range->extent_thresh;
unsigned long max_cluster = (256 * 1024) >> PAGE_CACHE_SHIFT;
unsigned long cluster = max_cluster;
u64 new_align = ~((u64)128 * 1024 - 1);
@@ -1335,8 +1334,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
inode->i_mapping->writeback_index = i;
while (i <= last_index && defrag_count < max_to_defrag &&
- (i < (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
- PAGE_CACHE_SHIFT)) {
+ (i < DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE))) {
/*
* make sure we stop running if someone unmounts
* the FS
@@ -1359,7 +1357,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
* the should_defrag function tells us how much to skip
* bump our counter by the suggested amount
*/
- next = (skip + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ next = DIV_ROUND_UP(skip, PAGE_CACHE_SIZE);
i = max(i + 1, next);
continue;
}
@@ -1554,7 +1552,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
goto out_free;
}
- old_size = device->total_bytes;
+ old_size = btrfs_device_get_total_bytes(device);
if (mod < 0) {
if (new_size > old_size) {
@@ -2089,8 +2087,6 @@ static noinline int search_ioctl(struct inode *inode,
key.type = sk->min_type;
key.offset = sk->min_offset;
- path->keep_locks = 1;
-
while (1) {
ret = btrfs_search_forward(root, &key, path, sk->min_transid);
if (ret != 0) {
@@ -2423,9 +2419,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
goto out_dput;
}
- err = d_invalidate(dentry);
- if (err)
- goto out_unlock;
+ d_invalidate(dentry);
down_write(&root->fs_info->subvol_sem);
@@ -2510,7 +2504,6 @@ out_release:
btrfs_subvolume_release_metadata(root, &block_rsv, qgroup_reserved);
out_up_write:
up_write(&root->fs_info->subvol_sem);
-out_unlock:
if (err) {
spin_lock(&dest->root_item_lock);
root_flags = btrfs_root_flags(&dest->root_item);
@@ -2526,9 +2519,9 @@ out_unlock:
ASSERT(dest->send_in_progress == 0);
/* the last ref */
- if (dest->cache_inode) {
- iput(dest->cache_inode);
- dest->cache_inode = NULL;
+ if (dest->ino_cache_inode) {
+ iput(dest->ino_cache_inode);
+ dest->ino_cache_inode = NULL;
}
}
out_dput:
@@ -2634,6 +2627,9 @@ static long btrfs_ioctl_add_dev(struct btrfs_root *root, void __user *arg)
vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
ret = btrfs_init_new_device(root, vol_args->name);
+ if (!ret)
+		btrfs_info(root->fs_info, "disk added %s", vol_args->name);
+
kfree(vol_args);
out:
mutex_unlock(&root->fs_info->volume_mutex);
@@ -2673,6 +2669,9 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
mutex_unlock(&root->fs_info->volume_mutex);
atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);
+ if (!ret)
+		btrfs_info(root->fs_info, "disk deleted %s", vol_args->name);
+
out:
kfree(vol_args);
err_drop:
@@ -2737,8 +2736,8 @@ static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg)
}
di_args->devid = dev->devid;
- di_args->bytes_used = dev->bytes_used;
- di_args->total_bytes = dev->total_bytes;
+ di_args->bytes_used = btrfs_device_get_bytes_used(dev);
+ di_args->total_bytes = btrfs_device_get_total_bytes(dev);
memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid));
if (dev->name) {
struct rcu_string *name;
@@ -3164,7 +3163,7 @@ static void clone_update_extent_map(struct inode *inode,
em->start + em->len - 1, 0);
}
- if (unlikely(ret))
+ if (ret)
set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
&BTRFS_I(inode)->runtime_flags);
}
@@ -3199,7 +3198,7 @@ static int btrfs_clone(struct inode *src, struct inode *inode,
u64 last_dest_end = destoff;
ret = -ENOMEM;
- buf = vmalloc(btrfs_level_size(root, 0));
+ buf = vmalloc(root->nodesize);
if (!buf)
return ret;
@@ -3252,11 +3251,11 @@ process_slot:
slot = path->slots[0];
btrfs_item_key_to_cpu(leaf, &key, slot);
- if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY ||
+ if (key.type > BTRFS_EXTENT_DATA_KEY ||
key.objectid != btrfs_ino(src))
break;
- if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
+ if (key.type == BTRFS_EXTENT_DATA_KEY) {
struct btrfs_file_extent_item *extent;
int type;
u32 size;
@@ -5283,6 +5282,12 @@ long btrfs_ioctl(struct file *file, unsigned int
if (ret)
return ret;
ret = btrfs_sync_fs(file->f_dentry->d_sb, 1);
+ /*
+ * The transaction thread may want to do more work,
+		 * namely it pokes the cleaner kthread that will start
+ * processing uncleaned subvols.
+ */
+ wake_up_process(root->fs_info->transaction_kthread);
return ret;
}
case BTRFS_IOC_START_SYNC:
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index dfad8514f0da..78285f30909e 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -266,8 +266,7 @@ static int lzo_decompress_biovec(struct list_head *ws,
char *data_in;
unsigned long page_in_index = 0;
unsigned long page_out_index = 0;
- unsigned long total_pages_in = (srclen + PAGE_CACHE_SIZE - 1) /
- PAGE_CACHE_SIZE;
+ unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_CACHE_SIZE);
unsigned long buf_start;
unsigned long buf_offset = 0;
unsigned long bytes;
diff --git a/fs/btrfs/orphan.c b/fs/btrfs/orphan.c
index 65793edb38ca..47767d5b8f0b 100644
--- a/fs/btrfs/orphan.c
+++ b/fs/btrfs/orphan.c
@@ -27,7 +27,7 @@ int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans,
int ret = 0;
key.objectid = BTRFS_ORPHAN_OBJECTID;
- btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
+ key.type = BTRFS_ORPHAN_ITEM_KEY;
key.offset = offset;
path = btrfs_alloc_path();
@@ -48,7 +48,7 @@ int btrfs_del_orphan_item(struct btrfs_trans_handle *trans,
int ret = 0;
key.objectid = BTRFS_ORPHAN_OBJECTID;
- btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
+ key.type = BTRFS_ORPHAN_ITEM_KEY;
key.offset = offset;
path = btrfs_alloc_path();
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index 9626b4ad3b9a..647ab12fdf5d 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -195,7 +195,7 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
for (i = 0 ; i < nr ; i++) {
item = btrfs_item_nr(i);
btrfs_item_key_to_cpu(l, &key, i);
- type = btrfs_key_type(&key);
+ type = key.type;
printk(KERN_INFO "\titem %d key (%llu %u %llu) itemoff %d "
"itemsize %d\n",
i, key.objectid, type, key.offset,
@@ -336,7 +336,6 @@ void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c)
for (i = 0; i < nr; i++) {
struct extent_buffer *next = read_tree_block(root,
btrfs_node_blockptr(c, i),
- btrfs_level_size(root, level - 1),
btrfs_node_ptr_generation(c, i));
if (btrfs_is_leaf(next) &&
level != 1)
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index ded5c601d916..48b60dbf807f 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -539,10 +539,9 @@ static int add_qgroup_item(struct btrfs_trans_handle *trans,
struct extent_buffer *leaf;
struct btrfs_key key;
-#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
- if (unlikely(test_bit(BTRFS_ROOT_DUMMY_ROOT, &quota_root->state)))
+ if (btrfs_test_is_dummy_root(quota_root))
return 0;
-#endif
+
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
@@ -551,9 +550,15 @@ static int add_qgroup_item(struct btrfs_trans_handle *trans,
key.type = BTRFS_QGROUP_INFO_KEY;
key.offset = qgroupid;
+ /*
+ * Avoid a transaction abort by catching -EEXIST here. In that
+ * case, we proceed by re-initializing the existing structure
+ * on disk.
+ */
+
ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
sizeof(*qgroup_info));
- if (ret)
+ if (ret && ret != -EEXIST)
goto out;
leaf = path->nodes[0];
@@ -572,7 +577,7 @@ static int add_qgroup_item(struct btrfs_trans_handle *trans,
key.type = BTRFS_QGROUP_LIMIT_KEY;
ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
sizeof(*qgroup_limit));
- if (ret)
+ if (ret && ret != -EEXIST)
goto out;
leaf = path->nodes[0];
@@ -692,10 +697,9 @@ static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
int ret;
int slot;
-#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
- if (unlikely(test_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state)))
+ if (btrfs_test_is_dummy_root(root))
return 0;
-#endif
+
key.objectid = 0;
key.type = BTRFS_QGROUP_INFO_KEY;
key.offset = qgroup->qgroupid;
@@ -1335,6 +1339,8 @@ int btrfs_qgroup_record_ref(struct btrfs_trans_handle *trans,
INIT_LIST_HEAD(&oper->elem.list);
oper->elem.seq = 0;
+ trace_btrfs_qgroup_record_ref(oper);
+
if (type == BTRFS_QGROUP_OPER_SUB_SUBTREE) {
/*
* If any operation for this bytenr/ref_root combo
@@ -2077,6 +2083,8 @@ static int btrfs_qgroup_account(struct btrfs_trans_handle *trans,
ASSERT(is_fstree(oper->ref_root));
+ trace_btrfs_qgroup_account(oper);
+
switch (oper->type) {
case BTRFS_QGROUP_OPER_ADD_EXCL:
case BTRFS_QGROUP_OPER_SUB_EXCL:
@@ -2237,7 +2245,6 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
if (srcid) {
struct btrfs_root *srcroot;
struct btrfs_key srckey;
- int srcroot_level;
srckey.objectid = srcid;
srckey.type = BTRFS_ROOT_ITEM_KEY;
@@ -2249,8 +2256,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
}
rcu_read_lock();
- srcroot_level = btrfs_header_level(srcroot->node);
- level_size = btrfs_level_size(srcroot, srcroot_level);
+ level_size = srcroot->nodesize;
rcu_read_unlock();
}
@@ -2566,7 +2572,7 @@ qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
found.type != BTRFS_METADATA_ITEM_KEY)
continue;
if (found.type == BTRFS_METADATA_ITEM_KEY)
- num_bytes = fs_info->extent_root->leafsize;
+ num_bytes = fs_info->extent_root->nodesize;
else
num_bytes = found.offset;
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 0a6b6e4bcbb9..6a41631cb959 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -912,7 +912,7 @@ static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
{
unsigned long nr = stripe_len * nr_stripes;
- return (nr + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ return DIV_ROUND_UP(nr, PAGE_CACHE_SIZE);
}
/*
@@ -1442,7 +1442,7 @@ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
struct btrfs_bio *bbio = rbio->bbio;
struct bio_list bio_list;
int ret;
- int nr_pages = (rbio->stripe_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE);
int pagenr;
int stripe;
struct bio *bio;
@@ -1725,7 +1725,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
int pagenr, stripe;
void **pointers;
int faila = -1, failb = -1;
- int nr_pages = (rbio->stripe_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE);
struct page *page;
int err;
int i;
@@ -1940,7 +1940,7 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
struct btrfs_bio *bbio = rbio->bbio;
struct bio_list bio_list;
int ret;
- int nr_pages = (rbio->stripe_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE);
int pagenr;
int stripe;
struct bio *bio;
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index 20408c6b665a..b63ae20618fb 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -347,7 +347,7 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
if (!re)
return NULL;
- blocksize = btrfs_level_size(root, level);
+ blocksize = root->nodesize;
re->logical = logical;
re->blocksize = blocksize;
re->top = *top;
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 65245a07275b..74257d6436ad 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -736,7 +736,8 @@ again:
err = ret;
goto out;
}
- BUG_ON(!ret || !path1->slots[0]);
+ ASSERT(ret);
+ ASSERT(path1->slots[0]);
path1->slots[0]--;
@@ -746,10 +747,10 @@ again:
* the backref was added previously when processing
* backref of type BTRFS_TREE_BLOCK_REF_KEY
*/
- BUG_ON(!list_is_singular(&cur->upper));
+ ASSERT(list_is_singular(&cur->upper));
edge = list_entry(cur->upper.next, struct backref_edge,
list[LOWER]);
- BUG_ON(!list_empty(&edge->list[UPPER]));
+ ASSERT(list_empty(&edge->list[UPPER]));
exist = edge->node[UPPER];
/*
* add the upper level block to pending list if we need
@@ -831,7 +832,7 @@ again:
cur->cowonly = 1;
}
#else
- BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
+ ASSERT(key.type != BTRFS_EXTENT_REF_V0_KEY);
if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
#endif
if (key.objectid == key.offset) {
@@ -840,7 +841,7 @@ again:
* backref of this type.
*/
root = find_reloc_root(rc, cur->bytenr);
- BUG_ON(!root);
+ ASSERT(root);
cur->root = root;
break;
}
@@ -868,7 +869,7 @@ again:
} else {
upper = rb_entry(rb_node, struct backref_node,
rb_node);
- BUG_ON(!upper->checked);
+ ASSERT(upper->checked);
INIT_LIST_HEAD(&edge->list[UPPER]);
}
list_add_tail(&edge->list[LOWER], &cur->upper);
@@ -892,7 +893,7 @@ again:
if (btrfs_root_level(&root->root_item) == cur->level) {
/* tree root */
- BUG_ON(btrfs_root_bytenr(&root->root_item) !=
+ ASSERT(btrfs_root_bytenr(&root->root_item) ==
cur->bytenr);
if (should_ignore_root(root))
list_add(&cur->list, &useless);
@@ -927,7 +928,7 @@ again:
need_check = true;
for (; level < BTRFS_MAX_LEVEL; level++) {
if (!path2->nodes[level]) {
- BUG_ON(btrfs_root_bytenr(&root->root_item) !=
+ ASSERT(btrfs_root_bytenr(&root->root_item) ==
lower->bytenr);
if (should_ignore_root(root))
list_add(&lower->list, &useless);
@@ -977,12 +978,15 @@ again:
need_check = false;
list_add_tail(&edge->list[UPPER],
&list);
- } else
+ } else {
+ if (upper->checked)
+ need_check = true;
INIT_LIST_HEAD(&edge->list[UPPER]);
+ }
} else {
upper = rb_entry(rb_node, struct backref_node,
rb_node);
- BUG_ON(!upper->checked);
+ ASSERT(upper->checked);
INIT_LIST_HEAD(&edge->list[UPPER]);
if (!upper->owner)
upper->owner = btrfs_header_owner(eb);
@@ -1026,7 +1030,7 @@ next:
* everything goes well, connect backref nodes and insert backref nodes
* into the cache.
*/
- BUG_ON(!node->checked);
+ ASSERT(node->checked);
cowonly = node->cowonly;
if (!cowonly) {
rb_node = tree_insert(&cache->rb_root, node->bytenr,
@@ -1062,8 +1066,21 @@ next:
continue;
}
- BUG_ON(!upper->checked);
- BUG_ON(cowonly != upper->cowonly);
+ if (!upper->checked) {
+ /*
+ * Still want to blow up for developers since this is a
+ * logic bug.
+ */
+ ASSERT(0);
+ err = -EINVAL;
+ goto out;
+ }
+ if (cowonly != upper->cowonly) {
+ ASSERT(0);
+ err = -EINVAL;
+ goto out;
+ }
+
if (!cowonly) {
rb_node = tree_insert(&cache->rb_root, upper->bytenr,
&upper->rb_node);
@@ -1086,7 +1103,7 @@ next:
while (!list_empty(&useless)) {
upper = list_entry(useless.next, struct backref_node, list);
list_del_init(&upper->list);
- BUG_ON(!list_empty(&upper->upper));
+ ASSERT(list_empty(&upper->upper));
if (upper == node)
node = NULL;
if (upper->lowest) {
@@ -1119,29 +1136,45 @@ out:
if (err) {
while (!list_empty(&useless)) {
lower = list_entry(useless.next,
- struct backref_node, upper);
- list_del_init(&lower->upper);
+ struct backref_node, list);
+ list_del_init(&lower->list);
}
- upper = node;
- INIT_LIST_HEAD(&list);
- while (upper) {
- if (RB_EMPTY_NODE(&upper->rb_node)) {
- list_splice_tail(&upper->upper, &list);
- free_backref_node(cache, upper);
- }
-
- if (list_empty(&list))
- break;
-
- edge = list_entry(list.next, struct backref_edge,
- list[LOWER]);
+ while (!list_empty(&list)) {
+ edge = list_first_entry(&list, struct backref_edge,
+ list[UPPER]);
+ list_del(&edge->list[UPPER]);
list_del(&edge->list[LOWER]);
+ lower = edge->node[LOWER];
upper = edge->node[UPPER];
free_backref_edge(cache, edge);
+
+ /*
+ * Lower is no longer linked to any upper backref nodes
+			 * and isn't in the cache, so we can free it ourselves.
+ */
+ if (list_empty(&lower->upper) &&
+ RB_EMPTY_NODE(&lower->rb_node))
+ list_add(&lower->list, &useless);
+
+ if (!RB_EMPTY_NODE(&upper->rb_node))
+ continue;
+
+			/* Add this guy's upper edges to the list to process */
+ list_for_each_entry(edge, &upper->upper, list[LOWER])
+ list_add_tail(&edge->list[UPPER], &list);
+ if (list_empty(&upper->upper))
+ list_add(&upper->list, &useless);
+ }
+
+ while (!list_empty(&useless)) {
+ lower = list_entry(useless.next,
+ struct backref_node, list);
+ list_del_init(&lower->list);
+ free_backref_node(cache, lower);
}
return ERR_PTR(err);
}
- BUG_ON(node && node->detached);
+ ASSERT(!node || !node->detached);
return node;
}
@@ -1787,7 +1820,7 @@ again:
btrfs_node_key_to_cpu(parent, next_key, slot + 1);
old_bytenr = btrfs_node_blockptr(parent, slot);
- blocksize = btrfs_level_size(dest, level - 1);
+ blocksize = dest->nodesize;
old_ptr_gen = btrfs_node_ptr_generation(parent, slot);
if (level <= max_level) {
@@ -1813,8 +1846,7 @@ again:
break;
}
- eb = read_tree_block(dest, old_bytenr, blocksize,
- old_ptr_gen);
+ eb = read_tree_block(dest, old_bytenr, old_ptr_gen);
if (!eb || !extent_buffer_uptodate(eb)) {
ret = (!eb) ? -ENOMEM : -EIO;
free_extent_buffer(eb);
@@ -1944,7 +1976,6 @@ int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
u64 bytenr;
u64 ptr_gen = 0;
u64 last_snapshot;
- u32 blocksize;
u32 nritems;
last_snapshot = btrfs_root_last_snapshot(&root->root_item);
@@ -1970,8 +2001,7 @@ int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
}
bytenr = btrfs_node_blockptr(eb, path->slots[i]);
- blocksize = btrfs_level_size(root, i - 1);
- eb = read_tree_block(root, bytenr, blocksize, ptr_gen);
+ eb = read_tree_block(root, bytenr, ptr_gen);
if (!eb || !extent_buffer_uptodate(eb)) {
free_extent_buffer(eb);
return -EIO;
@@ -2316,7 +2346,7 @@ void free_reloc_roots(struct list_head *list)
}
static noinline_for_stack
-int merge_reloc_roots(struct reloc_control *rc)
+void merge_reloc_roots(struct reloc_control *rc)
{
struct btrfs_root *root;
struct btrfs_root *reloc_root;
@@ -2397,7 +2427,6 @@ out:
}
BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
- return ret;
}
static void free_block_list(struct rb_root *blocks)
@@ -2544,8 +2573,7 @@ u64 calcu_metadata_size(struct reloc_control *rc,
if (next->processed && (reserve || next != node))
break;
- num_bytes += btrfs_level_size(rc->extent_root,
- next->level);
+ num_bytes += rc->extent_root->nodesize;
if (list_empty(&next->upper))
break;
@@ -2679,9 +2707,9 @@ static int do_relocation(struct btrfs_trans_handle *trans,
goto next;
}
- blocksize = btrfs_level_size(root, node->level);
+ blocksize = root->nodesize;
generation = btrfs_node_ptr_generation(upper->eb, slot);
- eb = read_tree_block(root, bytenr, blocksize, generation);
+ eb = read_tree_block(root, bytenr, generation);
if (!eb || !extent_buffer_uptodate(eb)) {
free_extent_buffer(eb);
err = -EIO;
@@ -2789,7 +2817,7 @@ static void __mark_block_processed(struct reloc_control *rc,
u32 blocksize;
if (node->level == 0 ||
in_block_group(node->bytenr, rc->block_group)) {
- blocksize = btrfs_level_size(rc->extent_root, node->level);
+ blocksize = rc->extent_root->nodesize;
mark_block_processed(rc, node->bytenr, blocksize);
}
node->processed = 1;
@@ -2843,7 +2871,7 @@ static int get_tree_block_key(struct reloc_control *rc,
BUG_ON(block->key_ready);
eb = read_tree_block(rc->extent_root, block->bytenr,
- block->key.objectid, block->key.offset);
+ block->key.offset);
if (!eb || !extent_buffer_uptodate(eb)) {
free_extent_buffer(eb);
return -EIO;
@@ -2858,20 +2886,6 @@ static int get_tree_block_key(struct reloc_control *rc,
return 0;
}
-static int reada_tree_block(struct reloc_control *rc,
- struct tree_block *block)
-{
- BUG_ON(block->key_ready);
- if (block->key.type == BTRFS_METADATA_ITEM_KEY)
- readahead_tree_block(rc->extent_root, block->bytenr,
- block->key.objectid,
- rc->extent_root->leafsize);
- else
- readahead_tree_block(rc->extent_root, block->bytenr,
- block->key.objectid, block->key.offset);
- return 0;
-}
-
/*
* helper function to relocate a tree block
*/
@@ -2951,7 +2965,8 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans,
while (rb_node) {
block = rb_entry(rb_node, struct tree_block, rb_node);
if (!block->key_ready)
- reada_tree_block(rc, block);
+ readahead_tree_block(rc->extent_root, block->bytenr,
+ block->key.objectid);
rb_node = rb_next(rb_node);
}
@@ -3313,7 +3328,7 @@ static int add_tree_block(struct reloc_control *rc,
return -ENOMEM;
block->bytenr = extent_key->objectid;
- block->key.objectid = rc->extent_root->leafsize;
+ block->key.objectid = rc->extent_root->nodesize;
block->key.offset = generation;
block->level = level;
block->key_ready = 0;
@@ -3640,7 +3655,7 @@ int add_data_references(struct reloc_control *rc,
struct btrfs_extent_inline_ref *iref;
unsigned long ptr;
unsigned long end;
- u32 blocksize = btrfs_level_size(rc->extent_root, 0);
+ u32 blocksize = rc->extent_root->nodesize;
int ret = 0;
int err = 0;
@@ -3783,7 +3798,7 @@ next:
}
if (key.type == BTRFS_METADATA_ITEM_KEY &&
- key.objectid + rc->extent_root->leafsize <=
+ key.objectid + rc->extent_root->nodesize <=
rc->search_start) {
path->slots[0]++;
goto next;
@@ -3801,7 +3816,7 @@ next:
rc->search_start = key.objectid + key.offset;
else
rc->search_start = key.objectid +
- rc->extent_root->leafsize;
+ rc->extent_root->nodesize;
memcpy(extent_key, &key, sizeof(key));
return 0;
}
@@ -4096,7 +4111,6 @@ static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
BTRFS_INODE_PREALLOC);
btrfs_mark_buffer_dirty(leaf);
- btrfs_release_path(path);
out:
btrfs_free_path(path);
return ret;
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index f4a41f37be22..efa083113827 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -137,7 +137,6 @@ struct scrub_ctx {
int pages_per_rd_bio;
u32 sectorsize;
u32 nodesize;
- u32 leafsize;
int is_dev_replace;
struct scrub_wr_ctx wr_ctx;
@@ -178,17 +177,12 @@ struct scrub_copy_nocow_ctx {
struct scrub_warning {
struct btrfs_path *path;
u64 extent_item_size;
- char *scratch_buf;
- char *msg_buf;
const char *errstr;
sector_t sector;
u64 logical;
struct btrfs_device *dev;
- int msg_bufsize;
- int scratch_bufsize;
};
-
static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
@@ -438,7 +432,6 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
}
sctx->first_free = 0;
sctx->nodesize = dev->dev_root->nodesize;
- sctx->leafsize = dev->dev_root->leafsize;
sctx->sectorsize = dev->dev_root->sectorsize;
atomic_set(&sctx->bios_in_flight, 0);
atomic_set(&sctx->workers_pending, 0);
@@ -553,7 +546,6 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
u64 ref_root;
u32 item_size;
u8 ref_level;
- const int bufsize = 4096;
int ret;
WARN_ON(sblock->page_count < 1);
@@ -561,18 +553,13 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
fs_info = sblock->sctx->dev_root->fs_info;
path = btrfs_alloc_path();
+ if (!path)
+ return;
- swarn.scratch_buf = kmalloc(bufsize, GFP_NOFS);
- swarn.msg_buf = kmalloc(bufsize, GFP_NOFS);
swarn.sector = (sblock->pagev[0]->physical) >> 9;
swarn.logical = sblock->pagev[0]->logical;
swarn.errstr = errstr;
swarn.dev = NULL;
- swarn.msg_bufsize = bufsize;
- swarn.scratch_bufsize = bufsize;
-
- if (!path || !swarn.scratch_buf || !swarn.msg_buf)
- goto out;
ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
&flags);
@@ -613,8 +600,6 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
out:
btrfs_free_path(path);
- kfree(swarn.scratch_buf);
- kfree(swarn.msg_buf);
}
static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
@@ -681,9 +666,9 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
ret = -EIO;
goto out;
}
- fs_info = BTRFS_I(inode)->root->fs_info;
- ret = repair_io_failure(fs_info, offset, PAGE_SIZE,
+ ret = repair_io_failure(inode, offset, PAGE_SIZE,
fixup->logical, page,
+ offset - page_offset(page),
fixup->mirror_num);
unlock_page(page);
corrected = !ret;
@@ -1361,6 +1346,16 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
return;
}
+static inline int scrub_check_fsid(u8 fsid[],
+ struct scrub_page *spage)
+{
+ struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices;
+ int ret;
+
+ ret = memcmp(fsid, fs_devices->fsid, BTRFS_UUID_SIZE);
+ return !ret;
+}
+
static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
struct scrub_block *sblock,
int is_metadata, int have_csum,
@@ -1380,7 +1375,7 @@ static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
h = (struct btrfs_header *)mapped_buffer;
if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h) ||
- memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE) ||
+ !scrub_check_fsid(h->fsid, sblock->pagev[0]) ||
memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
BTRFS_UUID_SIZE)) {
sblock->header_error = 1;
@@ -1751,14 +1746,13 @@ static int scrub_checksum_tree_block(struct scrub_block *sblock)
if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h))
++fail;
- if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
+ if (!scrub_check_fsid(h->fsid, sblock->pagev[0]))
++fail;
if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
BTRFS_UUID_SIZE))
++fail;
- WARN_ON(sctx->nodesize != sctx->leafsize);
len = sctx->nodesize - BTRFS_CSUM_SIZE;
mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
@@ -1791,8 +1785,6 @@ static int scrub_checksum_super(struct scrub_block *sblock)
{
struct btrfs_super_block *s;
struct scrub_ctx *sctx = sblock->sctx;
- struct btrfs_root *root = sctx->dev_root;
- struct btrfs_fs_info *fs_info = root->fs_info;
u8 calculated_csum[BTRFS_CSUM_SIZE];
u8 on_disk_csum[BTRFS_CSUM_SIZE];
struct page *page;
@@ -1817,7 +1809,7 @@ static int scrub_checksum_super(struct scrub_block *sblock)
if (sblock->pagev[0]->generation != btrfs_super_generation(s))
++fail_gen;
- if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
+ if (!scrub_check_fsid(s->fsid, sblock->pagev[0]))
++fail_cor;
len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
@@ -2196,7 +2188,6 @@ static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
sctx->stat.data_bytes_scrubbed += len;
spin_unlock(&sctx->stat_lock);
} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
- WARN_ON(sctx->nodesize != sctx->leafsize);
blocksize = sctx->nodesize;
spin_lock(&sctx->stat_lock);
sctx->stat.tree_extents_scrubbed++;
@@ -2487,7 +2478,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
btrfs_item_key_to_cpu(l, &key, slot);
if (key.type == BTRFS_METADATA_ITEM_KEY)
- bytes = root->leafsize;
+ bytes = root->nodesize;
else
bytes = key.offset;
@@ -2714,7 +2705,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
if (found_key.objectid != scrub_dev->devid)
break;
- if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY)
+ if (found_key.type != BTRFS_DEV_EXTENT_KEY)
break;
if (found_key.offset >= end)
@@ -2828,11 +2819,16 @@ static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
return -EIO;
- gen = root->fs_info->last_trans_committed;
+	/* Seed devices of a new filesystem have their own generation. */
+ if (scrub_dev->fs_devices != root->fs_info->fs_devices)
+ gen = scrub_dev->generation;
+ else
+ gen = root->fs_info->last_trans_committed;
for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
bytenr = btrfs_sb_offset(i);
- if (bytenr + BTRFS_SUPER_INFO_SIZE > scrub_dev->total_bytes)
+ if (bytenr + BTRFS_SUPER_INFO_SIZE >
+ scrub_dev->commit_total_bytes)
break;
ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
@@ -2910,17 +2906,6 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
if (btrfs_fs_closing(fs_info))
return -EINVAL;
- /*
- * check some assumptions
- */
- if (fs_info->chunk_root->nodesize != fs_info->chunk_root->leafsize) {
- btrfs_err(fs_info,
- "scrub: size assumption nodesize == leafsize (%d == %d) fails",
- fs_info->chunk_root->nodesize,
- fs_info->chunk_root->leafsize);
- return -EINVAL;
- }
-
if (fs_info->chunk_root->nodesize > BTRFS_STRIPE_LEN) {
/*
* in this case scrub is unable to calculate the checksum
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 6528aa662181..874828dd0a86 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -515,7 +515,8 @@ static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
set_fs(KERNEL_DS);
while (pos < len) {
- ret = vfs_write(filp, (char *)buf + pos, len - pos, off);
+ ret = vfs_write(filp, (__force const char __user *)buf + pos,
+ len - pos, off);
/* TODO handle that correctly */
/*if (ret == -ERESTARTSYS) {
continue;
@@ -985,11 +986,13 @@ static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
int num;
u8 type;
- if (found_key->type == BTRFS_XATTR_ITEM_KEY)
- buf_len = BTRFS_MAX_XATTR_SIZE(root);
- else
- buf_len = PATH_MAX;
-
+ /*
+ * Start with a small buffer (1 page). If later we end up needing more
+ * space, which can happen for xattrs on a fs with a leaf size greater
+	 * than the page size, attempt to increase the buffer. Typically xattr
+ * values are small.
+ */
+ buf_len = PATH_MAX;
buf = kmalloc(buf_len, GFP_NOFS);
if (!buf) {
ret = -ENOMEM;
@@ -1016,7 +1019,7 @@ static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
ret = -ENAMETOOLONG;
goto out;
}
- if (name_len + data_len > buf_len) {
+ if (name_len + data_len > BTRFS_MAX_XATTR_SIZE(root)) {
ret = -E2BIG;
goto out;
}
@@ -1024,12 +1027,34 @@ static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
/*
* Path too long
*/
- if (name_len + data_len > buf_len) {
+ if (name_len + data_len > PATH_MAX) {
ret = -ENAMETOOLONG;
goto out;
}
}
+ if (name_len + data_len > buf_len) {
+ buf_len = name_len + data_len;
+ if (is_vmalloc_addr(buf)) {
+ vfree(buf);
+ buf = NULL;
+ } else {
+ char *tmp = krealloc(buf, buf_len,
+ GFP_NOFS | __GFP_NOWARN);
+
+ if (!tmp)
+ kfree(buf);
+ buf = tmp;
+ }
+ if (!buf) {
+ buf = vmalloc(buf_len);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+ }
+
read_extent_buffer(eb, buf, (unsigned long)(di + 1),
name_len + data_len);
@@ -1050,7 +1075,7 @@ static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
}
out:
- kfree(buf);
+ kvfree(buf);
return ret;
}
@@ -3302,7 +3327,7 @@ static int wait_for_parent_move(struct send_ctx *sctx,
if (ret < 0 && ret != -ENOENT) {
goto out;
} else if (ret == -ENOENT) {
- ret = 1;
+ ret = 0;
break;
}
@@ -5703,7 +5728,7 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
NULL);
sort_clone_roots = 1;
- current->journal_info = (void *)BTRFS_SEND_TRANS_STUB;
+ current->journal_info = BTRFS_SEND_TRANS_STUB;
ret = send_subvol(sctx);
current->journal_info = NULL;
if (ret < 0)
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index c4124de4435b..a2b97ef10317 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -60,6 +60,7 @@
#include "backref.h"
#include "tests/btrfs-tests.h"
+#include "qgroup.h"
#define CREATE_TRACE_POINTS
#include <trace/events/btrfs.h>
@@ -307,13 +308,7 @@ void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
static void btrfs_put_super(struct super_block *sb)
{
- (void)close_ctree(btrfs_sb(sb)->tree_root);
- /* FIXME: need to fix VFS to return error? */
- /* AV: return it _where_? ->put_super() can be triggered by any number
- * of async events, up to and including delivery of SIGKILL to the
- * last process that kept it busy. Or segfault in the aforementioned
- * process... Whom would you report that to?
- */
+ close_ctree(btrfs_sb(sb)->tree_root);
}
enum {
@@ -400,7 +395,6 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
int ret = 0;
char *compress_type;
bool compress_force = false;
- bool compress = false;
cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
if (cache_gen)
@@ -478,7 +472,6 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
/* Fallthrough */
case Opt_compress:
case Opt_compress_type:
- compress = true;
if (token == Opt_compress ||
token == Opt_compress_force ||
strcmp(args[0].from, "zlib") == 0) {
@@ -508,11 +501,18 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
btrfs_set_and_info(root, FORCE_COMPRESS,
"force %s compression",
compress_type);
- } else if (compress) {
+ } else {
if (!btrfs_test_opt(root, COMPRESS))
btrfs_info(root->fs_info,
"btrfs: use %s compression",
compress_type);
+ /*
+ * If we remount from compress-force=xxx to
+					 * compress=xxx, we need to clear the FORCE_COMPRESS
+					 * flag; otherwise, there is no way for users
+					 * to disable forcible compression separately.
+ */
+ btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
}
break;
case Opt_ssd:
@@ -1014,7 +1014,7 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
seq_puts(seq, ",nodatacow");
if (btrfs_test_opt(root, NOBARRIER))
seq_puts(seq, ",nobarrier");
- if (info->max_inline != 8192 * 1024)
+ if (info->max_inline != BTRFS_DEFAULT_MAX_INLINE)
seq_printf(seq, ",max_inline=%llu", info->max_inline);
if (info->alloc_start != 0)
seq_printf(seq, ",alloc_start=%llu", info->alloc_start);
@@ -1215,6 +1215,56 @@ static struct dentry *mount_subvol(const char *subvol_name, int flags,
return root;
}
+static int parse_security_options(char *orig_opts,
+ struct security_mnt_opts *sec_opts)
+{
+ char *secdata = NULL;
+ int ret = 0;
+
+ secdata = alloc_secdata();
+ if (!secdata)
+ return -ENOMEM;
+ ret = security_sb_copy_data(orig_opts, secdata);
+ if (ret) {
+ free_secdata(secdata);
+ return ret;
+ }
+ ret = security_sb_parse_opts_str(secdata, sec_opts);
+ free_secdata(secdata);
+ return ret;
+}
+
+static int setup_security_options(struct btrfs_fs_info *fs_info,
+ struct super_block *sb,
+ struct security_mnt_opts *sec_opts)
+{
+ int ret = 0;
+
+ /*
+ * Call security_sb_set_mnt_opts() to check whether new sec_opts
+ * is valid.
+ */
+ ret = security_sb_set_mnt_opts(sb, sec_opts, 0, NULL);
+ if (ret)
+ return ret;
+
+#ifdef CONFIG_SECURITY
+ if (!fs_info->security_opts.num_mnt_opts) {
+ /* first time security setup, copy sec_opts to fs_info */
+ memcpy(&fs_info->security_opts, sec_opts, sizeof(*sec_opts));
+ } else {
+ /*
+		 * Since SELinux (the only one that supports security_mnt_opts)
+		 * does NOT support changing context during remount/mount of
+		 * the same sb, this must be the same or part of the same
+		 * security options, so just free it.
+ */
+ security_free_mnt_opts(sec_opts);
+ }
+#endif
+ return ret;
+}
+
/*
* Find a superblock for the given device / mount point.
*
@@ -1229,6 +1279,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
struct dentry *root;
struct btrfs_fs_devices *fs_devices = NULL;
struct btrfs_fs_info *fs_info = NULL;
+ struct security_mnt_opts new_sec_opts;
fmode_t mode = FMODE_READ;
char *subvol_name = NULL;
u64 subvol_objectid = 0;
@@ -1251,9 +1302,16 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
return root;
}
+ security_init_mnt_opts(&new_sec_opts);
+ if (data) {
+ error = parse_security_options(data, &new_sec_opts);
+ if (error)
+ return ERR_PTR(error);
+ }
+
error = btrfs_scan_one_device(device_name, mode, fs_type, &fs_devices);
if (error)
- return ERR_PTR(error);
+ goto error_sec_opts;
/*
* Setup a dummy root and fs_info for test/set super. This is because
@@ -1262,13 +1320,16 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
* then open_ctree will properly initialize everything later.
*/
fs_info = kzalloc(sizeof(struct btrfs_fs_info), GFP_NOFS);
- if (!fs_info)
- return ERR_PTR(-ENOMEM);
+ if (!fs_info) {
+ error = -ENOMEM;
+ goto error_sec_opts;
+ }
fs_info->fs_devices = fs_devices;
fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_NOFS);
fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_NOFS);
+ security_init_mnt_opts(&fs_info->security_opts);
if (!fs_info->super_copy || !fs_info->super_for_commit) {
error = -ENOMEM;
goto error_fs_info;
@@ -1306,8 +1367,19 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
}
root = !error ? get_default_root(s, subvol_objectid) : ERR_PTR(error);
- if (IS_ERR(root))
+ if (IS_ERR(root)) {
+ deactivate_locked_super(s);
+ error = PTR_ERR(root);
+ goto error_sec_opts;
+ }
+
+ fs_info = btrfs_sb(s);
+ error = setup_security_options(fs_info, s, &new_sec_opts);
+ if (error) {
+ dput(root);
deactivate_locked_super(s);
+ goto error_sec_opts;
+ }
return root;
@@ -1315,6 +1387,8 @@ error_close_devices:
btrfs_close_devices(fs_devices);
error_fs_info:
free_fs_info(fs_info);
+error_sec_opts:
+ security_free_mnt_opts(&new_sec_opts);
return ERR_PTR(error);
}
@@ -1396,6 +1470,21 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
sync_filesystem(sb);
btrfs_remount_prepare(fs_info);
+ if (data) {
+ struct security_mnt_opts new_sec_opts;
+
+ security_init_mnt_opts(&new_sec_opts);
+ ret = parse_security_options(data, &new_sec_opts);
+ if (ret)
+ goto restore;
+ ret = setup_security_options(fs_info, sb,
+ &new_sec_opts);
+ if (ret) {
+ security_free_mnt_opts(&new_sec_opts);
+ goto restore;
+ }
+ }
+
ret = btrfs_parse_options(root, data);
if (ret) {
ret = -EINVAL;
@@ -1694,7 +1783,11 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
int ret;
- /* holding chunk_muext to avoid allocating new chunks */
+ /*
+	 * holding chunk_mutex to avoid allocating new chunks, holding
+ * device_list_mutex to avoid the device being removed
+ */
+ mutex_lock(&fs_info->fs_devices->device_list_mutex);
mutex_lock(&fs_info->chunk_mutex);
rcu_read_lock();
list_for_each_entry_rcu(found, head, list) {
@@ -1735,11 +1828,13 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
ret = btrfs_calc_avail_data_space(fs_info->tree_root, &total_free_data);
if (ret) {
mutex_unlock(&fs_info->chunk_mutex);
+ mutex_unlock(&fs_info->fs_devices->device_list_mutex);
return ret;
}
buf->f_bavail += div_u64(total_free_data, factor);
buf->f_bavail = buf->f_bavail >> bits;
mutex_unlock(&fs_info->chunk_mutex);
+ mutex_unlock(&fs_info->fs_devices->device_list_mutex);
buf->f_type = BTRFS_SUPER_MAGIC;
buf->f_bsize = dentry->d_sb->s_blocksize;
@@ -1769,7 +1864,7 @@ static struct file_system_type btrfs_fs_type = {
.name = "btrfs",
.mount = btrfs_mount,
.kill_sb = btrfs_kill_super,
- .fs_flags = FS_REQUIRES_DEV,
+ .fs_flags = FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA,
};
MODULE_ALIAS_FS("btrfs");
@@ -1993,11 +2088,15 @@ static int __init init_btrfs_fs(void)
err = btrfs_prelim_ref_init();
if (err)
+ goto free_delayed_ref;
+
+ err = btrfs_end_io_wq_init();
+ if (err)
goto free_prelim_ref;
err = btrfs_interface_init();
if (err)
- goto free_delayed_ref;
+ goto free_end_io_wq;
btrfs_init_lockdep();
@@ -2015,6 +2114,8 @@ static int __init init_btrfs_fs(void)
unregister_ioctl:
btrfs_interface_exit();
+free_end_io_wq:
+ btrfs_end_io_wq_exit();
free_prelim_ref:
btrfs_prelim_ref_exit();
free_delayed_ref:
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index 12e53556e214..b2e7bb4393f6 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -242,7 +242,7 @@ static ssize_t global_rsv_size_show(struct kobject *kobj,
struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
return btrfs_show_u64(&block_rsv->size, &block_rsv->lock, buf);
}
-BTRFS_ATTR(global_rsv_size, 0444, global_rsv_size_show);
+BTRFS_ATTR(global_rsv_size, global_rsv_size_show);
static ssize_t global_rsv_reserved_show(struct kobject *kobj,
struct kobj_attribute *a, char *buf)
@@ -251,7 +251,7 @@ static ssize_t global_rsv_reserved_show(struct kobject *kobj,
struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
return btrfs_show_u64(&block_rsv->reserved, &block_rsv->lock, buf);
}
-BTRFS_ATTR(global_rsv_reserved, 0444, global_rsv_reserved_show);
+BTRFS_ATTR(global_rsv_reserved, global_rsv_reserved_show);
#define to_space_info(_kobj) container_of(_kobj, struct btrfs_space_info, kobj)
#define to_raid_kobj(_kobj) container_of(_kobj, struct raid_kobject, kobj)
@@ -306,7 +306,7 @@ static ssize_t btrfs_space_info_show_##field(struct kobject *kobj, \
struct btrfs_space_info *sinfo = to_space_info(kobj); \
return btrfs_show_u64(&sinfo->field, &sinfo->lock, buf); \
} \
-BTRFS_ATTR(field, 0444, btrfs_space_info_show_##field)
+BTRFS_ATTR(field, btrfs_space_info_show_##field)
static ssize_t btrfs_space_info_show_total_bytes_pinned(struct kobject *kobj,
struct kobj_attribute *a,
@@ -325,7 +325,7 @@ SPACE_INFO_ATTR(bytes_reserved);
SPACE_INFO_ATTR(bytes_may_use);
SPACE_INFO_ATTR(disk_used);
SPACE_INFO_ATTR(disk_total);
-BTRFS_ATTR(total_bytes_pinned, 0444, btrfs_space_info_show_total_bytes_pinned);
+BTRFS_ATTR(total_bytes_pinned, btrfs_space_info_show_total_bytes_pinned);
static struct attribute *space_info_attrs[] = {
BTRFS_ATTR_PTR(flags),
@@ -363,7 +363,8 @@ static ssize_t btrfs_label_show(struct kobject *kobj,
struct kobj_attribute *a, char *buf)
{
struct btrfs_fs_info *fs_info = to_fs_info(kobj);
- return snprintf(buf, PAGE_SIZE, "%s\n", fs_info->super_copy->label);
+ char *label = fs_info->super_copy->label;
+ return snprintf(buf, PAGE_SIZE, label[0] ? "%s\n" : "%s", label);
}
static ssize_t btrfs_label_store(struct kobject *kobj,
@@ -374,8 +375,18 @@ static ssize_t btrfs_label_store(struct kobject *kobj,
struct btrfs_trans_handle *trans;
struct btrfs_root *root = fs_info->fs_root;
int ret;
+ size_t p_len;
- if (len >= BTRFS_LABEL_SIZE)
+ if (fs_info->sb->s_flags & MS_RDONLY)
+ return -EROFS;
+
+ /*
+	 * p_len is the length up to the first occurrence of either
+	 * '\n' or '\0'.
+ */
+ p_len = strcspn(buf, "\n");
+
+ if (p_len >= BTRFS_LABEL_SIZE)
return -EINVAL;
trans = btrfs_start_transaction(root, 0);
@@ -383,7 +394,8 @@ static ssize_t btrfs_label_store(struct kobject *kobj,
return PTR_ERR(trans);
spin_lock(&root->fs_info->super_lock);
- strcpy(fs_info->super_copy->label, buf);
+ memset(fs_info->super_copy->label, 0, BTRFS_LABEL_SIZE);
+ memcpy(fs_info->super_copy->label, buf, p_len);
spin_unlock(&root->fs_info->super_lock);
ret = btrfs_commit_transaction(trans, root);
@@ -392,14 +404,7 @@ static ssize_t btrfs_label_store(struct kobject *kobj,
return ret;
}
-BTRFS_ATTR_RW(label, 0644, btrfs_label_show, btrfs_label_store);
-
-static ssize_t btrfs_no_store(struct kobject *kobj,
- struct kobj_attribute *a,
- const char *buf, size_t len)
-{
- return -EPERM;
-}
+BTRFS_ATTR_RW(label, btrfs_label_show, btrfs_label_store);
static ssize_t btrfs_nodesize_show(struct kobject *kobj,
struct kobj_attribute *a, char *buf)
@@ -409,7 +414,7 @@ static ssize_t btrfs_nodesize_show(struct kobject *kobj,
return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->super_copy->nodesize);
}
-BTRFS_ATTR_RW(nodesize, 0444, btrfs_nodesize_show, btrfs_no_store);
+BTRFS_ATTR(nodesize, btrfs_nodesize_show);
static ssize_t btrfs_sectorsize_show(struct kobject *kobj,
struct kobj_attribute *a, char *buf)
@@ -419,7 +424,7 @@ static ssize_t btrfs_sectorsize_show(struct kobject *kobj,
return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->super_copy->sectorsize);
}
-BTRFS_ATTR_RW(sectorsize, 0444, btrfs_sectorsize_show, btrfs_no_store);
+BTRFS_ATTR(sectorsize, btrfs_sectorsize_show);
static ssize_t btrfs_clone_alignment_show(struct kobject *kobj,
struct kobj_attribute *a, char *buf)
@@ -429,7 +434,7 @@ static ssize_t btrfs_clone_alignment_show(struct kobject *kobj,
return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->super_copy->sectorsize);
}
-BTRFS_ATTR_RW(clone_alignment, 0444, btrfs_clone_alignment_show, btrfs_no_store);
+BTRFS_ATTR(clone_alignment, btrfs_clone_alignment_show);
static struct attribute *btrfs_attrs[] = {
BTRFS_ATTR_PTR(label),
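
The btrfs_label_store() hunk above rejects writes on read-only mounts, uses strcspn() so a trailing newline from echo is accepted, and zero-fills the superblock field before copying so stale bytes from a longer previous label are cleared. A rough userspace sketch of that truncate-and-clear logic (illustrative only; the global buffer stands in for the superblock field, and BTRFS_LABEL_SIZE matches the 256-byte on-disk field):

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define BTRFS_LABEL_SIZE 256	/* size of the superblock label field */

static char super_label[BTRFS_LABEL_SIZE];

/* Illustrative analogue of btrfs_label_store(): trim at '\n', bound-check,
 * then clear the whole field before copying the new value in. */
static int label_store(const char *buf)
{
	size_t p_len = strcspn(buf, "\n");	/* length up to '\n' or '\0' */

	if (p_len >= BTRFS_LABEL_SIZE)
		return -EINVAL;

	memset(super_label, 0, BTRFS_LABEL_SIZE);
	memcpy(super_label, buf, p_len);
	return 0;
}

int main(void)
{
	label_store("mydata\n");		/* echo appends the newline */
	printf("label = \"%s\"\n", super_label);
	return 0;
}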
diff --git a/fs/btrfs/sysfs.h b/fs/btrfs/sysfs.h
index ac46df37504c..f7dd298b3cf6 100644
--- a/fs/btrfs/sysfs.h
+++ b/fs/btrfs/sysfs.h
@@ -20,16 +20,20 @@ enum btrfs_feature_set {
.store = _store, \
}
-#define BTRFS_ATTR_RW(_name, _mode, _show, _store) \
-static struct kobj_attribute btrfs_attr_##_name = \
- __INIT_KOBJ_ATTR(_name, _mode, _show, _store)
-#define BTRFS_ATTR(_name, _mode, _show) \
- BTRFS_ATTR_RW(_name, _mode, _show, NULL)
+#define BTRFS_ATTR_RW(_name, _show, _store) \
+ static struct kobj_attribute btrfs_attr_##_name = \
+ __INIT_KOBJ_ATTR(_name, 0644, _show, _store)
+
+#define BTRFS_ATTR(_name, _show) \
+ static struct kobj_attribute btrfs_attr_##_name = \
+ __INIT_KOBJ_ATTR(_name, 0444, _show, NULL)
+
#define BTRFS_ATTR_PTR(_name) (&btrfs_attr_##_name.attr)
#define BTRFS_RAID_ATTR(_name, _show) \
-static struct kobj_attribute btrfs_raid_attr_##_name = \
+ static struct kobj_attribute btrfs_raid_attr_##_name = \
__INIT_KOBJ_ATTR(_name, 0444, _show, NULL)
+
#define BTRFS_RAID_ATTR_PTR(_name) (&btrfs_raid_attr_##_name.attr)
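
The sysfs.h rework above drops the explicit mode argument and bakes 0444 into BTRFS_ATTR and 0644 into BTRFS_ATTR_RW, so a call site can no longer pass a mode that disagrees with whether a store handler exists. A small userspace sketch of the same "fix the policy inside the macro" idea, using a hypothetical struct attr_like in place of kobj_attribute:

#include <stdio.h>

/* Hypothetical stand-in for struct kobj_attribute. */
struct attr_like {
	const char *name;
	unsigned int mode;
};

/* Read-only and read-write variants fix the mode, so callers can't get it
 * wrong the way the old BTRFS_ATTR(_name, _mode, _show) form allowed. */
#define ATTR_RO(_name) \
	static struct attr_like attr_##_name = { #_name, 0444 }
#define ATTR_RW(_name) \
	static struct attr_like attr_##_name = { #_name, 0644 }

ATTR_RO(nodesize);
ATTR_RW(label);

int main(void)
{
	printf("%s -> %o\n", attr_nodesize.name, attr_nodesize.mode);
	printf("%s -> %o\n", attr_label.name, attr_label.mode);
	return 0;
}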
diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
index c8d9ddf84c69..2299bfde39ee 100644
--- a/fs/btrfs/tests/free-space-tests.c
+++ b/fs/btrfs/tests/free-space-tests.c
@@ -40,11 +40,12 @@ static struct btrfs_block_group_cache *init_test_block_group(void)
cache->key.offset = 1024 * 1024 * 1024;
cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
cache->sectorsize = 4096;
+ cache->full_stripe_len = 4096;
spin_lock_init(&cache->lock);
INIT_LIST_HEAD(&cache->list);
INIT_LIST_HEAD(&cache->cluster_list);
- INIT_LIST_HEAD(&cache->new_bg_list);
+ INIT_LIST_HEAD(&cache->bg_list);
btrfs_init_free_space_ctl(cache);
@@ -364,6 +365,517 @@ static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache)
return 0;
}
+/* Used by test_steal_space_from_bitmap_to_extent(). */
+static bool test_use_bitmap(struct btrfs_free_space_ctl *ctl,
+ struct btrfs_free_space *info)
+{
+ return ctl->free_extents > 0;
+}
+
+/* Used by test_steal_space_from_bitmap_to_extent(). */
+static int
+check_num_extents_and_bitmaps(const struct btrfs_block_group_cache *cache,
+ const int num_extents,
+ const int num_bitmaps)
+{
+ if (cache->free_space_ctl->free_extents != num_extents) {
+ test_msg("Incorrect # of extent entries in the cache: %d, expected %d\n",
+ cache->free_space_ctl->free_extents, num_extents);
+ return -EINVAL;
+ }
+ if (cache->free_space_ctl->total_bitmaps != num_bitmaps) {
+ test_msg("Incorrect # of extent entries in the cache: %d, expected %d\n",
+ cache->free_space_ctl->total_bitmaps, num_bitmaps);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* Used by test_steal_space_from_bitmap_to_extent(). */
+static int check_cache_empty(struct btrfs_block_group_cache *cache)
+{
+ u64 offset;
+ u64 max_extent_size;
+
+ /*
+ * Now let's confirm that there's absolutely no free space left to
+ * allocate.
+ */
+ if (cache->free_space_ctl->free_space != 0) {
+ test_msg("Cache free space is not 0\n");
+ return -EINVAL;
+ }
+
+ /* And any allocation request, no matter how small, should fail now. */
+ offset = btrfs_find_space_for_alloc(cache, 0, 4096, 0,
+ &max_extent_size);
+ if (offset != 0) {
+ test_msg("Space allocation did not fail, returned offset: %llu",
+ offset);
+ return -EINVAL;
+ }
+
+ /* And no extent nor bitmap entries in the cache anymore. */
+ return check_num_extents_and_bitmaps(cache, 0, 0);
+}
+
+/*
+ * Before we were able to steal free space from a bitmap entry to an extent
+ * entry, we could end up with 2 entries representing a contiguous free space.
+ * One would be an extent entry and the other a bitmap entry. Since in order
+ * to allocate space to a caller we use only 1 entry, we couldn't return that
+ * whole range to the caller if it was requested. This forced the caller to
+ * either assume ENOSPC or perform several smaller space allocations, which
+ * wasn't optimal as they could be spread all over the block group while under
+ * concurrency (extra overhead and fragmentation).
+ *
+ * This stealing approach is beneficial, since we always prefer to allocate from
+ * extent entries, both for clustered and non-clustered allocation requests.
+ */
+static int
+test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
+{
+ int ret;
+ u64 offset;
+ u64 max_extent_size;
+
+ bool (*use_bitmap_op)(struct btrfs_free_space_ctl *,
+ struct btrfs_free_space *);
+
+ test_msg("Running space stealing from bitmap to extent\n");
+
+ /*
+ * For this test, we want to ensure we end up with an extent entry
+ * immediately adjacent to a bitmap entry, where the bitmap starts
+ * at an offset where the extent entry ends. We keep adding and
+ * removing free space to reach into this state, but to get there
+ * we need to reach a point where marking new free space doesn't
+ * result in adding new extent entries or merging the new space
+ * with existing extent entries - the space ends up being marked
+ * in an existing bitmap that covers the new free space range.
+ *
+ * To get there, we need to reach the threshold defined at
+ * cache->free_space_ctl->extents_thresh, which currently is
+ * 256 extents on an x86_64 system at least, and a few other
+ * conditions (check free_space_cache.c). Instead of making the
+ * test much longer and complicated, use a "use_bitmap" operation
+ * that forces use of bitmaps as soon as we have at least 1
+ * extent entry.
+ */
+ use_bitmap_op = cache->free_space_ctl->op->use_bitmap;
+ cache->free_space_ctl->op->use_bitmap = test_use_bitmap;
+
+ /*
+ * Extent entry covering free space range [128Mb - 256Kb, 128Mb - 128Kb[
+ */
+ ret = test_add_free_space_entry(cache, 128 * 1024 * 1024 - 256 * 1024,
+ 128 * 1024, 0);
+ if (ret) {
+ test_msg("Couldn't add extent entry %d\n", ret);
+ return ret;
+ }
+
+ /* Bitmap entry covering free space range [128Mb + 512Kb, 256Mb[ */
+ ret = test_add_free_space_entry(cache, 128 * 1024 * 1024 + 512 * 1024,
+ 128 * 1024 * 1024 - 512 * 1024, 1);
+ if (ret) {
+ test_msg("Couldn't add bitmap entry %d\n", ret);
+ return ret;
+ }
+
+ ret = check_num_extents_and_bitmaps(cache, 2, 1);
+ if (ret)
+ return ret;
+
+ /*
+ * Now make only the first 256Kb of the bitmap marked as free, so that
+ * we end up with only the following ranges marked as free space:
+ *
+ * [128Mb - 256Kb, 128Mb - 128Kb[
+ * [128Mb + 512Kb, 128Mb + 768Kb[
+ */
+ ret = btrfs_remove_free_space(cache,
+ 128 * 1024 * 1024 + 768 * 1024,
+ 128 * 1024 * 1024 - 768 * 1024);
+ if (ret) {
+ test_msg("Failed to free part of bitmap space %d\n", ret);
+ return ret;
+ }
+
+ /* Confirm that only those 2 ranges are marked as free. */
+ if (!test_check_exists(cache, 128 * 1024 * 1024 - 256 * 1024,
+ 128 * 1024)) {
+ test_msg("Free space range missing\n");
+ return -ENOENT;
+ }
+ if (!test_check_exists(cache, 128 * 1024 * 1024 + 512 * 1024,
+ 256 * 1024)) {
+ test_msg("Free space range missing\n");
+ return -ENOENT;
+ }
+
+ /*
+ * Confirm that the bitmap range [128Mb + 768Kb, 256Mb[ isn't marked
+ * as free anymore.
+ */
+ if (test_check_exists(cache, 128 * 1024 * 1024 + 768 * 1024,
+ 128 * 1024 * 1024 - 768 * 1024)) {
+ test_msg("Bitmap region not removed from space cache\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Confirm that the region [128Mb + 256Kb, 128Mb + 512Kb[, which is
+ * covered by the bitmap, isn't marked as free.
+ */
+ if (test_check_exists(cache, 128 * 1024 * 1024 + 256 * 1024,
+ 256 * 1024)) {
+ test_msg("Invalid bitmap region marked as free\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Confirm that the region [128Mb, 128Mb + 256Kb[, which is covered
+ * by the bitmap too, isn't marked as free either.
+ */
+ if (test_check_exists(cache, 128 * 1024 * 1024,
+ 256 * 1024)) {
+ test_msg("Invalid bitmap region marked as free\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Now let's mark the region [128Mb, 128Mb + 512Kb[ as free too. But
+ * let's make sure the free space cache marks it as free in the bitmap,
+ * and doesn't insert a new extent entry to represent this region.
+ */
+ ret = btrfs_add_free_space(cache, 128 * 1024 * 1024, 512 * 1024);
+ if (ret) {
+ test_msg("Error adding free space: %d\n", ret);
+ return ret;
+ }
+ /* Confirm the region is marked as free. */
+ if (!test_check_exists(cache, 128 * 1024 * 1024, 512 * 1024)) {
+ test_msg("Bitmap region not marked as free\n");
+ return -ENOENT;
+ }
+
+ /*
+ * Confirm that no new extent entries or bitmap entries were added to
+ * the cache after adding that free space region.
+ */
+ ret = check_num_extents_and_bitmaps(cache, 2, 1);
+ if (ret)
+ return ret;
+
+ /*
+ * Now let's add a small free space region to the right of the previous
+ * one, which is not contiguous with it and is part of the bitmap too.
+ * The goal is to test that the bitmap entry space stealing doesn't
+ * steal this space region.
+ */
+ ret = btrfs_add_free_space(cache, 128 * 1024 * 1024 + 16 * 1024 * 1024,
+ 4096);
+ if (ret) {
+ test_msg("Error adding free space: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Confirm that no new extent entries or bitmap entries were added to
+ * the cache after adding that free space region.
+ */
+ ret = check_num_extents_and_bitmaps(cache, 2, 1);
+ if (ret)
+ return ret;
+
+ /*
+ * Now mark the region [128Mb - 128Kb, 128Mb[ as free too. This will
+ * expand the range covered by the existing extent entry that represents
+ * the free space [128Mb - 256Kb, 128Mb - 128Kb[.
+ */
+ ret = btrfs_add_free_space(cache, 128 * 1024 * 1024 - 128 * 1024,
+ 128 * 1024);
+ if (ret) {
+ test_msg("Error adding free space: %d\n", ret);
+ return ret;
+ }
+ /* Confirm the region is marked as free. */
+ if (!test_check_exists(cache, 128 * 1024 * 1024 - 128 * 1024,
+ 128 * 1024)) {
+ test_msg("Extent region not marked as free\n");
+ return -ENOENT;
+ }
+
+ /*
+ * Confirm that our extent entry didn't steal all free space from the
+ * bitmap, because of the small 4Kb free space region.
+ */
+ ret = check_num_extents_and_bitmaps(cache, 2, 1);
+ if (ret)
+ return ret;
+
+ /*
+ * So now we have the range [128Mb - 256Kb, 128Mb + 768Kb[ as free
+ * space. Without stealing bitmap free space into extent entry space,
+ * we would have all this free space represented by 2 entries in the
+ * cache:
+ *
+ * extent entry covering range: [128Mb - 256Kb, 128Mb[
+ * bitmap entry covering range: [128Mb, 128Mb + 768Kb[
+ *
+ * Attempting to allocate the whole free space (1Mb) would fail, because
+ * we can't allocate from multiple entries.
+ * With the bitmap free space stealing, we get a single extent entry
+ * that represents the 1Mb free space, and therefore we're able to
+ * allocate the whole free space at once.
+ */
+ if (!test_check_exists(cache, 128 * 1024 * 1024 - 256 * 1024,
+ 1 * 1024 * 1024)) {
+ test_msg("Expected region not marked as free\n");
+ return -ENOENT;
+ }
+
+ if (cache->free_space_ctl->free_space != (1 * 1024 * 1024 + 4096)) {
+ test_msg("Cache free space is not 1Mb + 4Kb\n");
+ return -EINVAL;
+ }
+
+ offset = btrfs_find_space_for_alloc(cache,
+ 0, 1 * 1024 * 1024, 0,
+ &max_extent_size);
+ if (offset != (128 * 1024 * 1024 - 256 * 1024)) {
+ test_msg("Failed to allocate 1Mb from space cache, returned offset is: %llu\n",
+ offset);
+ return -EINVAL;
+ }
+
+ /* All that remains is a 4Kb free space region in a bitmap. Confirm. */
+ ret = check_num_extents_and_bitmaps(cache, 1, 1);
+ if (ret)
+ return ret;
+
+ if (cache->free_space_ctl->free_space != 4096) {
+ test_msg("Cache free space is not 4Kb\n");
+ return -EINVAL;
+ }
+
+ offset = btrfs_find_space_for_alloc(cache,
+ 0, 4096, 0,
+ &max_extent_size);
+ if (offset != (128 * 1024 * 1024 + 16 * 1024 * 1024)) {
+ test_msg("Failed to allocate 4Kb from space cache, returned offset is: %llu\n",
+ offset);
+ return -EINVAL;
+ }
+
+ ret = check_cache_empty(cache);
+ if (ret)
+ return ret;
+
+ __btrfs_remove_free_space_cache(cache->free_space_ctl);
+
+ /*
+ * Now test a similar scenario, but where our extent entry is located
+ * to the right of the bitmap entry, so that we can check that stealing
+ * space from a bitmap to the front of an extent entry works.
+ */
+
+ /*
+ * Extent entry covering free space range [128Mb + 128Kb, 128Mb + 256Kb[
+ */
+ ret = test_add_free_space_entry(cache, 128 * 1024 * 1024 + 128 * 1024,
+ 128 * 1024, 0);
+ if (ret) {
+ test_msg("Couldn't add extent entry %d\n", ret);
+ return ret;
+ }
+
+ /* Bitmap entry covering free space range [0, 128Mb - 512Kb[ */
+ ret = test_add_free_space_entry(cache, 0,
+ 128 * 1024 * 1024 - 512 * 1024, 1);
+ if (ret) {
+ test_msg("Couldn't add bitmap entry %d\n", ret);
+ return ret;
+ }
+
+ ret = check_num_extents_and_bitmaps(cache, 2, 1);
+ if (ret)
+ return ret;
+
+ /*
+ * Now make only the last 256Kb of the bitmap marked as free, so that
+ * we end up with only the following ranges marked as free space:
+ *
+ * [128Mb + 128Kb, 128Mb + 256Kb[
+ * [128Mb - 768Kb, 128Mb - 512Kb[
+ */
+ ret = btrfs_remove_free_space(cache,
+ 0,
+ 128 * 1024 * 1024 - 768 * 1024);
+ if (ret) {
+ test_msg("Failed to free part of bitmap space %d\n", ret);
+ return ret;
+ }
+
+ /* Confirm that only those 2 ranges are marked as free. */
+ if (!test_check_exists(cache, 128 * 1024 * 1024 + 128 * 1024,
+ 128 * 1024)) {
+ test_msg("Free space range missing\n");
+ return -ENOENT;
+ }
+ if (!test_check_exists(cache, 128 * 1024 * 1024 - 768 * 1024,
+ 256 * 1024)) {
+ test_msg("Free space range missing\n");
+ return -ENOENT;
+ }
+
+ /*
+ * Confirm that the bitmap range [0, 128Mb - 768Kb[ isn't marked
+ * as free anymore.
+ */
+ if (test_check_exists(cache, 0,
+ 128 * 1024 * 1024 - 768 * 1024)) {
+ test_msg("Bitmap region not removed from space cache\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Confirm that the region [128Mb - 512Kb, 128Mb[, which is
+ * covered by the bitmap, isn't marked as free.
+ */
+ if (test_check_exists(cache, 128 * 1024 * 1024 - 512 * 1024,
+ 512 * 1024)) {
+ test_msg("Invalid bitmap region marked as free\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Now let's mark the region [128Mb - 512Kb, 128Mb[ as free too. But
+ * let's make sure the free space cache marks it as free in the bitmap,
+ * and doesn't insert a new extent entry to represent this region.
+ */
+ ret = btrfs_add_free_space(cache, 128 * 1024 * 1024 - 512 * 1024,
+ 512 * 1024);
+ if (ret) {
+ test_msg("Error adding free space: %d\n", ret);
+ return ret;
+ }
+ /* Confirm the region is marked as free. */
+ if (!test_check_exists(cache, 128 * 1024 * 1024 - 512 * 1024,
+ 512 * 1024)) {
+ test_msg("Bitmap region not marked as free\n");
+ return -ENOENT;
+ }
+
+ /*
+ * Confirm that no new extent entries or bitmap entries were added to
+ * the cache after adding that free space region.
+ */
+ ret = check_num_extents_and_bitmaps(cache, 2, 1);
+ if (ret)
+ return ret;
+
+ /*
+ * Now let's add a small free space region to the left of the previous
+ * one, which is not contiguous with it and is part of the bitmap too.
+ * The goal is to test that the bitmap entry space stealing doesn't
+ * steal this space region.
+ */
+ ret = btrfs_add_free_space(cache, 32 * 1024 * 1024, 8192);
+ if (ret) {
+ test_msg("Error adding free space: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Now mark the region [128Mb, 128Mb + 128Kb[ as free too. This will
+ * expand the range covered by the existing extent entry that represents
+ * the free space [128Mb + 128Kb, 128Mb + 256Kb[.
+ */
+ ret = btrfs_add_free_space(cache, 128 * 1024 * 1024, 128 * 1024);
+ if (ret) {
+ test_msg("Error adding free space: %d\n", ret);
+ return ret;
+ }
+ /* Confirm the region is marked as free. */
+ if (!test_check_exists(cache, 128 * 1024 * 1024, 128 * 1024)) {
+ test_msg("Extent region not marked as free\n");
+ return -ENOENT;
+ }
+
+ /*
+ * Confirm that our extent entry didn't steal all free space from the
+ * bitmap, because of the small 8Kb free space region.
+ */
+ ret = check_num_extents_and_bitmaps(cache, 2, 1);
+ if (ret)
+ return ret;
+
+ /*
+ * So now we have the range [128Mb - 768Kb, 128Mb + 256Kb[ as free
+ * space. Without stealing bitmap free space into extent entry space,
+ * we would have all this free space represented by 2 entries in the
+ * cache:
+ *
+ * extent entry covering range: [128Mb, 128Mb + 256Kb[
+ * bitmap entry covering range: [128Mb - 768Kb, 128Mb[
+ *
+ * Attempting to allocate the whole free space (1Mb) would fail, because
+ * we can't allocate from multiple entries.
+ * With the bitmap free space stealing, we get a single extent entry
+ * that represents the 1Mb free space, and therefore we're able to
+ * allocate the whole free space at once.
+ */
+ if (!test_check_exists(cache, 128 * 1024 * 1024 - 768 * 1024,
+ 1 * 1024 * 1024)) {
+ test_msg("Expected region not marked as free\n");
+ return -ENOENT;
+ }
+
+ if (cache->free_space_ctl->free_space != (1 * 1024 * 1024 + 8192)) {
+ test_msg("Cache free space is not 1Mb + 8Kb\n");
+ return -EINVAL;
+ }
+
+ offset = btrfs_find_space_for_alloc(cache,
+ 0, 1 * 1024 * 1024, 0,
+ &max_extent_size);
+ if (offset != (128 * 1024 * 1024 - 768 * 1024)) {
+ test_msg("Failed to allocate 1Mb from space cache, returned offset is: %llu\n",
+ offset);
+ return -EINVAL;
+ }
+
+ /* All that remains is an 8Kb free space region in a bitmap. Confirm. */
+ ret = check_num_extents_and_bitmaps(cache, 1, 1);
+ if (ret)
+ return ret;
+
+ if (cache->free_space_ctl->free_space != 8192) {
+ test_msg("Cache free space is not 8Kb\n");
+ return -EINVAL;
+ }
+
+ offset = btrfs_find_space_for_alloc(cache,
+ 0, 8192, 0,
+ &max_extent_size);
+ if (offset != (32 * 1024 * 1024)) {
+ test_msg("Failed to allocate 8Kb from space cache, returned offset is: %llu\n",
+ offset);
+ return -EINVAL;
+ }
+
+ ret = check_cache_empty(cache);
+ if (ret)
+ return ret;
+
+ cache->free_space_ctl->op->use_bitmap = use_bitmap_op;
+ __btrfs_remove_free_space_cache(cache->free_space_ctl);
+
+ return 0;
+}
+
int btrfs_test_free_space_cache(void)
{
struct btrfs_block_group_cache *cache;
@@ -386,6 +898,8 @@ int btrfs_test_free_space_cache(void)
ret = test_bitmaps_and_extents(cache);
if (ret)
goto out;
+
+ ret = test_steal_space_from_bitmap_to_extent(cache);
out:
__btrfs_remove_free_space_cache(cache->free_space_ctl);
kfree(cache->free_space_ctl);
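
The new free-space test above avoids walking all the way to the real extents_thresh by temporarily swapping the ctl's use_bitmap callback for test_use_bitmap, then restoring the saved pointer before returning. A small self-contained sketch of that save/override/restore technique on an ops table (illustrative only; struct ops here is a made-up analogue of btrfs_free_space_op):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical ops table, loosely modelled on btrfs_free_space_op. */
struct ops {
	bool (*use_bitmap)(int free_extents);
};

static bool default_use_bitmap(int free_extents)
{
	return free_extents > 256;	/* threshold-style production policy */
}

static bool test_use_bitmap(int free_extents)
{
	return free_extents > 0;	/* force bitmaps almost immediately */
}

static struct ops ctl_ops = { .use_bitmap = default_use_bitmap };

int main(void)
{
	/* Save the original callback and override it for the test... */
	bool (*saved)(int) = ctl_ops.use_bitmap;
	ctl_ops.use_bitmap = test_use_bitmap;

	printf("with override: %d\n", ctl_ops.use_bitmap(1));

	/* ...and restore it before leaving, as the btrfs test does. */
	ctl_ops.use_bitmap = saved;
	printf("restored:      %d\n", ctl_ops.use_bitmap(1));
	return 0;
}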
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index d89c6d3542ca..dcaae3616728 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -386,7 +386,7 @@ start_transaction(struct btrfs_root *root, u64 num_items, unsigned int type,
int ret;
/* Send isn't supposed to start transactions. */
- ASSERT(current->journal_info != (void *)BTRFS_SEND_TRANS_STUB);
+ ASSERT(current->journal_info != BTRFS_SEND_TRANS_STUB);
if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
return ERR_PTR(-EROFS);
@@ -408,7 +408,7 @@ start_transaction(struct btrfs_root *root, u64 num_items, unsigned int type,
if (num_items > 0 && root != root->fs_info->chunk_root) {
if (root->fs_info->quota_enabled &&
is_fstree(root->root_key.objectid)) {
- qgroup_reserved = num_items * root->leafsize;
+ qgroup_reserved = num_items * root->nodesize;
ret = btrfs_qgroup_reserve(root, qgroup_reserved);
if (ret)
return ERR_PTR(ret);
@@ -418,7 +418,7 @@ start_transaction(struct btrfs_root *root, u64 num_items, unsigned int type,
/*
* Do the reservation for the relocation root creation
*/
- if (unlikely(need_reserve_reloc_root(root))) {
+ if (need_reserve_reloc_root(root)) {
num_bytes += root->nodesize;
reloc_reserved = true;
}
@@ -609,7 +609,6 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
if (transid <= root->fs_info->last_trans_committed)
goto out;
- ret = -EINVAL;
/* find specified transaction */
spin_lock(&root->fs_info->trans_lock);
list_for_each_entry(t, &root->fs_info->trans_list, list) {
@@ -625,9 +624,16 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
}
}
spin_unlock(&root->fs_info->trans_lock);
- /* The specified transaction doesn't exist */
- if (!cur_trans)
+
+ /*
+ * The specified transaction doesn't exist, or we
+ * raced with btrfs_commit_transaction
+ */
+ if (!cur_trans) {
+ if (transid > root->fs_info->last_trans_committed)
+ ret = -EINVAL;
goto out;
+ }
} else {
/* find newest transaction that is committing | committed */
spin_lock(&root->fs_info->trans_lock);
@@ -851,6 +857,8 @@ int btrfs_wait_marked_extents(struct btrfs_root *root,
struct extent_state *cached_state = NULL;
u64 start = 0;
u64 end;
+ struct btrfs_inode *btree_ino = BTRFS_I(root->fs_info->btree_inode);
+ bool errors = false;
while (!find_first_extent_bit(dirty_pages, start, &start, &end,
EXTENT_NEED_WAIT, &cached_state)) {
@@ -864,6 +872,26 @@ int btrfs_wait_marked_extents(struct btrfs_root *root,
}
if (err)
werr = err;
+
+ if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
+ if ((mark & EXTENT_DIRTY) &&
+ test_and_clear_bit(BTRFS_INODE_BTREE_LOG1_ERR,
+ &btree_ino->runtime_flags))
+ errors = true;
+
+ if ((mark & EXTENT_NEW) &&
+ test_and_clear_bit(BTRFS_INODE_BTREE_LOG2_ERR,
+ &btree_ino->runtime_flags))
+ errors = true;
+ } else {
+ if (test_and_clear_bit(BTRFS_INODE_BTREE_ERR,
+ &btree_ino->runtime_flags))
+ errors = true;
+ }
+
+ if (errors && !werr)
+ werr = -EIO;
+
return werr;
}
@@ -1629,6 +1657,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
{
struct btrfs_transaction *cur_trans = trans->transaction;
struct btrfs_transaction *prev_trans = NULL;
+ struct btrfs_inode *btree_ino = BTRFS_I(root->fs_info->btree_inode);
int ret;
/* Stop the commit early if ->aborted is set */
@@ -1868,6 +1897,12 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
sizeof(*root->fs_info->super_copy));
+ btrfs_update_commit_device_size(root->fs_info);
+ btrfs_update_commit_device_bytes_used(root, cur_trans);
+
+ clear_bit(BTRFS_INODE_BTREE_LOG1_ERR, &btree_ino->runtime_flags);
+ clear_bit(BTRFS_INODE_BTREE_LOG2_ERR, &btree_ino->runtime_flags);
+
spin_lock(&root->fs_info->trans_lock);
cur_trans->state = TRANS_STATE_UNBLOCKED;
root->fs_info->running_transaction = NULL;
@@ -1981,9 +2016,6 @@ int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
ret = btrfs_drop_snapshot(root, NULL, 0, 0);
else
ret = btrfs_drop_snapshot(root, NULL, 1, 0);
- /*
- * If we encounter a transaction abort during snapshot cleaning, we
- * don't want to crash here
- */
+
return (ret < 0) ? 0 : 1;
}
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 579be51b27e5..d8f40e1a5d2d 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -79,7 +79,7 @@ struct btrfs_transaction {
#define TRANS_EXTWRITERS (__TRANS_USERSPACE | __TRANS_START | \
__TRANS_ATTACH)
-#define BTRFS_SEND_TRANS_STUB 1
+#define BTRFS_SEND_TRANS_STUB ((void *)1)
struct btrfs_trans_handle {
u64 transid;
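
Redefining BTRFS_SEND_TRANS_STUB as ((void *)1) lets it be compared directly against current->journal_info without a cast at every use site: it is a sentinel pointer that is only ever compared, never dereferenced. A tiny sketch of that sentinel-pointer idiom (illustrative only; task_like is a made-up stand-in):

#include <stdio.h>

/* Sentinel value stored in a pointer slot; compared, never dereferenced. */
#define SEND_TRANS_STUB ((void *)1)

struct task_like {
	void *journal_info;	/* hypothetical stand-in for current->journal_info */
};

int main(void)
{
	struct task_like task = { .journal_info = SEND_TRANS_STUB };

	/* Defining the stub as a pointer keeps the comparison cast-free. */
	if (task.journal_info == SEND_TRANS_STUB)
		puts("send is not allowed to start a transaction here");
	return 0;
}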
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index d0262ceb85e1..1475979e5718 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -97,7 +97,8 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode,
int inode_only,
const loff_t start,
- const loff_t end);
+ const loff_t end,
+ struct btrfs_log_ctx *ctx);
static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path, u64 objectid);
@@ -1498,7 +1499,7 @@ static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
return -EIO;
key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
- btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
+ key.type = BTRFS_ORPHAN_ITEM_KEY;
key.offset = objectid;
ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
@@ -1637,6 +1638,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
found_key.type == log_key.type &&
found_key.offset == log_key.offset &&
btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
+ update_size = false;
goto out;
}
@@ -2157,7 +2159,7 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
- blocksize = btrfs_level_size(root, *level - 1);
+ blocksize = root->nodesize;
parent = path->nodes[*level];
root_owner = btrfs_header_owner(parent);
@@ -2983,8 +2985,6 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
min_key.type = key_type;
min_key.offset = min_offset;
- path->keep_locks = 1;
-
ret = btrfs_search_forward(root, &min_key, path, trans->transid);
/*
@@ -3364,7 +3364,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
* or deletes of this inode don't have to relog the inode
* again
*/
- if (btrfs_key_type(ins_keys + i) == BTRFS_EXTENT_DATA_KEY &&
+ if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY &&
!skip_csum) {
int found_type;
extent = btrfs_item_ptr(src, start_slot + i,
@@ -3573,107 +3573,33 @@ static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
return 0;
}
-static int log_one_extent(struct btrfs_trans_handle *trans,
- struct inode *inode, struct btrfs_root *root,
- struct extent_map *em, struct btrfs_path *path,
- struct list_head *logged_list)
+static int wait_ordered_extents(struct btrfs_trans_handle *trans,
+ struct inode *inode,
+ struct btrfs_root *root,
+ const struct extent_map *em,
+ const struct list_head *logged_list,
+ bool *ordered_io_error)
{
- struct btrfs_root *log = root->log_root;
- struct btrfs_file_extent_item *fi;
- struct extent_buffer *leaf;
struct btrfs_ordered_extent *ordered;
- struct list_head ordered_sums;
- struct btrfs_map_token token;
- struct btrfs_key key;
+ struct btrfs_root *log = root->log_root;
u64 mod_start = em->mod_start;
u64 mod_len = em->mod_len;
+ const bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
u64 csum_offset;
u64 csum_len;
- u64 extent_offset = em->start - em->orig_start;
- u64 block_len;
- int ret;
- bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
- int extent_inserted = 0;
-
- INIT_LIST_HEAD(&ordered_sums);
- btrfs_init_map_token(&token);
-
- ret = __btrfs_drop_extents(trans, log, inode, path, em->start,
- em->start + em->len, NULL, 0, 1,
- sizeof(*fi), &extent_inserted);
- if (ret)
- return ret;
-
- if (!extent_inserted) {
- key.objectid = btrfs_ino(inode);
- key.type = BTRFS_EXTENT_DATA_KEY;
- key.offset = em->start;
-
- ret = btrfs_insert_empty_item(trans, log, path, &key,
- sizeof(*fi));
- if (ret)
- return ret;
- }
- leaf = path->nodes[0];
- fi = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_file_extent_item);
-
- btrfs_set_token_file_extent_generation(leaf, fi, em->generation,
- &token);
- if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
- skip_csum = true;
- btrfs_set_token_file_extent_type(leaf, fi,
- BTRFS_FILE_EXTENT_PREALLOC,
- &token);
- } else {
- btrfs_set_token_file_extent_type(leaf, fi,
- BTRFS_FILE_EXTENT_REG,
- &token);
- if (em->block_start == EXTENT_MAP_HOLE)
- skip_csum = true;
- }
-
- block_len = max(em->block_len, em->orig_block_len);
- if (em->compress_type != BTRFS_COMPRESS_NONE) {
- btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
- em->block_start,
- &token);
- btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
- &token);
- } else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
- btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
- em->block_start -
- extent_offset, &token);
- btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
- &token);
- } else {
- btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token);
- btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0,
- &token);
- }
-
- btrfs_set_token_file_extent_offset(leaf, fi,
- em->start - em->orig_start,
- &token);
- btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token);
- btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token);
- btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type,
- &token);
- btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token);
- btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token);
- btrfs_mark_buffer_dirty(leaf);
+ LIST_HEAD(ordered_sums);
+ int ret = 0;
- btrfs_release_path(path);
- if (ret) {
- return ret;
- }
+ *ordered_io_error = false;
- if (skip_csum)
+ if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
+ em->block_start == EXTENT_MAP_HOLE)
return 0;
/*
- * First check and see if our csums are on our outstanding ordered
- * extents.
+ * Wait for any ordered extent that covers our extent map. If it
+ * finishes without an error, first check and see if our csums are on
+ * our outstanding ordered extents.
*/
list_for_each_entry(ordered, logged_list, log_list) {
struct btrfs_ordered_sum *sum;
@@ -3685,6 +3611,24 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
mod_start + mod_len <= ordered->file_offset)
continue;
+ if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
+ !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) &&
+ !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
+ const u64 start = ordered->file_offset;
+ const u64 end = ordered->file_offset + ordered->len - 1;
+
+ WARN_ON(ordered->inode != inode);
+ filemap_fdatawrite_range(inode->i_mapping, start, end);
+ }
+
+ wait_event(ordered->wait,
+ (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) ||
+ test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)));
+
+ if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)) {
+ *ordered_io_error = true;
+ break;
+ }
/*
* We are going to copy all the csums on this ordered extent, so
* go ahead and adjust mod_start and mod_len in case this
@@ -3716,6 +3660,9 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
}
}
+ if (skip_csum)
+ continue;
+
/*
* To keep us from looping for the above case of an ordered
* extent that falls inside of the logged extent.
@@ -3733,18 +3680,16 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
list_for_each_entry(sum, &ordered->list, list) {
ret = btrfs_csum_file_blocks(trans, log, sum);
if (ret)
- goto unlocked;
+ break;
}
-
}
-unlocked:
- if (!mod_len || ret)
+ if (*ordered_io_error || !mod_len || ret || skip_csum)
return ret;
if (em->compress_type) {
csum_offset = 0;
- csum_len = block_len;
+ csum_len = max(em->block_len, em->orig_block_len);
} else {
csum_offset = mod_start - em->start;
csum_len = mod_len;
@@ -3771,11 +3716,106 @@ unlocked:
return ret;
}
+static int log_one_extent(struct btrfs_trans_handle *trans,
+ struct inode *inode, struct btrfs_root *root,
+ const struct extent_map *em,
+ struct btrfs_path *path,
+ const struct list_head *logged_list,
+ struct btrfs_log_ctx *ctx)
+{
+ struct btrfs_root *log = root->log_root;
+ struct btrfs_file_extent_item *fi;
+ struct extent_buffer *leaf;
+ struct btrfs_map_token token;
+ struct btrfs_key key;
+ u64 extent_offset = em->start - em->orig_start;
+ u64 block_len;
+ int ret;
+ int extent_inserted = 0;
+ bool ordered_io_err = false;
+
+ ret = wait_ordered_extents(trans, inode, root, em, logged_list,
+ &ordered_io_err);
+ if (ret)
+ return ret;
+
+ if (ordered_io_err) {
+ ctx->io_err = -EIO;
+ return 0;
+ }
+
+ btrfs_init_map_token(&token);
+
+ ret = __btrfs_drop_extents(trans, log, inode, path, em->start,
+ em->start + em->len, NULL, 0, 1,
+ sizeof(*fi), &extent_inserted);
+ if (ret)
+ return ret;
+
+ if (!extent_inserted) {
+ key.objectid = btrfs_ino(inode);
+ key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = em->start;
+
+ ret = btrfs_insert_empty_item(trans, log, path, &key,
+ sizeof(*fi));
+ if (ret)
+ return ret;
+ }
+ leaf = path->nodes[0];
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+
+ btrfs_set_token_file_extent_generation(leaf, fi, em->generation,
+ &token);
+ if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
+ btrfs_set_token_file_extent_type(leaf, fi,
+ BTRFS_FILE_EXTENT_PREALLOC,
+ &token);
+ else
+ btrfs_set_token_file_extent_type(leaf, fi,
+ BTRFS_FILE_EXTENT_REG,
+ &token);
+
+ block_len = max(em->block_len, em->orig_block_len);
+ if (em->compress_type != BTRFS_COMPRESS_NONE) {
+ btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
+ em->block_start,
+ &token);
+ btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
+ &token);
+ } else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
+ btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
+ em->block_start -
+ extent_offset, &token);
+ btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
+ &token);
+ } else {
+ btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token);
+ btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0,
+ &token);
+ }
+
+ btrfs_set_token_file_extent_offset(leaf, fi, extent_offset, &token);
+ btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token);
+ btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token);
+ btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type,
+ &token);
+ btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token);
+ btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token);
+ btrfs_mark_buffer_dirty(leaf);
+
+ btrfs_release_path(path);
+
+ return ret;
+}
+
static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *inode,
struct btrfs_path *path,
- struct list_head *logged_list)
+ struct list_head *logged_list,
+ struct btrfs_log_ctx *ctx)
{
struct extent_map *em, *n;
struct list_head extents;
@@ -3833,7 +3873,8 @@ process:
write_unlock(&tree->lock);
- ret = log_one_extent(trans, inode, root, em, path, logged_list);
+ ret = log_one_extent(trans, inode, root, em, path, logged_list,
+ ctx);
write_lock(&tree->lock);
clear_em_logging(tree, em);
free_extent_map(em);
@@ -3863,7 +3904,8 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode,
int inode_only,
const loff_t start,
- const loff_t end)
+ const loff_t end,
+ struct btrfs_log_ctx *ctx)
{
struct btrfs_path *path;
struct btrfs_path *dst_path;
@@ -3964,7 +4006,6 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
err = ret;
goto out_unlock;
}
- path->keep_locks = 1;
while (1) {
ins_nr = 0;
@@ -4049,7 +4090,7 @@ log_extents:
btrfs_release_path(dst_path);
if (fast_search) {
ret = btrfs_log_changed_extents(trans, root, inode, dst_path,
- &logged_list);
+ &logged_list, ctx);
if (ret) {
err = ret;
goto out_unlock;
@@ -4239,7 +4280,7 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
if (ret)
goto end_no_trans;
- ret = btrfs_log_inode(trans, root, inode, inode_only, start, end);
+ ret = btrfs_log_inode(trans, root, inode, inode_only, start, end, ctx);
if (ret)
goto end_trans;
@@ -4268,7 +4309,7 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
if (BTRFS_I(inode)->generation >
root->fs_info->last_trans_committed) {
ret = btrfs_log_inode(trans, root, inode, inode_only,
- 0, LLONG_MAX);
+ 0, LLONG_MAX, ctx);
if (ret)
goto end_trans;
}
@@ -4360,7 +4401,7 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
again:
key.objectid = BTRFS_TREE_LOG_OBJECTID;
key.offset = (u64)-1;
- btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
+ key.type = BTRFS_ROOT_ITEM_KEY;
while (1) {
ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
index e2e798ae7cd7..154990c26dcb 100644
--- a/fs/btrfs/tree-log.h
+++ b/fs/btrfs/tree-log.h
@@ -28,6 +28,7 @@
struct btrfs_log_ctx {
int log_ret;
int log_transid;
+ int io_err;
struct list_head list;
};
@@ -35,6 +36,7 @@ static inline void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx)
{
ctx->log_ret = 0;
ctx->log_transid = 0;
+ ctx->io_err = 0;
INIT_LIST_HEAD(&ctx->list);
}
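
btrfs_log_ctx gains an io_err field that log_one_extent() sets to -EIO when an ordered extent completes with an I/O error, so the logging walk can keep returning 0 for its own bookkeeping while the fsync caller still sees the failure. A short sketch of that "side-channel error in a context struct" pattern (illustrative only; the names below are simplified analogues, not the kernel functions):

#include <errno.h>
#include <stdio.h>

struct log_ctx {
	int log_ret;
	int io_err;	/* I/O failure reported out-of-band from the return value */
};

static void init_log_ctx(struct log_ctx *ctx)
{
	ctx->log_ret = 0;
	ctx->io_err = 0;
}

/* Analogue of log_one_extent(): an ordered-extent I/O error does not abort
 * the logging walk, it is only recorded in the context. */
static int log_one_extent_like(struct log_ctx *ctx, int ordered_io_err)
{
	if (ordered_io_err) {
		ctx->io_err = -EIO;
		return 0;
	}
	return 0;
}

int main(void)
{
	struct log_ctx ctx;

	init_log_ctx(&ctx);
	log_one_extent_like(&ctx, 1);

	/* The fsync path checks both the return code and ctx.io_err. */
	printf("ret=0, ctx.io_err=%d\n", ctx.io_err);
	return 0;
}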
diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c
index f6a4c03ee7d8..778282944530 100644
--- a/fs/btrfs/uuid-tree.c
+++ b/fs/btrfs/uuid-tree.c
@@ -279,7 +279,6 @@ int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info,
key.offset = 0;
again_search_slot:
- path->keep_locks = 1;
ret = btrfs_search_forward(root, &key, path, 0);
if (ret) {
if (ret > 0)
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 2c2d6d1d8eee..d47289c715c8 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -50,7 +50,7 @@ static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
-static DEFINE_MUTEX(uuid_mutex);
+DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
static void lock_chunks(struct btrfs_root *root)
@@ -74,6 +74,7 @@ static struct btrfs_fs_devices *__alloc_fs_devices(void)
mutex_init(&fs_devs->device_list_mutex);
INIT_LIST_HEAD(&fs_devs->devices);
+ INIT_LIST_HEAD(&fs_devs->resized_devices);
INIT_LIST_HEAD(&fs_devs->alloc_list);
INIT_LIST_HEAD(&fs_devs->list);
@@ -154,11 +155,13 @@ static struct btrfs_device *__alloc_device(void)
INIT_LIST_HEAD(&dev->dev_list);
INIT_LIST_HEAD(&dev->dev_alloc_list);
+ INIT_LIST_HEAD(&dev->resized_list);
spin_lock_init(&dev->io_lock);
spin_lock_init(&dev->reada_lock);
atomic_set(&dev->reada_in_flight, 0);
+ atomic_set(&dev->dev_stats_ccnt, 0);
INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_WAIT);
INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_WAIT);
@@ -474,14 +477,13 @@ static noinline int device_list_add(const char *path,
return PTR_ERR(fs_devices);
list_add(&fs_devices->list, &fs_uuids);
- fs_devices->latest_devid = devid;
- fs_devices->latest_trans = found_transid;
device = NULL;
} else {
device = __find_device(&fs_devices->devices, devid,
disk_super->dev_item.uuid);
}
+
if (!device) {
if (fs_devices->opened)
return -EBUSY;
@@ -565,10 +567,6 @@ static noinline int device_list_add(const char *path,
if (!fs_devices->opened)
device->generation = found_transid;
- if (found_transid > fs_devices->latest_trans) {
- fs_devices->latest_devid = devid;
- fs_devices->latest_trans = found_transid;
- }
*fs_devices_ret = fs_devices;
return ret;
@@ -584,8 +582,7 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
if (IS_ERR(fs_devices))
return fs_devices;
- fs_devices->latest_devid = orig->latest_devid;
- fs_devices->latest_trans = orig->latest_trans;
+ mutex_lock(&orig->device_list_mutex);
fs_devices->total_devices = orig->total_devices;
/* We have held the volume lock, it is safe to get the devices. */
@@ -614,8 +611,10 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
device->fs_devices = fs_devices;
fs_devices->num_devices++;
}
+ mutex_unlock(&orig->device_list_mutex);
return fs_devices;
error:
+ mutex_unlock(&orig->device_list_mutex);
free_fs_devices(fs_devices);
return ERR_PTR(-ENOMEM);
}
@@ -624,10 +623,7 @@ void btrfs_close_extra_devices(struct btrfs_fs_info *fs_info,
struct btrfs_fs_devices *fs_devices, int step)
{
struct btrfs_device *device, *next;
-
- struct block_device *latest_bdev = NULL;
- u64 latest_devid = 0;
- u64 latest_transid = 0;
+ struct btrfs_device *latest_dev = NULL;
mutex_lock(&uuid_mutex);
again:
@@ -635,11 +631,9 @@ again:
list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
if (device->in_fs_metadata) {
if (!device->is_tgtdev_for_dev_replace &&
- (!latest_transid ||
- device->generation > latest_transid)) {
- latest_devid = device->devid;
- latest_transid = device->generation;
- latest_bdev = device->bdev;
+ (!latest_dev ||
+ device->generation > latest_dev->generation)) {
+ latest_dev = device;
}
continue;
}
@@ -681,9 +675,7 @@ again:
goto again;
}
- fs_devices->latest_bdev = latest_bdev;
- fs_devices->latest_devid = latest_devid;
- fs_devices->latest_trans = latest_transid;
+ fs_devices->latest_bdev = latest_dev->bdev;
mutex_unlock(&uuid_mutex);
}
@@ -732,8 +724,6 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
fs_devices->rw_devices--;
}
- if (device->can_discard)
- fs_devices->num_can_discard--;
if (device->missing)
fs_devices->missing_devices--;
@@ -798,11 +788,9 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
struct block_device *bdev;
struct list_head *head = &fs_devices->devices;
struct btrfs_device *device;
- struct block_device *latest_bdev = NULL;
+ struct btrfs_device *latest_dev = NULL;
struct buffer_head *bh;
struct btrfs_super_block *disk_super;
- u64 latest_devid = 0;
- u64 latest_transid = 0;
u64 devid;
int seeding = 1;
int ret = 0;
@@ -830,11 +818,9 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
goto error_brelse;
device->generation = btrfs_super_generation(disk_super);
- if (!latest_transid || device->generation > latest_transid) {
- latest_devid = devid;
- latest_transid = device->generation;
- latest_bdev = bdev;
- }
+ if (!latest_dev ||
+ device->generation > latest_dev->generation)
+ latest_dev = device;
if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
device->writeable = 0;
@@ -844,10 +830,8 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
}
q = bdev_get_queue(bdev);
- if (blk_queue_discard(q)) {
+ if (blk_queue_discard(q))
device->can_discard = 1;
- fs_devices->num_can_discard++;
- }
device->bdev = bdev;
device->in_fs_metadata = 0;
@@ -877,9 +861,7 @@ error_brelse:
}
fs_devices->seeding = seeding;
fs_devices->opened = 1;
- fs_devices->latest_bdev = latest_bdev;
- fs_devices->latest_devid = latest_devid;
- fs_devices->latest_trans = latest_transid;
+ fs_devices->latest_bdev = latest_dev->bdev;
fs_devices->total_rw_bytes = 0;
out:
return ret;
@@ -1053,7 +1035,7 @@ int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
if (key.objectid > device->devid)
break;
- if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
+ if (key.type != BTRFS_DEV_EXTENT_KEY)
goto next;
dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
@@ -1205,7 +1187,7 @@ again:
if (key.objectid > device->devid)
break;
- if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
+ if (key.type != BTRFS_DEV_EXTENT_KEY)
goto next;
if (key.offset > search_start) {
@@ -1284,7 +1266,7 @@ out:
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
struct btrfs_device *device,
- u64 start)
+ u64 start, u64 *dev_extent_len)
{
int ret;
struct btrfs_path *path;
@@ -1326,13 +1308,8 @@ again:
goto out;
}
- if (device->bytes_used > 0) {
- u64 len = btrfs_dev_extent_length(leaf, extent);
- device->bytes_used -= len;
- spin_lock(&root->fs_info->free_chunk_lock);
- root->fs_info->free_chunk_space += len;
- spin_unlock(&root->fs_info->free_chunk_lock);
- }
+ *dev_extent_len = btrfs_dev_extent_length(leaf, extent);
+
ret = btrfs_del_item(trans, root, path);
if (ret) {
btrfs_error(root->fs_info, ret,
@@ -1482,8 +1459,10 @@ static int btrfs_add_device(struct btrfs_trans_handle *trans,
btrfs_set_device_io_align(leaf, dev_item, device->io_align);
btrfs_set_device_io_width(leaf, dev_item, device->io_width);
btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
- btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
- btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
+ btrfs_set_device_total_bytes(leaf, dev_item,
+ btrfs_device_get_disk_total_bytes(device));
+ btrfs_set_device_bytes_used(leaf, dev_item,
+ btrfs_device_get_bytes_used(device));
btrfs_set_device_group(leaf, dev_item, 0);
btrfs_set_device_seek_speed(leaf, dev_item, 0);
btrfs_set_device_bandwidth(leaf, dev_item, 0);
@@ -1539,7 +1518,6 @@ static int btrfs_rm_dev_item(struct btrfs_root *root,
key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
key.type = BTRFS_DEV_ITEM_KEY;
key.offset = device->devid;
- lock_chunks(root);
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret < 0)
@@ -1555,7 +1533,6 @@ static int btrfs_rm_dev_item(struct btrfs_root *root,
goto out;
out:
btrfs_free_path(path);
- unlock_chunks(root);
btrfs_commit_transaction(trans, root);
return ret;
}
@@ -1671,8 +1648,8 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
if (device->writeable) {
lock_chunks(root);
list_del_init(&device->dev_alloc_list);
+ device->fs_devices->rw_devices--;
unlock_chunks(root);
- root->fs_info->fs_devices->rw_devices--;
clear_super = true;
}
@@ -1691,11 +1668,6 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
if (ret)
goto error_undo;
- spin_lock(&root->fs_info->free_chunk_lock);
- root->fs_info->free_chunk_space = device->total_bytes -
- device->bytes_used;
- spin_unlock(&root->fs_info->free_chunk_lock);
-
device->in_fs_metadata = 0;
btrfs_scrub_cancel_dev(root->fs_info, device);
@@ -1749,9 +1721,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
fs_devices = fs_devices->seed;
}
cur_devices->seed = NULL;
- lock_chunks(root);
__btrfs_close_devices(cur_devices);
- unlock_chunks(root);
free_fs_devices(cur_devices);
}
@@ -1824,8 +1794,8 @@ error_undo:
lock_chunks(root);
list_add(&device->dev_alloc_list,
&root->fs_info->fs_devices->alloc_list);
+ device->fs_devices->rw_devices++;
unlock_chunks(root);
- root->fs_info->fs_devices->rw_devices++;
}
goto error_brelse;
}
@@ -1833,29 +1803,57 @@ error_undo:
void btrfs_rm_dev_replace_srcdev(struct btrfs_fs_info *fs_info,
struct btrfs_device *srcdev)
{
+ struct btrfs_fs_devices *fs_devices;
+
WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));
+ /*
+ * In the case of a filesystem with no seed, srcdev->fs_devices points
+ * to the fs_devices of fs_info. However, when the device being replaced
+ * is a seed device, it points to the seed's local fs_devices. In short,
+ * srcdev has the correct fs_devices in either case.
+ */
+ fs_devices = srcdev->fs_devices;
+
list_del_rcu(&srcdev->dev_list);
list_del_rcu(&srcdev->dev_alloc_list);
- fs_info->fs_devices->num_devices--;
- if (srcdev->missing) {
- fs_info->fs_devices->missing_devices--;
- fs_info->fs_devices->rw_devices++;
- }
- if (srcdev->can_discard)
- fs_info->fs_devices->num_can_discard--;
- if (srcdev->bdev) {
- fs_info->fs_devices->open_devices--;
+ fs_devices->num_devices--;
+ if (srcdev->missing)
+ fs_devices->missing_devices--;
- /*
- * zero out the old super if it is not writable
- * (e.g. seed device)
- */
- if (srcdev->writeable)
- btrfs_scratch_superblock(srcdev);
+ if (srcdev->writeable) {
+ fs_devices->rw_devices--;
+ /* zero out the old super if it is writable */
+ btrfs_scratch_superblock(srcdev);
}
+ if (srcdev->bdev)
+ fs_devices->open_devices--;
+
call_rcu(&srcdev->rcu, free_device);
+
+ /*
+ * Unless fs_devices belongs to a seed filesystem, num_devices
+ * shouldn't drop to zero.
+ */
+ BUG_ON(!fs_devices->num_devices && !fs_devices->seeding);
+
+ /* if there are no devices left, delete the fs_devices */
+ if (!fs_devices->num_devices) {
+ struct btrfs_fs_devices *tmp_fs_devices;
+
+ tmp_fs_devices = fs_info->fs_devices;
+ while (tmp_fs_devices) {
+ if (tmp_fs_devices->seed == fs_devices) {
+ tmp_fs_devices->seed = fs_devices->seed;
+ break;
+ }
+ tmp_fs_devices = tmp_fs_devices->seed;
+ }
+ fs_devices->seed = NULL;
+ __btrfs_close_devices(fs_devices);
+ free_fs_devices(fs_devices);
+ }
}
void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
@@ -1863,6 +1861,7 @@ void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
{
struct btrfs_device *next_device;
+ mutex_lock(&uuid_mutex);
WARN_ON(!tgtdev);
mutex_lock(&fs_info->fs_devices->device_list_mutex);
if (tgtdev->bdev) {
@@ -1870,8 +1869,6 @@ void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
fs_info->fs_devices->open_devices--;
}
fs_info->fs_devices->num_devices--;
- if (tgtdev->can_discard)
- fs_info->fs_devices->num_can_discard++;
next_device = list_entry(fs_info->fs_devices->devices.next,
struct btrfs_device, dev_list);
@@ -1884,6 +1881,7 @@ void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
call_rcu(&tgtdev->rcu, free_device);
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+ mutex_unlock(&uuid_mutex);
}
static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
@@ -1982,17 +1980,17 @@ static int btrfs_prepare_sprout(struct btrfs_root *root)
mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
synchronize_rcu);
+ list_for_each_entry(device, &seed_devices->devices, dev_list)
+ device->fs_devices = seed_devices;
+ lock_chunks(root);
list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
- list_for_each_entry(device, &seed_devices->devices, dev_list) {
- device->fs_devices = seed_devices;
- }
+ unlock_chunks(root);
fs_devices->seeding = 0;
fs_devices->num_devices = 0;
fs_devices->open_devices = 0;
fs_devices->missing_devices = 0;
- fs_devices->num_can_discard = 0;
fs_devices->rotating = 0;
fs_devices->seed = seed_devices;
@@ -2092,7 +2090,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
struct list_head *devices;
struct super_block *sb = root->fs_info->sb;
struct rcu_string *name;
- u64 total_bytes;
+ u64 tmp;
int seeding_dev = 0;
int ret = 0;
@@ -2148,8 +2146,6 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
goto error;
}
- lock_chunks(root);
-
q = bdev_get_queue(bdev);
if (blk_queue_discard(q))
device->can_discard = 1;
@@ -2160,6 +2156,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
device->sector_size = root->sectorsize;
device->total_bytes = i_size_read(bdev->bd_inode);
device->disk_total_bytes = device->total_bytes;
+ device->commit_total_bytes = device->total_bytes;
device->dev_root = root->fs_info->dev_root;
device->bdev = bdev;
device->in_fs_metadata = 1;
@@ -2177,6 +2174,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
device->fs_devices = root->fs_info->fs_devices;
mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
+ lock_chunks(root);
list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
list_add(&device->dev_alloc_list,
&root->fs_info->fs_devices->alloc_list);
@@ -2184,8 +2182,6 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
root->fs_info->fs_devices->open_devices++;
root->fs_info->fs_devices->rw_devices++;
root->fs_info->fs_devices->total_devices++;
- if (device->can_discard)
- root->fs_info->fs_devices->num_can_discard++;
root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
spin_lock(&root->fs_info->free_chunk_lock);
@@ -2195,26 +2191,45 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
if (!blk_queue_nonrot(bdev_get_queue(bdev)))
root->fs_info->fs_devices->rotating = 1;
- total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
+ tmp = btrfs_super_total_bytes(root->fs_info->super_copy);
btrfs_set_super_total_bytes(root->fs_info->super_copy,
- total_bytes + device->total_bytes);
+ tmp + device->total_bytes);
- total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
+ tmp = btrfs_super_num_devices(root->fs_info->super_copy);
btrfs_set_super_num_devices(root->fs_info->super_copy,
- total_bytes + 1);
+ tmp + 1);
/* add sysfs device entry */
btrfs_kobj_add_device(root->fs_info, device);
+ /*
+ * we've got more storage, clear any full flags on the space
+ * infos
+ */
+ btrfs_clear_space_info_full(root->fs_info);
+
+ unlock_chunks(root);
mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
if (seeding_dev) {
- char fsid_buf[BTRFS_UUID_UNPARSED_SIZE];
+ lock_chunks(root);
ret = init_first_rw_device(trans, root, device);
+ unlock_chunks(root);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto error_trans;
}
+ }
+
+ ret = btrfs_add_device(trans, root, device);
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto error_trans;
+ }
+
+ if (seeding_dev) {
+ char fsid_buf[BTRFS_UUID_UNPARSED_SIZE];
+
ret = btrfs_finish_sprout(trans, root);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
@@ -2228,21 +2243,8 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
root->fs_info->fsid);
if (kobject_rename(&root->fs_info->super_kobj, fsid_buf))
goto error_trans;
- } else {
- ret = btrfs_add_device(trans, root, device);
- if (ret) {
- btrfs_abort_transaction(trans, root, ret);
- goto error_trans;
- }
}
- /*
- * we've got more storage, clear any full flags on the space
- * infos
- */
- btrfs_clear_space_info_full(root->fs_info);
-
- unlock_chunks(root);
root->fs_info->num_tolerated_disk_barrier_failures =
btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
ret = btrfs_commit_transaction(trans, root);
@@ -2274,7 +2276,6 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
return ret;
error_trans:
- unlock_chunks(root);
btrfs_end_transaction(trans, root);
rcu_string_free(device->name);
btrfs_kobj_rm_device(root->fs_info, device);
@@ -2289,6 +2290,7 @@ error:
}
int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
+ struct btrfs_device *srcdev,
struct btrfs_device **device_out)
{
struct request_queue *q;
@@ -2301,24 +2303,38 @@ int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
int ret = 0;
*device_out = NULL;
- if (fs_info->fs_devices->seeding)
+ if (fs_info->fs_devices->seeding) {
+ btrfs_err(fs_info, "the filesystem is a seed filesystem!");
return -EINVAL;
+ }
bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
fs_info->bdev_holder);
- if (IS_ERR(bdev))
+ if (IS_ERR(bdev)) {
+ btrfs_err(fs_info, "target device %s is invalid!", device_path);
return PTR_ERR(bdev);
+ }
filemap_write_and_wait(bdev->bd_inode->i_mapping);
devices = &fs_info->fs_devices->devices;
list_for_each_entry(device, devices, dev_list) {
if (device->bdev == bdev) {
+ btrfs_err(fs_info, "target device is in the filesystem!");
ret = -EEXIST;
goto error;
}
}
+
+ if (i_size_read(bdev->bd_inode) <
+ btrfs_device_get_total_bytes(srcdev)) {
+ btrfs_err(fs_info, "target device is smaller than source device!");
+ ret = -EINVAL;
+ goto error;
+ }
+
+
device = btrfs_alloc_device(NULL, &devid, NULL);
if (IS_ERR(device)) {
ret = PTR_ERR(device);
@@ -2342,8 +2358,12 @@ int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
device->io_width = root->sectorsize;
device->io_align = root->sectorsize;
device->sector_size = root->sectorsize;
- device->total_bytes = i_size_read(bdev->bd_inode);
- device->disk_total_bytes = device->total_bytes;
+ device->total_bytes = btrfs_device_get_total_bytes(srcdev);
+ device->disk_total_bytes = btrfs_device_get_disk_total_bytes(srcdev);
+ device->bytes_used = btrfs_device_get_bytes_used(srcdev);
+ ASSERT(list_empty(&srcdev->resized_list));
+ device->commit_total_bytes = srcdev->commit_total_bytes;
+ device->commit_bytes_used = device->bytes_used;
device->dev_root = fs_info->dev_root;
device->bdev = bdev;
device->in_fs_metadata = 1;
@@ -2355,8 +2375,6 @@ int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
list_add(&device->dev_list, &fs_info->fs_devices->devices);
fs_info->fs_devices->num_devices++;
fs_info->fs_devices->open_devices++;
- if (device->can_discard)
- fs_info->fs_devices->num_can_discard++;
mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
*device_out = device;
@@ -2415,8 +2433,10 @@ static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
btrfs_set_device_io_align(leaf, dev_item, device->io_align);
btrfs_set_device_io_width(leaf, dev_item, device->io_width);
btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
- btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
- btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
+ btrfs_set_device_total_bytes(leaf, dev_item,
+ btrfs_device_get_disk_total_bytes(device));
+ btrfs_set_device_bytes_used(leaf, dev_item,
+ btrfs_device_get_bytes_used(device));
btrfs_mark_buffer_dirty(leaf);
out:
@@ -2424,40 +2444,44 @@ out:
return ret;
}
-static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
+int btrfs_grow_device(struct btrfs_trans_handle *trans,
struct btrfs_device *device, u64 new_size)
{
struct btrfs_super_block *super_copy =
device->dev_root->fs_info->super_copy;
- u64 old_total = btrfs_super_total_bytes(super_copy);
- u64 diff = new_size - device->total_bytes;
+ struct btrfs_fs_devices *fs_devices;
+ u64 old_total;
+ u64 diff;
if (!device->writeable)
return -EACCES;
+
+ lock_chunks(device->dev_root);
+ old_total = btrfs_super_total_bytes(super_copy);
+ diff = new_size - device->total_bytes;
+
if (new_size <= device->total_bytes ||
- device->is_tgtdev_for_dev_replace)
+ device->is_tgtdev_for_dev_replace) {
+ unlock_chunks(device->dev_root);
return -EINVAL;
+ }
+
+ fs_devices = device->dev_root->fs_info->fs_devices;
btrfs_set_super_total_bytes(super_copy, old_total + diff);
device->fs_devices->total_rw_bytes += diff;
- device->total_bytes = new_size;
- device->disk_total_bytes = new_size;
+ btrfs_device_set_total_bytes(device, new_size);
+ btrfs_device_set_disk_total_bytes(device, new_size);
btrfs_clear_space_info_full(device->dev_root->fs_info);
+ if (list_empty(&device->resized_list))
+ list_add_tail(&device->resized_list,
+ &fs_devices->resized_devices);
+ unlock_chunks(device->dev_root);
return btrfs_update_device(trans, device);
}
-int btrfs_grow_device(struct btrfs_trans_handle *trans,
- struct btrfs_device *device, u64 new_size)
-{
- int ret;
- lock_chunks(device->dev_root);
- ret = __btrfs_grow_device(trans, device, new_size);
- unlock_chunks(device->dev_root);
- return ret;
-}
-
static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 chunk_tree, u64 chunk_objectid,
@@ -2509,6 +2533,7 @@ static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
u32 cur;
struct btrfs_key key;
+ lock_chunks(root);
array_size = btrfs_super_sys_array_size(super_copy);
ptr = super_copy->sys_chunk_array;
@@ -2538,79 +2563,95 @@ static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
cur += len;
}
}
+ unlock_chunks(root);
return ret;
}
-static int btrfs_relocate_chunk(struct btrfs_root *root,
- u64 chunk_tree, u64 chunk_objectid,
- u64 chunk_offset)
+int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, u64 chunk_offset)
{
struct extent_map_tree *em_tree;
- struct btrfs_root *extent_root;
- struct btrfs_trans_handle *trans;
struct extent_map *em;
+ struct btrfs_root *extent_root = root->fs_info->extent_root;
struct map_lookup *map;
- int ret;
- int i;
+ u64 dev_extent_len = 0;
+ u64 chunk_objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
+ u64 chunk_tree = root->fs_info->chunk_root->objectid;
+ int i, ret = 0;
+ /* Just in case */
root = root->fs_info->chunk_root;
- extent_root = root->fs_info->extent_root;
em_tree = &root->fs_info->mapping_tree.map_tree;
- ret = btrfs_can_relocate(extent_root, chunk_offset);
- if (ret)
- return -ENOSPC;
-
- /* step one, relocate all the extents inside this chunk */
- ret = btrfs_relocate_block_group(extent_root, chunk_offset);
- if (ret)
- return ret;
-
- trans = btrfs_start_transaction(root, 0);
- if (IS_ERR(trans)) {
- ret = PTR_ERR(trans);
- btrfs_std_error(root->fs_info, ret);
- return ret;
- }
-
- lock_chunks(root);
-
- /*
- * step two, delete the device extents and the
- * chunk tree entries
- */
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, chunk_offset, 1);
read_unlock(&em_tree->lock);
- BUG_ON(!em || em->start > chunk_offset ||
- em->start + em->len < chunk_offset);
+ if (!em || em->start > chunk_offset ||
+ em->start + em->len < chunk_offset) {
+ /*
+ * This is a logic error, but we don't want to just rely on the
+ * user having built with ASSERT enabled, so if ASSERT doesn't
+ * do anything we still error out.
+ */
+ ASSERT(0);
+ if (em)
+ free_extent_map(em);
+ return -EINVAL;
+ }
map = (struct map_lookup *)em->bdev;
for (i = 0; i < map->num_stripes; i++) {
- ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
- map->stripes[i].physical);
- BUG_ON(ret);
+ struct btrfs_device *device = map->stripes[i].dev;
+ ret = btrfs_free_dev_extent(trans, device,
+ map->stripes[i].physical,
+ &dev_extent_len);
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto out;
+ }
+
+ if (device->bytes_used > 0) {
+ lock_chunks(root);
+ btrfs_device_set_bytes_used(device,
+ device->bytes_used - dev_extent_len);
+ spin_lock(&root->fs_info->free_chunk_lock);
+ root->fs_info->free_chunk_space += dev_extent_len;
+ spin_unlock(&root->fs_info->free_chunk_lock);
+ btrfs_clear_space_info_full(root->fs_info);
+ unlock_chunks(root);
+ }
if (map->stripes[i].dev) {
ret = btrfs_update_device(trans, map->stripes[i].dev);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto out;
+ }
}
}
ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
chunk_offset);
-
- BUG_ON(ret);
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto out;
+ }
trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto out;
+ }
}
ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_abort_transaction(trans, extent_root, ret);
+ goto out;
+ }
write_lock(&em_tree->lock);
remove_extent_mapping(em_tree, em);
@@ -2618,12 +2659,46 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
/* once for the tree */
free_extent_map(em);
+out:
/* once for us */
free_extent_map(em);
+ return ret;
+}
- unlock_chunks(root);
+static int btrfs_relocate_chunk(struct btrfs_root *root,
+ u64 chunk_tree, u64 chunk_objectid,
+ u64 chunk_offset)
+{
+ struct btrfs_root *extent_root;
+ struct btrfs_trans_handle *trans;
+ int ret;
+
+ root = root->fs_info->chunk_root;
+ extent_root = root->fs_info->extent_root;
+
+ ret = btrfs_can_relocate(extent_root, chunk_offset);
+ if (ret)
+ return -ENOSPC;
+
+ /* step one, relocate all the extents inside this chunk */
+ ret = btrfs_relocate_block_group(extent_root, chunk_offset);
+ if (ret)
+ return ret;
+
+ trans = btrfs_start_transaction(root, 0);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ btrfs_std_error(root->fs_info, ret);
+ return ret;
+ }
+
+ /*
+ * step two, delete the device extents and the
+ * chunk tree entries
+ */
+ ret = btrfs_remove_chunk(trans, root, chunk_offset);
btrfs_end_transaction(trans, root);
- return 0;
+ return ret;
}
static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
@@ -2676,8 +2751,8 @@ again:
found_key.offset);
if (ret == -ENOSPC)
failed++;
- else if (ret)
- BUG();
+ else
+ BUG_ON(ret);
}
if (found_key.offset == 0)
@@ -3084,11 +3159,12 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info)
/* step one make some room on all the devices */
devices = &fs_info->fs_devices->devices;
list_for_each_entry(device, devices, dev_list) {
- old_size = device->total_bytes;
+ old_size = btrfs_device_get_total_bytes(device);
size_to_free = div_factor(old_size, 1);
size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
if (!device->writeable ||
- device->total_bytes - device->bytes_used > size_to_free ||
+ btrfs_device_get_total_bytes(device) -
+ btrfs_device_get_bytes_used(device) > size_to_free ||
device->is_tgtdev_for_dev_replace)
continue;
@@ -3643,8 +3719,6 @@ static int btrfs_uuid_scan_kthread(void *data)
max_key.type = BTRFS_ROOT_ITEM_KEY;
max_key.offset = (u64)-1;
- path->keep_locks = 1;
-
while (1) {
ret = btrfs_search_forward(root, &key, path, 0);
if (ret) {
@@ -3896,8 +3970,8 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
struct btrfs_key key;
struct btrfs_super_block *super_copy = root->fs_info->super_copy;
u64 old_total = btrfs_super_total_bytes(super_copy);
- u64 old_size = device->total_bytes;
- u64 diff = device->total_bytes - new_size;
+ u64 old_size = btrfs_device_get_total_bytes(device);
+ u64 diff = old_size - new_size;
if (device->is_tgtdev_for_dev_replace)
return -EINVAL;
@@ -3910,7 +3984,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
lock_chunks(root);
- device->total_bytes = new_size;
+ btrfs_device_set_total_bytes(device, new_size);
if (device->writeable) {
device->fs_devices->total_rw_bytes -= diff;
spin_lock(&root->fs_info->free_chunk_lock);
@@ -3976,7 +4050,7 @@ again:
ret = -ENOSPC;
lock_chunks(root);
- device->total_bytes = old_size;
+ btrfs_device_set_total_bytes(device, old_size);
if (device->writeable)
device->fs_devices->total_rw_bytes += diff;
spin_lock(&root->fs_info->free_chunk_lock);
@@ -3994,18 +4068,17 @@ again:
}
lock_chunks(root);
+ btrfs_device_set_disk_total_bytes(device, new_size);
+ if (list_empty(&device->resized_list))
+ list_add_tail(&device->resized_list,
+ &root->fs_info->fs_devices->resized_devices);
- device->disk_total_bytes = new_size;
- /* Now btrfs_update_device() will change the on-disk size. */
- ret = btrfs_update_device(trans, device);
- if (ret) {
- unlock_chunks(root);
- btrfs_end_transaction(trans, root);
- goto done;
- }
WARN_ON(diff > old_total);
btrfs_set_super_total_bytes(super_copy, old_total - diff);
unlock_chunks(root);
+
+ /* Now btrfs_update_device() will change the on-disk size. */
+ ret = btrfs_update_device(trans, device);
btrfs_end_transaction(trans, root);
done:
btrfs_free_path(path);
@@ -4021,10 +4094,13 @@ static int btrfs_add_system_chunk(struct btrfs_root *root,
u32 array_size;
u8 *ptr;
+ lock_chunks(root);
array_size = btrfs_super_sys_array_size(super_copy);
if (array_size + item_size + sizeof(disk_key)
- > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
+ > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
+ unlock_chunks(root);
return -EFBIG;
+ }
ptr = super_copy->sys_chunk_array + array_size;
btrfs_cpu_key_to_disk(&disk_key, key);
@@ -4033,6 +4109,8 @@ static int btrfs_add_system_chunk(struct btrfs_root *root,
memcpy(ptr, chunk, item_size);
item_size += sizeof(disk_key);
btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
+ unlock_chunks(root);
+
return 0;
}
@@ -4402,6 +4480,16 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
if (ret)
goto error_del_extent;
+ for (i = 0; i < map->num_stripes; i++) {
+ num_bytes = map->stripes[i].dev->bytes_used + stripe_size;
+ btrfs_device_set_bytes_used(map->stripes[i].dev, num_bytes);
+ }
+
+ spin_lock(&extent_root->fs_info->free_chunk_lock);
+ extent_root->fs_info->free_chunk_space -= (stripe_size *
+ map->num_stripes);
+ spin_unlock(&extent_root->fs_info->free_chunk_lock);
+
free_extent_map(em);
check_raid56_incompat_flag(extent_root->fs_info, type);
@@ -4473,7 +4561,6 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
device = map->stripes[i].dev;
dev_offset = map->stripes[i].physical;
- device->bytes_used += stripe_size;
ret = btrfs_update_device(trans, device);
if (ret)
goto out;
@@ -4486,11 +4573,6 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
goto out;
}
- spin_lock(&extent_root->fs_info->free_chunk_lock);
- extent_root->fs_info->free_chunk_space -= (stripe_size *
- map->num_stripes);
- spin_unlock(&extent_root->fs_info->free_chunk_lock);
-
stripe = &chunk->stripe;
for (i = 0; i < map->num_stripes; i++) {
device = map->stripes[i].dev;
@@ -4570,16 +4652,25 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
ret = __btrfs_alloc_chunk(trans, extent_root, sys_chunk_offset,
alloc_profile);
- if (ret) {
- btrfs_abort_transaction(trans, root, ret);
- goto out;
+ return ret;
+}
+
+static inline int btrfs_chunk_max_errors(struct map_lookup *map)
+{
+ int max_errors;
+
+ if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
+ BTRFS_BLOCK_GROUP_RAID10 |
+ BTRFS_BLOCK_GROUP_RAID5 |
+ BTRFS_BLOCK_GROUP_DUP)) {
+ max_errors = 1;
+ } else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
+ max_errors = 2;
+ } else {
+ max_errors = 0;
}
- ret = btrfs_add_device(trans, fs_info->chunk_root, device);
- if (ret)
- btrfs_abort_transaction(trans, root, ret);
-out:
- return ret;
+ return max_errors;
}
int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
@@ -4588,6 +4679,7 @@ int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
struct map_lookup *map;
struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
int readonly = 0;
+ int miss_ndevs = 0;
int i;
read_lock(&map_tree->map_tree.lock);
@@ -4596,18 +4688,27 @@ int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
if (!em)
return 1;
- if (btrfs_test_opt(root, DEGRADED)) {
- free_extent_map(em);
- return 0;
- }
-
map = (struct map_lookup *)em->bdev;
for (i = 0; i < map->num_stripes; i++) {
+ if (map->stripes[i].dev->missing) {
+ miss_ndevs++;
+ continue;
+ }
+
if (!map->stripes[i].dev->writeable) {
readonly = 1;
- break;
+ goto end;
}
}
+
+ /*
+ * If the number of missing devices is larger than max errors,
+ * we cannot write the data into that chunk successfully, so
+ * set it readonly.
+ */
+ if (miss_ndevs > btrfs_chunk_max_errors(map))
+ readonly = 1;
+end:
free_extent_map(em);
return readonly;
}
@@ -5008,6 +5109,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
num_stripes = min_t(u64, map->num_stripes,
stripe_nr_end - stripe_nr_orig);
stripe_index = do_div(stripe_nr, map->num_stripes);
+ if (!(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)))
+ mirror_num = 1;
} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS))
num_stripes = map->num_stripes;
@@ -5111,6 +5214,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
/* We distribute the parity blocks across stripes */
tmp = stripe_nr + stripe_index;
stripe_index = do_div(tmp, map->num_stripes);
+ if (!(rw & (REQ_WRITE | REQ_DISCARD |
+ REQ_GET_READ_MIRRORS)) && mirror_num <= 1)
+ mirror_num = 1;
}
} else {
/*
@@ -5218,16 +5324,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
}
}
- if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) {
- if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
- BTRFS_BLOCK_GROUP_RAID10 |
- BTRFS_BLOCK_GROUP_RAID5 |
- BTRFS_BLOCK_GROUP_DUP)) {
- max_errors = 1;
- } else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
- max_errors = 2;
- }
- }
+ if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
+ max_errors = btrfs_chunk_max_errors(map);
if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) &&
dev_replace->tgtdev != NULL) {
@@ -5610,8 +5708,8 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
name = rcu_dereference(dev->name);
pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
"(%s id %llu), size=%u\n", rw,
- (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
- name->str, dev->devid, bio->bi_size);
+ (u64)bio->bi_iter.bi_sector, (u_long)dev->bdev->bd_dev,
+ name->str, dev->devid, bio->bi_iter.bi_size);
rcu_read_unlock();
}
#endif
@@ -5789,10 +5887,10 @@ struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
}
static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
+ struct btrfs_fs_devices *fs_devices,
u64 devid, u8 *dev_uuid)
{
struct btrfs_device *device;
- struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
device = btrfs_alloc_device(NULL, &devid, dev_uuid);
if (IS_ERR(device))
@@ -5929,7 +6027,8 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
}
if (!map->stripes[i].dev) {
map->stripes[i].dev =
- add_missing_dev(root, devid, uuid);
+ add_missing_dev(root, root->fs_info->fs_devices,
+ devid, uuid);
if (!map->stripes[i].dev) {
free_extent_map(em);
return -EIO;
@@ -5956,7 +6055,9 @@ static void fill_device_from_item(struct extent_buffer *leaf,
device->devid = btrfs_device_id(leaf, dev_item);
device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
device->total_bytes = device->disk_total_bytes;
+ device->commit_total_bytes = device->disk_total_bytes;
device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
+ device->commit_bytes_used = device->bytes_used;
device->type = btrfs_device_type(leaf, dev_item);
device->io_align = btrfs_device_io_align(leaf, dev_item);
device->io_width = btrfs_device_io_width(leaf, dev_item);
@@ -5968,7 +6069,8 @@ static void fill_device_from_item(struct extent_buffer *leaf,
read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
}
-static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
+static struct btrfs_fs_devices *open_seed_devices(struct btrfs_root *root,
+ u8 *fsid)
{
struct btrfs_fs_devices *fs_devices;
int ret;
@@ -5977,49 +6079,56 @@ static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
fs_devices = root->fs_info->fs_devices->seed;
while (fs_devices) {
- if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
- ret = 0;
- goto out;
- }
+ if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE))
+ return fs_devices;
+
fs_devices = fs_devices->seed;
}
fs_devices = find_fsid(fsid);
if (!fs_devices) {
- ret = -ENOENT;
- goto out;
+ if (!btrfs_test_opt(root, DEGRADED))
+ return ERR_PTR(-ENOENT);
+
+ fs_devices = alloc_fs_devices(fsid);
+ if (IS_ERR(fs_devices))
+ return fs_devices;
+
+ fs_devices->seeding = 1;
+ fs_devices->opened = 1;
+ return fs_devices;
}
fs_devices = clone_fs_devices(fs_devices);
- if (IS_ERR(fs_devices)) {
- ret = PTR_ERR(fs_devices);
- goto out;
- }
+ if (IS_ERR(fs_devices))
+ return fs_devices;
ret = __btrfs_open_devices(fs_devices, FMODE_READ,
root->fs_info->bdev_holder);
if (ret) {
free_fs_devices(fs_devices);
+ fs_devices = ERR_PTR(ret);
goto out;
}
if (!fs_devices->seeding) {
__btrfs_close_devices(fs_devices);
free_fs_devices(fs_devices);
- ret = -EINVAL;
+ fs_devices = ERR_PTR(-EINVAL);
goto out;
}
fs_devices->seed = root->fs_info->fs_devices->seed;
root->fs_info->fs_devices->seed = fs_devices;
out:
- return ret;
+ return fs_devices;
}
static int read_one_dev(struct btrfs_root *root,
struct extent_buffer *leaf,
struct btrfs_dev_item *dev_item)
{
+ struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
struct btrfs_device *device;
u64 devid;
int ret;
@@ -6033,31 +6142,48 @@ static int read_one_dev(struct btrfs_root *root,
BTRFS_UUID_SIZE);
if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
- ret = open_seed_devices(root, fs_uuid);
- if (ret && !btrfs_test_opt(root, DEGRADED))
- return ret;
+ fs_devices = open_seed_devices(root, fs_uuid);
+ if (IS_ERR(fs_devices))
+ return PTR_ERR(fs_devices);
}
device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid);
- if (!device || !device->bdev) {
+ if (!device) {
if (!btrfs_test_opt(root, DEGRADED))
return -EIO;
- if (!device) {
- btrfs_warn(root->fs_info, "devid %llu missing", devid);
- device = add_missing_dev(root, devid, dev_uuid);
- if (!device)
- return -ENOMEM;
- } else if (!device->missing) {
+ btrfs_warn(root->fs_info, "devid %llu missing", devid);
+ device = add_missing_dev(root, fs_devices, devid, dev_uuid);
+ if (!device)
+ return -ENOMEM;
+ } else {
+ if (!device->bdev && !btrfs_test_opt(root, DEGRADED))
+ return -EIO;
+
+ if (!device->bdev && !device->missing) {
/*
* this happens when a device that was properly setup
* in the device info lists suddenly goes bad.
* device->bdev is NULL, and so we have to set
* device->missing to one here
*/
- root->fs_info->fs_devices->missing_devices++;
+ device->fs_devices->missing_devices++;
device->missing = 1;
}
+
+ /* Move the device to its own fs_devices */
+ if (device->fs_devices != fs_devices) {
+ ASSERT(device->missing);
+
+ list_move(&device->dev_list, &fs_devices->devices);
+ device->fs_devices->num_devices--;
+ fs_devices->num_devices++;
+
+ device->fs_devices->missing_devices--;
+ fs_devices->missing_devices++;
+
+ device->fs_devices = fs_devices;
+ }
}
if (device->fs_devices != root->fs_info->fs_devices) {
@@ -6373,16 +6499,18 @@ int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
struct btrfs_root *dev_root = fs_info->dev_root;
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
struct btrfs_device *device;
+ int stats_cnt;
int ret = 0;
mutex_lock(&fs_devices->device_list_mutex);
list_for_each_entry(device, &fs_devices->devices, dev_list) {
- if (!device->dev_stats_valid || !device->dev_stats_dirty)
+ if (!device->dev_stats_valid || !btrfs_dev_stats_dirty(device))
continue;
+ stats_cnt = atomic_read(&device->dev_stats_ccnt);
ret = update_dev_stat_item(trans, dev_root, device);
if (!ret)
- device->dev_stats_dirty = 0;
+ atomic_sub(stats_cnt, &device->dev_stats_ccnt);
}
mutex_unlock(&fs_devices->device_list_mutex);
@@ -6481,3 +6609,51 @@ int btrfs_scratch_superblock(struct btrfs_device *device)
return 0;
}
+
+/*
+ * Update the size of all devices, which is used for writing out the
+ * super blocks.
+ */
+void btrfs_update_commit_device_size(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+ struct btrfs_device *curr, *next;
+
+ if (list_empty(&fs_devices->resized_devices))
+ return;
+
+ mutex_lock(&fs_devices->device_list_mutex);
+ lock_chunks(fs_info->dev_root);
+ list_for_each_entry_safe(curr, next, &fs_devices->resized_devices,
+ resized_list) {
+ list_del_init(&curr->resized_list);
+ curr->commit_total_bytes = curr->disk_total_bytes;
+ }
+ unlock_chunks(fs_info->dev_root);
+ mutex_unlock(&fs_devices->device_list_mutex);
+}
+
+/* Must be invoked during the transaction commit */
+void btrfs_update_commit_device_bytes_used(struct btrfs_root *root,
+ struct btrfs_transaction *transaction)
+{
+ struct extent_map *em;
+ struct map_lookup *map;
+ struct btrfs_device *dev;
+ int i;
+
+ if (list_empty(&transaction->pending_chunks))
+ return;
+
+ /* In order to kick the device replace finish process */
+ lock_chunks(root);
+ list_for_each_entry(em, &transaction->pending_chunks, list) {
+ map = (struct map_lookup *)em->bdev;
+
+ for (i = 0; i < map->num_stripes; i++) {
+ dev = map->stripes[i].dev;
+ dev->commit_bytes_used = dev->bytes_used;
+ }
+ }
+ unlock_chunks(root);
+}
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 2aaa00c47816..08980fa23039 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -24,6 +24,8 @@
#include <linux/btrfs.h>
#include "async-thread.h"
+extern struct mutex uuid_mutex;
+
#define BTRFS_STRIPE_LEN (64 * 1024)
struct buffer_head;
@@ -32,41 +34,59 @@ struct btrfs_pending_bios {
struct bio *tail;
};
+/*
+ * Use sequence counter to get consistent device stat data on
+ * 32-bit processors.
+ */
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+#include <linux/seqlock.h>
+#define __BTRFS_NEED_DEVICE_DATA_ORDERED
+#define btrfs_device_data_ordered_init(device) \
+ seqcount_init(&device->data_seqcount)
+#else
+#define btrfs_device_data_ordered_init(device) do { } while (0)
+#endif
+
struct btrfs_device {
struct list_head dev_list;
struct list_head dev_alloc_list;
struct btrfs_fs_devices *fs_devices;
+
struct btrfs_root *dev_root;
+ struct rcu_string *name;
+
+ u64 generation;
+
+ spinlock_t io_lock ____cacheline_aligned;
+ int running_pending;
/* regular prio bios */
struct btrfs_pending_bios pending_bios;
/* WRITE_SYNC bios */
struct btrfs_pending_bios pending_sync_bios;
- u64 generation;
- int running_pending;
+ struct block_device *bdev;
+
+ /* the mode sent to blkdev_get */
+ fmode_t mode;
+
int writeable;
int in_fs_metadata;
int missing;
int can_discard;
int is_tgtdev_for_dev_replace;
- spinlock_t io_lock;
- /* the mode sent to blkdev_get */
- fmode_t mode;
-
- struct block_device *bdev;
-
-
- struct rcu_string *name;
+#ifdef __BTRFS_NEED_DEVICE_DATA_ORDERED
+ seqcount_t data_seqcount;
+#endif
/* the internal btrfs device id */
u64 devid;
- /* size of the device */
+ /* size of the device in memory */
u64 total_bytes;
- /* size of the disk */
+ /* size of the device on disk */
u64 disk_total_bytes;
/* bytes used */
@@ -83,10 +103,26 @@ struct btrfs_device {
/* minimal io size for this device */
u32 sector_size;
-
/* physical drive uuid (or lvm uuid) */
u8 uuid[BTRFS_UUID_SIZE];
+ /*
+ * size of the device on the current transaction
+ *
+ * This value is updated when committing the transaction,
+ * and protected by device_list_mutex
+ */
+ u64 commit_total_bytes;
+
+ /* bytes used on the current transaction */
+ u64 commit_bytes_used;
+ /*
+ * used to manage a device that is being resized
+ *
+ * It is protected by chunk_lock.
+ */
+ struct list_head resized_list;
+
/* for sending down flush barriers */
int nobarriers;
struct bio *flush_bio;
@@ -107,26 +143,90 @@ struct btrfs_device {
struct radix_tree_root reada_zones;
struct radix_tree_root reada_extents;
-
/* disk I/O failure stats. For detailed description refer to
* enum btrfs_dev_stat_values in ioctl.h */
int dev_stats_valid;
- int dev_stats_dirty; /* counters need to be written to disk */
+
+ /* Counter to record the change of device stats */
+ atomic_t dev_stats_ccnt;
atomic_t dev_stat_values[BTRFS_DEV_STAT_VALUES_MAX];
};
+/*
+ * If we read these values in the context of their own lock, we needn't
+ * use the following helpers; reading them directly is safe.
+ */
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+#define BTRFS_DEVICE_GETSET_FUNCS(name) \
+static inline u64 \
+btrfs_device_get_##name(const struct btrfs_device *dev) \
+{ \
+ u64 size; \
+ unsigned int seq; \
+ \
+ do { \
+ seq = read_seqcount_begin(&dev->data_seqcount); \
+ size = dev->name; \
+ } while (read_seqcount_retry(&dev->data_seqcount, seq)); \
+ return size; \
+} \
+ \
+static inline void \
+btrfs_device_set_##name(struct btrfs_device *dev, u64 size) \
+{ \
+ preempt_disable(); \
+ write_seqcount_begin(&dev->data_seqcount); \
+ dev->name = size; \
+ write_seqcount_end(&dev->data_seqcount); \
+ preempt_enable(); \
+}
+#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
+#define BTRFS_DEVICE_GETSET_FUNCS(name) \
+static inline u64 \
+btrfs_device_get_##name(const struct btrfs_device *dev) \
+{ \
+ u64 size; \
+ \
+ preempt_disable(); \
+ size = dev->name; \
+ preempt_enable(); \
+ return size; \
+} \
+ \
+static inline void \
+btrfs_device_set_##name(struct btrfs_device *dev, u64 size) \
+{ \
+ preempt_disable(); \
+ dev->name = size; \
+ preempt_enable(); \
+}
+#else
+#define BTRFS_DEVICE_GETSET_FUNCS(name) \
+static inline u64 \
+btrfs_device_get_##name(const struct btrfs_device *dev) \
+{ \
+ return dev->name; \
+} \
+ \
+static inline void \
+btrfs_device_set_##name(struct btrfs_device *dev, u64 size) \
+{ \
+ dev->name = size; \
+}
+#endif
+
+BTRFS_DEVICE_GETSET_FUNCS(total_bytes);
+BTRFS_DEVICE_GETSET_FUNCS(disk_total_bytes);
+BTRFS_DEVICE_GETSET_FUNCS(bytes_used);
+
struct btrfs_fs_devices {
u8 fsid[BTRFS_FSID_SIZE]; /* FS specific uuid */
- /* the device with this id has the most recent copy of the super */
- u64 latest_devid;
- u64 latest_trans;
u64 num_devices;
u64 open_devices;
u64 rw_devices;
u64 missing_devices;
u64 total_rw_bytes;
- u64 num_can_discard;
u64 total_devices;
struct block_device *latest_bdev;
@@ -139,6 +239,7 @@ struct btrfs_fs_devices {
struct mutex device_list_mutex;
struct list_head devices;
+ struct list_head resized_devices;
/* devices not currently being allocated */
struct list_head alloc_list;
struct list_head list;
@@ -167,8 +268,9 @@ struct btrfs_fs_devices {
*/
typedef void (btrfs_io_bio_end_io_t) (struct btrfs_io_bio *bio, int err);
struct btrfs_io_bio {
- unsigned long mirror_num;
- unsigned long stripe_index;
+ unsigned int mirror_num;
+ unsigned int stripe_index;
+ u64 logical;
u8 *csum;
u8 csum_inline[BTRFS_BIO_INLINE_CSUM_SIZE];
u8 *csum_allocated;
@@ -325,6 +427,7 @@ struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size);
int btrfs_init_new_device(struct btrfs_root *root, char *path);
int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
+ struct btrfs_device *srcdev,
struct btrfs_device **device_out);
int btrfs_balance(struct btrfs_balance_control *bctl,
struct btrfs_ioctl_balance_args *bargs);
@@ -360,11 +463,20 @@ unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
struct btrfs_root *extent_root,
u64 chunk_offset, u64 chunk_size);
+int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, u64 chunk_offset);
+
+static inline int btrfs_dev_stats_dirty(struct btrfs_device *dev)
+{
+ return atomic_read(&dev->dev_stats_ccnt);
+}
+
static inline void btrfs_dev_stat_inc(struct btrfs_device *dev,
int index)
{
atomic_inc(dev->dev_stat_values + index);
- dev->dev_stats_dirty = 1;
+ smp_mb__before_atomic();
+ atomic_inc(&dev->dev_stats_ccnt);
}
static inline int btrfs_dev_stat_read(struct btrfs_device *dev,
@@ -379,7 +491,8 @@ static inline int btrfs_dev_stat_read_and_reset(struct btrfs_device *dev,
int ret;
ret = atomic_xchg(dev->dev_stat_values + index, 0);
- dev->dev_stats_dirty = 1;
+ smp_mb__before_atomic();
+ atomic_inc(&dev->dev_stats_ccnt);
return ret;
}
@@ -387,7 +500,8 @@ static inline void btrfs_dev_stat_set(struct btrfs_device *dev,
int index, unsigned long val)
{
atomic_set(dev->dev_stat_values + index, val);
- dev->dev_stats_dirty = 1;
+ smp_mb__before_atomic();
+ atomic_inc(&dev->dev_stats_ccnt);
}
static inline void btrfs_dev_stat_reset(struct btrfs_device *dev,
@@ -395,4 +509,8 @@ static inline void btrfs_dev_stat_reset(struct btrfs_device *dev,
{
btrfs_dev_stat_set(dev, index, 0);
}
+
+void btrfs_update_commit_device_size(struct btrfs_fs_info *fs_info);
+void btrfs_update_commit_device_bytes_used(struct btrfs_root *root,
+ struct btrfs_transaction *transaction);
#endif
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index ad8328d797ea..dcf20131fbe4 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -237,7 +237,7 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
* first xattr that we find and walk forward
*/
key.objectid = btrfs_ino(inode);
- btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY);
+ key.type = BTRFS_XATTR_ITEM_KEY;
key.offset = 0;
path = btrfs_alloc_path();
@@ -273,7 +273,7 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
/* check to make sure this item is what we want */
if (found_key.objectid != key.objectid)
break;
- if (btrfs_key_type(&found_key) != BTRFS_XATTR_ITEM_KEY)
+ if (found_key.type != BTRFS_XATTR_ITEM_KEY)
break;
di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index b67d8fc81277..759fa4e2de8f 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -33,8 +33,7 @@
#include "compression.h"
struct workspace {
- z_stream inf_strm;
- z_stream def_strm;
+ z_stream strm;
char *buf;
struct list_head list;
};
@@ -43,8 +42,7 @@ static void zlib_free_workspace(struct list_head *ws)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
- vfree(workspace->def_strm.workspace);
- vfree(workspace->inf_strm.workspace);
+ vfree(workspace->strm.workspace);
kfree(workspace->buf);
kfree(workspace);
}
@@ -52,17 +50,17 @@ static void zlib_free_workspace(struct list_head *ws)
static struct list_head *zlib_alloc_workspace(void)
{
struct workspace *workspace;
+ int workspacesize;
workspace = kzalloc(sizeof(*workspace), GFP_NOFS);
if (!workspace)
return ERR_PTR(-ENOMEM);
- workspace->def_strm.workspace = vmalloc(zlib_deflate_workspacesize(
- MAX_WBITS, MAX_MEM_LEVEL));
- workspace->inf_strm.workspace = vmalloc(zlib_inflate_workspacesize());
+ workspacesize = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
+ zlib_inflate_workspacesize());
+ workspace->strm.workspace = vmalloc(workspacesize);
workspace->buf = kmalloc(PAGE_CACHE_SIZE, GFP_NOFS);
- if (!workspace->def_strm.workspace ||
- !workspace->inf_strm.workspace || !workspace->buf)
+ if (!workspace->strm.workspace || !workspace->buf)
goto fail;
INIT_LIST_HEAD(&workspace->list);
@@ -96,14 +94,14 @@ static int zlib_compress_pages(struct list_head *ws,
*total_out = 0;
*total_in = 0;
- if (Z_OK != zlib_deflateInit(&workspace->def_strm, 3)) {
+ if (Z_OK != zlib_deflateInit(&workspace->strm, 3)) {
printk(KERN_WARNING "BTRFS: deflateInit failed\n");
ret = -EIO;
goto out;
}
- workspace->def_strm.total_in = 0;
- workspace->def_strm.total_out = 0;
+ workspace->strm.total_in = 0;
+ workspace->strm.total_out = 0;
in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
data_in = kmap(in_page);
@@ -117,25 +115,25 @@ static int zlib_compress_pages(struct list_head *ws,
pages[0] = out_page;
nr_pages = 1;
- workspace->def_strm.next_in = data_in;
- workspace->def_strm.next_out = cpage_out;
- workspace->def_strm.avail_out = PAGE_CACHE_SIZE;
- workspace->def_strm.avail_in = min(len, PAGE_CACHE_SIZE);
+ workspace->strm.next_in = data_in;
+ workspace->strm.next_out = cpage_out;
+ workspace->strm.avail_out = PAGE_CACHE_SIZE;
+ workspace->strm.avail_in = min(len, PAGE_CACHE_SIZE);
- while (workspace->def_strm.total_in < len) {
- ret = zlib_deflate(&workspace->def_strm, Z_SYNC_FLUSH);
+ while (workspace->strm.total_in < len) {
+ ret = zlib_deflate(&workspace->strm, Z_SYNC_FLUSH);
if (ret != Z_OK) {
printk(KERN_DEBUG "BTRFS: deflate in loop returned %d\n",
ret);
- zlib_deflateEnd(&workspace->def_strm);
+ zlib_deflateEnd(&workspace->strm);
ret = -EIO;
goto out;
}
/* we're making it bigger, give up */
- if (workspace->def_strm.total_in > 8192 &&
- workspace->def_strm.total_in <
- workspace->def_strm.total_out) {
+ if (workspace->strm.total_in > 8192 &&
+ workspace->strm.total_in <
+ workspace->strm.total_out) {
ret = -E2BIG;
goto out;
}
@@ -143,7 +141,7 @@ static int zlib_compress_pages(struct list_head *ws,
* before the total_in so we will pull in a new page for
* the stream end if required
*/
- if (workspace->def_strm.avail_out == 0) {
+ if (workspace->strm.avail_out == 0) {
kunmap(out_page);
if (nr_pages == nr_dest_pages) {
out_page = NULL;
@@ -158,19 +156,19 @@ static int zlib_compress_pages(struct list_head *ws,
cpage_out = kmap(out_page);
pages[nr_pages] = out_page;
nr_pages++;
- workspace->def_strm.avail_out = PAGE_CACHE_SIZE;
- workspace->def_strm.next_out = cpage_out;
+ workspace->strm.avail_out = PAGE_CACHE_SIZE;
+ workspace->strm.next_out = cpage_out;
}
/* we're all done */
- if (workspace->def_strm.total_in >= len)
+ if (workspace->strm.total_in >= len)
break;
/* we've read in a full page, get a new one */
- if (workspace->def_strm.avail_in == 0) {
- if (workspace->def_strm.total_out > max_out)
+ if (workspace->strm.avail_in == 0) {
+ if (workspace->strm.total_out > max_out)
break;
- bytes_left = len - workspace->def_strm.total_in;
+ bytes_left = len - workspace->strm.total_in;
kunmap(in_page);
page_cache_release(in_page);
@@ -178,28 +176,28 @@ static int zlib_compress_pages(struct list_head *ws,
in_page = find_get_page(mapping,
start >> PAGE_CACHE_SHIFT);
data_in = kmap(in_page);
- workspace->def_strm.avail_in = min(bytes_left,
+ workspace->strm.avail_in = min(bytes_left,
PAGE_CACHE_SIZE);
- workspace->def_strm.next_in = data_in;
+ workspace->strm.next_in = data_in;
}
}
- workspace->def_strm.avail_in = 0;
- ret = zlib_deflate(&workspace->def_strm, Z_FINISH);
- zlib_deflateEnd(&workspace->def_strm);
+ workspace->strm.avail_in = 0;
+ ret = zlib_deflate(&workspace->strm, Z_FINISH);
+ zlib_deflateEnd(&workspace->strm);
if (ret != Z_STREAM_END) {
ret = -EIO;
goto out;
}
- if (workspace->def_strm.total_out >= workspace->def_strm.total_in) {
+ if (workspace->strm.total_out >= workspace->strm.total_in) {
ret = -E2BIG;
goto out;
}
ret = 0;
- *total_out = workspace->def_strm.total_out;
- *total_in = workspace->def_strm.total_in;
+ *total_out = workspace->strm.total_out;
+ *total_in = workspace->strm.total_in;
out:
*out_pages = nr_pages;
if (out_page)
@@ -225,19 +223,18 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
size_t total_out = 0;
unsigned long page_in_index = 0;
unsigned long page_out_index = 0;
- unsigned long total_pages_in = (srclen + PAGE_CACHE_SIZE - 1) /
- PAGE_CACHE_SIZE;
+ unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_CACHE_SIZE);
unsigned long buf_start;
unsigned long pg_offset;
data_in = kmap(pages_in[page_in_index]);
- workspace->inf_strm.next_in = data_in;
- workspace->inf_strm.avail_in = min_t(size_t, srclen, PAGE_CACHE_SIZE);
- workspace->inf_strm.total_in = 0;
+ workspace->strm.next_in = data_in;
+ workspace->strm.avail_in = min_t(size_t, srclen, PAGE_CACHE_SIZE);
+ workspace->strm.total_in = 0;
- workspace->inf_strm.total_out = 0;
- workspace->inf_strm.next_out = workspace->buf;
- workspace->inf_strm.avail_out = PAGE_CACHE_SIZE;
+ workspace->strm.total_out = 0;
+ workspace->strm.next_out = workspace->buf;
+ workspace->strm.avail_out = PAGE_CACHE_SIZE;
pg_offset = 0;
/* If it's deflate, and it's got no preset dictionary, then
@@ -247,21 +244,21 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
!(((data_in[0]<<8) + data_in[1]) % 31)) {
wbits = -((data_in[0] >> 4) + 8);
- workspace->inf_strm.next_in += 2;
- workspace->inf_strm.avail_in -= 2;
+ workspace->strm.next_in += 2;
+ workspace->strm.avail_in -= 2;
}
- if (Z_OK != zlib_inflateInit2(&workspace->inf_strm, wbits)) {
+ if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
printk(KERN_WARNING "BTRFS: inflateInit failed\n");
return -EIO;
}
- while (workspace->inf_strm.total_in < srclen) {
- ret = zlib_inflate(&workspace->inf_strm, Z_NO_FLUSH);
+ while (workspace->strm.total_in < srclen) {
+ ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
if (ret != Z_OK && ret != Z_STREAM_END)
break;
buf_start = total_out;
- total_out = workspace->inf_strm.total_out;
+ total_out = workspace->strm.total_out;
/* we didn't make progress in this inflate call, we're done */
if (buf_start == total_out)
@@ -276,10 +273,10 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
goto done;
}
- workspace->inf_strm.next_out = workspace->buf;
- workspace->inf_strm.avail_out = PAGE_CACHE_SIZE;
+ workspace->strm.next_out = workspace->buf;
+ workspace->strm.avail_out = PAGE_CACHE_SIZE;
- if (workspace->inf_strm.avail_in == 0) {
+ if (workspace->strm.avail_in == 0) {
unsigned long tmp;
kunmap(pages_in[page_in_index]);
page_in_index++;
@@ -288,9 +285,9 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
break;
}
data_in = kmap(pages_in[page_in_index]);
- workspace->inf_strm.next_in = data_in;
- tmp = srclen - workspace->inf_strm.total_in;
- workspace->inf_strm.avail_in = min(tmp,
+ workspace->strm.next_in = data_in;
+ tmp = srclen - workspace->strm.total_in;
+ workspace->strm.avail_in = min(tmp,
PAGE_CACHE_SIZE);
}
}
@@ -299,7 +296,7 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
else
ret = 0;
done:
- zlib_inflateEnd(&workspace->inf_strm);
+ zlib_inflateEnd(&workspace->strm);
if (data_in)
kunmap(pages_in[page_in_index]);
return ret;
@@ -317,13 +314,13 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
unsigned long total_out = 0;
char *kaddr;
- workspace->inf_strm.next_in = data_in;
- workspace->inf_strm.avail_in = srclen;
- workspace->inf_strm.total_in = 0;
+ workspace->strm.next_in = data_in;
+ workspace->strm.avail_in = srclen;
+ workspace->strm.total_in = 0;
- workspace->inf_strm.next_out = workspace->buf;
- workspace->inf_strm.avail_out = PAGE_CACHE_SIZE;
- workspace->inf_strm.total_out = 0;
+ workspace->strm.next_out = workspace->buf;
+ workspace->strm.avail_out = PAGE_CACHE_SIZE;
+ workspace->strm.total_out = 0;
/* If it's deflate, and it's got no preset dictionary, then
we can tell zlib to skip the adler32 check. */
if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
@@ -331,11 +328,11 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
!(((data_in[0]<<8) + data_in[1]) % 31)) {
wbits = -((data_in[0] >> 4) + 8);
- workspace->inf_strm.next_in += 2;
- workspace->inf_strm.avail_in -= 2;
+ workspace->strm.next_in += 2;
+ workspace->strm.avail_in -= 2;
}
- if (Z_OK != zlib_inflateInit2(&workspace->inf_strm, wbits)) {
+ if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
printk(KERN_WARNING "BTRFS: inflateInit failed\n");
return -EIO;
}
@@ -346,12 +343,12 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
unsigned long bytes;
unsigned long pg_offset = 0;
- ret = zlib_inflate(&workspace->inf_strm, Z_NO_FLUSH);
+ ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
if (ret != Z_OK && ret != Z_STREAM_END)
break;
buf_start = total_out;
- total_out = workspace->inf_strm.total_out;
+ total_out = workspace->strm.total_out;
if (total_out == buf_start) {
ret = -EIO;
@@ -377,8 +374,8 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
pg_offset += bytes;
bytes_left -= bytes;
next:
- workspace->inf_strm.next_out = workspace->buf;
- workspace->inf_strm.avail_out = PAGE_CACHE_SIZE;
+ workspace->strm.next_out = workspace->buf;
+ workspace->strm.avail_out = PAGE_CACHE_SIZE;
}
if (ret != Z_STREAM_END && bytes_left != 0)
@@ -386,7 +383,7 @@ next:
else
ret = 0;
- zlib_inflateEnd(&workspace->inf_strm);
+ zlib_inflateEnd(&workspace->strm);
return ret;
}
diff --git a/fs/buffer.c b/fs/buffer.c
index 44c14a87750e..d1f704806264 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2318,6 +2318,11 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
err = 0;
balance_dirty_pages_ratelimited(mapping);
+
+ if (unlikely(fatal_signal_pending(current))) {
+ err = -EINTR;
+ goto out;
+ }
}
/* page covers the boundary, find the boundary offset */
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index 25e745b8eb1b..616db0e77b44 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -880,7 +880,6 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
{
struct cachefiles_object *object;
struct cachefiles_cache *cache;
- mm_segment_t old_fs;
struct file *file;
struct path path;
loff_t pos, eof;
@@ -914,36 +913,27 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
if (IS_ERR(file)) {
ret = PTR_ERR(file);
} else {
- ret = -EIO;
- if (file->f_op->write) {
- pos = (loff_t) page->index << PAGE_SHIFT;
-
- /* we mustn't write more data than we have, so we have
- * to beware of a partial page at EOF */
- eof = object->fscache.store_limit_l;
- len = PAGE_SIZE;
- if (eof & ~PAGE_MASK) {
- ASSERTCMP(pos, <, eof);
- if (eof - pos < PAGE_SIZE) {
- _debug("cut short %llx to %llx",
- pos, eof);
- len = eof - pos;
- ASSERTCMP(pos + len, ==, eof);
- }
+ pos = (loff_t) page->index << PAGE_SHIFT;
+
+ /* we mustn't write more data than we have, so we have
+ * to beware of a partial page at EOF */
+ eof = object->fscache.store_limit_l;
+ len = PAGE_SIZE;
+ if (eof & ~PAGE_MASK) {
+ ASSERTCMP(pos, <, eof);
+ if (eof - pos < PAGE_SIZE) {
+ _debug("cut short %llx to %llx",
+ pos, eof);
+ len = eof - pos;
+ ASSERTCMP(pos + len, ==, eof);
}
-
- data = kmap(page);
- file_start_write(file);
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- ret = file->f_op->write(
- file, (const void __user *) data, len, &pos);
- set_fs(old_fs);
- kunmap(page);
- file_end_write(file);
- if (ret != len)
- ret = -EIO;
}
+
+ data = kmap(page);
+ ret = __kernel_write(file, data, len, &pos);
+ kunmap(page);
+ if (ret != len)
+ ret = -EIO;
fput(file);
}
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index c29d6ae68874..b6c59eaa4f64 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -1069,7 +1069,6 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
ceph_dentry_lru_touch(dentry);
} else {
ceph_dir_clear_complete(dir);
- d_drop(dentry);
}
iput(dir);
return valid;
diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c
index a3e932547617..f4cf200b3c76 100644
--- a/fs/cifs/cifs_spnego.c
+++ b/fs/cifs/cifs_spnego.c
@@ -62,7 +62,6 @@ cifs_spnego_key_destroy(struct key *key)
struct key_type cifs_spnego_key_type = {
.name = "cifs.spnego",
.instantiate = cifs_spnego_key_instantiate,
- .match = user_match,
.destroy = cifs_spnego_key_destroy,
.describe = user_describe,
};
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index 7ff866dbb89e..6d00c419cbae 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -84,7 +84,6 @@ static struct key_type cifs_idmap_key_type = {
.instantiate = cifs_idmap_key_instantiate,
.destroy = cifs_idmap_key_destroy,
.describe = user_describe,
- .match = user_match,
};
static char *
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 889b98455750..9d7996e8e793 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -813,7 +813,8 @@ static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
return generic_file_llseek(file, offset, whence);
}
-static int cifs_setlease(struct file *file, long arg, struct file_lock **lease)
+static int
+cifs_setlease(struct file *file, long arg, struct file_lock **lease, void **priv)
{
/*
* Note that this is called by vfs setlease with i_lock held to
@@ -829,7 +830,7 @@ static int cifs_setlease(struct file *file, long arg, struct file_lock **lease)
if (arg == F_UNLCK ||
((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
- return generic_setlease(file, arg, lease);
+ return generic_setlease(file, arg, lease, priv);
else if (tlink_tcon(cfile->tlink)->local_lease &&
!CIFS_CACHE_READ(CIFS_I(inode)))
/*
@@ -840,7 +841,7 @@ static int cifs_setlease(struct file *file, long arg, struct file_lock **lease)
* knows that the file won't be changed on the server by anyone
* else.
*/
- return generic_setlease(file, arg, lease);
+ return generic_setlease(file, arg, lease, priv);
else
return -EAGAIN;
}
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 6cbd9c688cfe..073640675a39 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -461,8 +461,8 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
xid = get_xid();
- cifs_dbg(FYI, "parent inode = 0x%p name is: %s and dentry = 0x%p\n",
- inode, direntry->d_name.name, direntry);
+ cifs_dbg(FYI, "parent inode = 0x%p name is: %pd and dentry = 0x%p\n",
+ inode, direntry, direntry);
tlink = cifs_sb_tlink(CIFS_SB(inode->i_sb));
if (IS_ERR(tlink)) {
@@ -540,8 +540,8 @@ int cifs_create(struct inode *inode, struct dentry *direntry, umode_t mode,
struct cifs_fid fid;
__u32 oplock;
- cifs_dbg(FYI, "cifs_create parent inode = 0x%p name is: %s and dentry = 0x%p\n",
- inode, direntry->d_name.name, direntry);
+ cifs_dbg(FYI, "cifs_create parent inode = 0x%p name is: %pd and dentry = 0x%p\n",
+ inode, direntry, direntry);
tlink = cifs_sb_tlink(CIFS_SB(inode->i_sb));
rc = PTR_ERR(tlink);
@@ -713,8 +713,8 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
xid = get_xid();
- cifs_dbg(FYI, "parent inode = 0x%p name is: %s and dentry = 0x%p\n",
- parent_dir_inode, direntry->d_name.name, direntry);
+ cifs_dbg(FYI, "parent inode = 0x%p name is: %pd and dentry = 0x%p\n",
+ parent_dir_inode, direntry, direntry);
/* check whether path exists */
@@ -833,7 +833,7 @@ cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
{
int rc = 0;
- cifs_dbg(FYI, "In cifs d_delete, name = %s\n", direntry->d_name.name);
+ cifs_dbg(FYI, "In cifs d_delete, name = %pd\n", direntry);
return rc;
} */
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 5f29354b072a..8f7b40fd8f3b 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1650,8 +1650,8 @@ cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
cifs_sb = CIFS_SB(dentry->d_sb);
- cifs_dbg(FYI, "write %zd bytes to offset %lld of %s\n",
- write_size, *offset, dentry->d_name.name);
+ cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
+ write_size, *offset, dentry);
tcon = tlink_tcon(open_file->tlink);
server = tcon->ses->server;
@@ -2273,8 +2273,8 @@ int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
xid = get_xid();
- cifs_dbg(FYI, "Sync file - name: %s datasync: 0x%x\n",
- file->f_path.dentry->d_name.name, datasync);
+ cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
+ file, datasync);
if (!CIFS_CACHE_READ(CIFS_I(inode))) {
rc = cifs_zap_mapping(inode);
@@ -2315,8 +2315,8 @@ int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
xid = get_xid();
- cifs_dbg(FYI, "Sync file - name: %s datasync: 0x%x\n",
- file->f_path.dentry->d_name.name, datasync);
+ cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
+ file, datasync);
tcon = tlink_tcon(smbfile->tlink);
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 7899a40465b3..8fd4ee8e07ff 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1419,8 +1419,8 @@ cifs_posix_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode,
d_instantiate(dentry, newinode);
#ifdef CONFIG_CIFS_DEBUG2
- cifs_dbg(FYI, "instantiated dentry %p %s to inode %p\n",
- dentry, dentry->d_name.name, newinode);
+ cifs_dbg(FYI, "instantiated dentry %p %pd to inode %p\n",
+ dentry, dentry, newinode);
if (newinode->i_nlink != 2)
cifs_dbg(FYI, "unexpected number of links %d\n",
@@ -2111,8 +2111,8 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
struct cifs_unix_set_info_args *args = NULL;
struct cifsFileInfo *open_file;
- cifs_dbg(FYI, "setattr_unix on file %s attrs->ia_valid=0x%x\n",
- direntry->d_name.name, attrs->ia_valid);
+ cifs_dbg(FYI, "setattr_unix on file %pd attrs->ia_valid=0x%x\n",
+ direntry, attrs->ia_valid);
xid = get_xid();
@@ -2254,8 +2254,8 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
xid = get_xid();
- cifs_dbg(FYI, "setattr on file %s attrs->iavalid 0x%x\n",
- direntry->d_name.name, attrs->ia_valid);
+ cifs_dbg(FYI, "setattr on file %pd attrs->iavalid 0x%x\n",
+ direntry, attrs->ia_valid);
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
attrs->ia_valid |= ATTR_FORCE;
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index b334a89d6a66..d2141f101382 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -87,8 +87,6 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
return;
if (dentry) {
- int err;
-
inode = dentry->d_inode;
if (inode) {
/*
@@ -105,10 +103,8 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
goto out;
}
}
- err = d_invalidate(dentry);
+ d_invalidate(dentry);
dput(dentry);
- if (err)
- return;
}
/*
diff --git a/fs/compat.c b/fs/compat.c
index 66d3d3c6b4b2..b13df99f3534 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -794,25 +794,21 @@ COMPAT_SYSCALL_DEFINE5(mount, const char __user *, dev_name,
char *kernel_type;
unsigned long data_page;
char *kernel_dev;
- struct filename *dir;
int retval;
- retval = copy_mount_string(type, &kernel_type);
- if (retval < 0)
+ kernel_type = copy_mount_string(type);
+ retval = PTR_ERR(kernel_type);
+ if (IS_ERR(kernel_type))
goto out;
- dir = getname(dir_name);
- retval = PTR_ERR(dir);
- if (IS_ERR(dir))
+ kernel_dev = copy_mount_string(dev_name);
+ retval = PTR_ERR(kernel_dev);
+ if (IS_ERR(kernel_dev))
goto out1;
- retval = copy_mount_string(dev_name, &kernel_dev);
- if (retval < 0)
- goto out2;
-
retval = copy_mount_options(data, &data_page);
if (retval < 0)
- goto out3;
+ goto out2;
retval = -EINVAL;
@@ -821,19 +817,17 @@ COMPAT_SYSCALL_DEFINE5(mount, const char __user *, dev_name,
do_ncp_super_data_conv((void *)data_page);
} else if (!strcmp(kernel_type, NFS4_NAME)) {
if (do_nfs4_super_data_conv((void *) data_page))
- goto out4;
+ goto out3;
}
}
- retval = do_mount(kernel_dev, dir->name, kernel_type,
+ retval = do_mount(kernel_dev, dir_name, kernel_type,
flags, (void*)data_page);
- out4:
- free_page(data_page);
out3:
- kfree(kernel_dev);
+ free_page(data_page);
out2:
- putname(dir);
+ kfree(kernel_dev);
out1:
kfree(kernel_type);
out:
diff --git a/fs/dcache.c b/fs/dcache.c
index cb25a1a5e307..d5a23fd0da90 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -235,18 +235,49 @@ static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *c
return dentry_string_cmp(cs, ct, tcount);
}
+struct external_name {
+ union {
+ atomic_t count;
+ struct rcu_head head;
+ } u;
+ unsigned char name[];
+};
+
+static inline struct external_name *external_name(struct dentry *dentry)
+{
+ return container_of(dentry->d_name.name, struct external_name, name[0]);
+}
+
static void __d_free(struct rcu_head *head)
{
struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
WARN_ON(!hlist_unhashed(&dentry->d_alias));
- if (dname_external(dentry))
- kfree(dentry->d_name.name);
kmem_cache_free(dentry_cache, dentry);
}
+static void __d_free_external(struct rcu_head *head)
+{
+ struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
+ WARN_ON(!hlist_unhashed(&dentry->d_alias));
+ kfree(external_name(dentry));
+ kmem_cache_free(dentry_cache, dentry);
+}
+
+static inline int dname_external(const struct dentry *dentry)
+{
+ return dentry->d_name.name != dentry->d_iname;
+}
+
static void dentry_free(struct dentry *dentry)
{
+ if (unlikely(dname_external(dentry))) {
+ struct external_name *p = external_name(dentry);
+ if (likely(atomic_dec_and_test(&p->u.count))) {
+ call_rcu(&dentry->d_u.d_rcu, __d_free_external);
+ return;
+ }
+ }
/* if dentry was never visible to RCU, immediate free is OK */
if (!(dentry->d_flags & DCACHE_RCUACCESS))
__d_free(&dentry->d_u.d_rcu);
@@ -456,7 +487,7 @@ static void __dentry_kill(struct dentry *dentry)
* inform the fs via d_prune that this dentry is about to be
* unhashed and destroyed.
*/
- if ((dentry->d_flags & DCACHE_OP_PRUNE) && !d_unhashed(dentry))
+ if (dentry->d_flags & DCACHE_OP_PRUNE)
dentry->d_op->d_prune(dentry);
if (dentry->d_flags & DCACHE_LRU_LIST) {
@@ -619,62 +650,6 @@ kill_it:
}
EXPORT_SYMBOL(dput);
-/**
- * d_invalidate - invalidate a dentry
- * @dentry: dentry to invalidate
- *
- * Try to invalidate the dentry if it turns out to be
- * possible. If there are other dentries that can be
- * reached through this one we can't delete it and we
- * return -EBUSY. On success we return 0.
- *
- * no dcache lock.
- */
-
-int d_invalidate(struct dentry * dentry)
-{
- /*
- * If it's already been dropped, return OK.
- */
- spin_lock(&dentry->d_lock);
- if (d_unhashed(dentry)) {
- spin_unlock(&dentry->d_lock);
- return 0;
- }
- /*
- * Check whether to do a partial shrink_dcache
- * to get rid of unused child entries.
- */
- if (!list_empty(&dentry->d_subdirs)) {
- spin_unlock(&dentry->d_lock);
- shrink_dcache_parent(dentry);
- spin_lock(&dentry->d_lock);
- }
-
- /*
- * Somebody else still using it?
- *
- * If it's a directory, we can't drop it
- * for fear of somebody re-populating it
- * with children (even though dropping it
- * would make it unreachable from the root,
- * we might still populate it if it was a
- * working directory or similar).
- * We also need to leave mountpoints alone,
- * directory or not.
- */
- if (dentry->d_lockref.count > 1 && dentry->d_inode) {
- if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) {
- spin_unlock(&dentry->d_lock);
- return -EBUSY;
- }
- }
-
- __d_drop(dentry);
- spin_unlock(&dentry->d_lock);
- return 0;
-}
-EXPORT_SYMBOL(d_invalidate);
/* This must be called with d_lock held */
static inline void __dget_dlock(struct dentry *dentry)
@@ -735,7 +710,8 @@ EXPORT_SYMBOL(dget_parent);
* acquire the reference to alias and return it. Otherwise return NULL.
* Notice that if inode is a directory there can be only one alias and
* it can be unhashed only if it has no children, or if it is the root
- * of a filesystem.
+ * of a filesystem, or if the directory was renamed and d_revalidate
+ * was the first vfs operation to notice.
*
* If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
* any other hashed alias over that one.
@@ -799,20 +775,13 @@ restart:
hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
spin_lock(&dentry->d_lock);
if (!dentry->d_lockref.count) {
- /*
- * inform the fs via d_prune that this dentry
- * is about to be unhashed and destroyed.
- */
- if ((dentry->d_flags & DCACHE_OP_PRUNE) &&
- !d_unhashed(dentry))
- dentry->d_op->d_prune(dentry);
-
- __dget_dlock(dentry);
- __d_drop(dentry);
- spin_unlock(&dentry->d_lock);
- spin_unlock(&inode->i_lock);
- dput(dentry);
- goto restart;
+ struct dentry *parent = lock_parent(dentry);
+ if (likely(!dentry->d_lockref.count)) {
+ __dentry_kill(dentry);
+ goto restart;
+ }
+ if (parent)
+ spin_unlock(&parent->d_lock);
}
spin_unlock(&dentry->d_lock);
}
@@ -1193,7 +1162,7 @@ EXPORT_SYMBOL(have_submounts);
* reachable (e.g. NFS can unhash a directory dentry and then the complete
* subtree can become unreachable).
*
- * Only one of check_submounts_and_drop() and d_set_mounted() must succeed. For
+ * Only one of d_invalidate() and d_set_mounted() must succeed. For
* this reason take rename_lock and d_lock on dentry and ancestors.
*/
int d_set_mounted(struct dentry *dentry)
@@ -1202,7 +1171,7 @@ int d_set_mounted(struct dentry *dentry)
int ret = -ENOENT;
write_seqlock(&rename_lock);
for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
- /* Need exclusion wrt. check_submounts_and_drop() */
+ /* Need exclusion wrt. d_invalidate() */
spin_lock(&p->d_lock);
if (unlikely(d_unhashed(p))) {
spin_unlock(&p->d_lock);
@@ -1346,70 +1315,84 @@ void shrink_dcache_for_umount(struct super_block *sb)
}
}
-static enum d_walk_ret check_and_collect(void *_data, struct dentry *dentry)
+struct detach_data {
+ struct select_data select;
+ struct dentry *mountpoint;
+};
+static enum d_walk_ret detach_and_collect(void *_data, struct dentry *dentry)
{
- struct select_data *data = _data;
+ struct detach_data *data = _data;
if (d_mountpoint(dentry)) {
- data->found = -EBUSY;
+ __dget_dlock(dentry);
+ data->mountpoint = dentry;
return D_WALK_QUIT;
}
- return select_collect(_data, dentry);
+ return select_collect(&data->select, dentry);
}
static void check_and_drop(void *_data)
{
- struct select_data *data = _data;
+ struct detach_data *data = _data;
- if (d_mountpoint(data->start))
- data->found = -EBUSY;
- if (!data->found)
- __d_drop(data->start);
+ if (!data->mountpoint && !data->select.found)
+ __d_drop(data->select.start);
}
/**
- * check_submounts_and_drop - prune dcache, check for submounts and drop
+ * d_invalidate - detach submounts, prune dcache, and drop
+ * @dentry: dentry to invalidate (aka detach, prune and drop)
*
- * All done as a single atomic operation relative to has_unlinked_ancestor().
- * Returns 0 if successfully unhashed @parent. If there were submounts then
- * return -EBUSY.
+ * no dcache lock.
*
- * @dentry: dentry to prune and drop
+ * The final d_drop is done as an atomic operation relative to
+ * rename_lock ensuring there are no races with d_set_mounted. This
+ * ensures there are no unhashed dentries on the path to a mountpoint.
*/
-int check_submounts_and_drop(struct dentry *dentry)
+void d_invalidate(struct dentry *dentry)
{
- int ret = 0;
+ /*
+ * If it's already been dropped, return OK.
+ */
+ spin_lock(&dentry->d_lock);
+ if (d_unhashed(dentry)) {
+ spin_unlock(&dentry->d_lock);
+ return;
+ }
+ spin_unlock(&dentry->d_lock);
/* Negative dentries can be dropped without further checks */
if (!dentry->d_inode) {
d_drop(dentry);
- goto out;
+ return;
}
for (;;) {
- struct select_data data;
+ struct detach_data data;
- INIT_LIST_HEAD(&data.dispose);
- data.start = dentry;
- data.found = 0;
+ data.mountpoint = NULL;
+ INIT_LIST_HEAD(&data.select.dispose);
+ data.select.start = dentry;
+ data.select.found = 0;
+
+ d_walk(dentry, &data, detach_and_collect, check_and_drop);
- d_walk(dentry, &data, check_and_collect, check_and_drop);
- ret = data.found;
+ if (data.select.found)
+ shrink_dentry_list(&data.select.dispose);
- if (!list_empty(&data.dispose))
- shrink_dentry_list(&data.dispose);
+ if (data.mountpoint) {
+ detach_mounts(data.mountpoint);
+ dput(data.mountpoint);
+ }
- if (ret <= 0)
+ if (!data.mountpoint && !data.select.found)
break;
cond_resched();
}
-
-out:
- return ret;
}
-EXPORT_SYMBOL(check_submounts_and_drop);
+EXPORT_SYMBOL(d_invalidate);
/**
* __d_alloc - allocate a dcache entry
@@ -1438,11 +1421,14 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
*/
dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
if (name->len > DNAME_INLINE_LEN-1) {
- dname = kmalloc(name->len + 1, GFP_KERNEL);
- if (!dname) {
+ size_t size = offsetof(struct external_name, name[1]);
+ struct external_name *p = kmalloc(size + name->len, GFP_KERNEL);
+ if (!p) {
kmem_cache_free(dentry_cache, dentry);
return NULL;
}
+ atomic_set(&p->u.count, 1);
+ dname = p->name;
} else {
dname = dentry->d_iname;
}
@@ -2112,10 +2098,10 @@ struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
struct dentry *dentry;
unsigned seq;
- do {
- seq = read_seqbegin(&rename_lock);
- dentry = __d_lookup(parent, name);
- if (dentry)
+ do {
+ seq = read_seqbegin(&rename_lock);
+ dentry = __d_lookup(parent, name);
+ if (dentry)
break;
} while (read_seqretry(&rename_lock, seq));
return dentry;
@@ -2372,11 +2358,10 @@ void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
}
EXPORT_SYMBOL(dentry_update_name_case);
-static void switch_names(struct dentry *dentry, struct dentry *target,
- bool exchange)
+static void swap_names(struct dentry *dentry, struct dentry *target)
{
- if (dname_external(target)) {
- if (dname_external(dentry)) {
+ if (unlikely(dname_external(target))) {
+ if (unlikely(dname_external(dentry))) {
/*
* Both external: swap the pointers
*/
@@ -2392,7 +2377,7 @@ static void switch_names(struct dentry *dentry, struct dentry *target,
target->d_name.name = target->d_iname;
}
} else {
- if (dname_external(dentry)) {
+ if (unlikely(dname_external(dentry))) {
/*
* dentry:external, target:internal. Give dentry's
* storage to target and make dentry internal
@@ -2407,12 +2392,6 @@ static void switch_names(struct dentry *dentry, struct dentry *target,
*/
unsigned int i;
BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
- if (!exchange) {
- memcpy(dentry->d_iname, target->d_name.name,
- target->d_name.len + 1);
- dentry->d_name.hash_len = target->d_name.hash_len;
- return;
- }
for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
swap(((long *) &dentry->d_iname)[i],
((long *) &target->d_iname)[i]);
@@ -2422,6 +2401,24 @@ static void switch_names(struct dentry *dentry, struct dentry *target,
swap(dentry->d_name.hash_len, target->d_name.hash_len);
}
+static void copy_name(struct dentry *dentry, struct dentry *target)
+{
+ struct external_name *old_name = NULL;
+ if (unlikely(dname_external(dentry)))
+ old_name = external_name(dentry);
+ if (unlikely(dname_external(target))) {
+ atomic_inc(&external_name(target)->u.count);
+ dentry->d_name = target->d_name;
+ } else {
+ memcpy(dentry->d_iname, target->d_name.name,
+ target->d_name.len + 1);
+ dentry->d_name.name = dentry->d_iname;
+ dentry->d_name.hash_len = target->d_name.hash_len;
+ }
+ if (old_name && likely(atomic_dec_and_test(&old_name->u.count)))
+ kfree_rcu(old_name, u.head);
+}
+
static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
{
/*
@@ -2518,7 +2515,10 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
}
/* Switch the names.. */
- switch_names(dentry, target, exchange);
+ if (exchange)
+ swap_names(dentry, target);
+ else
+ copy_name(dentry, target);
/* ... and switch them in the tree */
if (IS_ROOT(dentry)) {
@@ -2625,10 +2625,8 @@ static struct dentry *__d_unalias(struct inode *inode,
goto out_err;
m2 = &alias->d_parent->d_inode->i_mutex;
out_unalias:
- if (likely(!d_mountpoint(alias))) {
- __d_move(alias, dentry, false);
- ret = alias;
- }
+ __d_move(alias, dentry, false);
+ ret = alias;
out_err:
spin_unlock(&inode->i_lock);
if (m2)
@@ -2810,6 +2808,9 @@ static int prepend(char **buffer, int *buflen, const char *str, int namelen)
* the beginning of the name. The sequence number check at the caller will
* retry it again when a d_move() does happen. So any garbage in the buffer
* due to mismatched pointer and length will be discarded.
+ *
+ * Data dependency barrier is needed to make sure that we see that terminating
+ * NUL. Alpha strikes again, film at 11...
*/
static int prepend_name(char **buffer, int *buflen, struct qstr *name)
{
@@ -2817,6 +2818,8 @@ static int prepend_name(char **buffer, int *buflen, struct qstr *name)
u32 dlen = ACCESS_ONCE(name->len);
char *p;
+ smp_read_barrier_depends();
+
*buflen -= dlen + 1;
if (*buflen < 0)
return -ENAMETOOLONG;
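
The dcache rework above folds the old check_submounts_and_drop() into a void d_invalidate() that unhashes the dentry, prunes unused children and detaches covering mounts instead of failing with -EBUSY. For a filesystem this means a ->d_revalidate instance simply reports the dentry as stale and leaves the cleanup to the VFS caller, which is why the fuse, gfs2 and kernfs hunks below drop their check_submounts_and_drop() calls. A minimal sketch under that assumption; example_dentry_is_stale() is a hypothetical predicate:

	static int example_d_revalidate(struct dentry *dentry, unsigned int flags)
	{
		if (flags & LOOKUP_RCU)
			return -ECHILD;
		/* example_dentry_is_stale() stands in for a real staleness check */
		if (example_dentry_is_stale(dentry))
			return 0;	/* VFS handles invalidation of the dentry */
		return 1;
	}
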
diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
index f704458ea5f5..e0ab3a93eeff 100644
--- a/fs/dlm/plock.c
+++ b/fs/dlm/plock.c
@@ -30,7 +30,7 @@ struct plock_op {
struct plock_xop {
struct plock_op xop;
- void *callback;
+ int (*callback)(struct file_lock *fl, int result);
void *fl;
void *file;
struct file_lock flc;
@@ -190,7 +190,7 @@ static int dlm_plock_callback(struct plock_op *op)
struct file *file;
struct file_lock *fl;
struct file_lock *flc;
- int (*notify)(void *, void *, int) = NULL;
+ int (*notify)(struct file_lock *fl, int result) = NULL;
struct plock_xop *xop = (struct plock_xop *)op;
int rv = 0;
@@ -209,7 +209,7 @@ static int dlm_plock_callback(struct plock_op *op)
notify = xop->callback;
if (op->info.rv) {
- notify(fl, NULL, op->info.rv);
+ notify(fl, op->info.rv);
goto out;
}
@@ -228,7 +228,7 @@ static int dlm_plock_callback(struct plock_op *op)
(unsigned long long)op->info.number, file, fl);
}
- rv = notify(fl, NULL, 0);
+ rv = notify(fl, 0);
if (rv) {
/* XXX: We need to cancel the fs lock here: */
log_print("dlm_plock_callback: lock granted after lock request "
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index db0fad3269c0..f5bce9096555 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -229,8 +229,8 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
if (rc) {
printk(KERN_ERR "%s: Error attempting to initialize "
"the lower file for the dentry with name "
- "[%s]; rc = [%d]\n", __func__,
- ecryptfs_dentry->d_name.name, rc);
+ "[%pd]; rc = [%d]\n", __func__,
+ ecryptfs_dentry, rc);
goto out_free;
}
if ((ecryptfs_inode_to_private(inode)->lower_file->f_flags & O_ACCMODE)
@@ -327,7 +327,7 @@ ecryptfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct file *lower_file = ecryptfs_file_to_lower(file);
long rc = -ENOIOCTLCMD;
- if (lower_file->f_op && lower_file->f_op->compat_ioctl)
+ if (lower_file->f_op->compat_ioctl)
rc = lower_file->f_op->compat_ioctl(lower_file, cmd, arg);
return rc;
}
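
The format-string conversions in this file (and in the ecryptfs hunks that follow) use the %pd printk extension, which takes a struct dentry pointer and prints its name, so the call sites pass the dentry itself rather than dereferencing d_name.name by hand. A one-line illustration, assuming a valid dentry and an int rc:

	printk(KERN_ERR "failed to open [%pd]; rc = [%d]\n", dentry, rc);
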
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index d4a9431ec73c..1686dc2da9fd 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -53,9 +53,7 @@ static void unlock_dir(struct dentry *dir)
static int ecryptfs_inode_test(struct inode *inode, void *lower_inode)
{
- if (ecryptfs_inode_to_lower(inode) == (struct inode *)lower_inode)
- return 1;
- return 0;
+ return ecryptfs_inode_to_lower(inode) == lower_inode;
}
static int ecryptfs_inode_set(struct inode *inode, void *opaque)
@@ -192,12 +190,6 @@ ecryptfs_do_create(struct inode *directory_inode,
lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry);
lower_dir_dentry = lock_parent(lower_dentry);
- if (IS_ERR(lower_dir_dentry)) {
- ecryptfs_printk(KERN_ERR, "Error locking directory of "
- "dentry\n");
- inode = ERR_CAST(lower_dir_dentry);
- goto out;
- }
rc = vfs_create(lower_dir_dentry->d_inode, lower_dentry, mode, true);
if (rc) {
printk(KERN_ERR "%s: Failure to create dentry in lower fs; "
@@ -215,7 +207,6 @@ ecryptfs_do_create(struct inode *directory_inode,
fsstack_copy_inode_size(directory_inode, lower_dir_dentry->d_inode);
out_lock:
unlock_dir(lower_dir_dentry);
-out:
return inode;
}
@@ -250,8 +241,8 @@ int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry,
if (rc) {
printk(KERN_ERR "%s: Error attempting to initialize "
"the lower file for the dentry with name "
- "[%s]; rc = [%d]\n", __func__,
- ecryptfs_dentry->d_name.name, rc);
+ "[%pd]; rc = [%d]\n", __func__,
+ ecryptfs_dentry, rc);
goto out;
}
rc = ecryptfs_write_metadata(ecryptfs_dentry, ecryptfs_inode);
@@ -313,8 +304,8 @@ static int ecryptfs_i_size_read(struct dentry *dentry, struct inode *inode)
if (rc) {
printk(KERN_ERR "%s: Error attempting to initialize "
"the lower file for the dentry with name "
- "[%s]; rc = [%d]\n", __func__,
- dentry->d_name.name, rc);
+ "[%pd]; rc = [%d]\n", __func__,
+ dentry, rc);
return rc;
}
@@ -418,8 +409,8 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
if (IS_ERR(lower_dentry)) {
rc = PTR_ERR(lower_dentry);
ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned "
- "[%d] on lower_dentry = [%s]\n", __func__, rc,
- ecryptfs_dentry->d_name.name);
+ "[%d] on lower_dentry = [%pd]\n", __func__, rc,
+ ecryptfs_dentry);
goto out;
}
if (lower_dentry->d_inode)
@@ -1039,7 +1030,7 @@ ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value,
}
rc = vfs_setxattr(lower_dentry, name, value, size, flags);
- if (!rc)
+ if (!rc && dentry->d_inode)
fsstack_copy_attr_all(dentry->d_inode, lower_dentry->d_inode);
out:
return rc;
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 4725a07f003c..635e8e16a5b7 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -26,7 +26,6 @@
*/
#include <linux/string.h>
-#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/key.h>
#include <linux/random.h>
@@ -1846,7 +1845,6 @@ int ecryptfs_parse_packet_set(struct ecryptfs_crypt_stat *crypt_stat,
"(Tag 11 not allowed by itself)\n");
rc = -EIO;
goto out_wipe_list;
- break;
default:
ecryptfs_printk(KERN_DEBUG, "No packet at offset [%zd] "
"of the file header; hex value of "
diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c
index e57380e5f6bd..286f10b0363b 100644
--- a/fs/ecryptfs/messaging.c
+++ b/fs/ecryptfs/messaging.c
@@ -434,8 +434,7 @@ void ecryptfs_release_messaging(void)
mutex_lock(&ecryptfs_msg_ctx_lists_mux);
for (i = 0; i < ecryptfs_message_buf_len; i++) {
mutex_lock(&ecryptfs_msg_ctx_arr[i].mux);
- if (ecryptfs_msg_ctx_arr[i].msg)
- kfree(ecryptfs_msg_ctx_arr[i].msg);
+ kfree(ecryptfs_msg_ctx_arr[i].msg);
mutex_unlock(&ecryptfs_msg_ctx_arr[i].mux);
}
kfree(ecryptfs_msg_ctx_arr);
diff --git a/fs/exec.c b/fs/exec.c
index a2b42a98c743..7302b75a9820 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1372,18 +1372,23 @@ int search_binary_handler(struct linux_binprm *bprm)
read_unlock(&binfmt_lock);
bprm->recursion_depth++;
retval = fmt->load_binary(bprm);
+ read_lock(&binfmt_lock);
+ put_binfmt(fmt);
bprm->recursion_depth--;
- if (retval >= 0 || retval != -ENOEXEC ||
- bprm->mm == NULL || bprm->file == NULL) {
- put_binfmt(fmt);
+ if (retval < 0 && !bprm->mm) {
+ /* we got to flush_old_exec() and failed after it */
+ read_unlock(&binfmt_lock);
+ force_sigsegv(SIGSEGV, current);
+ return retval;
+ }
+ if (retval != -ENOEXEC || !bprm->file) {
+ read_unlock(&binfmt_lock);
return retval;
}
- read_lock(&binfmt_lock);
- put_binfmt(fmt);
}
read_unlock(&binfmt_lock);
- if (need_retry && retval == -ENOEXEC) {
+ if (need_retry) {
if (printable(bprm->buf[0]) && printable(bprm->buf[1]) &&
printable(bprm->buf[2]) && printable(bprm->buf[3]))
return retval;
diff --git a/fs/ext3/ext3.h b/fs/ext3/ext3.h
index e85ff15a060e..fc3cdcf24aed 100644
--- a/fs/ext3/ext3.h
+++ b/fs/ext3/ext3.h
@@ -237,6 +237,8 @@ struct ext3_new_group_data {
#define EXT3_IOC32_GETVERSION_OLD FS_IOC32_GETVERSION
#define EXT3_IOC32_SETVERSION_OLD FS_IOC32_SETVERSION
+/* Number of supported quota types */
+#define EXT3_MAXQUOTAS 2
/*
* Mount options
@@ -248,7 +250,7 @@ struct ext3_mount_options {
unsigned long s_commit_interval;
#ifdef CONFIG_QUOTA
int s_jquota_fmt;
- char *s_qf_names[MAXQUOTAS];
+ char *s_qf_names[EXT3_MAXQUOTAS];
#endif
};
@@ -669,7 +671,7 @@ struct ext3_sb_info {
unsigned long s_commit_interval;
struct block_device *journal_bdev;
#ifdef CONFIG_QUOTA
- char *s_qf_names[MAXQUOTAS]; /* Names of quota files with journalled quota */
+ char *s_qf_names[EXT3_MAXQUOTAS]; /* Names of quota files with journalled quota */
int s_jquota_fmt; /* Format of quota to use */
#endif
};
@@ -1183,9 +1185,9 @@ extern const struct inode_operations ext3_fast_symlink_inode_operations;
#define EXT3_QUOTA_INIT_BLOCKS(sb) 0
#define EXT3_QUOTA_DEL_BLOCKS(sb) 0
#endif
-#define EXT3_MAXQUOTAS_TRANS_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_TRANS_BLOCKS(sb))
-#define EXT3_MAXQUOTAS_INIT_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_INIT_BLOCKS(sb))
-#define EXT3_MAXQUOTAS_DEL_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_DEL_BLOCKS(sb))
+#define EXT3_MAXQUOTAS_TRANS_BLOCKS(sb) (EXT3_MAXQUOTAS*EXT3_QUOTA_TRANS_BLOCKS(sb))
+#define EXT3_MAXQUOTAS_INIT_BLOCKS(sb) (EXT3_MAXQUOTAS*EXT3_QUOTA_INIT_BLOCKS(sb))
+#define EXT3_MAXQUOTAS_DEL_BLOCKS(sb) (EXT3_MAXQUOTAS*EXT3_QUOTA_DEL_BLOCKS(sb))
int
ext3_mark_iloc_dirty(handle_t *handle,
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index bb0fdacad058..7015db0bafd1 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -441,7 +441,7 @@ static void ext3_put_super (struct super_block * sb)
percpu_counter_destroy(&sbi->s_dirs_counter);
brelse(sbi->s_sbh);
#ifdef CONFIG_QUOTA
- for (i = 0; i < MAXQUOTAS; i++)
+ for (i = 0; i < EXT3_MAXQUOTAS; i++)
kfree(sbi->s_qf_names[i]);
#endif
@@ -1555,7 +1555,7 @@ static void ext3_orphan_cleanup (struct super_block * sb,
/* Needed for iput() to work correctly and not trash data */
sb->s_flags |= MS_ACTIVE;
/* Turn on quotas so that they are updated correctly */
- for (i = 0; i < MAXQUOTAS; i++) {
+ for (i = 0; i < EXT3_MAXQUOTAS; i++) {
if (EXT3_SB(sb)->s_qf_names[i]) {
int ret = ext3_quota_on_mount(sb, i);
if (ret < 0)
@@ -1606,7 +1606,7 @@ static void ext3_orphan_cleanup (struct super_block * sb,
PLURAL(nr_truncates));
#ifdef CONFIG_QUOTA
/* Turn quotas off */
- for (i = 0; i < MAXQUOTAS; i++) {
+ for (i = 0; i < EXT3_MAXQUOTAS; i++) {
if (sb_dqopt(sb)->files[i])
dquot_quota_off(sb, i);
}
@@ -2139,7 +2139,7 @@ failed_mount2:
kfree(sbi->s_group_desc);
failed_mount:
#ifdef CONFIG_QUOTA
- for (i = 0; i < MAXQUOTAS; i++)
+ for (i = 0; i < EXT3_MAXQUOTAS; i++)
kfree(sbi->s_qf_names[i]);
#endif
ext3_blkdev_remove(sbi);
@@ -2659,7 +2659,7 @@ static int ext3_remount (struct super_block * sb, int * flags, char * data)
old_opts.s_commit_interval = sbi->s_commit_interval;
#ifdef CONFIG_QUOTA
old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
- for (i = 0; i < MAXQUOTAS; i++)
+ for (i = 0; i < EXT3_MAXQUOTAS; i++)
if (sbi->s_qf_names[i]) {
old_opts.s_qf_names[i] = kstrdup(sbi->s_qf_names[i],
GFP_KERNEL);
@@ -2763,7 +2763,7 @@ static int ext3_remount (struct super_block * sb, int * flags, char * data)
}
#ifdef CONFIG_QUOTA
/* Release old quota file names */
- for (i = 0; i < MAXQUOTAS; i++)
+ for (i = 0; i < EXT3_MAXQUOTAS; i++)
kfree(old_opts.s_qf_names[i]);
#endif
if (enable_quota)
@@ -2777,7 +2777,7 @@ restore_opts:
sbi->s_commit_interval = old_opts.s_commit_interval;
#ifdef CONFIG_QUOTA
sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
- for (i = 0; i < MAXQUOTAS; i++) {
+ for (i = 0; i < EXT3_MAXQUOTAS; i++) {
kfree(sbi->s_qf_names[i]);
sbi->s_qf_names[i] = old_opts.s_qf_names[i];
}
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 22d1c3df61ac..99d440a4a6ba 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -98,26 +98,19 @@ static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
write_unlock_irq(&filp->f_owner.lock);
}
-int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
+void __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
int force)
{
- int err;
-
- err = security_file_set_fowner(filp);
- if (err)
- return err;
-
+ security_file_set_fowner(filp);
f_modown(filp, pid, type, force);
- return 0;
}
EXPORT_SYMBOL(__f_setown);
-int f_setown(struct file *filp, unsigned long arg, int force)
+void f_setown(struct file *filp, unsigned long arg, int force)
{
enum pid_type type;
struct pid *pid;
int who = arg;
- int result;
type = PIDTYPE_PID;
if (who < 0) {
type = PIDTYPE_PGID;
@@ -125,9 +118,8 @@ int f_setown(struct file *filp, unsigned long arg, int force)
}
rcu_read_lock();
pid = find_vpid(who);
- result = __f_setown(filp, pid, type, force);
+ __f_setown(filp, pid, type, force);
rcu_read_unlock();
- return result;
}
EXPORT_SYMBOL(f_setown);
@@ -181,7 +173,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
if (owner.pid && !pid)
ret = -ESRCH;
else
- ret = __f_setown(filp, pid, type, 1);
+ __f_setown(filp, pid, type, 1);
rcu_read_unlock();
return ret;
@@ -302,7 +294,8 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
force_successful_syscall_return();
break;
case F_SETOWN:
- err = f_setown(filp, arg, 1);
+ f_setown(filp, arg, 1);
+ err = 0;
break;
case F_GETOWN_EX:
err = f_getown_ex(filp, arg);
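
Since security_file_set_fowner() can no longer fail, __f_setown() and f_setown() become void and their callers lose the error path, as the F_SETOWN case above shows. A hedged sketch of the same pattern in an ioctl handler that forwards FIOSETOWN itself; example_ioctl() is illustrative only:

	static long example_ioctl(struct file *filp, unsigned int cmd,
				  unsigned long arg)
	{
		switch (cmd) {
		case FIOSETOWN:
			f_setown(filp, arg, 1);	/* cannot fail any more */
			return 0;
		default:
			return -ENOIOCTLCMD;
		}
	}
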
diff --git a/fs/file.c b/fs/file.c
index 66923fe3176e..ab3eb6a88239 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -367,7 +367,7 @@ static struct fdtable *close_files(struct files_struct * files)
struct file * file = xchg(&fdt->fd[i], NULL);
if (file) {
filp_close(file, files);
- cond_resched();
+ cond_resched_rcu_qs();
}
}
i++;
@@ -750,6 +750,7 @@ bool get_close_on_exec(unsigned int fd)
static int do_dup2(struct files_struct *files,
struct file *file, unsigned fd, unsigned flags)
+__releases(&files->file_lock)
{
struct file *tofree;
struct fdtable *fdt;
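
Two annotations here: close_files() switches to cond_resched_rcu_qs(), which lets RCU note a quiescent state while yielding in a long loop, and do_dup2() gains a sparse __releases() annotation documenting that it returns with files->file_lock dropped. A hedged illustration of the annotation idiom on a made-up helper:

	static void example_unlock_and_trace(spinlock_t *lock)
		__releases(lock)
	{
		pr_debug("dropping lock\n");
		spin_unlock(lock);
	}
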
diff --git a/fs/file_table.c b/fs/file_table.c
index 0bab12b20460..3f85411b03ce 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -150,18 +150,10 @@ over:
/**
* alloc_file - allocate and initialize a 'struct file'
- * @mnt: the vfsmount on which the file will reside
- * @dentry: the dentry representing the new file
+ *
+ * @path: the (dentry, vfsmount) pair for the new file
* @mode: the mode with which the new file will be opened
* @fop: the 'struct file_operations' for the new file
- *
- * Use this instead of get_empty_filp() to get a new
- * 'struct file'. Do so because of the same initialization
- * pitfalls reasons listed for init_file(). This is a
- * preferred interface to using init_file().
- *
- * If all the callers of init_file() are eliminated, its
- * code should be moved into this function.
*/
struct file *alloc_file(struct path *path, fmode_t mode,
const struct file_operations *fop)
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index de1d84af9f7c..dbab798f5caf 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -274,9 +274,6 @@ out:
invalid:
ret = 0;
-
- if (!(flags & LOOKUP_RCU) && check_submounts_and_drop(entry) != 0)
- ret = 1;
goto out;
}
@@ -1289,9 +1286,7 @@ static int fuse_direntplus_link(struct file *file,
d_drop(dentry);
} else if (get_node_id(inode) != o->nodeid ||
((o->attr.mode ^ inode->i_mode) & S_IFMT)) {
- err = d_invalidate(dentry);
- if (err)
- goto out;
+ d_invalidate(dentry);
} else if (is_bad_inode(inode)) {
err = -EIO;
goto out;
diff --git a/fs/gfs2/dentry.c b/fs/gfs2/dentry.c
index d3a5d4e29ba5..589f4ea9381c 100644
--- a/fs/gfs2/dentry.c
+++ b/fs/gfs2/dentry.c
@@ -93,9 +93,6 @@ invalid_gunlock:
if (!had_lock)
gfs2_glock_dq_uninit(&d_gh);
invalid:
- if (check_submounts_and_drop(dentry) != 0)
- goto valid;
-
dput(parent);
return 0;
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 1a349f9a9685..5d4261ff5d23 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -2100,8 +2100,13 @@ int gfs2_diradd_alloc_required(struct inode *inode, const struct qstr *name,
}
if (IS_ERR(dent))
return PTR_ERR(dent);
- da->bh = bh;
- da->dent = dent;
+
+ if (da->save_loc) {
+ da->bh = bh;
+ da->dent = dent;
+ } else {
+ brelse(bh);
+ }
return 0;
}
diff --git a/fs/gfs2/dir.h b/fs/gfs2/dir.h
index 126c65dda028..e1b309c24dab 100644
--- a/fs/gfs2/dir.h
+++ b/fs/gfs2/dir.h
@@ -23,6 +23,7 @@ struct gfs2_diradd {
unsigned nr_blocks;
struct gfs2_dirent *dent;
struct buffer_head *bh;
+ int save_loc;
};
extern struct inode *gfs2_dir_search(struct inode *dir,
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 7f4ed3daa38c..80dd44dca028 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -914,26 +914,6 @@ out_uninit:
#ifdef CONFIG_GFS2_FS_LOCKING_DLM
/**
- * gfs2_setlease - acquire/release a file lease
- * @file: the file pointer
- * @arg: lease type
- * @fl: file lock
- *
- * We don't currently have a way to enforce a lease across the whole
- * cluster; until we do, disable leases (by just returning -EINVAL),
- * unless the administrator has requested purely local locking.
- *
- * Locking: called under i_lock
- *
- * Returns: errno
- */
-
-static int gfs2_setlease(struct file *file, long arg, struct file_lock **fl)
-{
- return -EINVAL;
-}
-
-/**
* gfs2_lock - acquire/release a posix lock on a file
* @file: the file pointer
* @cmd: either modify or retrieve lock state, possibly wait
@@ -1078,7 +1058,7 @@ const struct file_operations gfs2_file_fops = {
.flock = gfs2_flock,
.splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
- .setlease = gfs2_setlease,
+ .setlease = simple_nosetlease,
.fallocate = gfs2_fallocate,
};
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 7f513b1ceb2c..8f0c19d1d943 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -811,7 +811,7 @@ void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
{
INIT_LIST_HEAD(&gh->gh_list);
gh->gh_gl = gl;
- gh->gh_ip = (unsigned long)__builtin_return_address(0);
+ gh->gh_ip = _RET_IP_;
gh->gh_owner_pid = get_pid(task_pid(current));
gh->gh_state = state;
gh->gh_flags = flags;
@@ -835,7 +835,7 @@ void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *
gh->gh_state = state;
gh->gh_flags = flags;
gh->gh_iflags = 0;
- gh->gh_ip = (unsigned long)__builtin_return_address(0);
+ gh->gh_ip = _RET_IP_;
if (gh->gh_owner_pid)
put_pid(gh->gh_owner_pid);
gh->gh_owner_pid = get_pid(task_pid(current));
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 2ffc67dce87f..1cc0bba6313f 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -93,7 +93,7 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
* tr->alloced is not set since the transaction structure is
* on the stack */
tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
- tr.tr_ip = (unsigned long)__builtin_return_address(0);
+ tr.tr_ip = _RET_IP_;
sb_start_intwrite(sdp->sd_vfs);
if (gfs2_log_reserve(sdp, tr.tr_reserved) < 0) {
sb_end_intwrite(sdp->sd_vfs);
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index fc8ac2ee0667..c4ed823d150e 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -600,7 +600,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
int error, free_vfs_inode = 0;
u32 aflags = 0;
unsigned blocks = 1;
- struct gfs2_diradd da = { .bh = NULL, };
+ struct gfs2_diradd da = { .bh = NULL, .save_loc = 1, };
if (!name->len || name->len > GFS2_FNAMESIZE)
return -ENAMETOOLONG;
@@ -672,6 +672,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
gfs2_set_inode_blocks(inode, 1);
munge_mode_uid_gid(dip, inode);
+ check_and_update_goal(dip);
ip->i_goal = dip->i_goal;
ip->i_diskflags = 0;
ip->i_eattr = 0;
@@ -899,7 +900,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_holder ghs[2];
struct buffer_head *dibh;
- struct gfs2_diradd da = { .bh = NULL, };
+ struct gfs2_diradd da = { .bh = NULL, .save_loc = 1, };
int error;
if (S_ISDIR(inode->i_mode))
@@ -1244,6 +1245,9 @@ static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry,
struct dentry *d;
bool excl = !!(flags & O_EXCL);
+ if (!d_unhashed(dentry))
+ goto skip_lookup;
+
d = __gfs2_lookup(dir, dentry, file, opened);
if (IS_ERR(d))
return PTR_ERR(d);
@@ -1260,6 +1264,8 @@ static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry,
}
BUG_ON(d != NULL);
+
+skip_lookup:
if (!(flags & O_CREAT))
return -ENOENT;
@@ -1337,7 +1343,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
struct gfs2_rgrpd *nrgd;
unsigned int num_gh;
int dir_rename = 0;
- struct gfs2_diradd da = { .nr_blocks = 0, };
+ struct gfs2_diradd da = { .nr_blocks = 0, .save_loc = 0, };
unsigned int x;
int error;
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index f4cb9c0d6bbd..7474c413ffd1 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -577,6 +577,13 @@ struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
return rgd;
}
+void check_and_update_goal(struct gfs2_inode *ip)
+{
+ struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+ if (!ip->i_goal || gfs2_blk2rgrpd(sdp, ip->i_goal, 1) == NULL)
+ ip->i_goal = ip->i_no_addr;
+}
+
void gfs2_free_clones(struct gfs2_rgrpd *rgd)
{
int x;
@@ -1910,6 +1917,7 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, const struct gfs2_alloc_parms *a
} else if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal)) {
rs->rs_rbm.rgd = begin = ip->i_rgd;
} else {
+ check_and_update_goal(ip);
rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
}
if (S_ISDIR(ip->i_inode.i_mode) && (ap->aflags & GFS2_AF_ORLOV))
@@ -2089,7 +2097,7 @@ static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
u32 blen, unsigned char new_state)
{
struct gfs2_rbm rbm;
- struct gfs2_bitmap *bi;
+ struct gfs2_bitmap *bi, *bi_prev = NULL;
rbm.rgd = gfs2_blk2rgrpd(sdp, bstart, 1);
if (!rbm.rgd) {
@@ -2098,18 +2106,22 @@ static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
return NULL;
}
+ gfs2_rbm_from_block(&rbm, bstart);
while (blen--) {
- gfs2_rbm_from_block(&rbm, bstart);
bi = rbm_bi(&rbm);
- bstart++;
- if (!bi->bi_clone) {
- bi->bi_clone = kmalloc(bi->bi_bh->b_size,
- GFP_NOFS | __GFP_NOFAIL);
- memcpy(bi->bi_clone + bi->bi_offset,
- bi->bi_bh->b_data + bi->bi_offset, bi->bi_len);
+ if (bi != bi_prev) {
+ if (!bi->bi_clone) {
+ bi->bi_clone = kmalloc(bi->bi_bh->b_size,
+ GFP_NOFS | __GFP_NOFAIL);
+ memcpy(bi->bi_clone + bi->bi_offset,
+ bi->bi_bh->b_data + bi->bi_offset,
+ bi->bi_len);
+ }
+ gfs2_trans_add_meta(rbm.rgd->rd_gl, bi->bi_bh);
+ bi_prev = bi;
}
- gfs2_trans_add_meta(rbm.rgd->rd_gl, bi->bi_bh);
gfs2_setbit(&rbm, false, new_state);
+ gfs2_rbm_incr(&rbm);
}
return rbm.rgd;
diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h
index 463ab2e95d1c..5d8f085f7ade 100644
--- a/fs/gfs2/rgrp.h
+++ b/fs/gfs2/rgrp.h
@@ -80,4 +80,5 @@ static inline bool gfs2_rs_active(struct gfs2_blkreserv *rs)
return rs && !RB_EMPTY_NODE(&rs->rs_node);
}
+extern void check_and_update_goal(struct gfs2_inode *ip);
#endif /* __RGRP_DOT_H__ */
diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
index 0546ab4e28e8..42bfd3361979 100644
--- a/fs/gfs2/trans.c
+++ b/fs/gfs2/trans.c
@@ -44,7 +44,7 @@ int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
if (!tr)
return -ENOMEM;
- tr->tr_ip = (unsigned long)__builtin_return_address(0);
+ tr->tr_ip = _RET_IP_;
tr->tr_blocks = blocks;
tr->tr_revokes = revokes;
tr->tr_reserved = 1;
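
The gfs2 conversions above are behaviour-neutral: _RET_IP_ is the linux/kernel.h shorthand for (unsigned long)__builtin_return_address(0). A trivial illustration; example_record_caller() is made up:

	static noinline void example_record_caller(unsigned long *ip)
	{
		*ip = _RET_IP_;	/* same value as __builtin_return_address(0) */
	}
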
diff --git a/fs/internal.h b/fs/internal.h
index b2623200107b..9477f8f6aefc 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -56,7 +56,7 @@ extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
* namespace.c
*/
extern int copy_mount_options(const void __user *, unsigned long *);
-extern int copy_mount_string(const void __user *, char **);
+extern char *copy_mount_string(const void __user *);
extern struct vfsmount *lookup_mnt(struct path *);
extern int finish_automount(struct vfsmount *, struct path *);
diff --git a/fs/jffs2/jffs2_fs_sb.h b/fs/jffs2/jffs2_fs_sb.h
index 413ef89c2d1b..046fee8b6e9b 100644
--- a/fs/jffs2/jffs2_fs_sb.h
+++ b/fs/jffs2/jffs2_fs_sb.h
@@ -134,8 +134,6 @@ struct jffs2_sb_info {
struct rw_semaphore wbuf_sem; /* Protects the write buffer */
struct delayed_work wbuf_dwork; /* write-buffer write-out work */
- int wbuf_queued; /* non-zero delayed work is queued */
- spinlock_t wbuf_dwork_lock; /* protects wbuf_dwork and and wbuf_queued */
unsigned char *oobbuf;
int oobavail; /* How many bytes are available for JFFS2 in OOB */
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
index a6597d60d76d..09ed55190ee2 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -1162,10 +1162,6 @@ static void delayed_wbuf_sync(struct work_struct *work)
struct jffs2_sb_info *c = work_to_sb(work);
struct super_block *sb = OFNI_BS_2SFFJ(c);
- spin_lock(&c->wbuf_dwork_lock);
- c->wbuf_queued = 0;
- spin_unlock(&c->wbuf_dwork_lock);
-
if (!(sb->s_flags & MS_RDONLY)) {
jffs2_dbg(1, "%s()\n", __func__);
jffs2_flush_wbuf_gc(c, 0);
@@ -1180,14 +1176,9 @@ void jffs2_dirty_trigger(struct jffs2_sb_info *c)
if (sb->s_flags & MS_RDONLY)
return;
- spin_lock(&c->wbuf_dwork_lock);
- if (!c->wbuf_queued) {
+ delay = msecs_to_jiffies(dirty_writeback_interval * 10);
+ if (queue_delayed_work(system_long_wq, &c->wbuf_dwork, delay))
jffs2_dbg(1, "%s()\n", __func__);
- delay = msecs_to_jiffies(dirty_writeback_interval * 10);
- queue_delayed_work(system_long_wq, &c->wbuf_dwork, delay);
- c->wbuf_queued = 1;
- }
- spin_unlock(&c->wbuf_dwork_lock);
}
int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
@@ -1211,7 +1202,6 @@ int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
/* Initialise write buffer */
init_rwsem(&c->wbuf_sem);
- spin_lock_init(&c->wbuf_dwork_lock);
INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
c->wbuf_pagesize = c->mtd->writesize;
c->wbuf_ofs = 0xFFFFFFFF;
@@ -1251,7 +1241,6 @@ int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
/* Initialize write buffer */
init_rwsem(&c->wbuf_sem);
- spin_lock_init(&c->wbuf_dwork_lock);
INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
c->wbuf_pagesize = c->mtd->erasesize;
@@ -1311,7 +1300,6 @@ int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
/* Initialize write buffer */
init_rwsem(&c->wbuf_sem);
- spin_lock_init(&c->wbuf_dwork_lock);
INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
c->wbuf_pagesize = c->mtd->writesize;
@@ -1346,7 +1334,6 @@ int jffs2_ubivol_setup(struct jffs2_sb_info *c) {
return 0;
init_rwsem(&c->wbuf_sem);
- spin_lock_init(&c->wbuf_dwork_lock);
INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
c->wbuf_pagesize = c->mtd->writesize;
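
The jffs2 simplification leans on the workqueue API itself: queue_delayed_work() returns false when the work item is already pending, so the private wbuf_queued flag and the spinlock protecting it were redundant. A minimal sketch of the idiom with made-up names:

	static void example_kick_writeback(struct delayed_work *dwork)
	{
		unsigned long delay = msecs_to_jiffies(5000);

		if (queue_delayed_work(system_long_wq, dwork, delay))
			pr_debug("delayed writeback armed\n");	/* we were first to queue it */
	}
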
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index 0acddf60af55..bc462dcd7a40 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -1585,7 +1585,6 @@ void jfs_flush_journal(struct jfs_log *log, int wait)
set_current_state(TASK_UNINTERRUPTIBLE);
LOGGC_UNLOCK(log);
schedule();
- __set_current_state(TASK_RUNNING);
LOGGC_LOCK(log);
remove_wait_queue(&target->gcwait, &__wait);
}
@@ -2359,7 +2358,6 @@ int jfsIOWait(void *arg)
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irq(&log_redrive_lock);
schedule();
- __set_current_state(TASK_RUNNING);
}
} while (!kthread_should_stop());
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index 564c4f279ac6..d595856453b2 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -136,7 +136,6 @@ static inline void TXN_SLEEP_DROP_LOCK(wait_queue_head_t * event)
set_current_state(TASK_UNINTERRUPTIBLE);
TXN_UNLOCK();
io_schedule();
- __set_current_state(TASK_RUNNING);
remove_wait_queue(event, &wait);
}
@@ -2808,7 +2807,6 @@ int jfs_lazycommit(void *arg)
set_current_state(TASK_INTERRUPTIBLE);
LAZY_UNLOCK(flags);
schedule();
- __set_current_state(TASK_RUNNING);
remove_wait_queue(&jfs_commit_thread_wait, &wq);
}
} while (!kthread_should_stop());
@@ -2996,7 +2994,6 @@ int jfs_sync(void *arg)
set_current_state(TASK_INTERRUPTIBLE);
TXN_UNLOCK();
schedule();
- __set_current_state(TASK_RUNNING);
}
} while (!kthread_should_stop());
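
The jfs cleanups above remove __set_current_state(TASK_RUNNING) after schedule(): schedule() always returns with the task back in TASK_RUNNING, so the explicit reset was dead code. A hedged skeleton of the same wait pattern; the thread body and timeout are illustrative:

	static int example_thread(void *unused)
	{
		while (!kthread_should_stop()) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ);
			/* already back in TASK_RUNNING here */
		}
		return 0;
	}
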
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index adf8cb045b9e..93e897e588a8 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -550,7 +550,7 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
inode->i_ino = 0;
inode->i_size = sb->s_bdev->bd_inode->i_size;
inode->i_mapping->a_ops = &jfs_metapage_aops;
- insert_inode_hash(inode);
+ hlist_add_fake(&inode->i_hash);
mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
sbi->direct_inode = inode;
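
jfs's private metadata inode switches from insert_inode_hash() to hlist_add_fake(): the inode is marked as if it were hashed, so eviction follows the normal path, but it is never placed on a hash chain and icache lookups cannot find it. A tiny hedged sketch for a similarly private inode; the helper name is illustrative:

	static struct inode *example_private_inode(struct super_block *sb)
	{
		struct inode *inode = new_inode(sb);

		if (inode)
			hlist_add_fake(&inode->i_hash);	/* looks hashed, never findable */
		return inode;
	}
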
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index a693f5b01ae6..1c771931bb60 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -463,21 +463,10 @@ static int kernfs_dop_revalidate(struct dentry *dentry, unsigned int flags)
goto out_bad;
mutex_unlock(&kernfs_mutex);
-out_valid:
return 1;
out_bad:
mutex_unlock(&kernfs_mutex);
out_bad_unlocked:
- /*
- * @dentry doesn't match the underlying kernfs node, drop the
- * dentry and force lookup. If we have submounts we must allow the
- * vfs caches to lie about the state of the filesystem to prevent
- * leaks and other nasty things, so use check_submounts_and_drop()
- * instead of d_drop().
- */
- if (check_submounts_and_drop(dentry) != 0)
- goto out_valid;
-
return 0;
}
diff --git a/fs/libfs.c b/fs/libfs.c
index 88e3e00e2eca..171d2846f2a3 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -1075,3 +1075,21 @@ struct inode *alloc_anon_inode(struct super_block *s)
return inode;
}
EXPORT_SYMBOL(alloc_anon_inode);
+
+/**
+ * simple_nosetlease - generic helper for prohibiting leases
+ * @filp: file pointer
+ * @arg: type of lease to obtain
+ * @flp: new lease supplied for insertion
+ * @priv: private data for lm_setup operation
+ *
+ * Generic helper for filesystems that do not wish to allow leases to be set.
+ * All arguments are ignored and it just returns -EINVAL.
+ */
+int
+simple_nosetlease(struct file *filp, long arg, struct file_lock **flp,
+ void **priv)
+{
+ return -EINVAL;
+}
+EXPORT_SYMBOL(simple_nosetlease);
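
simple_nosetlease() gives filesystems that cannot support leases (gfs2 above, which cannot enforce them across the cluster) a shared stub instead of a private copy that returns -EINVAL. A sketch of wiring it up; example_file_fops is an illustrative name:

	static const struct file_operations example_file_fops = {
		.read_iter	= generic_file_read_iter,
		.write_iter	= generic_file_write_iter,
		.setlease	= simple_nosetlease,
	};
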
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index ab798a88ec1d..13db95f54176 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -245,7 +245,6 @@ nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host,
block->b_daemon = rqstp->rq_server;
block->b_host = host;
block->b_file = file;
- block->b_fl = NULL;
file->f_count++;
/* Add to file's list of blocks */
@@ -295,7 +294,6 @@ static void nlmsvc_free_block(struct kref *kref)
nlmsvc_freegrantargs(block->b_call);
nlmsvc_release_call(block->b_call);
nlm_release_file(block->b_file);
- kfree(block->b_fl);
kfree(block);
}
@@ -508,7 +506,6 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
struct nlm_host *host, struct nlm_lock *lock,
struct nlm_lock *conflock, struct nlm_cookie *cookie)
{
- struct nlm_block *block = NULL;
int error;
__be32 ret;
@@ -519,63 +516,26 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
(long long)lock->fl.fl_start,
(long long)lock->fl.fl_end);
- /* Get existing block (in case client is busy-waiting) */
- block = nlmsvc_lookup_block(file, lock);
-
- if (block == NULL) {
- struct file_lock *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
-
- if (conf == NULL)
- return nlm_granted;
- block = nlmsvc_create_block(rqstp, host, file, lock, cookie);
- if (block == NULL) {
- kfree(conf);
- return nlm_granted;
- }
- block->b_fl = conf;
- }
- if (block->b_flags & B_QUEUED) {
- dprintk("lockd: nlmsvc_testlock deferred block %p flags %d fl %p\n",
- block, block->b_flags, block->b_fl);
- if (block->b_flags & B_TIMED_OUT) {
- nlmsvc_unlink_block(block);
- ret = nlm_lck_denied;
- goto out;
- }
- if (block->b_flags & B_GOT_CALLBACK) {
- nlmsvc_unlink_block(block);
- if (block->b_fl != NULL
- && block->b_fl->fl_type != F_UNLCK) {
- lock->fl = *block->b_fl;
- goto conf_lock;
- } else {
- ret = nlm_granted;
- goto out;
- }
- }
- ret = nlm_drop_reply;
- goto out;
- }
-
if (locks_in_grace(SVC_NET(rqstp))) {
ret = nlm_lck_denied_grace_period;
goto out;
}
+
error = vfs_test_lock(file->f_file, &lock->fl);
- if (error == FILE_LOCK_DEFERRED) {
- ret = nlmsvc_defer_lock_rqst(rqstp, block);
- goto out;
- }
if (error) {
+ /* We can't currently deal with deferred test requests */
+ if (error == FILE_LOCK_DEFERRED)
+ WARN_ON_ONCE(1);
+
ret = nlm_lck_denied_nolocks;
goto out;
}
+
if (lock->fl.fl_type == F_UNLCK) {
ret = nlm_granted;
goto out;
}
-conf_lock:
dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n",
lock->fl.fl_type, (long long)lock->fl.fl_start,
(long long)lock->fl.fl_end);
@@ -586,10 +546,9 @@ conf_lock:
conflock->fl.fl_type = lock->fl.fl_type;
conflock->fl.fl_start = lock->fl.fl_start;
conflock->fl.fl_end = lock->fl.fl_end;
+ locks_release_private(&lock->fl);
ret = nlm_lck_denied;
out:
- if (block)
- nlmsvc_release_block(block);
return ret;
}
@@ -660,29 +619,22 @@ nlmsvc_cancel_blocked(struct net *net, struct nlm_file *file, struct nlm_lock *l
* This is a callback from the filesystem for VFS file lock requests.
* It will be used if lm_grant is defined and the filesystem can not
* respond to the request immediately.
- * For GETLK request it will copy the reply to the nlm_block.
* For SETLK or SETLKW request it will get the local posix lock.
* In all cases it will move the block to the head of nlm_blocked q where
* nlmsvc_retry_blocked() can send back a reply for SETLKW or revisit the
* deferred rpc for GETLK and SETLK.
*/
static void
-nlmsvc_update_deferred_block(struct nlm_block *block, struct file_lock *conf,
- int result)
+nlmsvc_update_deferred_block(struct nlm_block *block, int result)
{
block->b_flags |= B_GOT_CALLBACK;
if (result == 0)
block->b_granted = 1;
else
block->b_flags |= B_TIMED_OUT;
- if (conf) {
- if (block->b_fl)
- __locks_copy_lock(block->b_fl, conf);
- }
}
-static int nlmsvc_grant_deferred(struct file_lock *fl, struct file_lock *conf,
- int result)
+static int nlmsvc_grant_deferred(struct file_lock *fl, int result)
{
struct nlm_block *block;
int rc = -ENOENT;
@@ -697,7 +649,7 @@ static int nlmsvc_grant_deferred(struct file_lock *fl, struct file_lock *conf,
rc = -ENOLCK;
break;
}
- nlmsvc_update_deferred_block(block, conf, result);
+ nlmsvc_update_deferred_block(block, result);
} else if (result == 0)
block->b_granted = 1;
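
Both dlm and lockd are adjusted to the slimmer grant callback: with deferred test (GETLK) requests gone, lm_grant no longer receives a conflicting-lock argument, only the file_lock and the result. An illustrative skeleton under that assumption; example_mark_granted() is hypothetical:

	static int example_lm_grant(struct file_lock *fl, int result)
	{
		if (result == 0)
			example_mark_granted(fl);	/* hypothetical bookkeeping */
		return 0;
	}
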
diff --git a/fs/locks.c b/fs/locks.c
index bb08857f90b5..735b8d3fa78c 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -230,8 +230,12 @@ void locks_release_private(struct file_lock *fl)
fl->fl_ops->fl_release_private(fl);
fl->fl_ops = NULL;
}
- fl->fl_lmops = NULL;
+ if (fl->fl_lmops) {
+ if (fl->fl_lmops->lm_put_owner)
+ fl->fl_lmops->lm_put_owner(fl);
+ fl->fl_lmops = NULL;
+ }
}
EXPORT_SYMBOL_GPL(locks_release_private);
@@ -267,21 +271,10 @@ void locks_init_lock(struct file_lock *fl)
EXPORT_SYMBOL(locks_init_lock);
-static void locks_copy_private(struct file_lock *new, struct file_lock *fl)
-{
- if (fl->fl_ops) {
- if (fl->fl_ops->fl_copy_lock)
- fl->fl_ops->fl_copy_lock(new, fl);
- new->fl_ops = fl->fl_ops;
- }
- if (fl->fl_lmops)
- new->fl_lmops = fl->fl_lmops;
-}
-
/*
* Initialize a new lock from an existing file_lock structure.
*/
-void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
+void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
{
new->fl_owner = fl->fl_owner;
new->fl_pid = fl->fl_pid;
@@ -290,22 +283,30 @@ void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
new->fl_type = fl->fl_type;
new->fl_start = fl->fl_start;
new->fl_end = fl->fl_end;
+ new->fl_lmops = fl->fl_lmops;
new->fl_ops = NULL;
- new->fl_lmops = NULL;
+
+ if (fl->fl_lmops) {
+ if (fl->fl_lmops->lm_get_owner)
+ fl->fl_lmops->lm_get_owner(new, fl);
+ }
}
-EXPORT_SYMBOL(__locks_copy_lock);
+EXPORT_SYMBOL(locks_copy_conflock);
void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
/* "new" must be a freshly-initialized lock */
WARN_ON_ONCE(new->fl_ops);
- __locks_copy_lock(new, fl);
+ locks_copy_conflock(new, fl);
+
new->fl_file = fl->fl_file;
new->fl_ops = fl->fl_ops;
- new->fl_lmops = fl->fl_lmops;
- locks_copy_private(new, fl);
+ if (fl->fl_ops) {
+ if (fl->fl_ops->fl_copy_lock)
+ fl->fl_ops->fl_copy_lock(new, fl);
+ }
}
EXPORT_SYMBOL(locks_copy_lock);
@@ -325,17 +326,18 @@ static inline int flock_translate_cmd(int cmd) {
}
/* Fill in a file_lock structure with an appropriate FLOCK lock. */
-static int flock_make_lock(struct file *filp, struct file_lock **lock,
- unsigned int cmd)
+static struct file_lock *
+flock_make_lock(struct file *filp, unsigned int cmd)
{
struct file_lock *fl;
int type = flock_translate_cmd(cmd);
+
if (type < 0)
- return type;
+ return ERR_PTR(type);
fl = locks_alloc_lock();
if (fl == NULL)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
fl->fl_file = filp;
fl->fl_owner = filp;
@@ -344,8 +346,7 @@ static int flock_make_lock(struct file *filp, struct file_lock **lock,
fl->fl_type = type;
fl->fl_end = OFFSET_MAX;
- *lock = fl;
- return 0;
+ return fl;
}
static int assign_type(struct file_lock *fl, long type)
@@ -426,14 +427,34 @@ static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
}
/* default lease lock manager operations */
-static void lease_break_callback(struct file_lock *fl)
+static bool
+lease_break_callback(struct file_lock *fl)
{
kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
+ return false;
+}
+
+static void
+lease_setup(struct file_lock *fl, void **priv)
+{
+ struct file *filp = fl->fl_file;
+ struct fasync_struct *fa = *priv;
+
+ /*
+ * fasync_insert_entry() returns the old entry if any. If there was no
+ * old entry, then it used "priv" and inserted it into the fasync list.
+ * Clear the pointer to indicate that it shouldn't be freed.
+ */
+ if (!fasync_insert_entry(fa->fa_fd, filp, &fl->fl_fasync, fa))
+ *priv = NULL;
+
+ __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
}
static const struct lock_manager_operations lease_manager_ops = {
.lm_break = lease_break_callback,
.lm_change = lease_modify,
+ .lm_setup = lease_setup,
};
/*
@@ -444,7 +465,7 @@ static int lease_init(struct file *filp, long type, struct file_lock *fl)
if (assign_type(fl, type) != 0)
return -EINVAL;
- fl->fl_owner = current->files;
+ fl->fl_owner = filp;
fl->fl_pid = current->tgid;
fl->fl_file = filp;
@@ -735,7 +756,7 @@ posix_test_lock(struct file *filp, struct file_lock *fl)
break;
}
if (cfl) {
- __locks_copy_lock(fl, cfl);
+ locks_copy_conflock(fl, cfl);
if (cfl->fl_nspid)
fl->fl_pid = pid_vnr(cfl->fl_nspid);
} else
@@ -941,7 +962,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
if (!posix_locks_conflict(request, fl))
continue;
if (conflock)
- __locks_copy_lock(conflock, fl);
+ locks_copy_conflock(conflock, fl);
error = -EAGAIN;
if (!(request->fl_flags & FL_SLEEP))
goto out;
@@ -1273,7 +1294,7 @@ static void lease_clear_pending(struct file_lock *fl, int arg)
}
/* We already had a lease on this file; just change its type */
-int lease_modify(struct file_lock **before, int arg)
+int lease_modify(struct file_lock **before, int arg, struct list_head *dispose)
{
struct file_lock *fl = *before;
int error = assign_type(fl, arg);
@@ -1292,11 +1313,10 @@ int lease_modify(struct file_lock **before, int arg)
printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
fl->fl_fasync = NULL;
}
- locks_delete_lock(before, NULL);
+ locks_delete_lock(before, dispose);
}
return 0;
}
-
EXPORT_SYMBOL(lease_modify);
static bool past_time(unsigned long then)
@@ -1307,18 +1327,20 @@ static bool past_time(unsigned long then)
return time_after(jiffies, then);
}
-static void time_out_leases(struct inode *inode)
+static void time_out_leases(struct inode *inode, struct list_head *dispose)
{
struct file_lock **before;
struct file_lock *fl;
+ lockdep_assert_held(&inode->i_lock);
+
before = &inode->i_flock;
while ((fl = *before) && IS_LEASE(fl) && lease_breaking(fl)) {
trace_time_out_leases(inode, fl);
if (past_time(fl->fl_downgrade_time))
- lease_modify(before, F_RDLCK);
+ lease_modify(before, F_RDLCK, dispose);
if (past_time(fl->fl_break_time))
- lease_modify(before, F_UNLCK);
+ lease_modify(before, F_UNLCK, dispose);
if (fl == *before) /* lease_modify may have freed fl */
before = &fl->fl_next;
}
@@ -1331,6 +1353,20 @@ static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
return locks_conflict(breaker, lease);
}
+static bool
+any_leases_conflict(struct inode *inode, struct file_lock *breaker)
+{
+ struct file_lock *fl;
+
+ lockdep_assert_held(&inode->i_lock);
+
+ for (fl = inode->i_flock ; fl && IS_LEASE(fl); fl = fl->fl_next) {
+ if (leases_conflict(fl, breaker))
+ return true;
+ }
+ return false;
+}
+
/**
* __break_lease - revoke all outstanding leases on file
* @inode: the inode of the file to return
@@ -1347,12 +1383,11 @@ static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
{
int error = 0;
- struct file_lock *new_fl, *flock;
- struct file_lock *fl;
+ struct file_lock *new_fl;
+ struct file_lock *fl, **before;
unsigned long break_time;
- int i_have_this_lease = 0;
- bool lease_conflict = false;
int want_write = (mode & O_ACCMODE) != O_RDONLY;
+ LIST_HEAD(dispose);
new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
if (IS_ERR(new_fl))
@@ -1361,20 +1396,9 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
spin_lock(&inode->i_lock);
- time_out_leases(inode);
-
- flock = inode->i_flock;
- if ((flock == NULL) || !IS_LEASE(flock))
- goto out;
+ time_out_leases(inode, &dispose);
- for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
- if (leases_conflict(fl, new_fl)) {
- lease_conflict = true;
- if (fl->fl_owner == current->files)
- i_have_this_lease = 1;
- }
- }
- if (!lease_conflict)
+ if (!any_leases_conflict(inode, new_fl))
goto out;
break_time = 0;
@@ -1384,7 +1408,9 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
break_time++; /* so that 0 means no break time */
}
- for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
+ for (before = &inode->i_flock;
+ ((fl = *before) != NULL) && IS_LEASE(fl);
+ before = &fl->fl_next) {
if (!leases_conflict(fl, new_fl))
continue;
if (want_write) {
@@ -1393,51 +1419,56 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
fl->fl_flags |= FL_UNLOCK_PENDING;
fl->fl_break_time = break_time;
} else {
- if (lease_breaking(flock))
+ if (lease_breaking(inode->i_flock))
continue;
fl->fl_flags |= FL_DOWNGRADE_PENDING;
fl->fl_downgrade_time = break_time;
}
- fl->fl_lmops->lm_break(fl);
+ if (fl->fl_lmops->lm_break(fl))
+ locks_delete_lock(before, &dispose);
}
- if (i_have_this_lease || (mode & O_NONBLOCK)) {
+ fl = inode->i_flock;
+ if (!fl || !IS_LEASE(fl))
+ goto out;
+
+ if (mode & O_NONBLOCK) {
trace_break_lease_noblock(inode, new_fl);
error = -EWOULDBLOCK;
goto out;
}
restart:
- break_time = flock->fl_break_time;
+ break_time = inode->i_flock->fl_break_time;
if (break_time != 0)
break_time -= jiffies;
if (break_time == 0)
break_time++;
- locks_insert_block(flock, new_fl);
+ locks_insert_block(inode->i_flock, new_fl);
trace_break_lease_block(inode, new_fl);
spin_unlock(&inode->i_lock);
+ locks_dispose_list(&dispose);
error = wait_event_interruptible_timeout(new_fl->fl_wait,
!new_fl->fl_next, break_time);
spin_lock(&inode->i_lock);
trace_break_lease_unblock(inode, new_fl);
locks_delete_block(new_fl);
if (error >= 0) {
- if (error == 0)
- time_out_leases(inode);
/*
* Wait for the next conflicting lease that has not been
* broken yet
*/
- for (flock = inode->i_flock; flock && IS_LEASE(flock);
- flock = flock->fl_next) {
- if (leases_conflict(new_fl, flock))
- goto restart;
- }
+ if (error == 0)
+ time_out_leases(inode, &dispose);
+ if (any_leases_conflict(inode, new_fl))
+ goto restart;
+
error = 0;
}
out:
spin_unlock(&inode->i_lock);
+ locks_dispose_list(&dispose);
locks_free_lock(new_fl);
return error;
}
@@ -1455,8 +1486,18 @@ EXPORT_SYMBOL(__break_lease);
*/
void lease_get_mtime(struct inode *inode, struct timespec *time)
{
- struct file_lock *flock = inode->i_flock;
- if (flock && IS_LEASE(flock) && (flock->fl_type == F_WRLCK))
+ bool has_lease = false;
+ struct file_lock *flock;
+
+ if (inode->i_flock) {
+ spin_lock(&inode->i_lock);
+ flock = inode->i_flock;
+ if (flock && IS_LEASE(flock) && (flock->fl_type == F_WRLCK))
+ has_lease = true;
+ spin_unlock(&inode->i_lock);
+ }
+
+ if (has_lease)
*time = current_fs_time(inode->i_sb);
else
*time = inode->i_mtime;
@@ -1492,9 +1533,10 @@ int fcntl_getlease(struct file *filp)
struct file_lock *fl;
struct inode *inode = file_inode(filp);
int type = F_UNLCK;
+ LIST_HEAD(dispose);
spin_lock(&inode->i_lock);
- time_out_leases(file_inode(filp));
+ time_out_leases(file_inode(filp), &dispose);
for (fl = file_inode(filp)->i_flock; fl && IS_LEASE(fl);
fl = fl->fl_next) {
if (fl->fl_file == filp) {
@@ -1503,6 +1545,7 @@ int fcntl_getlease(struct file *filp)
}
}
spin_unlock(&inode->i_lock);
+ locks_dispose_list(&dispose);
return type;
}
@@ -1532,13 +1575,15 @@ check_conflicting_open(const struct dentry *dentry, const long arg)
return ret;
}
-static int generic_add_lease(struct file *filp, long arg, struct file_lock **flp)
+static int
+generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv)
{
struct file_lock *fl, **before, **my_before = NULL, *lease;
struct dentry *dentry = filp->f_path.dentry;
struct inode *inode = dentry->d_inode;
bool is_deleg = (*flp)->fl_flags & FL_DELEG;
int error;
+ LIST_HEAD(dispose);
lease = *flp;
trace_generic_add_lease(inode, lease);
@@ -1561,6 +1606,8 @@ static int generic_add_lease(struct file *filp, long arg, struct file_lock **flp
return -EINVAL;
}
+ spin_lock(&inode->i_lock);
+ time_out_leases(inode, &dispose);
error = check_conflicting_open(dentry, arg);
if (error)
goto out;
@@ -1596,10 +1643,11 @@ static int generic_add_lease(struct file *filp, long arg, struct file_lock **flp
}
if (my_before != NULL) {
- error = lease->fl_lmops->lm_change(my_before, arg);
- if (!error)
- *flp = *my_before;
- goto out;
+ lease = *my_before;
+ error = lease->fl_lmops->lm_change(my_before, arg, &dispose);
+ if (error)
+ goto out;
+ goto out_setup;
}
error = -EINVAL;
@@ -1619,43 +1667,61 @@ static int generic_add_lease(struct file *filp, long arg, struct file_lock **flp
smp_mb();
error = check_conflicting_open(dentry, arg);
if (error)
- locks_unlink_lock(before);
+ goto out_unlink;
+
+out_setup:
+ if (lease->fl_lmops->lm_setup)
+ lease->fl_lmops->lm_setup(lease, priv);
out:
+ spin_unlock(&inode->i_lock);
+ locks_dispose_list(&dispose);
if (is_deleg)
mutex_unlock(&inode->i_mutex);
+ if (!error && !my_before)
+ *flp = NULL;
return error;
+out_unlink:
+ locks_unlink_lock(before);
+ goto out;
}
-static int generic_delete_lease(struct file *filp, struct file_lock **flp)
+static int generic_delete_lease(struct file *filp)
{
+ int error = -EAGAIN;
struct file_lock *fl, **before;
struct dentry *dentry = filp->f_path.dentry;
struct inode *inode = dentry->d_inode;
+ LIST_HEAD(dispose);
- trace_generic_delete_lease(inode, *flp);
-
+ spin_lock(&inode->i_lock);
+ time_out_leases(inode, &dispose);
for (before = &inode->i_flock;
((fl = *before) != NULL) && IS_LEASE(fl);
before = &fl->fl_next) {
- if (fl->fl_file != filp)
- continue;
- return (*flp)->fl_lmops->lm_change(before, F_UNLCK);
+ if (fl->fl_file == filp)
+ break;
}
- return -EAGAIN;
+ trace_generic_delete_lease(inode, fl);
+ if (fl)
+ error = fl->fl_lmops->lm_change(before, F_UNLCK, &dispose);
+ spin_unlock(&inode->i_lock);
+ locks_dispose_list(&dispose);
+ return error;
}
/**
* generic_setlease - sets a lease on an open file
- * @filp: file pointer
- * @arg: type of lease to obtain
- * @flp: input - file_lock to use, output - file_lock inserted
+ * @filp: file pointer
+ * @arg: type of lease to obtain
+ * @flp: input - file_lock to use, output - file_lock inserted
+ * @priv: private data for lm_setup (may be NULL if lm_setup
+ * doesn't require it)
*
* The (input) flp->fl_lmops->lm_break function is required
* by break_lease().
- *
- * Called with inode->i_lock held.
*/
-int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
+int generic_setlease(struct file *filp, long arg, struct file_lock **flp,
+ void **priv)
{
struct dentry *dentry = filp->f_path.dentry;
struct inode *inode = dentry->d_inode;
@@ -1669,83 +1735,52 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
if (error)
return error;
- time_out_leases(inode);
-
- BUG_ON(!(*flp)->fl_lmops->lm_break);
-
switch (arg) {
case F_UNLCK:
- return generic_delete_lease(filp, flp);
+ return generic_delete_lease(filp);
case F_RDLCK:
case F_WRLCK:
- return generic_add_lease(filp, arg, flp);
+ if (!(*flp)->fl_lmops->lm_break) {
+ WARN_ON_ONCE(1);
+ return -ENOLCK;
+ }
+ return generic_add_lease(filp, arg, flp, priv);
default:
return -EINVAL;
}
}
EXPORT_SYMBOL(generic_setlease);
-static int __vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
-{
- if (filp->f_op->setlease)
- return filp->f_op->setlease(filp, arg, lease);
- else
- return generic_setlease(filp, arg, lease);
-}
-
/**
- * vfs_setlease - sets a lease on an open file
- * @filp: file pointer
- * @arg: type of lease to obtain
- * @lease: file_lock to use
- *
- * Call this to establish a lease on the file.
- * The (*lease)->fl_lmops->lm_break operation must be set; if not,
- * break_lease will oops!
- *
- * This will call the filesystem's setlease file method, if
- * defined. Note that there is no getlease method; instead, the
- * filesystem setlease method should call back to setlease() to
- * add a lease to the inode's lease list, where fcntl_getlease() can
- * find it. Since fcntl_getlease() only reports whether the current
- * task holds a lease, a cluster filesystem need only do this for
- * leases held by processes on this node.
- *
- * There is also no break_lease method; filesystems that
- * handle their own leases should break leases themselves from the
- * filesystem's open, create, and (on truncate) setattr methods.
- *
- * Warning: the only current setlease methods exist only to disable
- * leases in certain cases. More vfs changes may be required to
- * allow a full filesystem lease implementation.
+ * vfs_setlease - sets a lease on an open file
+ * @filp: file pointer
+ * @arg: type of lease to obtain
+ * @lease: file_lock to use when adding a lease
+ * @priv: private info for lm_setup when adding a lease (may be
+ * NULL if lm_setup doesn't require it)
+ *
+ * Call this to establish a lease on the file. The "lease" argument is not
+ * used for F_UNLCK requests and may be NULL. For commands that set or alter
+ * an existing lease, the (*lease)->fl_lmops->lm_break operation must be set;
+ * if not, this function will return -ENOLCK (and generate a scary-looking
+ * stack trace).
+ *
+ * The "priv" pointer is passed directly to the lm_setup function as-is. It
+ * may be NULL if the lm_setup operation doesn't require it.
*/
-
-int vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
+int
+vfs_setlease(struct file *filp, long arg, struct file_lock **lease, void **priv)
{
- struct inode *inode = file_inode(filp);
- int error;
-
- spin_lock(&inode->i_lock);
- error = __vfs_setlease(filp, arg, lease);
- spin_unlock(&inode->i_lock);
-
- return error;
+ if (filp->f_op->setlease)
+ return filp->f_op->setlease(filp, arg, lease, priv);
+ else
+ return generic_setlease(filp, arg, lease, priv);
}
EXPORT_SYMBOL_GPL(vfs_setlease);
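A condensed sketch of the caller side of the reworked API. The call shapes come from the do_fcntl_add_lease() and fcntl_setlease() changes below and from the nfsd conversion later in this diff; example_add_lease() and example_delete_lease() are hypothetical wrappers, not part of the patch.

#include <linux/fs.h>

static int example_add_lease(struct file *filp, struct file_lock *fl, void **priv)
{
	int error;

	/* &fl may be consumed; whatever the lease code leaves behind is ours to free */
	error = vfs_setlease(filp, fl->fl_type, &fl, priv);
	if (fl)
		locks_free_lock(fl);
	return error;
}

static int example_delete_lease(struct file *filp)
{
	/* F_UNLCK no longer needs a file_lock or a priv cookie */
	return vfs_setlease(filp, F_UNLCK, NULL, NULL);
}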
-static int do_fcntl_delete_lease(struct file *filp)
-{
- struct file_lock fl, *flp = &fl;
-
- lease_init(filp, F_UNLCK, flp);
-
- return vfs_setlease(filp, F_UNLCK, &flp);
-}
-
static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
{
- struct file_lock *fl, *ret;
- struct inode *inode = file_inode(filp);
+ struct file_lock *fl;
struct fasync_struct *new;
int error;
@@ -1758,26 +1793,9 @@ static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
locks_free_lock(fl);
return -ENOMEM;
}
- ret = fl;
- spin_lock(&inode->i_lock);
- error = __vfs_setlease(filp, arg, &ret);
- if (error)
- goto out_unlock;
- if (ret == fl)
- fl = NULL;
-
- /*
- * fasync_insert_entry() returns the old entry if any.
- * If there was no old entry, then it used 'new' and
- * inserted it into the fasync list. Clear new so that
- * we don't release it here.
- */
- if (!fasync_insert_entry(fd, filp, &ret->fl_fasync, new))
- new = NULL;
+ new->fa_fd = fd;
- error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
-out_unlock:
- spin_unlock(&inode->i_lock);
+ error = vfs_setlease(filp, arg, &fl, (void **)&new);
if (fl)
locks_free_lock(fl);
if (new)
@@ -1798,7 +1816,7 @@ out_unlock:
int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
{
if (arg == F_UNLCK)
- return do_fcntl_delete_lease(filp);
+ return vfs_setlease(filp, F_UNLCK, NULL, NULL);
return do_fcntl_add_lease(fd, filp, arg);
}
@@ -1867,9 +1885,12 @@ SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
!(f.file->f_mode & (FMODE_READ|FMODE_WRITE)))
goto out_putf;
- error = flock_make_lock(f.file, &lock, cmd);
- if (error)
+ lock = flock_make_lock(f.file, cmd);
+ if (IS_ERR(lock)) {
+ error = PTR_ERR(lock);
goto out_putf;
+ }
+
if (can_sleep)
lock->fl_flags |= FL_SLEEP;
@@ -1981,11 +2002,13 @@ int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock __user *l)
if (file_lock.fl_type != F_UNLCK) {
error = posix_lock_to_flock(&flock, &file_lock);
if (error)
- goto out;
+ goto rel_priv;
}
error = -EFAULT;
if (!copy_to_user(l, &flock, sizeof(flock)))
error = 0;
+rel_priv:
+ locks_release_private(&file_lock);
out:
return error;
}
@@ -2206,7 +2229,8 @@ int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 __user *l)
error = -EFAULT;
if (!copy_to_user(l, &flock, sizeof(flock)))
error = 0;
-
+
+ locks_release_private(&file_lock);
out:
return error;
}
@@ -2369,7 +2393,7 @@ void locks_remove_file(struct file *filp)
while ((fl = *before) != NULL) {
if (fl->fl_file == filp) {
if (IS_LEASE(fl)) {
- lease_modify(before, F_UNLCK);
+ lease_modify(before, F_UNLCK, &dispose);
continue;
}
@@ -2593,86 +2617,6 @@ static int __init proc_locks_init(void)
module_init(proc_locks_init);
#endif
-/**
- * lock_may_read - checks that the region is free of locks
- * @inode: the inode that is being read
- * @start: the first byte to read
- * @len: the number of bytes to read
- *
- * Emulates Windows locking requirements. Whole-file
- * mandatory locks (share modes) can prohibit a read and
- * byte-range POSIX locks can prohibit a read if they overlap.
- *
- * N.B. this function is only ever called
- * from knfsd and ownership of locks is never checked.
- */
-int lock_may_read(struct inode *inode, loff_t start, unsigned long len)
-{
- struct file_lock *fl;
- int result = 1;
-
- spin_lock(&inode->i_lock);
- for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
- if (IS_POSIX(fl)) {
- if (fl->fl_type == F_RDLCK)
- continue;
- if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
- continue;
- } else if (IS_FLOCK(fl)) {
- if (!(fl->fl_type & LOCK_MAND))
- continue;
- if (fl->fl_type & LOCK_READ)
- continue;
- } else
- continue;
- result = 0;
- break;
- }
- spin_unlock(&inode->i_lock);
- return result;
-}
-
-EXPORT_SYMBOL(lock_may_read);
-
-/**
- * lock_may_write - checks that the region is free of locks
- * @inode: the inode that is being written
- * @start: the first byte to write
- * @len: the number of bytes to write
- *
- * Emulates Windows locking requirements. Whole-file
- * mandatory locks (share modes) can prohibit a write and
- * byte-range POSIX locks can prohibit a write if they overlap.
- *
- * N.B. this function is only ever called
- * from knfsd and ownership of locks is never checked.
- */
-int lock_may_write(struct inode *inode, loff_t start, unsigned long len)
-{
- struct file_lock *fl;
- int result = 1;
-
- spin_lock(&inode->i_lock);
- for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
- if (IS_POSIX(fl)) {
- if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
- continue;
- } else if (IS_FLOCK(fl)) {
- if (!(fl->fl_type & LOCK_MAND))
- continue;
- if (fl->fl_type & LOCK_WRITE)
- continue;
- } else
- continue;
- result = 0;
- break;
- }
- spin_unlock(&inode->i_lock);
- return result;
-}
-
-EXPORT_SYMBOL(lock_may_write);
-
static int __init filelock_init(void)
{
int i;
diff --git a/fs/mount.h b/fs/mount.h
index 6740a6215529..f82c62840905 100644
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -21,6 +21,7 @@ struct mnt_pcp {
struct mountpoint {
struct hlist_node m_hash;
struct dentry *m_dentry;
+ struct hlist_head m_list;
int m_count;
};
@@ -29,7 +30,10 @@ struct mount {
struct mount *mnt_parent;
struct dentry *mnt_mountpoint;
struct vfsmount mnt;
- struct rcu_head mnt_rcu;
+ union {
+ struct rcu_head mnt_rcu;
+ struct llist_node mnt_llist;
+ };
#ifdef CONFIG_SMP
struct mnt_pcp __percpu *mnt_pcp;
#else
@@ -48,6 +52,7 @@ struct mount {
struct mount *mnt_master; /* slave is on master->mnt_slave_list */
struct mnt_namespace *mnt_ns; /* containing namespace */
struct mountpoint *mnt_mp; /* where is it mounted */
+ struct hlist_node mnt_mp_list; /* list mounts with the same mountpoint */
#ifdef CONFIG_FSNOTIFY
struct hlist_head mnt_fsnotify_marks;
__u32 mnt_fsnotify_mask;
@@ -82,6 +87,15 @@ extern struct mount *__lookup_mnt_last(struct vfsmount *, struct dentry *);
extern bool legitimize_mnt(struct vfsmount *, unsigned);
+extern void __detach_mounts(struct dentry *dentry);
+
+static inline void detach_mounts(struct dentry *dentry)
+{
+ if (!d_mountpoint(dentry))
+ return;
+ __detach_mounts(dentry);
+}
+
static inline void get_mnt_ns(struct mnt_namespace *ns)
{
atomic_inc(&ns->count);
@@ -112,3 +126,12 @@ struct proc_mounts {
#define proc_mounts(p) (container_of((p), struct proc_mounts, m))
extern const struct seq_operations mounts_op;
+
+extern bool __is_local_mountpoint(struct dentry *dentry);
+static inline bool is_local_mountpoint(struct dentry *dentry)
+{
+ if (!d_mountpoint(dentry))
+ return false;
+
+ return __is_local_mountpoint(dentry);
+}
diff --git a/fs/namei.c b/fs/namei.c
index a7b05bf82d31..43927d14db67 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1306,7 +1306,8 @@ static struct dentry *lookup_dcache(struct qstr *name, struct dentry *dir,
if (error < 0) {
dput(dentry);
return ERR_PTR(error);
- } else if (!d_invalidate(dentry)) {
+ } else {
+ d_invalidate(dentry);
dput(dentry);
dentry = NULL;
}
@@ -1435,10 +1436,9 @@ unlazy:
dput(dentry);
return status;
}
- if (!d_invalidate(dentry)) {
- dput(dentry);
- goto need_lookup;
- }
+ d_invalidate(dentry);
+ dput(dentry);
+ goto need_lookup;
}
path->mnt = mnt;
@@ -1950,7 +1950,7 @@ static int path_lookupat(int dfd, const char *name,
err = path_init(dfd, name, flags | LOOKUP_PARENT, nd, &base);
if (unlikely(err))
- return err;
+ goto out;
current->total_link_count = 0;
err = link_path_walk(name, nd);
@@ -1982,6 +1982,7 @@ static int path_lookupat(int dfd, const char *name,
}
}
+out:
if (base)
fput(base);
@@ -2301,7 +2302,7 @@ path_mountpoint(int dfd, const char *name, struct path *path, unsigned int flags
err = path_init(dfd, name, flags | LOOKUP_PARENT, &nd, &base);
if (unlikely(err))
- return err;
+ goto out;
current->total_link_count = 0;
err = link_path_walk(name, &nd);
@@ -3074,7 +3075,7 @@ opened:
error = open_check_o_direct(file);
if (error)
goto exit_fput;
- error = ima_file_check(file, op->acc_mode);
+ error = ima_file_check(file, op->acc_mode, *opened);
if (error)
goto exit_fput;
@@ -3565,7 +3566,7 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
mutex_lock(&dentry->d_inode->i_mutex);
error = -EBUSY;
- if (d_mountpoint(dentry))
+ if (is_local_mountpoint(dentry))
goto out;
error = security_inode_rmdir(dir, dentry);
@@ -3579,6 +3580,7 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
dentry->d_inode->i_flags |= S_DEAD;
dont_mount(dentry);
+ detach_mounts(dentry);
out:
mutex_unlock(&dentry->d_inode->i_mutex);
@@ -3681,7 +3683,7 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry, struct inode **delegate
return -EPERM;
mutex_lock(&target->i_mutex);
- if (d_mountpoint(dentry))
+ if (is_local_mountpoint(dentry))
error = -EBUSY;
else {
error = security_inode_unlink(dir, dentry);
@@ -3690,8 +3692,10 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry, struct inode **delegate
if (error)
goto out;
error = dir->i_op->unlink(dir, dentry);
- if (!error)
+ if (!error) {
dont_mount(dentry);
+ detach_mounts(dentry);
+ }
}
}
out:
@@ -4126,7 +4130,7 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
mutex_lock(&target->i_mutex);
error = -EBUSY;
- if (d_mountpoint(old_dentry) || d_mountpoint(new_dentry))
+ if (is_local_mountpoint(old_dentry) || is_local_mountpoint(new_dentry))
goto out;
if (max_links && new_dir != old_dir) {
@@ -4164,6 +4168,7 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (is_dir)
target->i_flags |= S_DEAD;
dont_mount(new_dentry);
+ detach_mounts(new_dentry);
}
if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE)) {
if (!(flags & RENAME_EXCHANGE))
diff --git a/fs/namespace.c b/fs/namespace.c
index ef42d9bee212..348562f14e93 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -23,6 +23,7 @@
#include <linux/proc_ns.h>
#include <linux/magic.h>
#include <linux/bootmem.h>
+#include <linux/task_work.h>
#include "pnode.h"
#include "internal.h"
@@ -224,6 +225,7 @@ static struct mount *alloc_vfsmnt(const char *name)
INIT_LIST_HEAD(&mnt->mnt_share);
INIT_LIST_HEAD(&mnt->mnt_slave_list);
INIT_LIST_HEAD(&mnt->mnt_slave);
+ INIT_HLIST_NODE(&mnt->mnt_mp_list);
#ifdef CONFIG_FSNOTIFY
INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks);
#endif
@@ -666,11 +668,45 @@ struct vfsmount *lookup_mnt(struct path *path)
return m;
}
-static struct mountpoint *new_mountpoint(struct dentry *dentry)
+/*
+ * __is_local_mountpoint - Test to see if dentry is a mountpoint in the
+ * current mount namespace.
+ *
+ * The common case is that dentries are not mountpoints at all, and
+ * that test is handled inline. For the slow case, when we are actually
+ * dealing with a mountpoint of some kind, walk through all of the
+ * mounts in the current mount namespace and test to see if the dentry
+ * is a mountpoint.
+ *
+ * The mount_hashtable is not usable in this context because we
+ * need to identify all mounts that may be in the current mount
+ * namespace, not just a mount that happens to have some specified
+ * parent mount.
+ */
+bool __is_local_mountpoint(struct dentry *dentry)
+{
+ struct mnt_namespace *ns = current->nsproxy->mnt_ns;
+ struct mount *mnt;
+ bool is_covered = false;
+
+ if (!d_mountpoint(dentry))
+ goto out;
+
+ down_read(&namespace_sem);
+ list_for_each_entry(mnt, &ns->list, mnt_list) {
+ is_covered = (mnt->mnt_mountpoint == dentry);
+ if (is_covered)
+ break;
+ }
+ up_read(&namespace_sem);
+out:
+ return is_covered;
+}
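For context: d_mountpoint() reports whether anything is mounted on the dentry in any mount namespace, while is_local_mountpoint() only reports mounts visible in the caller's own namespace. A hypothetical check in the style of the vfs_rmdir()/vfs_unlink() hunks further down (example_may_delete() is not part of the patch):

static int example_may_delete(struct dentry *dentry)
{
	/* busy only if a mount in our own namespace sits on this dentry */
	if (is_local_mountpoint(dentry))
		return -EBUSY;
	return 0;
}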
+
+static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
{
struct hlist_head *chain = mp_hash(dentry);
struct mountpoint *mp;
- int ret;
hlist_for_each_entry(mp, chain, m_hash) {
if (mp->m_dentry == dentry) {
@@ -681,6 +717,14 @@ static struct mountpoint *new_mountpoint(struct dentry *dentry)
return mp;
}
}
+ return NULL;
+}
+
+static struct mountpoint *new_mountpoint(struct dentry *dentry)
+{
+ struct hlist_head *chain = mp_hash(dentry);
+ struct mountpoint *mp;
+ int ret;
mp = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
if (!mp)
@@ -695,6 +739,7 @@ static struct mountpoint *new_mountpoint(struct dentry *dentry)
mp->m_dentry = dentry;
mp->m_count = 1;
hlist_add_head(&mp->m_hash, chain);
+ INIT_HLIST_HEAD(&mp->m_list);
return mp;
}
@@ -702,6 +747,7 @@ static void put_mountpoint(struct mountpoint *mp)
{
if (!--mp->m_count) {
struct dentry *dentry = mp->m_dentry;
+ BUG_ON(!hlist_empty(&mp->m_list));
spin_lock(&dentry->d_lock);
dentry->d_flags &= ~DCACHE_MOUNTED;
spin_unlock(&dentry->d_lock);
@@ -748,6 +794,7 @@ static void detach_mnt(struct mount *mnt, struct path *old_path)
mnt->mnt_mountpoint = mnt->mnt.mnt_root;
list_del_init(&mnt->mnt_child);
hlist_del_init_rcu(&mnt->mnt_hash);
+ hlist_del_init(&mnt->mnt_mp_list);
put_mountpoint(mnt->mnt_mp);
mnt->mnt_mp = NULL;
}
@@ -764,6 +811,7 @@ void mnt_set_mountpoint(struct mount *mnt,
child_mnt->mnt_mountpoint = dget(mp->m_dentry);
child_mnt->mnt_parent = mnt;
child_mnt->mnt_mp = mp;
+ hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
}
/*
@@ -957,6 +1005,46 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
return ERR_PTR(err);
}
+static void cleanup_mnt(struct mount *mnt)
+{
+ /*
+ * This probably indicates that somebody messed
+ * up a mnt_want/drop_write() pair. If this
+ * happens, the filesystem was probably unable
+ * to make r/w->r/o transitions.
+ */
+ /*
+ * The locking used to deal with mnt_count decrement provides barriers,
+ * so mnt_get_writers() below is safe.
+ */
+ WARN_ON(mnt_get_writers(mnt));
+ if (unlikely(mnt->mnt_pins.first))
+ mnt_pin_kill(mnt);
+ fsnotify_vfsmount_delete(&mnt->mnt);
+ dput(mnt->mnt.mnt_root);
+ deactivate_super(mnt->mnt.mnt_sb);
+ mnt_free_id(mnt);
+ call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt);
+}
+
+static void __cleanup_mnt(struct rcu_head *head)
+{
+ cleanup_mnt(container_of(head, struct mount, mnt_rcu));
+}
+
+static LLIST_HEAD(delayed_mntput_list);
+static void delayed_mntput(struct work_struct *unused)
+{
+ struct llist_node *node = llist_del_all(&delayed_mntput_list);
+ struct llist_node *next;
+
+ for (; node; node = next) {
+ next = llist_next(node);
+ cleanup_mnt(llist_entry(node, struct mount, mnt_llist));
+ }
+}
+static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);
+
static void mntput_no_expire(struct mount *mnt)
{
rcu_read_lock();
@@ -982,24 +1070,18 @@ static void mntput_no_expire(struct mount *mnt)
list_del(&mnt->mnt_instance);
unlock_mount_hash();
- /*
- * This probably indicates that somebody messed
- * up a mnt_want/drop_write() pair. If this
- * happens, the filesystem was probably unable
- * to make r/w->r/o transitions.
- */
- /*
- * The locking used to deal with mnt_count decrement provides barriers,
- * so mnt_get_writers() below is safe.
- */
- WARN_ON(mnt_get_writers(mnt));
- if (unlikely(mnt->mnt_pins.first))
- mnt_pin_kill(mnt);
- fsnotify_vfsmount_delete(&mnt->mnt);
- dput(mnt->mnt.mnt_root);
- deactivate_super(mnt->mnt.mnt_sb);
- mnt_free_id(mnt);
- call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt);
+ if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
+ struct task_struct *task = current;
+ if (likely(!(task->flags & PF_KTHREAD))) {
+ init_task_work(&mnt->mnt_rcu, __cleanup_mnt);
+ if (!task_work_add(task, &mnt->mnt_rcu, true))
+ return;
+ }
+ if (llist_add(&mnt->mnt_llist, &delayed_mntput_list))
+ schedule_delayed_work(&delayed_mntput_work, 1);
+ return;
+ }
+ cleanup_mnt(mnt);
}
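Note on the rewritten teardown above: for ordinary mounts dropped by a normal user task, __cleanup_mnt() is queued with task_work_add() and runs when that task returns to userspace; if that fails, or the caller is a kernel thread, the mount is pushed onto delayed_mntput_list and handled by the delayed_mntput() work item; only MNT_INTERNAL mounts are still cleaned up synchronously. Reusing mnt_rcu as the task_work callback head works because rcu_head is defined as an alias for callback_head.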
void mntput(struct vfsmount *mnt)
@@ -1272,6 +1354,7 @@ void umount_tree(struct mount *mnt, int how)
if (how < 2)
p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
if (mnt_has_parent(p)) {
+ hlist_del_init(&p->mnt_mp_list);
put_mountpoint(p->mnt_mp);
mnt_add_count(p->mnt_parent, -1);
/* move the reference to mountpoint into ->mnt_ex_mountpoint */
@@ -1385,6 +1468,37 @@ static int do_umount(struct mount *mnt, int flags)
return retval;
}
+/*
+ * __detach_mounts - lazily unmount all mounts on the specified dentry
+ *
+ * During unlink, rmdir, and d_drop it is possible to lose the path
+ * to an existing mountpoint, and wind up leaking the mount.
+ * detach_mounts allows lazily unmounting those mounts instead of
+ * leaking them.
+ *
+ * The caller may hold dentry->d_inode->i_mutex.
+ */
+void __detach_mounts(struct dentry *dentry)
+{
+ struct mountpoint *mp;
+ struct mount *mnt;
+
+ namespace_lock();
+ mp = lookup_mountpoint(dentry);
+ if (!mp)
+ goto out_unlock;
+
+ lock_mount_hash();
+ while (!hlist_empty(&mp->m_list)) {
+ mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
+ umount_tree(mnt, 2);
+ }
+ unlock_mount_hash();
+ put_mountpoint(mp);
+out_unlock:
+ namespace_unlock();
+}
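The consumers of this helper appear in the fs/namei.c hunks later in this diff; the call pattern there is simply:

	error = dir->i_op->unlink(dir, dentry);
	if (!error) {
		dont_mount(dentry);
		detach_mounts(dentry);	/* no-op unless d_mountpoint(dentry) */
	}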
+
/*
* Is the caller allowed to modify his namespace?
*/
@@ -1742,7 +1856,9 @@ retry:
namespace_lock();
mnt = lookup_mnt(path);
if (likely(!mnt)) {
- struct mountpoint *mp = new_mountpoint(dentry);
+ struct mountpoint *mp = lookup_mountpoint(dentry);
+ if (!mp)
+ mp = new_mountpoint(dentry);
if (IS_ERR(mp)) {
namespace_unlock();
mutex_unlock(&dentry->d_inode->i_mutex);
@@ -2398,21 +2514,9 @@ int copy_mount_options(const void __user * data, unsigned long *where)
return 0;
}
-int copy_mount_string(const void __user *data, char **where)
+char *copy_mount_string(const void __user *data)
{
- char *tmp;
-
- if (!data) {
- *where = NULL;
- return 0;
- }
-
- tmp = strndup_user(data, PAGE_SIZE);
- if (IS_ERR(tmp))
- return PTR_ERR(tmp);
-
- *where = tmp;
- return 0;
+ return data ? strndup_user(data, PAGE_SIZE) : NULL;
}
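A small sketch of the new helper's contract, inferred from the mount syscall hunk below: a NULL userspace pointer yields NULL, a copy failure yields an ERR_PTR value, and anything else is a kmalloc'd string the caller must kfree(). example_use() is hypothetical.

static int example_use(const char __user *type)
{
	char *kernel_type = copy_mount_string(type);

	if (IS_ERR(kernel_type))
		return PTR_ERR(kernel_type);
	/* kernel_type may legitimately be NULL here */
	kfree(kernel_type);		/* kfree(NULL) is a no-op */
	return 0;
}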
/*
@@ -2429,7 +2533,7 @@ int copy_mount_string(const void __user *data, char **where)
* Therefore, if this magic number is present, it carries no information
* and must be discarded.
*/
-long do_mount(const char *dev_name, const char *dir_name,
+long do_mount(const char *dev_name, const char __user *dir_name,
const char *type_page, unsigned long flags, void *data_page)
{
struct path path;
@@ -2441,15 +2545,11 @@ long do_mount(const char *dev_name, const char *dir_name,
flags &= ~MS_MGC_MSK;
/* Basic sanity checks */
-
- if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE))
- return -EINVAL;
-
if (data_page)
((char *)data_page)[PAGE_SIZE - 1] = 0;
/* ... and get the mountpoint */
- retval = kern_path(dir_name, LOOKUP_FOLLOW, &path);
+ retval = user_path(dir_name, &path);
if (retval)
return retval;
@@ -2674,37 +2774,30 @@ SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
{
int ret;
char *kernel_type;
- struct filename *kernel_dir;
char *kernel_dev;
unsigned long data_page;
- ret = copy_mount_string(type, &kernel_type);
- if (ret < 0)
+ kernel_type = copy_mount_string(type);
+ ret = PTR_ERR(kernel_type);
+ if (IS_ERR(kernel_type))
goto out_type;
- kernel_dir = getname(dir_name);
- if (IS_ERR(kernel_dir)) {
- ret = PTR_ERR(kernel_dir);
- goto out_dir;
- }
-
- ret = copy_mount_string(dev_name, &kernel_dev);
- if (ret < 0)
+ kernel_dev = copy_mount_string(dev_name);
+ ret = PTR_ERR(kernel_dev);
+ if (IS_ERR(kernel_dev))
goto out_dev;
ret = copy_mount_options(data, &data_page);
if (ret < 0)
goto out_data;
- ret = do_mount(kernel_dev, kernel_dir->name, kernel_type, flags,
+ ret = do_mount(kernel_dev, dir_name, kernel_type, flags,
(void *) data_page);
free_page(data_page);
out_data:
kfree(kernel_dev);
out_dev:
- putname(kernel_dir);
-out_dir:
kfree(kernel_type);
out_type:
return ret;
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index 08b8ea8c353e..314e7add99b8 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -388,7 +388,6 @@ static struct dentry *
ncp_dget_fpos(struct dentry *dentry, struct dentry *parent, unsigned long fpos)
{
struct dentry *dent = dentry;
- struct list_head *next;
if (d_validate(dent, parent)) {
if (dent->d_name.len <= NCP_MAXPATHLEN &&
@@ -404,9 +403,7 @@ ncp_dget_fpos(struct dentry *dentry, struct dentry *parent, unsigned long fpos)
/* If a pointer is invalid, we search the dentry. */
spin_lock(&parent->d_lock);
- next = parent->d_subdirs.next;
- while (next != &parent->d_subdirs) {
- dent = list_entry(next, struct dentry, d_u.d_child);
+ list_for_each_entry(dent, &parent->d_subdirs, d_u.d_child) {
if ((unsigned long)dent->d_fsdata == fpos) {
if (dent->d_inode)
dget(dent);
@@ -415,7 +412,6 @@ ncp_dget_fpos(struct dentry *dentry, struct dentry *parent, unsigned long fpos)
spin_unlock(&parent->d_lock);
goto out;
}
- next = next->next;
}
spin_unlock(&parent->d_lock);
return NULL;
diff --git a/fs/ncpfs/ncplib_kernel.h b/fs/ncpfs/ncplib_kernel.h
index 32c06587351a..52cb19d66ecb 100644
--- a/fs/ncpfs/ncplib_kernel.h
+++ b/fs/ncpfs/ncplib_kernel.h
@@ -188,20 +188,14 @@ static inline void
ncp_renew_dentries(struct dentry *parent)
{
struct ncp_server *server = NCP_SERVER(parent->d_inode);
- struct list_head *next;
struct dentry *dentry;
spin_lock(&parent->d_lock);
- next = parent->d_subdirs.next;
- while (next != &parent->d_subdirs) {
- dentry = list_entry(next, struct dentry, d_u.d_child);
-
+ list_for_each_entry(dentry, &parent->d_subdirs, d_u.d_child) {
if (dentry->d_fsdata == NULL)
ncp_age_dentry(server, dentry);
else
ncp_new_dentry(dentry);
-
- next = next->next;
}
spin_unlock(&parent->d_lock);
}
@@ -210,16 +204,12 @@ static inline void
ncp_invalidate_dircache_entries(struct dentry *parent)
{
struct ncp_server *server = NCP_SERVER(parent->d_inode);
- struct list_head *next;
struct dentry *dentry;
spin_lock(&parent->d_lock);
- next = parent->d_subdirs.next;
- while (next != &parent->d_subdirs) {
- dentry = list_entry(next, struct dentry, d_u.d_child);
+ list_for_each_entry(dentry, &parent->d_subdirs, d_u.d_child) {
dentry->d_fsdata = NULL;
ncp_age_dentry(server, dentry);
- next = next->next;
}
spin_unlock(&parent->d_lock);
}
diff --git a/fs/nfs/blocklayout/rpc_pipefs.c b/fs/nfs/blocklayout/rpc_pipefs.c
index 8d04bda2bd2e..e966c023b1b7 100644
--- a/fs/nfs/blocklayout/rpc_pipefs.c
+++ b/fs/nfs/blocklayout/rpc_pipefs.c
@@ -92,7 +92,6 @@ bl_resolve_deviceid(struct nfs_server *server, struct pnfs_block_volume *b,
set_current_state(TASK_UNINTERRUPTIBLE);
schedule();
- __set_current_state(TASK_RUNNING);
remove_wait_queue(&nn->bl_wq, &wq);
if (reply->status != BL_DEVICE_REQUEST_PROC) {
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 36d921f0c602..06e8cfcbb670 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -486,8 +486,7 @@ void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
nfs_setsecurity(dentry->d_inode, entry->fattr, entry->label);
goto out;
} else {
- if (d_invalidate(dentry) != 0)
- goto out;
+ d_invalidate(dentry);
dput(dentry);
}
}
@@ -1211,10 +1210,6 @@ out_zap_parent:
if (IS_ROOT(dentry))
goto out_valid;
}
- /* If we have submounts, don't unhash ! */
- if (check_submounts_and_drop(dentry) != 0)
- goto out_valid;
-
dput(parent);
dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is invalid\n",
__func__, dentry);
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 6920127c5eb7..4ea92ce0537f 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -919,17 +919,6 @@ int nfs_flock(struct file *filp, int cmd, struct file_lock *fl)
}
EXPORT_SYMBOL_GPL(nfs_flock);
-/*
- * There is no protocol support for leases, so we have no way to implement
- * them correctly in the face of opens by other clients.
- */
-int nfs_setlease(struct file *file, long arg, struct file_lock **fl)
-{
- dprintk("NFS: setlease(%pD2, arg=%ld)\n", file, arg);
- return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(nfs_setlease);
-
const struct file_operations nfs_file_operations = {
.llseek = nfs_file_llseek,
.read = new_sync_read,
@@ -946,6 +935,6 @@ const struct file_operations nfs_file_operations = {
.splice_read = nfs_file_splice_read,
.splice_write = iter_file_splice_write,
.check_flags = nfs_check_flags,
- .setlease = nfs_setlease,
+ .setlease = simple_nosetlease,
};
EXPORT_SYMBOL_GPL(nfs_file_operations);
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index 7dd55b745c4d..2f5db844c172 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -177,7 +177,6 @@ static struct key_type key_type_id_resolver = {
.preparse = user_preparse,
.free_preparse = user_free_preparse,
.instantiate = generic_key_instantiate,
- .match = user_match,
.revoke = user_revoke,
.destroy = user_destroy,
.describe = user_describe,
@@ -401,7 +400,6 @@ static struct key_type key_type_id_resolver_legacy = {
.preparse = user_preparse,
.free_preparse = user_free_preparse,
.instantiate = generic_key_instantiate,
- .match = user_match,
.revoke = user_revoke,
.destroy = user_destroy,
.describe = user_describe,
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 14ae6f20a172..efaa31c70fbe 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -339,7 +339,6 @@ int nfs_file_release(struct inode *, struct file *);
int nfs_lock(struct file *, int, struct file_lock *);
int nfs_flock(struct file *, int, struct file_lock *);
int nfs_check_flags(int);
-int nfs_setlease(struct file *, long, struct file_lock **);
/* inode.c */
extern struct workqueue_struct *nfsiod_workqueue;
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index a816f0627a6c..3e987ad9ae25 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -131,5 +131,5 @@ const struct file_operations nfs4_file_operations = {
.splice_read = nfs_file_splice_read,
.splice_write = iter_file_splice_write,
.check_flags = nfs_check_flags,
- .setlease = nfs_setlease,
+ .setlease = simple_nosetlease,
};
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index ea95a2bc21b5..a25490ae6c62 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -675,7 +675,6 @@ __cld_pipe_upcall(struct rpc_pipe *pipe, struct cld_msg *cmsg)
}
schedule();
- set_current_state(TASK_RUNNING);
if (msg.errno < 0)
ret = msg.errno;
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 5c0cac173068..e9c3afe4b5d3 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -218,6 +218,13 @@ static void nfsd4_put_session(struct nfsd4_session *ses)
spin_unlock(&nn->client_lock);
}
+static inline struct nfs4_stateowner *
+nfs4_get_stateowner(struct nfs4_stateowner *sop)
+{
+ atomic_inc(&sop->so_count);
+ return sop;
+}
+
static int
same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
{
@@ -237,10 +244,8 @@ find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
so_strhash) {
if (!so->so_is_open_owner)
continue;
- if (same_owner_str(so, &open->op_owner)) {
- atomic_inc(&so->so_count);
- return openowner(so);
- }
+ if (same_owner_str(so, &open->op_owner))
+ return openowner(nfs4_get_stateowner(so));
}
return NULL;
}
@@ -678,18 +683,14 @@ nfs4_put_stid(struct nfs4_stid *s)
static void nfs4_put_deleg_lease(struct nfs4_file *fp)
{
struct file *filp = NULL;
- struct file_lock *fl;
spin_lock(&fp->fi_lock);
- if (fp->fi_lease && atomic_dec_and_test(&fp->fi_delegees)) {
+ if (fp->fi_deleg_file && atomic_dec_and_test(&fp->fi_delegees))
swap(filp, fp->fi_deleg_file);
- fl = fp->fi_lease;
- fp->fi_lease = NULL;
- }
spin_unlock(&fp->fi_lock);
if (filp) {
- vfs_setlease(filp, F_UNLCK, &fl);
+ vfs_setlease(filp, F_UNLCK, NULL, NULL);
fput(filp);
}
}
@@ -1655,7 +1656,7 @@ __destroy_client(struct nfs4_client *clp)
}
while (!list_empty(&clp->cl_openowners)) {
oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
- atomic_inc(&oo->oo_owner.so_count);
+ nfs4_get_stateowner(&oo->oo_owner);
release_openowner(oo);
}
nfsd4_shutdown_callback(clp);
@@ -3067,8 +3068,8 @@ static void nfsd4_init_file(struct nfs4_file *fp, struct knfsd_fh *fh)
INIT_LIST_HEAD(&fp->fi_stateids);
INIT_LIST_HEAD(&fp->fi_delegations);
fh_copy_shallow(&fp->fi_fhandle, fh);
+ fp->fi_deleg_file = NULL;
fp->fi_had_conflict = false;
- fp->fi_lease = NULL;
fp->fi_share_deny = 0;
memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
memset(fp->fi_access, 0, sizeof(fp->fi_access));
@@ -3136,8 +3137,7 @@ static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
{
if (!nfsd4_has_session(cstate)) {
mutex_lock(&so->so_replay.rp_mutex);
- cstate->replay_owner = so;
- atomic_inc(&so->so_count);
+ cstate->replay_owner = nfs4_get_stateowner(so);
}
}
@@ -3236,8 +3236,7 @@ static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
atomic_inc(&stp->st_stid.sc_count);
stp->st_stid.sc_type = NFS4_OPEN_STID;
INIT_LIST_HEAD(&stp->st_locks);
- stp->st_stateowner = &oo->oo_owner;
- atomic_inc(&stp->st_stateowner->so_count);
+ stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
get_nfs4_file(fp);
stp->st_stid.sc_file = fp;
stp->st_access_bmap = 0;
@@ -3434,18 +3433,20 @@ static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
}
/* Called from break_lease() with i_lock held. */
-static void nfsd_break_deleg_cb(struct file_lock *fl)
+static bool
+nfsd_break_deleg_cb(struct file_lock *fl)
{
+ bool ret = false;
struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner;
struct nfs4_delegation *dp;
if (!fp) {
WARN(1, "(%p)->fl_owner NULL\n", fl);
- return;
+ return ret;
}
if (fp->fi_had_conflict) {
WARN(1, "duplicate break on %p\n", fp);
- return;
+ return ret;
}
/*
* We don't want the locks code to timeout the lease for us;
@@ -3457,24 +3458,23 @@ static void nfsd_break_deleg_cb(struct file_lock *fl)
spin_lock(&fp->fi_lock);
fp->fi_had_conflict = true;
/*
- * If there are no delegations on the list, then we can't count on this
- * lease ever being cleaned up. Set the fl_break_time to jiffies so that
- * time_out_leases will do it ASAP. The fact that fi_had_conflict is now
- * true should keep any new delegations from being hashed.
+ * If there are no delegations on the list, then return true
+ * so that the lease code will go ahead and delete it.
*/
if (list_empty(&fp->fi_delegations))
- fl->fl_break_time = jiffies;
+ ret = true;
else
list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
nfsd_break_one_deleg(dp);
spin_unlock(&fp->fi_lock);
+ return ret;
}
-static
-int nfsd_change_deleg_cb(struct file_lock **onlist, int arg)
+static int
+nfsd_change_deleg_cb(struct file_lock **onlist, int arg, struct list_head *dispose)
{
if (arg & F_UNLCK)
- return lease_modify(onlist, arg);
+ return lease_modify(onlist, arg, dispose);
else
return -EAGAIN;
}
@@ -3820,7 +3820,7 @@ static struct file_lock *nfs4_alloc_init_lease(struct nfs4_file *fp, int flag)
static int nfs4_setlease(struct nfs4_delegation *dp)
{
struct nfs4_file *fp = dp->dl_stid.sc_file;
- struct file_lock *fl;
+ struct file_lock *fl, *ret;
struct file *filp;
int status = 0;
@@ -3834,11 +3834,12 @@ static int nfs4_setlease(struct nfs4_delegation *dp)
return -EBADF;
}
fl->fl_file = filp;
- status = vfs_setlease(filp, fl->fl_type, &fl);
- if (status) {
+ ret = fl;
+ status = vfs_setlease(filp, fl->fl_type, &fl, NULL);
+ if (fl)
locks_free_lock(fl);
+ if (status)
goto out_fput;
- }
spin_lock(&state_lock);
spin_lock(&fp->fi_lock);
/* Did the lease get broken before we took the lock? */
@@ -3846,13 +3847,12 @@ static int nfs4_setlease(struct nfs4_delegation *dp)
if (fp->fi_had_conflict)
goto out_unlock;
/* Race breaker */
- if (fp->fi_lease) {
+ if (fp->fi_deleg_file) {
status = 0;
atomic_inc(&fp->fi_delegees);
hash_delegation_locked(dp, fp);
goto out_unlock;
}
- fp->fi_lease = fl;
fp->fi_deleg_file = filp;
atomic_set(&fp->fi_delegees, 1);
hash_delegation_locked(dp, fp);
@@ -3885,7 +3885,7 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
spin_lock(&state_lock);
spin_lock(&fp->fi_lock);
dp->dl_stid.sc_file = fp;
- if (!fp->fi_lease) {
+ if (!fp->fi_deleg_file) {
spin_unlock(&fp->fi_lock);
spin_unlock(&state_lock);
status = nfs4_setlease(dp);
@@ -4929,9 +4929,25 @@ nfs4_transform_lock_offset(struct file_lock *lock)
lock->fl_end = OFFSET_MAX;
}
-/* Hack!: For now, we're defining this just so we can use a pointer to it
- * as a unique cookie to identify our (NFSv4's) posix locks. */
+static void nfsd4_fl_get_owner(struct file_lock *dst, struct file_lock *src)
+{
+ struct nfs4_lockowner *lo = (struct nfs4_lockowner *)src->fl_owner;
+ dst->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lo->lo_owner));
+}
+
+static void nfsd4_fl_put_owner(struct file_lock *fl)
+{
+ struct nfs4_lockowner *lo = (struct nfs4_lockowner *)fl->fl_owner;
+
+ if (lo) {
+ nfs4_put_stateowner(&lo->lo_owner);
+ fl->fl_owner = NULL;
+ }
+}
+
static const struct lock_manager_operations nfsd_posix_mng_ops = {
+ .lm_get_owner = nfsd4_fl_get_owner,
+ .lm_put_owner = nfsd4_fl_put_owner,
};
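These two hooks replace the old "unique cookie" hack: nfsd now takes a stateowner reference whenever one of its file_locks is copied and drops it when the lock's private data is released (note the locks_release_private() calls added to fcntl_getlk()/fcntl_getlk64() earlier in this diff). The generic call sites are not shown here; the assumed shape is roughly:

	/* assumption about fs/locks.c behaviour, not part of this hunk */
	if (fl->fl_lmops && fl->fl_lmops->lm_get_owner)
		fl->fl_lmops->lm_get_owner(new_fl, fl);	/* when fl is duplicated into new_fl */

	if (fl->fl_lmops && fl->fl_lmops->lm_put_owner)
		fl->fl_lmops->lm_put_owner(fl);		/* when fl's private data is released */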
static inline void
@@ -4977,10 +4993,8 @@ find_lockowner_str_locked(clientid_t *clid, struct xdr_netobj *owner,
so_strhash) {
if (so->so_is_open_owner)
continue;
- if (!same_owner_str(so, owner))
- continue;
- atomic_inc(&so->so_count);
- return lockowner(so);
+ if (same_owner_str(so, owner))
+ return lockowner(nfs4_get_stateowner(so));
}
return NULL;
}
@@ -5059,8 +5073,7 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
atomic_inc(&stp->st_stid.sc_count);
stp->st_stid.sc_type = NFS4_LOCK_STID;
- stp->st_stateowner = &lo->lo_owner;
- atomic_inc(&lo->lo_owner.so_count);
+ stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
get_nfs4_file(fp);
stp->st_stid.sc_file = fp;
stp->st_stid.sc_free = nfs4_free_lock_stateid;
@@ -5299,7 +5312,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
status = nfserr_openmode;
goto out;
}
- file_lock->fl_owner = (fl_owner_t)lock_sop;
+
+ file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
file_lock->fl_pid = current->tgid;
file_lock->fl_file = filp;
file_lock->fl_flags = FL_POSIX;
@@ -5495,7 +5509,7 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
}
file_lock->fl_type = F_UNLCK;
- file_lock->fl_owner = (fl_owner_t)lockowner(stp->st_stateowner);
+ file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
file_lock->fl_pid = current->tgid;
file_lock->fl_file = filp;
file_lock->fl_flags = FL_POSIX;
@@ -5602,7 +5616,7 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
}
}
- atomic_inc(&sop->so_count);
+ nfs4_get_stateowner(sop);
break;
}
spin_unlock(&clp->cl_lock);
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 0a47c6a6b301..2712042a66b1 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -486,7 +486,6 @@ struct nfs4_file {
atomic_t fi_access[2];
u32 fi_share_deny;
struct file *fi_deleg_file;
- struct file_lock *fi_lease;
atomic_t fi_delegees;
struct knfsd_fh fi_fhandle;
bool fi_had_conflict;
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 965cffd17a0c..989129e2d6ea 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -721,7 +721,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
goto out_nfserr;
}
- host_err = ima_file_check(file, may_flags);
+ host_err = ima_file_check(file, may_flags, 0);
if (host_err) {
nfsd_close(file);
goto out_nfserr;
diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
index abc8cbcfe90e..caaaf9dfe353 100644
--- a/fs/notify/dnotify/dnotify.c
+++ b/fs/notify/dnotify/dnotify.c
@@ -346,13 +346,7 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
goto out;
}
- error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
- if (error) {
- /* if we added, we must shoot */
- if (dn_mark == new_dn_mark)
- destroy = 1;
- goto out;
- }
+ __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
error = attach_dn(dn, dn_mark, id, fd, filp, mask);
/* !error means that we attached the dn to the dn_mark, so don't free it */
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 682732f3f0d8..324dc93ac896 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1252,7 +1252,7 @@ bail:
brelse(bh);
/* Release quota pointers in case we acquired them */
- for (qtype = 0; qtype < MAXQUOTAS; qtype++)
+ for (qtype = 0; qtype < OCFS2_MAXQUOTAS; qtype++)
dqput(transfer_to[qtype]);
if (!status && attr->ia_valid & ATTR_MODE) {
diff --git a/fs/ocfs2/quota.h b/fs/ocfs2/quota.h
index f266d67df3c6..1eae330193a6 100644
--- a/fs/ocfs2/quota.h
+++ b/fs/ocfs2/quota.h
@@ -17,6 +17,9 @@
#include "ocfs2.h"
+/* Number of quota types we support */
+#define OCFS2_MAXQUOTAS 2
+
/*
* In-memory structures
*/
@@ -39,7 +42,7 @@ struct ocfs2_recovery_chunk {
};
struct ocfs2_quota_recovery {
- struct list_head r_list[MAXQUOTAS]; /* List of chunks to recover */
+ struct list_head r_list[OCFS2_MAXQUOTAS]; /* List of chunks to recover */
};
/* In-memory structure with quota header information */
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index b990a62cff50..c93d67220887 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -336,8 +336,8 @@ void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
int ocfs2_global_read_info(struct super_block *sb, int type)
{
struct inode *gqinode = NULL;
- unsigned int ino[MAXQUOTAS] = { USER_QUOTA_SYSTEM_INODE,
- GROUP_QUOTA_SYSTEM_INODE };
+ unsigned int ino[OCFS2_MAXQUOTAS] = { USER_QUOTA_SYSTEM_INODE,
+ GROUP_QUOTA_SYSTEM_INODE };
struct ocfs2_global_disk_dqinfo dinfo;
struct mem_dqinfo *info = sb_dqinfo(sb, type);
struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index 2001862bf2b1..10b653930ee2 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -166,12 +166,12 @@ static int ocfs2_read_quota_block(struct inode *inode, u64 v_block,
/* Check whether we understand format of quota files */
static int ocfs2_local_check_quota_file(struct super_block *sb, int type)
{
- unsigned int lmagics[MAXQUOTAS] = OCFS2_LOCAL_QMAGICS;
- unsigned int lversions[MAXQUOTAS] = OCFS2_LOCAL_QVERSIONS;
- unsigned int gmagics[MAXQUOTAS] = OCFS2_GLOBAL_QMAGICS;
- unsigned int gversions[MAXQUOTAS] = OCFS2_GLOBAL_QVERSIONS;
- unsigned int ino[MAXQUOTAS] = { USER_QUOTA_SYSTEM_INODE,
- GROUP_QUOTA_SYSTEM_INODE };
+ unsigned int lmagics[OCFS2_MAXQUOTAS] = OCFS2_LOCAL_QMAGICS;
+ unsigned int lversions[OCFS2_MAXQUOTAS] = OCFS2_LOCAL_QVERSIONS;
+ unsigned int gmagics[OCFS2_MAXQUOTAS] = OCFS2_GLOBAL_QMAGICS;
+ unsigned int gversions[OCFS2_MAXQUOTAS] = OCFS2_GLOBAL_QVERSIONS;
+ unsigned int ino[OCFS2_MAXQUOTAS] = { USER_QUOTA_SYSTEM_INODE,
+ GROUP_QUOTA_SYSTEM_INODE };
struct buffer_head *bh = NULL;
struct inode *linode = sb_dqopt(sb)->files[type];
struct inode *ginode = NULL;
@@ -336,7 +336,7 @@ void ocfs2_free_quota_recovery(struct ocfs2_quota_recovery *rec)
{
int type;
- for (type = 0; type < MAXQUOTAS; type++)
+ for (type = 0; type < OCFS2_MAXQUOTAS; type++)
free_recovery_list(&(rec->r_list[type]));
kfree(rec);
}
@@ -382,7 +382,7 @@ static struct ocfs2_quota_recovery *ocfs2_alloc_quota_recovery(void)
rec = kmalloc(sizeof(struct ocfs2_quota_recovery), GFP_NOFS);
if (!rec)
return NULL;
- for (type = 0; type < MAXQUOTAS; type++)
+ for (type = 0; type < OCFS2_MAXQUOTAS; type++)
INIT_LIST_HEAD(&(rec->r_list[type]));
return rec;
}
@@ -392,10 +392,11 @@ struct ocfs2_quota_recovery *ocfs2_begin_quota_recovery(
struct ocfs2_super *osb,
int slot_num)
{
- unsigned int feature[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
- OCFS2_FEATURE_RO_COMPAT_GRPQUOTA};
- unsigned int ino[MAXQUOTAS] = { LOCAL_USER_QUOTA_SYSTEM_INODE,
- LOCAL_GROUP_QUOTA_SYSTEM_INODE };
+ unsigned int feature[OCFS2_MAXQUOTAS] = {
+ OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
+ OCFS2_FEATURE_RO_COMPAT_GRPQUOTA};
+ unsigned int ino[OCFS2_MAXQUOTAS] = { LOCAL_USER_QUOTA_SYSTEM_INODE,
+ LOCAL_GROUP_QUOTA_SYSTEM_INODE };
struct super_block *sb = osb->sb;
struct ocfs2_local_disk_dqinfo *ldinfo;
struct inode *lqinode;
@@ -412,7 +413,7 @@ struct ocfs2_quota_recovery *ocfs2_begin_quota_recovery(
return ERR_PTR(-ENOMEM);
/* First init... */
- for (type = 0; type < MAXQUOTAS; type++) {
+ for (type = 0; type < OCFS2_MAXQUOTAS; type++) {
if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, feature[type]))
continue;
/* At this point, journal of the slot is already replayed so
@@ -589,8 +590,8 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb,
struct ocfs2_quota_recovery *rec,
int slot_num)
{
- unsigned int ino[MAXQUOTAS] = { LOCAL_USER_QUOTA_SYSTEM_INODE,
- LOCAL_GROUP_QUOTA_SYSTEM_INODE };
+ unsigned int ino[OCFS2_MAXQUOTAS] = { LOCAL_USER_QUOTA_SYSTEM_INODE,
+ LOCAL_GROUP_QUOTA_SYSTEM_INODE };
struct super_block *sb = osb->sb;
struct ocfs2_local_disk_dqinfo *ldinfo;
struct buffer_head *bh;
@@ -604,7 +605,7 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb,
"slot %u\n", osb->dev_str, slot_num);
mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
- for (type = 0; type < MAXQUOTAS; type++) {
+ for (type = 0; type < OCFS2_MAXQUOTAS; type++) {
if (list_empty(&(rec->r_list[type])))
continue;
trace_ocfs2_finish_quota_recovery(slot_num);
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 4142546aedae..93c85bc745e1 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -899,11 +899,12 @@ static int ocfs2_susp_quotas(struct ocfs2_super *osb, int unsuspend)
{
int type;
struct super_block *sb = osb->sb;
- unsigned int feature[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
- OCFS2_FEATURE_RO_COMPAT_GRPQUOTA};
+ unsigned int feature[OCFS2_MAXQUOTAS] = {
+ OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
+ OCFS2_FEATURE_RO_COMPAT_GRPQUOTA};
int status = 0;
- for (type = 0; type < MAXQUOTAS; type++) {
+ for (type = 0; type < OCFS2_MAXQUOTAS; type++) {
if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, feature[type]))
continue;
if (unsuspend)
@@ -927,17 +928,19 @@ static int ocfs2_susp_quotas(struct ocfs2_super *osb, int unsuspend)
static int ocfs2_enable_quotas(struct ocfs2_super *osb)
{
- struct inode *inode[MAXQUOTAS] = { NULL, NULL };
+ struct inode *inode[OCFS2_MAXQUOTAS] = { NULL, NULL };
struct super_block *sb = osb->sb;
- unsigned int feature[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
- OCFS2_FEATURE_RO_COMPAT_GRPQUOTA};
- unsigned int ino[MAXQUOTAS] = { LOCAL_USER_QUOTA_SYSTEM_INODE,
+ unsigned int feature[OCFS2_MAXQUOTAS] = {
+ OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
+ OCFS2_FEATURE_RO_COMPAT_GRPQUOTA};
+ unsigned int ino[OCFS2_MAXQUOTAS] = {
+ LOCAL_USER_QUOTA_SYSTEM_INODE,
LOCAL_GROUP_QUOTA_SYSTEM_INODE };
int status;
int type;
sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NEGATIVE_USAGE;
- for (type = 0; type < MAXQUOTAS; type++) {
+ for (type = 0; type < OCFS2_MAXQUOTAS; type++) {
if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, feature[type]))
continue;
inode[type] = ocfs2_get_system_file_inode(osb, ino[type],
@@ -952,12 +955,12 @@ static int ocfs2_enable_quotas(struct ocfs2_super *osb)
goto out_quota_off;
}
- for (type = 0; type < MAXQUOTAS; type++)
+ for (type = 0; type < OCFS2_MAXQUOTAS; type++)
iput(inode[type]);
return 0;
out_quota_off:
ocfs2_disable_quotas(osb);
- for (type = 0; type < MAXQUOTAS; type++)
+ for (type = 0; type < OCFS2_MAXQUOTAS; type++)
iput(inode[type]);
mlog_errno(status);
return status;
@@ -972,7 +975,7 @@ static void ocfs2_disable_quotas(struct ocfs2_super *osb)
/* We mostly ignore errors in this function because there's not much
* we can do when we see them */
- for (type = 0; type < MAXQUOTAS; type++) {
+ for (type = 0; type < OCFS2_MAXQUOTAS; type++) {
if (!sb_has_quota_loaded(sb, type))
continue;
/* Cancel periodic syncing before we grab dqonoff_mutex */
@@ -993,8 +996,9 @@ static void ocfs2_disable_quotas(struct ocfs2_super *osb)
/* Handle quota on quotactl */
static int ocfs2_quota_on(struct super_block *sb, int type, int format_id)
{
- unsigned int feature[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
- OCFS2_FEATURE_RO_COMPAT_GRPQUOTA};
+ unsigned int feature[OCFS2_MAXQUOTAS] = {
+ OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
+ OCFS2_FEATURE_RO_COMPAT_GRPQUOTA};
if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, feature[type]))
return -EINVAL;
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 950100e326a1..772efa45a452 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1565,7 +1565,6 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
put_task_struct(task);
return 1;
}
- d_drop(dentry);
return 0;
}
@@ -1702,9 +1701,6 @@ out:
put_task_struct(task);
out_notask:
- if (status <= 0)
- d_drop(dentry);
-
return status;
}
@@ -2618,8 +2614,7 @@ static void proc_flush_task_mnt(struct vfsmount *mnt, pid_t pid, pid_t tgid)
/* no ->d_hash() rejects on procfs */
dentry = d_hash_and_lookup(mnt->mnt_root, &name);
if (dentry) {
- shrink_dcache_parent(dentry);
- d_drop(dentry);
+ d_invalidate(dentry);
dput(dentry);
}
@@ -2639,8 +2634,7 @@ static void proc_flush_task_mnt(struct vfsmount *mnt, pid_t pid, pid_t tgid)
name.len = snprintf(buf, sizeof(buf), "%d", pid);
dentry = d_hash_and_lookup(dir, &name);
if (dentry) {
- shrink_dcache_parent(dentry);
- d_drop(dentry);
+ d_invalidate(dentry);
dput(dentry);
}
diff --git a/fs/proc/fd.c b/fs/proc/fd.c
index 955bb55fab8c..e11d7c590bb0 100644
--- a/fs/proc/fd.c
+++ b/fs/proc/fd.c
@@ -129,8 +129,6 @@ static int tid_fd_revalidate(struct dentry *dentry, unsigned int flags)
}
put_task_struct(task);
}
-
- d_drop(dentry);
return 0;
}
diff --git a/fs/read_write.c b/fs/read_write.c
index 009d8542a889..7d9318c3d43c 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -513,6 +513,8 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
return ret;
}
+EXPORT_SYMBOL(__kernel_write);
+
ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
ssize_t ret;
diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
index 735c2c2b4536..1894d96ccb7c 100644
--- a/fs/reiserfs/reiserfs.h
+++ b/fs/reiserfs/reiserfs.h
@@ -506,6 +506,9 @@ typedef struct reiserfs_proc_info_data {
} reiserfs_proc_info_data_t;
#endif
+/* Number of quota types we support */
+#define REISERFS_MAXQUOTAS 2
+
/* reiserfs union of in-core super block data */
struct reiserfs_sb_info {
/* Buffer containing the super block */
@@ -615,7 +618,7 @@ struct reiserfs_sb_info {
spinlock_t old_work_lock; /* protects old_work and work_queued */
#ifdef CONFIG_QUOTA
- char *s_qf_names[MAXQUOTAS];
+ char *s_qf_names[REISERFS_MAXQUOTAS];
int s_jquota_fmt;
#endif
char *s_jdev; /* Stored jdev for mount option showing */
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index d46e88a33b02..f1376c92cf74 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -206,7 +206,7 @@ static int finish_unfinished(struct super_block *s)
#ifdef CONFIG_QUOTA
int i;
int ms_active_set;
- int quota_enabled[MAXQUOTAS];
+ int quota_enabled[REISERFS_MAXQUOTAS];
#endif
/* compose key to look for "save" links */
@@ -227,7 +227,7 @@ static int finish_unfinished(struct super_block *s)
s->s_flags |= MS_ACTIVE;
}
/* Turn on quotas so that they are updated correctly */
- for (i = 0; i < MAXQUOTAS; i++) {
+ for (i = 0; i < REISERFS_MAXQUOTAS; i++) {
quota_enabled[i] = 1;
if (REISERFS_SB(s)->s_qf_names[i]) {
int ret;
@@ -370,7 +370,7 @@ static int finish_unfinished(struct super_block *s)
#ifdef CONFIG_QUOTA
/* Turn quotas off */
reiserfs_write_unlock(s);
- for (i = 0; i < MAXQUOTAS; i++) {
+ for (i = 0; i < REISERFS_MAXQUOTAS; i++) {
if (sb_dqopt(s)->files[i] && quota_enabled[i])
dquot_quota_off(s, i);
}
@@ -1360,7 +1360,7 @@ static void handle_quota_files(struct super_block *s, char **qf_names,
{
int i;
- for (i = 0; i < MAXQUOTAS; i++) {
+ for (i = 0; i < REISERFS_MAXQUOTAS; i++) {
if (qf_names[i] != REISERFS_SB(s)->s_qf_names[i])
kfree(REISERFS_SB(s)->s_qf_names[i]);
REISERFS_SB(s)->s_qf_names[i] = qf_names[i];
@@ -1381,7 +1381,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
struct reiserfs_journal *journal = SB_JOURNAL(s);
char *new_opts = kstrdup(arg, GFP_KERNEL);
int err;
- char *qf_names[MAXQUOTAS];
+ char *qf_names[REISERFS_MAXQUOTAS];
unsigned int qfmt = 0;
#ifdef CONFIG_QUOTA
int i;
@@ -1400,7 +1400,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
(s, arg, &mount_options, &blocks, NULL, &commit_max_age,
qf_names, &qfmt)) {
#ifdef CONFIG_QUOTA
- for (i = 0; i < MAXQUOTAS; i++)
+ for (i = 0; i < REISERFS_MAXQUOTAS; i++)
if (qf_names[i] != REISERFS_SB(s)->s_qf_names[i])
kfree(qf_names[i]);
#endif
@@ -1844,7 +1844,7 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
char *jdev_name;
struct reiserfs_sb_info *sbi;
int errval = -EINVAL;
- char *qf_names[MAXQUOTAS] = {};
+ char *qf_names[REISERFS_MAXQUOTAS] = {};
unsigned int qfmt = 0;
save_mount_options(s, data);
@@ -2169,7 +2169,7 @@ error_unlocked:
#ifdef CONFIG_QUOTA
{
int j;
- for (j = 0; j < MAXQUOTAS; j++)
+ for (j = 0; j < REISERFS_MAXQUOTAS; j++)
kfree(qf_names[j]);
}
#endif
diff --git a/fs/reiserfs/xattr.h b/fs/reiserfs/xattr.h
index 857ec7e3016f..f620e9678dd5 100644
--- a/fs/reiserfs/xattr.h
+++ b/fs/reiserfs/xattr.h
@@ -7,7 +7,6 @@ struct inode;
struct dentry;
struct iattr;
struct super_block;
-struct nameidata;
int reiserfs_xattr_register_handlers(void) __init;
void reiserfs_xattr_unregister_handlers(void);
diff --git a/fs/super.c b/fs/super.c
index 1b836107acee..eae088f6aaae 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -80,6 +80,8 @@ static unsigned long super_cache_scan(struct shrinker *shrink,
inodes = list_lru_count_node(&sb->s_inode_lru, sc->nid);
dentries = list_lru_count_node(&sb->s_dentry_lru, sc->nid);
total_objects = dentries + inodes + fs_objects + 1;
+ if (!total_objects)
+ total_objects = 1;
/* proportion the scan between the caches */
dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 86c6743ec1fe..bb15771b92ae 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -223,11 +223,18 @@ out:
static int udf_release_file(struct inode *inode, struct file *filp)
{
- if (filp->f_mode & FMODE_WRITE) {
+ if (filp->f_mode & FMODE_WRITE &&
+ atomic_read(&inode->i_writecount) > 1) {
+ /*
+ * Grab i_mutex to avoid races with writes changing i_size
+ * while we are running.
+ */
+ mutex_lock(&inode->i_mutex);
down_write(&UDF_I(inode)->i_data_sem);
udf_discard_prealloc(inode);
udf_truncate_tail_extent(inode);
up_write(&UDF_I(inode)->i_data_sem);
+ mutex_unlock(&inode->i_mutex);
}
return 0;
}
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 08598843288f..c9b4df5810d5 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1277,7 +1277,7 @@ update_time:
*/
#define UDF_MAX_ICB_NESTING 1024
-static int udf_read_inode(struct inode *inode)
+static int udf_read_inode(struct inode *inode, bool hidden_inode)
{
struct buffer_head *bh = NULL;
struct fileEntry *fe;
@@ -1436,8 +1436,11 @@ reread:
link_count = le16_to_cpu(fe->fileLinkCount);
if (!link_count) {
- ret = -ESTALE;
- goto out;
+ if (!hidden_inode) {
+ ret = -ESTALE;
+ goto out;
+ }
+ link_count = 1;
}
set_nlink(inode, link_count);
@@ -1826,7 +1829,8 @@ out:
return err;
}
-struct inode *udf_iget(struct super_block *sb, struct kernel_lb_addr *ino)
+struct inode *__udf_iget(struct super_block *sb, struct kernel_lb_addr *ino,
+ bool hidden_inode)
{
unsigned long block = udf_get_lb_pblock(sb, ino, 0);
struct inode *inode = iget_locked(sb, block);
@@ -1839,7 +1843,7 @@ struct inode *udf_iget(struct super_block *sb, struct kernel_lb_addr *ino)
return inode;
memcpy(&UDF_I(inode)->i_location, ino, sizeof(struct kernel_lb_addr));
- err = udf_read_inode(inode);
+ err = udf_read_inode(inode, hidden_inode);
if (err < 0) {
iget_failed(inode);
return ERR_PTR(err);
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 5401fc33f5cc..e229315bbf7a 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -959,7 +959,7 @@ struct inode *udf_find_metadata_inode_efe(struct super_block *sb,
addr.logicalBlockNum = meta_file_loc;
addr.partitionReferenceNum = partition_num;
- metadata_fe = udf_iget(sb, &addr);
+ metadata_fe = udf_iget_special(sb, &addr);
if (IS_ERR(metadata_fe)) {
udf_warn(sb, "metadata inode efe not found\n");
@@ -1020,7 +1020,7 @@ static int udf_load_metadata_files(struct super_block *sb, int partition)
udf_debug("Bitmap file location: block = %d part = %d\n",
addr.logicalBlockNum, addr.partitionReferenceNum);
- fe = udf_iget(sb, &addr);
+ fe = udf_iget_special(sb, &addr);
if (IS_ERR(fe)) {
if (sb->s_flags & MS_RDONLY)
udf_warn(sb, "bitmap inode efe not found but it's ok since the disc is mounted read-only\n");
@@ -1119,7 +1119,7 @@ static int udf_fill_partdesc_info(struct super_block *sb,
};
struct inode *inode;
- inode = udf_iget(sb, &loc);
+ inode = udf_iget_special(sb, &loc);
if (IS_ERR(inode)) {
udf_debug("cannot load unallocSpaceTable (part %d)\n",
p_index);
@@ -1154,7 +1154,7 @@ static int udf_fill_partdesc_info(struct super_block *sb,
};
struct inode *inode;
- inode = udf_iget(sb, &loc);
+ inode = udf_iget_special(sb, &loc);
if (IS_ERR(inode)) {
udf_debug("cannot load freedSpaceTable (part %d)\n",
p_index);
@@ -1198,7 +1198,7 @@ static void udf_find_vat_block(struct super_block *sb, int p_index,
vat_block >= map->s_partition_root &&
vat_block >= start_block - 3; vat_block--) {
ino.logicalBlockNum = vat_block - map->s_partition_root;
- inode = udf_iget(sb, &ino);
+ inode = udf_iget_special(sb, &ino);
if (!IS_ERR(inode)) {
sbi->s_vat_inode = inode;
break;
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index 742557be9936..1cc3c993ebd0 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -138,7 +138,18 @@ extern int udf_write_fi(struct inode *inode, struct fileIdentDesc *,
/* file.c */
extern long udf_ioctl(struct file *, unsigned int, unsigned long);
/* inode.c */
-extern struct inode *udf_iget(struct super_block *, struct kernel_lb_addr *);
+extern struct inode *__udf_iget(struct super_block *, struct kernel_lb_addr *,
+ bool hidden_inode);
+static inline struct inode *udf_iget_special(struct super_block *sb,
+ struct kernel_lb_addr *ino)
+{
+ return __udf_iget(sb, ino, true);
+}
+static inline struct inode *udf_iget(struct super_block *sb,
+ struct kernel_lb_addr *ino)
+{
+ return __udf_iget(sb, ino, false);
+}
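The split matters because of the udf_read_inode() change above: udf_iget_special() is used for UDF-internal metadata inodes (the VAT, the bitmap, and the unallocated/freed space tables and metadata file converted in fs/udf/super.c), which may legitimately carry an on-disk link count of zero, while udf_iget() keeps returning -ESTALE for ordinary inodes in that case.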
extern int udf_expand_file_adinicb(struct inode *);
extern struct buffer_head *udf_expand_dir_adinicb(struct inode *, int *, int *);
extern struct buffer_head *udf_bread(struct inode *, int, int, int *);
diff --git a/fs/udf/udftime.c b/fs/udf/udftime.c
index 1f11483eba6a..77c331f1a770 100644
--- a/fs/udf/udftime.c
+++ b/fs/udf/udftime.c
@@ -81,8 +81,6 @@ static time_t year_seconds[MAX_YEAR_SECONDS] = {
/*2038*/ SPY(68, 17, 0)
};
-extern struct timezone sys_tz;
-
#define SECS_PER_HOUR (60 * 60)
#define SECS_PER_DAY (SECS_PER_HOUR * 24)
diff --git a/fs/xattr.c b/fs/xattr.c
index c69e6d43a0d2..64e83efb742d 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -364,13 +364,12 @@ out:
return error;
}
-SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
- const char __user *, name, const void __user *, value,
- size_t, size, int, flags)
+static int path_setxattr(const char __user *pathname,
+ const char __user *name, const void __user *value,
+ size_t size, int flags, unsigned int lookup_flags)
{
struct path path;
int error;
- unsigned int lookup_flags = LOOKUP_FOLLOW;
retry:
error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path);
if (error)
@@ -388,28 +387,18 @@ retry:
return error;
}
+SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
+ const char __user *, name, const void __user *, value,
+ size_t, size, int, flags)
+{
+ return path_setxattr(pathname, name, value, size, flags, LOOKUP_FOLLOW);
+}
+
SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
const char __user *, name, const void __user *, value,
size_t, size, int, flags)
{
- struct path path;
- int error;
- unsigned int lookup_flags = 0;
-retry:
- error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path);
- if (error)
- return error;
- error = mnt_want_write(path.mnt);
- if (!error) {
- error = setxattr(path.dentry, name, value, size, flags);
- mnt_drop_write(path.mnt);
- }
- path_put(&path);
- if (retry_estale(error, lookup_flags)) {
- lookup_flags |= LOOKUP_REVAL;
- goto retry;
- }
- return error;
+ return path_setxattr(pathname, name, value, size, flags, 0);
}
SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
@@ -481,12 +470,12 @@ getxattr(struct dentry *d, const char __user *name, void __user *value,
return error;
}
-SYSCALL_DEFINE4(getxattr, const char __user *, pathname,
- const char __user *, name, void __user *, value, size_t, size)
+static ssize_t path_getxattr(const char __user *pathname,
+ const char __user *name, void __user *value,
+ size_t size, unsigned int lookup_flags)
{
struct path path;
ssize_t error;
- unsigned int lookup_flags = LOOKUP_FOLLOW;
retry:
error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path);
if (error)
@@ -500,23 +489,16 @@ retry:
return error;
}
+SYSCALL_DEFINE4(getxattr, const char __user *, pathname,
+ const char __user *, name, void __user *, value, size_t, size)
+{
+ return path_getxattr(pathname, name, value, size, LOOKUP_FOLLOW);
+}
+
SYSCALL_DEFINE4(lgetxattr, const char __user *, pathname,
const char __user *, name, void __user *, value, size_t, size)
{
- struct path path;
- ssize_t error;
- unsigned int lookup_flags = 0;
-retry:
- error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path);
- if (error)
- return error;
- error = getxattr(path.dentry, name, value, size);
- path_put(&path);
- if (retry_estale(error, lookup_flags)) {
- lookup_flags |= LOOKUP_REVAL;
- goto retry;
- }
- return error;
+ return path_getxattr(pathname, name, value, size, 0);
}
SYSCALL_DEFINE4(fgetxattr, int, fd, const char __user *, name,
@@ -571,12 +553,11 @@ listxattr(struct dentry *d, char __user *list, size_t size)
return error;
}
-SYSCALL_DEFINE3(listxattr, const char __user *, pathname, char __user *, list,
- size_t, size)
+static ssize_t path_listxattr(const char __user *pathname, char __user *list,
+ size_t size, unsigned int lookup_flags)
{
struct path path;
ssize_t error;
- unsigned int lookup_flags = LOOKUP_FOLLOW;
retry:
error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path);
if (error)
@@ -590,23 +571,16 @@ retry:
return error;
}
+SYSCALL_DEFINE3(listxattr, const char __user *, pathname, char __user *, list,
+ size_t, size)
+{
+ return path_listxattr(pathname, list, size, LOOKUP_FOLLOW);
+}
+
SYSCALL_DEFINE3(llistxattr, const char __user *, pathname, char __user *, list,
size_t, size)
{
- struct path path;
- ssize_t error;
- unsigned int lookup_flags = 0;
-retry:
- error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path);
- if (error)
- return error;
- error = listxattr(path.dentry, list, size);
- path_put(&path);
- if (retry_estale(error, lookup_flags)) {
- lookup_flags |= LOOKUP_REVAL;
- goto retry;
- }
- return error;
+ return path_listxattr(pathname, list, size, 0);
}
SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
@@ -640,12 +614,11 @@ removexattr(struct dentry *d, const char __user *name)
return vfs_removexattr(d, kname);
}
-SYSCALL_DEFINE2(removexattr, const char __user *, pathname,
- const char __user *, name)
+static int path_removexattr(const char __user *pathname,
+ const char __user *name, unsigned int lookup_flags)
{
struct path path;
int error;
- unsigned int lookup_flags = LOOKUP_FOLLOW;
retry:
error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path);
if (error)
@@ -663,27 +636,16 @@ retry:
return error;
}
+SYSCALL_DEFINE2(removexattr, const char __user *, pathname,
+ const char __user *, name)
+{
+ return path_removexattr(pathname, name, LOOKUP_FOLLOW);
+}
+
SYSCALL_DEFINE2(lremovexattr, const char __user *, pathname,
const char __user *, name)
{
- struct path path;
- int error;
- unsigned int lookup_flags = 0;
-retry:
- error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path);
- if (error)
- return error;
- error = mnt_want_write(path.mnt);
- if (!error) {
- error = removexattr(path.dentry, name);
- mnt_drop_write(path.mnt);
- }
- path_put(&path);
- if (retry_estale(error, lookup_flags)) {
- lookup_flags |= LOOKUP_REVAL;
- goto retry;
- }
- return error;
+ return path_removexattr(pathname, name, 0);
}
SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
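The xattr rework above folds each setxattr/lsetxattr (and get/list/remove) pair into a single path_*xattr() helper that takes lookup_flags, so the only difference between the link-following and non-following syscalls is LOOKUP_FOLLOW versus 0. A minimal userspace sketch of the same deduplication pattern (hypothetical names, not the VFS code):

        #include <stdio.h>

        #define DEMO_FOLLOW 0x1         /* stand-in for LOOKUP_FOLLOW */

        /* One worker carries the behavioural difference as a flag argument. */
        static int demo_path_setattr(const char *path, const char *name,
                                     unsigned int lookup_flags)
        {
                printf("set %s on %s (%sfollow links)\n", name, path,
                       (lookup_flags & DEMO_FOLLOW) ? "" : "don't ");
                return 0;
        }

        /* The two entry points reduce to one-line wrappers. */
        static int demo_setattr(const char *p, const char *n)
        {
                return demo_path_setattr(p, n, DEMO_FOLLOW);
        }

        static int demo_lsetattr(const char *p, const char *n)
        {
                return demo_path_setattr(p, n, 0);
        }

        int main(void)
        {
                demo_setattr("/tmp/file", "user.test");
                demo_lsetattr("/tmp/link", "user.test");
                return 0;
        }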
diff --git a/fs/xfs/kmem.c b/fs/xfs/kmem.c
index 844e288b9576..53e95b2a1369 100644
--- a/fs/xfs/kmem.c
+++ b/fs/xfs/kmem.c
@@ -21,7 +21,6 @@
#include <linux/swap.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
-#include "time.h"
#include "kmem.h"
#include "xfs_message.h"
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index 4bffffe038a1..eff34218f405 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -2209,6 +2209,10 @@ xfs_agf_verify(
be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp)))
return false;
+ if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) > XFS_BTREE_MAXLEVELS ||
+ be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) > XFS_BTREE_MAXLEVELS)
+ return false;
+
/*
* during growfs operations, the perag is not fully initialised,
* so we can't use it for any useful checking. growfs ensures we can't
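The xfs_agf_verify() hunk adds an upper bound on the on-disk free-space btree levels so a corrupted AGF cannot drive later code past XFS_BTREE_MAXLEVELS. The general pattern — validate untrusted on-disk values against compile-time limits before anything indexes with them — sketched in isolation with made-up limits:

        #include <stdbool.h>
        #include <stdint.h>
        #include <stdio.h>

        #define DEMO_MAXLEVELS 9        /* stand-in for XFS_BTREE_MAXLEVELS */

        struct demo_agf {
                uint32_t bno_level;     /* tree heights as read from disk */
                uint32_t cnt_level;
        };

        /* Reject anything a later tree walk could not safely index with. */
        static bool demo_agf_verify(const struct demo_agf *agf)
        {
                if (agf->bno_level > DEMO_MAXLEVELS ||
                    agf->cnt_level > DEMO_MAXLEVELS)
                        return false;
                return true;
        }

        int main(void)
        {
                struct demo_agf good = { 3, 2 }, bad = { 200, 2 };

                printf("good: %d, bad: %d\n", demo_agf_verify(&good),
                       demo_agf_verify(&bad));
                return 0;
        }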
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 86df952d3e24..79c981984dca 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -5404,22 +5404,223 @@ error0:
}
/*
+ * Determine whether an extent shift can be accomplished by a merge with the
+ * extent that precedes the target hole of the shift.
+ */
+STATIC bool
+xfs_bmse_can_merge(
+ struct xfs_bmbt_irec *left, /* preceding extent */
+ struct xfs_bmbt_irec *got, /* current extent to shift */
+ xfs_fileoff_t shift) /* shift fsb */
+{
+ xfs_fileoff_t startoff;
+
+ startoff = got->br_startoff - shift;
+
+ /*
+ * The extent, once shifted, must be adjacent in-file and on-disk with
+ * the preceding extent.
+ */
+ if ((left->br_startoff + left->br_blockcount != startoff) ||
+ (left->br_startblock + left->br_blockcount != got->br_startblock) ||
+ (left->br_state != got->br_state) ||
+ (left->br_blockcount + got->br_blockcount > MAXEXTLEN))
+ return false;
+
+ return true;
+}
+
+/*
+ * A bmap extent shift adjusts the file offset of an extent to fill a preceding
+ * hole in the file. If an extent shift would result in the extent being fully
+ * adjacent to the extent that currently precedes the hole, we can merge with
+ * the preceding extent rather than do the shift.
+ *
+ * This function assumes the caller has verified a shift-by-merge is possible
+ * with the provided extents via xfs_bmse_can_merge().
+ */
+STATIC int
+xfs_bmse_merge(
+ struct xfs_inode *ip,
+ int whichfork,
+ xfs_fileoff_t shift, /* shift fsb */
+ int current_ext, /* idx of gotp */
+ struct xfs_bmbt_rec_host *gotp, /* extent to shift */
+ struct xfs_bmbt_rec_host *leftp, /* preceding extent */
+ struct xfs_btree_cur *cur,
+ int *logflags) /* output */
+{
+ struct xfs_ifork *ifp;
+ struct xfs_bmbt_irec got;
+ struct xfs_bmbt_irec left;
+ xfs_filblks_t blockcount;
+ int error, i;
+
+ ifp = XFS_IFORK_PTR(ip, whichfork);
+ xfs_bmbt_get_all(gotp, &got);
+ xfs_bmbt_get_all(leftp, &left);
+ blockcount = left.br_blockcount + got.br_blockcount;
+
+ ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
+ ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+ ASSERT(xfs_bmse_can_merge(&left, &got, shift));
+
+ /*
+ * Merge the in-core extents. Note that the host record pointers and
+ * current_ext index are invalid once the extent has been removed via
+ * xfs_iext_remove().
+ */
+ xfs_bmbt_set_blockcount(leftp, blockcount);
+ xfs_iext_remove(ip, current_ext, 1, 0);
+
+ /*
+ * Update the on-disk extent count, the btree if necessary and log the
+ * inode.
+ */
+ XFS_IFORK_NEXT_SET(ip, whichfork,
+ XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
+ *logflags |= XFS_ILOG_CORE;
+ if (!cur) {
+ *logflags |= XFS_ILOG_DEXT;
+ return 0;
+ }
+
+ /* lookup and remove the extent to merge */
+ error = xfs_bmbt_lookup_eq(cur, got.br_startoff, got.br_startblock,
+ got.br_blockcount, &i);
+ if (error)
+ goto out_error;
+ XFS_WANT_CORRUPTED_GOTO(i == 1, out_error);
+
+ error = xfs_btree_delete(cur, &i);
+ if (error)
+ goto out_error;
+ XFS_WANT_CORRUPTED_GOTO(i == 1, out_error);
+
+ /* lookup and update size of the previous extent */
+ error = xfs_bmbt_lookup_eq(cur, left.br_startoff, left.br_startblock,
+ left.br_blockcount, &i);
+ if (error)
+ goto out_error;
+ XFS_WANT_CORRUPTED_GOTO(i == 1, out_error);
+
+ left.br_blockcount = blockcount;
+
+ error = xfs_bmbt_update(cur, left.br_startoff, left.br_startblock,
+ left.br_blockcount, left.br_state);
+ if (error)
+ goto out_error;
+
+ return 0;
+
+out_error:
+ return error;
+}
+
+/*
+ * Shift a single extent.
+ */
+STATIC int
+xfs_bmse_shift_one(
+ struct xfs_inode *ip,
+ int whichfork,
+ xfs_fileoff_t offset_shift_fsb,
+ int *current_ext,
+ struct xfs_bmbt_rec_host *gotp,
+ struct xfs_btree_cur *cur,
+ int *logflags)
+{
+ struct xfs_ifork *ifp;
+ xfs_fileoff_t startoff;
+ struct xfs_bmbt_rec_host *leftp;
+ struct xfs_bmbt_irec got;
+ struct xfs_bmbt_irec left;
+ int error;
+ int i;
+
+ ifp = XFS_IFORK_PTR(ip, whichfork);
+
+ xfs_bmbt_get_all(gotp, &got);
+ startoff = got.br_startoff - offset_shift_fsb;
+
+ /* delalloc extents should be prevented by caller */
+ XFS_WANT_CORRUPTED_GOTO(!isnullstartblock(got.br_startblock),
+ out_error);
+
+ /*
+ * If this is the first extent in the file, make sure there's enough
+ * room at the start of the file and jump right to the shift as there's
+ * no left extent to merge.
+ */
+ if (*current_ext == 0) {
+ if (got.br_startoff < offset_shift_fsb)
+ return -EINVAL;
+ goto shift_extent;
+ }
+
+ /* grab the left extent and check for a large enough hole */
+ leftp = xfs_iext_get_ext(ifp, *current_ext - 1);
+ xfs_bmbt_get_all(leftp, &left);
+
+ if (startoff < left.br_startoff + left.br_blockcount)
+ return -EINVAL;
+
+ /* check whether to merge the extent or shift it down */
+ if (!xfs_bmse_can_merge(&left, &got, offset_shift_fsb))
+ goto shift_extent;
+
+ return xfs_bmse_merge(ip, whichfork, offset_shift_fsb, *current_ext,
+ gotp, leftp, cur, logflags);
+
+shift_extent:
+ /*
+ * Increment the extent index for the next iteration, update the start
+ * offset of the in-core extent and update the btree if applicable.
+ */
+ (*current_ext)++;
+ xfs_bmbt_set_startoff(gotp, startoff);
+ *logflags |= XFS_ILOG_CORE;
+ if (!cur) {
+ *logflags |= XFS_ILOG_DEXT;
+ return 0;
+ }
+
+ error = xfs_bmbt_lookup_eq(cur, got.br_startoff, got.br_startblock,
+ got.br_blockcount, &i);
+ if (error)
+ return error;
+ XFS_WANT_CORRUPTED_GOTO(i == 1, out_error);
+
+ got.br_startoff = startoff;
+ error = xfs_bmbt_update(cur, got.br_startoff, got.br_startblock,
+ got.br_blockcount, got.br_state);
+ if (error)
+ return error;
+
+ return 0;
+
+out_error:
+ return error;
+}
+
+/*
* Shift extent records to the left to cover a hole.
*
- * The maximum number of extents to be shifted in a single operation
- * is @num_exts, and @current_ext keeps track of the current extent
- * index we have shifted. @offset_shift_fsb is the length by which each
- * extent is shifted. If there is no hole to shift the extents
- * into, this will be considered invalid operation and we abort immediately.
+ * The maximum number of extents to be shifted in a single operation is
+ * @num_exts. @start_fsb specifies the file offset to start the shift and the
+ * file offset where we've left off is returned in @next_fsb. @offset_shift_fsb
+ * is the length by which each extent is shifted. If there is no hole to shift
+ * the extents into, this will be considered an invalid operation and we abort
+ * immediately.
*/
int
xfs_bmap_shift_extents(
struct xfs_trans *tp,
struct xfs_inode *ip,
- int *done,
xfs_fileoff_t start_fsb,
xfs_fileoff_t offset_shift_fsb,
- xfs_extnum_t *current_ext,
+ int *done,
+ xfs_fileoff_t *next_fsb,
xfs_fsblock_t *firstblock,
struct xfs_bmap_free *flist,
int num_exts)
@@ -5427,16 +5628,13 @@ xfs_bmap_shift_extents(
struct xfs_btree_cur *cur = NULL;
struct xfs_bmbt_rec_host *gotp;
struct xfs_bmbt_irec got;
- struct xfs_bmbt_irec left;
struct xfs_mount *mp = ip->i_mount;
struct xfs_ifork *ifp;
xfs_extnum_t nexts = 0;
- xfs_fileoff_t startoff;
+ xfs_extnum_t current_ext;
int error = 0;
- int i;
int whichfork = XFS_DATA_FORK;
int logflags = 0;
- xfs_filblks_t blockcount = 0;
int total_extents;
if (unlikely(XFS_TEST_ERROR(
@@ -5451,7 +5649,8 @@ xfs_bmap_shift_extents(
if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;
- ASSERT(current_ext != NULL);
+ ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
+ ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
ifp = XFS_IFORK_PTR(ip, whichfork);
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
@@ -5461,23 +5660,6 @@ xfs_bmap_shift_extents(
return error;
}
- /*
- * If *current_ext is 0, we would need to lookup the extent
- * from where we would start shifting and store it in gotp.
- */
- if (!*current_ext) {
- gotp = xfs_iext_bno_to_ext(ifp, start_fsb, current_ext);
- /*
- * gotp can be null in 2 cases: 1) if there are no extents
- * or 2) start_fsb lies in a hole beyond which there are
- * no extents. Either way, we are done.
- */
- if (!gotp) {
- *done = 1;
- return 0;
- }
- }
-
if (ifp->if_flags & XFS_IFBROOT) {
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
cur->bc_private.b.firstblock = *firstblock;
@@ -5486,112 +5668,46 @@ xfs_bmap_shift_extents(
}
/*
+ * Look up the extent index for the fsb where we start shifting. We can
+ * henceforth iterate with current_ext as extent list changes are locked
+ * out via ilock.
+ *
+ * gotp can be null in 2 cases: 1) if there are no extents or 2)
+ * start_fsb lies in a hole beyond which there are no extents. Either
+ * way, we are done.
+ */
+ gotp = xfs_iext_bno_to_ext(ifp, start_fsb, &current_ext);
+ if (!gotp) {
+ *done = 1;
+ goto del_cursor;
+ }
+
+ /*
* There may be delalloc extents in the data fork before the range we
- * are collapsing out, so we cannot
- * use the count of real extents here. Instead we have to calculate it
- * from the incore fork.
+ * are collapsing out, so we cannot use the count of real extents here.
+ * Instead we have to calculate it from the incore fork.
*/
total_extents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
- while (nexts++ < num_exts && *current_ext < total_extents) {
-
- gotp = xfs_iext_get_ext(ifp, *current_ext);
- xfs_bmbt_get_all(gotp, &got);
- startoff = got.br_startoff - offset_shift_fsb;
-
- /*
- * Before shifting extent into hole, make sure that the hole
- * is large enough to accomodate the shift.
- */
- if (*current_ext) {
- xfs_bmbt_get_all(xfs_iext_get_ext(ifp,
- *current_ext - 1), &left);
-
- if (startoff < left.br_startoff + left.br_blockcount)
- error = -EINVAL;
- } else if (offset_shift_fsb > got.br_startoff) {
- /*
- * When first extent is shifted, offset_shift_fsb
- * should be less than the stating offset of
- * the first extent.
- */
- error = -EINVAL;
- }
-
+ while (nexts++ < num_exts && current_ext < total_extents) {
+ error = xfs_bmse_shift_one(ip, whichfork, offset_shift_fsb,
+ &current_ext, gotp, cur, &logflags);
if (error)
goto del_cursor;
- if (cur) {
- error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
- got.br_startblock,
- got.br_blockcount,
- &i);
- if (error)
- goto del_cursor;
- XFS_WANT_CORRUPTED_GOTO(i == 1, del_cursor);
- }
-
- /* Check if we can merge 2 adjacent extents */
- if (*current_ext &&
- left.br_startoff + left.br_blockcount == startoff &&
- left.br_startblock + left.br_blockcount ==
- got.br_startblock &&
- left.br_state == got.br_state &&
- left.br_blockcount + got.br_blockcount <= MAXEXTLEN) {
- blockcount = left.br_blockcount +
- got.br_blockcount;
- xfs_iext_remove(ip, *current_ext, 1, 0);
- logflags |= XFS_ILOG_CORE;
- if (cur) {
- error = xfs_btree_delete(cur, &i);
- if (error)
- goto del_cursor;
- XFS_WANT_CORRUPTED_GOTO(i == 1, del_cursor);
- } else {
- logflags |= XFS_ILOG_DEXT;
- }
- XFS_IFORK_NEXT_SET(ip, whichfork,
- XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
- gotp = xfs_iext_get_ext(ifp, --*current_ext);
- xfs_bmbt_get_all(gotp, &got);
-
- /* Make cursor point to the extent we will update */
- if (cur) {
- error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
- got.br_startblock,
- got.br_blockcount,
- &i);
- if (error)
- goto del_cursor;
- XFS_WANT_CORRUPTED_GOTO(i == 1, del_cursor);
- }
-
- xfs_bmbt_set_blockcount(gotp, blockcount);
- got.br_blockcount = blockcount;
- } else {
- /* We have to update the startoff */
- xfs_bmbt_set_startoff(gotp, startoff);
- got.br_startoff = startoff;
- }
-
- logflags |= XFS_ILOG_CORE;
- if (cur) {
- error = xfs_bmbt_update(cur, got.br_startoff,
- got.br_startblock,
- got.br_blockcount,
- got.br_state);
- if (error)
- goto del_cursor;
- } else {
- logflags |= XFS_ILOG_DEXT;
- }
-
- (*current_ext)++;
+ /* update total extent count and grab the next record */
total_extents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
+ if (current_ext >= total_extents)
+ break;
+ gotp = xfs_iext_get_ext(ifp, current_ext);
}
/* Check if we are done */
- if (*current_ext == total_extents)
+ if (current_ext == total_extents) {
*done = 1;
+ } else if (next_fsb) {
+ xfs_bmbt_get_all(gotp, &got);
+ *next_fsb = got.br_startoff;
+ }
del_cursor:
if (cur)
@@ -5600,5 +5716,6 @@ del_cursor:
if (logflags)
xfs_trans_log_inode(tp, ip, logflags);
+
return error;
}
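With the rework above, xfs_bmap_shift_extents() no longer hands an extent-list index back to the caller; it reports where it stopped via *next_fsb and the caller restarts from that file offset under a fresh transaction (as the xfs_bmap_util.c hunk later in this diff does). A hedged sketch of that cursor-by-offset batching shape, using a hypothetical worker rather than the XFS call:

        #include <stdbool.h>
        #include <stdio.h>

        /*
         * Pretend worker: shifts at most max_batch "extents" starting at *next,
         * advances *next, and sets *done when nothing is left. Illustrative only.
         */
        static int demo_shift_batch(long *next, long end, int max_batch, bool *done)
        {
                int n = 0;

                while (*next < end && n < max_batch) {
                        (*next)++;              /* one unit of work per pass */
                        n++;
                }
                *done = (*next >= end);
                return 0;                       /* 0 == success */
        }

        int main(void)
        {
                long next = 0, end = 25;
                bool done = false;

                /* Caller loops on the offset cursor, one "transaction" per batch. */
                while (!done) {
                        if (demo_shift_batch(&next, end, 10, &done))
                                return 1;
                        printf("batch done, next offset %ld, done=%d\n", next, done);
                }
                return 0;
        }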
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index b879ca56a64c..44db6db86402 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -178,9 +178,8 @@ int xfs_check_nostate_extents(struct xfs_ifork *ifp, xfs_extnum_t idx,
xfs_extnum_t num);
uint xfs_default_attroffset(struct xfs_inode *ip);
int xfs_bmap_shift_extents(struct xfs_trans *tp, struct xfs_inode *ip,
- int *done, xfs_fileoff_t start_fsb,
- xfs_fileoff_t offset_shift_fsb, xfs_extnum_t *current_ext,
- xfs_fsblock_t *firstblock, struct xfs_bmap_free *flist,
- int num_exts);
+ xfs_fileoff_t start_fsb, xfs_fileoff_t offset_shift_fsb,
+ int *done, xfs_fileoff_t *next_fsb, xfs_fsblock_t *firstblock,
+ struct xfs_bmap_free *flist, int num_exts);
#endif /* __XFS_BMAP_H__ */
diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
index 2c42ae28d027..fd827530afec 100644
--- a/fs/xfs/libxfs/xfs_da_btree.c
+++ b/fs/xfs/libxfs/xfs_da_btree.c
@@ -2563,7 +2563,8 @@ xfs_da_get_buf(
mapp, nmap, 0);
error = bp ? bp->b_error : -EIO;
if (error) {
- xfs_trans_brelse(trans, bp);
+ if (bp)
+ xfs_trans_brelse(trans, bp);
goto out_free;
}
diff --git a/fs/xfs/libxfs/xfs_da_format.c b/fs/xfs/libxfs/xfs_da_format.c
index c9aee52a37e2..7e42fdfd2f1d 100644
--- a/fs/xfs/libxfs/xfs_da_format.c
+++ b/fs/xfs/libxfs/xfs_da_format.c
@@ -270,7 +270,6 @@ xfs_dir3_data_get_ftype(
{
__uint8_t ftype = dep->name[dep->namelen];
- ASSERT(ftype < XFS_DIR3_FT_MAX);
if (ftype >= XFS_DIR3_FT_MAX)
return XFS_DIR3_FT_UNKNOWN;
return ftype;
diff --git a/fs/xfs/libxfs/xfs_dir2.c b/fs/xfs/libxfs/xfs_dir2.c
index 6cef22152fd6..7075aaf131f4 100644
--- a/fs/xfs/libxfs/xfs_dir2.c
+++ b/fs/xfs/libxfs/xfs_dir2.c
@@ -237,7 +237,8 @@ xfs_dir_init(
}
/*
- Enter a name in a directory.
+ * Enter a name in a directory, or check for available space.
+ * If inum is 0, only the available space test is performed.
*/
int
xfs_dir_createname(
@@ -254,10 +255,12 @@ xfs_dir_createname(
int v; /* type-checking value */
ASSERT(S_ISDIR(dp->i_d.di_mode));
- rval = xfs_dir_ino_validate(tp->t_mountp, inum);
- if (rval)
- return rval;
- XFS_STATS_INC(xs_dir_create);
+ if (inum) {
+ rval = xfs_dir_ino_validate(tp->t_mountp, inum);
+ if (rval)
+ return rval;
+ XFS_STATS_INC(xs_dir_create);
+ }
args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS);
if (!args)
@@ -276,6 +279,8 @@ xfs_dir_createname(
args->whichfork = XFS_DATA_FORK;
args->trans = tp;
args->op_flags = XFS_DA_OP_ADDNAME | XFS_DA_OP_OKNOENT;
+ if (!inum)
+ args->op_flags |= XFS_DA_OP_JUSTCHECK;
if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
rval = xfs_dir2_sf_addname(args);
@@ -535,62 +540,14 @@ out_free:
/*
* See if this entry can be added to the directory without allocating space.
- * First checks that the caller couldn't reserve enough space (resblks = 0).
*/
int
xfs_dir_canenter(
xfs_trans_t *tp,
xfs_inode_t *dp,
- struct xfs_name *name, /* name of entry to add */
- uint resblks)
+ struct xfs_name *name) /* name of entry to add */
{
- struct xfs_da_args *args;
- int rval;
- int v; /* type-checking value */
-
- if (resblks)
- return 0;
-
- ASSERT(S_ISDIR(dp->i_d.di_mode));
-
- args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS);
- if (!args)
- return -ENOMEM;
-
- args->geo = dp->i_mount->m_dir_geo;
- args->name = name->name;
- args->namelen = name->len;
- args->filetype = name->type;
- args->hashval = dp->i_mount->m_dirnameops->hashname(name);
- args->dp = dp;
- args->whichfork = XFS_DATA_FORK;
- args->trans = tp;
- args->op_flags = XFS_DA_OP_JUSTCHECK | XFS_DA_OP_ADDNAME |
- XFS_DA_OP_OKNOENT;
-
- if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
- rval = xfs_dir2_sf_addname(args);
- goto out_free;
- }
-
- rval = xfs_dir2_isblock(args, &v);
- if (rval)
- goto out_free;
- if (v) {
- rval = xfs_dir2_block_addname(args);
- goto out_free;
- }
-
- rval = xfs_dir2_isleaf(args, &v);
- if (rval)
- goto out_free;
- if (v)
- rval = xfs_dir2_leaf_addname(args);
- else
- rval = xfs_dir2_node_addname(args);
-out_free:
- kmem_free(args);
- return rval;
+ return xfs_dir_createname(tp, dp, name, 0, NULL, NULL, 0);
}
/*
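xfs_dir_canenter() above collapses into a call to xfs_dir_createname() with a zero inode number, which the latter now treats as "check space only" by setting XFS_DA_OP_JUSTCHECK. The same "sentinel argument selects dry-run mode" convention in a standalone form (hypothetical example, not the directory code):

        #include <stdbool.h>
        #include <stdio.h>

        #define DEMO_CAP 4

        static int demo_used;

        /*
         * value == 0 means "just check": report whether an insert would fit
         * without actually consuming a slot. Illustrative convention only.
         */
        static int demo_insert(const char *name, int value)
        {
                bool just_check = (value == 0);

                if (demo_used >= DEMO_CAP)
                        return -1;              /* no space */
                if (!just_check) {
                        demo_used++;
                        printf("inserted %s=%d\n", name, value);
                }
                return 0;
        }

        static int demo_canenter(const char *name)
        {
                return demo_insert(name, 0);    /* dry run via the sentinel */
        }

        int main(void)
        {
                printf("can enter: %d\n", demo_canenter("foo"));
                return demo_insert("foo", 42);
        }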
diff --git a/fs/xfs/libxfs/xfs_dir2.h b/fs/xfs/libxfs/xfs_dir2.h
index c8e86b0b5e99..4dff261e6ed5 100644
--- a/fs/xfs/libxfs/xfs_dir2.h
+++ b/fs/xfs/libxfs/xfs_dir2.h
@@ -136,7 +136,7 @@ extern int xfs_dir_replace(struct xfs_trans *tp, struct xfs_inode *dp,
xfs_fsblock_t *first,
struct xfs_bmap_free *flist, xfs_extlen_t tot);
extern int xfs_dir_canenter(struct xfs_trans *tp, struct xfs_inode *dp,
- struct xfs_name *name, uint resblks);
+ struct xfs_name *name);
/*
* Direct call from the bmap code, bypassing the generic directory layer.
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index b62771f1f4b5..23dcb72fc5e6 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -1076,8 +1076,8 @@ xfs_dialloc_ag_finobt_newino(
int i;
if (agi->agi_newino != cpu_to_be32(NULLAGINO)) {
- error = xfs_inobt_lookup(cur, agi->agi_newino, XFS_LOOKUP_EQ,
- &i);
+ error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino),
+ XFS_LOOKUP_EQ, &i);
if (error)
return error;
if (i == 1) {
@@ -1085,7 +1085,6 @@ xfs_dialloc_ag_finobt_newino(
if (error)
return error;
XFS_WANT_CORRUPTED_RETURN(i == 1);
-
return 0;
}
}
@@ -2051,6 +2050,8 @@ xfs_agi_verify(
if (!XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum)))
return false;
+ if (be32_to_cpu(agi->agi_level) > XFS_BTREE_MAXLEVELS)
+ return false;
/*
* during growfs operations, the perag is not fully initialised,
* so we can't use it for any useful checking. growfs ensures we can't
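The first xfs_ialloc.c hunk is a straightforward endianness fix: agi_newino is a big-endian on-disk field, so it must pass through be32_to_cpu() before being used as a native integer; the second hunk bounds agi_level the same way the AGF levels were bounded above. A minimal standalone reminder of the conversion rule, using the portable byte-swap helpers from <endian.h> rather than the kernel ones:

        #define _DEFAULT_SOURCE
        #include <endian.h>
        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                /* Value as it would sit on disk: 42 stored big-endian. */
                uint32_t on_disk = htobe32(42);

                /* Using the raw field on a little-endian CPU gives garbage... */
                printf("raw field     : %u\n", on_disk);

                /* ...so convert before comparing, indexing or passing it on. */
                printf("be32toh(field): %u\n", be32toh(on_disk));
                return 0;
        }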
diff --git a/fs/xfs/libxfs/xfs_rtbitmap.c b/fs/xfs/libxfs/xfs_rtbitmap.c
index f4dd697cac08..7c818f1e4484 100644
--- a/fs/xfs/libxfs/xfs_rtbitmap.c
+++ b/fs/xfs/libxfs/xfs_rtbitmap.c
@@ -424,20 +424,24 @@ xfs_rtfind_forw(
}
/*
- * Read and modify the summary information for a given extent size,
+ * Read and/or modify the summary information for a given extent size,
* bitmap block combination.
* Keeps track of a current summary block, so we don't keep reading
* it from the buffer cache.
+ *
+ * Summary information is returned in *sum if specified.
+ * If no delta is specified, returns summary only.
*/
int
-xfs_rtmodify_summary(
- xfs_mount_t *mp, /* file system mount point */
+xfs_rtmodify_summary_int(
+ xfs_mount_t *mp, /* file system mount structure */
xfs_trans_t *tp, /* transaction pointer */
int log, /* log2 of extent size */
xfs_rtblock_t bbno, /* bitmap block number */
int delta, /* change to make to summary info */
xfs_buf_t **rbpp, /* in/out: summary block buffer */
- xfs_fsblock_t *rsb) /* in/out: summary block number */
+ xfs_fsblock_t *rsb, /* in/out: summary block number */
+ xfs_suminfo_t *sum) /* out: summary info for this block */
{
xfs_buf_t *bp; /* buffer for the summary block */
int error; /* error value */
@@ -456,7 +460,7 @@ xfs_rtmodify_summary(
/*
* If we have an old buffer, and the block number matches, use that.
*/
- if (rbpp && *rbpp && *rsb == sb)
+ if (*rbpp && *rsb == sb)
bp = *rbpp;
/*
* Otherwise we have to get the buffer.
@@ -465,7 +469,7 @@ xfs_rtmodify_summary(
/*
* If there was an old one, get rid of it first.
*/
- if (rbpp && *rbpp)
+ if (*rbpp)
xfs_trans_brelse(tp, *rbpp);
error = xfs_rtbuf_get(mp, tp, sb, 1, &bp);
if (error) {
@@ -474,21 +478,38 @@ xfs_rtmodify_summary(
/*
* Remember this buffer and block for the next call.
*/
- if (rbpp) {
- *rbpp = bp;
- *rsb = sb;
- }
+ *rbpp = bp;
+ *rsb = sb;
}
/*
- * Point to the summary information, modify and log it.
+ * Point to the summary information, modify/log it, and/or copy it out.
*/
sp = XFS_SUMPTR(mp, bp, so);
- *sp += delta;
- xfs_trans_log_buf(tp, bp, (uint)((char *)sp - (char *)bp->b_addr),
- (uint)((char *)sp - (char *)bp->b_addr + sizeof(*sp) - 1));
+ if (delta) {
+ uint first = (uint)((char *)sp - (char *)bp->b_addr);
+
+ *sp += delta;
+ xfs_trans_log_buf(tp, bp, first, first + sizeof(*sp) - 1);
+ }
+ if (sum)
+ *sum = *sp;
return 0;
}
+int
+xfs_rtmodify_summary(
+ xfs_mount_t *mp, /* file system mount structure */
+ xfs_trans_t *tp, /* transaction pointer */
+ int log, /* log2 of extent size */
+ xfs_rtblock_t bbno, /* bitmap block number */
+ int delta, /* change to make to summary info */
+ xfs_buf_t **rbpp, /* in/out: summary block buffer */
+ xfs_fsblock_t *rsb) /* in/out: summary block number */
+{
+ return xfs_rtmodify_summary_int(mp, tp, log, bbno,
+ delta, rbpp, rsb, NULL);
+}
+
/*
* Set the given range of bitmap bits to the given value.
* Do whatever I/O and logging is required.
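xfs_rtmodify_summary_int() above lets one routine both read and update a summary counter: the delta is applied (and logged) only when non-zero, and the current value is copied out only when a sum pointer is supplied, with xfs_rtmodify_summary() reduced to a wrapper passing NULL. A rough sketch of that optional-in/optional-out helper shape (illustrative names only):

        #include <stdio.h>

        static int demo_counter = 7;

        /*
         * Read and/or modify a counter: apply delta only if non-zero, copy the
         * (possibly updated) value out only if the caller gave us somewhere
         * to put it.
         */
        static int demo_summary_int(int delta, int *sum)
        {
                if (delta)
                        demo_counter += delta;
                if (sum)
                        *sum = demo_counter;
                return 0;
        }

        /* The old modify-only entry point becomes a trivial wrapper. */
        static int demo_summary_modify(int delta)
        {
                return demo_summary_int(delta, NULL);
        }

        int main(void)
        {
                int val;

                demo_summary_modify(3);         /* update only */
                demo_summary_int(0, &val);      /* read only */
                printf("counter now %d\n", val);
                return 0;
        }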
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index ad525a5623a4..5f902fa7913f 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -279,11 +279,13 @@ xfs_mount_validate_sb(
sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG ||
sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG ||
sbp->sb_blocksize != (1 << sbp->sb_blocklog) ||
+ sbp->sb_dirblklog > XFS_MAX_BLOCKSIZE_LOG ||
sbp->sb_inodesize < XFS_DINODE_MIN_SIZE ||
sbp->sb_inodesize > XFS_DINODE_MAX_SIZE ||
sbp->sb_inodelog < XFS_DINODE_MIN_LOG ||
sbp->sb_inodelog > XFS_DINODE_MAX_LOG ||
sbp->sb_inodesize != (1 << sbp->sb_inodelog) ||
+ sbp->sb_logsunit > XLOG_MAX_RECORD_BSIZE ||
sbp->sb_inopblock != howmany(sbp->sb_blocksize,sbp->sb_inodesize) ||
(sbp->sb_blocklog - sbp->sb_inodelog != sbp->sb_inopblog) ||
(sbp->sb_rextsize * sbp->sb_blocksize > XFS_MAX_RTEXTSIZE) ||
@@ -443,6 +445,8 @@ __xfs_sb_from_disk(
to->sb_features_incompat = be32_to_cpu(from->sb_features_incompat);
to->sb_features_log_incompat =
be32_to_cpu(from->sb_features_log_incompat);
+ /* crc is only used on disk, not in memory; just init to 0 here. */
+ to->sb_crc = 0;
to->sb_pad = 0;
to->sb_pquotino = be64_to_cpu(from->sb_pquotino);
to->sb_lsn = be64_to_cpu(from->sb_lsn);
@@ -548,6 +552,9 @@ xfs_sb_to_disk(
if (!fields)
return;
+ /* We should never write the crc here, it's updated in the IO path */
+ fields &= ~XFS_SB_CRC;
+
xfs_sb_quota_to_disk(to, from, &fields);
while (fields) {
f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
diff --git a/fs/xfs/time.h b/fs/xfs/time.h
deleted file mode 100644
index 387e695a184c..000000000000
--- a/fs/xfs/time.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_SUPPORT_TIME_H__
-#define __XFS_SUPPORT_TIME_H__
-
-#include <linux/sched.h>
-#include <linux/time.h>
-
-typedef struct timespec timespec_t;
-
-static inline void delay(long ticks)
-{
- schedule_timeout_uninterruptible(ticks);
-}
-
-static inline void nanotime(struct timespec *tvp)
-{
- *tvp = CURRENT_TIME;
-}
-
-#endif /* __XFS_SUPPORT_TIME_H__ */
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index b984647c24db..f5b2453a43b2 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -434,10 +434,22 @@ xfs_start_page_writeback(
{
ASSERT(PageLocked(page));
ASSERT(!PageWriteback(page));
- if (clear_dirty)
+
+ /*
+ * if the page was not fully cleaned, we need to ensure that the higher
+ * layers come back to it correctly. That means we need to keep the page
+ * dirty, and for WB_SYNC_ALL writeback we need to ensure the
+ * PAGECACHE_TAG_TOWRITE index mark is not removed so another attempt to
+ * write this page in this writeback sweep will be made.
+ */
+ if (clear_dirty) {
clear_page_dirty_for_io(page);
- set_page_writeback(page);
+ set_page_writeback(page);
+ } else
+ set_page_writeback_keepwrite(page);
+
unlock_page(page);
+
/* If no buffers on the page are to be written, finish it here */
if (!buffers)
end_page_writeback(page);
@@ -548,6 +560,13 @@ xfs_cancel_ioend(
do {
next_bh = bh->b_private;
clear_buffer_async_write(bh);
+ /*
+ * The unwritten flag is cleared when added to the
+ * ioend. We're not submitting for I/O so mark the
+ * buffer unwritten again for next time around.
+ */
+ if (ioend->io_type == XFS_IO_UNWRITTEN)
+ set_buffer_unwritten(bh);
unlock_buffer(bh);
} while ((bh = next_bh) != NULL);
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 1707980f9a4b..92e8f99a5857 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1122,14 +1122,6 @@ xfs_zero_remaining_bytes(
if (endoff > XFS_ISIZE(ip))
endoff = XFS_ISIZE(ip);
- bp = xfs_buf_get_uncached(XFS_IS_REALTIME_INODE(ip) ?
- mp->m_rtdev_targp : mp->m_ddev_targp,
- BTOBB(mp->m_sb.sb_blocksize), 0);
- if (!bp)
- return -ENOMEM;
-
- xfs_buf_unlock(bp);
-
for (offset = startoff; offset <= endoff; offset = lastoffset + 1) {
uint lock_mode;
@@ -1152,42 +1144,24 @@ xfs_zero_remaining_bytes(
ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
if (imap.br_state == XFS_EXT_UNWRITTEN)
continue;
- XFS_BUF_UNDONE(bp);
- XFS_BUF_UNWRITE(bp);
- XFS_BUF_READ(bp);
- XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock));
- if (XFS_FORCED_SHUTDOWN(mp)) {
- error = -EIO;
- break;
- }
- xfs_buf_iorequest(bp);
- error = xfs_buf_iowait(bp);
- if (error) {
- xfs_buf_ioerror_alert(bp,
- "xfs_zero_remaining_bytes(read)");
- break;
- }
+ error = xfs_buf_read_uncached(XFS_IS_REALTIME_INODE(ip) ?
+ mp->m_rtdev_targp : mp->m_ddev_targp,
+ xfs_fsb_to_db(ip, imap.br_startblock),
+ BTOBB(mp->m_sb.sb_blocksize),
+ 0, &bp, NULL);
+ if (error)
+ return error;
+
memset(bp->b_addr +
- (offset - XFS_FSB_TO_B(mp, imap.br_startoff)),
- 0, lastoffset - offset + 1);
- XFS_BUF_UNDONE(bp);
- XFS_BUF_UNREAD(bp);
- XFS_BUF_WRITE(bp);
-
- if (XFS_FORCED_SHUTDOWN(mp)) {
- error = -EIO;
- break;
- }
- xfs_buf_iorequest(bp);
- error = xfs_buf_iowait(bp);
- if (error) {
- xfs_buf_ioerror_alert(bp,
- "xfs_zero_remaining_bytes(write)");
- break;
- }
+ (offset - XFS_FSB_TO_B(mp, imap.br_startoff)),
+ 0, lastoffset - offset + 1);
+
+ error = xfs_bwrite(bp);
+ xfs_buf_relse(bp);
+ if (error)
+ return error;
}
- xfs_buf_free(bp);
return error;
}
@@ -1205,6 +1179,7 @@ xfs_free_file_space(
xfs_bmap_free_t free_list;
xfs_bmbt_irec_t imap;
xfs_off_t ioffset;
+ xfs_off_t iendoffset;
xfs_extlen_t mod=0;
xfs_mount_t *mp;
int nimap;
@@ -1233,12 +1208,13 @@ xfs_free_file_space(
inode_dio_wait(VFS_I(ip));
rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
- ioffset = offset & ~(rounding - 1);
- error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
- ioffset, -1);
+ ioffset = round_down(offset, rounding);
+ iendoffset = round_up(offset + len, rounding) - 1;
+ error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, ioffset,
+ iendoffset);
if (error)
goto out;
- truncate_pagecache_range(VFS_I(ip), ioffset, -1);
+ truncate_pagecache_range(VFS_I(ip), ioffset, iendoffset);
/*
* Need to zero the stuff we're not freeing, on disk.
@@ -1392,14 +1368,14 @@ xfs_zero_file_space(
if (start_boundary < end_boundary - 1) {
/*
- * punch out delayed allocation blocks and the page cache over
- * the conversion range
+ * Writeback the range to ensure any inode size updates due to
+ * appending writes make it to disk (otherwise we could just
+ * punch out the delalloc blocks).
*/
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- error = xfs_bmap_punch_delalloc_range(ip,
- XFS_B_TO_FSBT(mp, start_boundary),
- XFS_B_TO_FSB(mp, end_boundary - start_boundary));
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
+ start_boundary, end_boundary - 1);
+ if (error)
+ goto out;
truncate_pagecache_range(VFS_I(ip), start_boundary,
end_boundary - 1);
@@ -1456,41 +1432,47 @@ xfs_collapse_file_space(
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp;
int error;
- xfs_extnum_t current_ext = 0;
struct xfs_bmap_free free_list;
xfs_fsblock_t first_block;
int committed;
xfs_fileoff_t start_fsb;
+ xfs_fileoff_t next_fsb;
xfs_fileoff_t shift_fsb;
ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
trace_xfs_collapse_file_space(ip);
- start_fsb = XFS_B_TO_FSB(mp, offset + len);
+ next_fsb = XFS_B_TO_FSB(mp, offset + len);
shift_fsb = XFS_B_TO_FSB(mp, len);
- /*
- * Writeback the entire file and force remove any post-eof blocks. The
- * writeback prevents changes to the extent list via concurrent
- * writeback and the eofblocks trim prevents the extent shift algorithm
- * from running into a post-eof delalloc extent.
- *
- * XXX: This is a temporary fix until the extent shift loop below is
- * converted to use offsets and lookups within the ILOCK rather than
- * carrying around the index into the extent list for the next
- * iteration.
- */
- error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
+ error = xfs_free_file_space(ip, offset, len);
if (error)
return error;
+
+ /*
+ * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
+ * into the accessible region of the file.
+ */
if (xfs_can_free_eofblocks(ip, true)) {
error = xfs_free_eofblocks(mp, ip, false);
if (error)
return error;
}
- error = xfs_free_file_space(ip, offset, len);
+ /*
+ * Writeback and invalidate cache for the remainder of the file as we're
+ * about to shift down every extent from the collapse range to EOF. The
+ * free of the collapse range above might have already done some of
+ * this, but we shouldn't rely on it to do anything outside of the range
+ * that was freed.
+ */
+ error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
+ offset + len, -1);
+ if (error)
+ return error;
+ error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
+ (offset + len) >> PAGE_CACHE_SHIFT, -1);
if (error)
return error;
@@ -1525,10 +1507,10 @@ xfs_collapse_file_space(
* We are using the write transaction in which max 2 bmbt
* updates are allowed
*/
- error = xfs_bmap_shift_extents(tp, ip, &done, start_fsb,
- shift_fsb, &current_ext,
- &first_block, &free_list,
- XFS_BMAP_MAX_SHIFT_EXTENTS);
+ start_fsb = next_fsb;
+ error = xfs_bmap_shift_extents(tp, ip, start_fsb, shift_fsb,
+ &done, &next_fsb, &first_block, &free_list,
+ XFS_BMAP_MAX_SHIFT_EXTENTS);
if (error)
goto out;
@@ -1638,7 +1620,7 @@ xfs_swap_extents_check_format(
return 0;
}
-int
+static int
xfs_swap_extent_flush(
struct xfs_inode *ip)
{
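The xfs_zero_remaining_bytes() rewrite leans on the new xfs_buf_read_uncached() calling convention introduced further down in this diff: the buffer comes back through an out parameter and the return value is a negative errno, so callers no longer poke at b_error themselves. The shape of that "return errno, hand the object back via **out" API, sketched standalone with a hypothetical demo type:

        #include <errno.h>
        #include <stdio.h>
        #include <stdlib.h>

        struct demo_buf { char data[64]; };

        /* On success *bpp owns a new buffer; on failure it stays NULL. */
        static int demo_read_uncached(int want_error, struct demo_buf **bpp)
        {
                *bpp = NULL;
                if (want_error)
                        return -EIO;            /* negative errno, nothing allocated */

                *bpp = calloc(1, sizeof(**bpp));
                if (!*bpp)
                        return -ENOMEM;
                return 0;
        }

        int main(void)
        {
                struct demo_buf *bp;
                int error = demo_read_uncached(0, &bp);

                if (error) {
                        printf("read failed: %d\n", error);
                        return 1;
                }
                /* use bp ... */
                free(bp);
                return 0;
        }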
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index cd7b8ca9b064..017b6afe340b 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -623,10 +623,11 @@ _xfs_buf_read(
bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
- xfs_buf_iorequest(bp);
- if (flags & XBF_ASYNC)
+ if (flags & XBF_ASYNC) {
+ xfs_buf_submit(bp);
return 0;
- return xfs_buf_iowait(bp);
+ }
+ return xfs_buf_submit_wait(bp);
}
xfs_buf_t *
@@ -687,34 +688,39 @@ xfs_buf_readahead_map(
* Read an uncached buffer from disk. Allocates and returns a locked
* buffer containing the disk contents or nothing.
*/
-struct xfs_buf *
+int
xfs_buf_read_uncached(
struct xfs_buftarg *target,
xfs_daddr_t daddr,
size_t numblks,
int flags,
+ struct xfs_buf **bpp,
const struct xfs_buf_ops *ops)
{
struct xfs_buf *bp;
+ *bpp = NULL;
+
bp = xfs_buf_get_uncached(target, numblks, flags);
if (!bp)
- return NULL;
+ return -ENOMEM;
/* set up the buffer for a read IO */
ASSERT(bp->b_map_count == 1);
- bp->b_bn = daddr;
+ bp->b_bn = XFS_BUF_DADDR_NULL; /* always null for uncached buffers */
bp->b_maps[0].bm_bn = daddr;
bp->b_flags |= XBF_READ;
bp->b_ops = ops;
- if (XFS_FORCED_SHUTDOWN(target->bt_mount)) {
+ xfs_buf_submit_wait(bp);
+ if (bp->b_error) {
+ int error = bp->b_error;
xfs_buf_relse(bp);
- return NULL;
+ return error;
}
- xfs_buf_iorequest(bp);
- xfs_buf_iowait(bp);
- return bp;
+
+ *bpp = bp;
+ return 0;
}
/*
@@ -998,53 +1004,56 @@ xfs_buf_wait_unpin(
* Buffer Utility Routines
*/
-STATIC void
-xfs_buf_iodone_work(
- struct work_struct *work)
+void
+xfs_buf_ioend(
+ struct xfs_buf *bp)
{
- struct xfs_buf *bp =
- container_of(work, xfs_buf_t, b_iodone_work);
- bool read = !!(bp->b_flags & XBF_READ);
+ bool read = bp->b_flags & XBF_READ;
+
+ trace_xfs_buf_iodone(bp, _RET_IP_);
bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
- /* only validate buffers that were read without errors */
- if (read && bp->b_ops && !bp->b_error && (bp->b_flags & XBF_DONE))
+ /*
+ * Pull in IO completion errors now. We are guaranteed to be running
+ * single threaded, so we don't need the lock to read b_io_error.
+ */
+ if (!bp->b_error && bp->b_io_error)
+ xfs_buf_ioerror(bp, bp->b_io_error);
+
+ /* Only validate buffers that were read without errors */
+ if (read && !bp->b_error && bp->b_ops) {
+ ASSERT(!bp->b_iodone);
bp->b_ops->verify_read(bp);
+ }
+
+ if (!bp->b_error)
+ bp->b_flags |= XBF_DONE;
if (bp->b_iodone)
(*(bp->b_iodone))(bp);
else if (bp->b_flags & XBF_ASYNC)
xfs_buf_relse(bp);
- else {
- ASSERT(read && bp->b_ops);
+ else
complete(&bp->b_iowait);
- }
}
-void
-xfs_buf_ioend(
- struct xfs_buf *bp,
- int schedule)
+static void
+xfs_buf_ioend_work(
+ struct work_struct *work)
{
- bool read = !!(bp->b_flags & XBF_READ);
-
- trace_xfs_buf_iodone(bp, _RET_IP_);
+ struct xfs_buf *bp =
+ container_of(work, xfs_buf_t, b_iodone_work);
- if (bp->b_error == 0)
- bp->b_flags |= XBF_DONE;
+ xfs_buf_ioend(bp);
+}
- if (bp->b_iodone || (read && bp->b_ops) || (bp->b_flags & XBF_ASYNC)) {
- if (schedule) {
- INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
- queue_work(xfslogd_workqueue, &bp->b_iodone_work);
- } else {
- xfs_buf_iodone_work(&bp->b_iodone_work);
- }
- } else {
- bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
- complete(&bp->b_iowait);
- }
+void
+xfs_buf_ioend_async(
+ struct xfs_buf *bp)
+{
+ INIT_WORK(&bp->b_iodone_work, xfs_buf_ioend_work);
+ queue_work(xfslogd_workqueue, &bp->b_iodone_work);
}
void
@@ -1067,96 +1076,6 @@ xfs_buf_ioerror_alert(
(__uint64_t)XFS_BUF_ADDR(bp), func, -bp->b_error, bp->b_length);
}
-/*
- * Called when we want to stop a buffer from getting written or read.
- * We attach the EIO error, muck with its flags, and call xfs_buf_ioend
- * so that the proper iodone callbacks get called.
- */
-STATIC int
-xfs_bioerror(
- xfs_buf_t *bp)
-{
-#ifdef XFSERRORDEBUG
- ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone);
-#endif
-
- /*
- * No need to wait until the buffer is unpinned, we aren't flushing it.
- */
- xfs_buf_ioerror(bp, -EIO);
-
- /*
- * We're calling xfs_buf_ioend, so delete XBF_DONE flag.
- */
- XFS_BUF_UNREAD(bp);
- XFS_BUF_UNDONE(bp);
- xfs_buf_stale(bp);
-
- xfs_buf_ioend(bp, 0);
-
- return -EIO;
-}
-
-/*
- * Same as xfs_bioerror, except that we are releasing the buffer
- * here ourselves, and avoiding the xfs_buf_ioend call.
- * This is meant for userdata errors; metadata bufs come with
- * iodone functions attached, so that we can track down errors.
- */
-int
-xfs_bioerror_relse(
- struct xfs_buf *bp)
-{
- int64_t fl = bp->b_flags;
- /*
- * No need to wait until the buffer is unpinned.
- * We aren't flushing it.
- *
- * chunkhold expects B_DONE to be set, whether
- * we actually finish the I/O or not. We don't want to
- * change that interface.
- */
- XFS_BUF_UNREAD(bp);
- XFS_BUF_DONE(bp);
- xfs_buf_stale(bp);
- bp->b_iodone = NULL;
- if (!(fl & XBF_ASYNC)) {
- /*
- * Mark b_error and B_ERROR _both_.
- * Lot's of chunkcache code assumes that.
- * There's no reason to mark error for
- * ASYNC buffers.
- */
- xfs_buf_ioerror(bp, -EIO);
- complete(&bp->b_iowait);
- } else {
- xfs_buf_relse(bp);
- }
-
- return -EIO;
-}
-
-STATIC int
-xfs_bdstrat_cb(
- struct xfs_buf *bp)
-{
- if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
- trace_xfs_bdstrat_shut(bp, _RET_IP_);
- /*
- * Metadata write that didn't get logged but
- * written delayed anyway. These aren't associated
- * with a transaction, and can be ignored.
- */
- if (!bp->b_iodone && !XFS_BUF_ISREAD(bp))
- return xfs_bioerror_relse(bp);
- else
- return xfs_bioerror(bp);
- }
-
- xfs_buf_iorequest(bp);
- return 0;
-}
-
int
xfs_bwrite(
struct xfs_buf *bp)
@@ -1166,11 +1085,10 @@ xfs_bwrite(
ASSERT(xfs_buf_islocked(bp));
bp->b_flags |= XBF_WRITE;
- bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q | XBF_WRITE_FAIL);
+ bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q |
+ XBF_WRITE_FAIL | XBF_DONE);
- xfs_bdstrat_cb(bp);
-
- error = xfs_buf_iowait(bp);
+ error = xfs_buf_submit_wait(bp);
if (error) {
xfs_force_shutdown(bp->b_target->bt_mount,
SHUTDOWN_META_IO_ERROR);
@@ -1179,15 +1097,6 @@ xfs_bwrite(
}
STATIC void
-_xfs_buf_ioend(
- xfs_buf_t *bp,
- int schedule)
-{
- if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
- xfs_buf_ioend(bp, schedule);
-}
-
-STATIC void
xfs_buf_bio_end_io(
struct bio *bio,
int error)
@@ -1198,13 +1107,18 @@ xfs_buf_bio_end_io(
* don't overwrite existing errors - otherwise we can lose errors on
* buffers that require multiple bios to complete.
*/
- if (!bp->b_error)
- xfs_buf_ioerror(bp, error);
+ if (error) {
+ spin_lock(&bp->b_lock);
+ if (!bp->b_io_error)
+ bp->b_io_error = error;
+ spin_unlock(&bp->b_lock);
+ }
if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
- _xfs_buf_ioend(bp, 1);
+ if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
+ xfs_buf_ioend_async(bp);
bio_put(bio);
}
@@ -1283,7 +1197,7 @@ next_chunk:
} else {
/*
* This is guaranteed not to be the last io reference count
- * because the caller (xfs_buf_iorequest) holds a count itself.
+ * because the caller (xfs_buf_submit) holds a count itself.
*/
atomic_dec(&bp->b_io_remaining);
xfs_buf_ioerror(bp, -EIO);
@@ -1373,53 +1287,131 @@ _xfs_buf_ioapply(
blk_finish_plug(&plug);
}
+/*
+ * Asynchronous IO submission path. This transfers the buffer lock ownership and
+ * the current reference to the IO. It is not safe to reference the buffer after
+ * a call to this function unless the caller holds an additional reference
+ * itself.
+ */
void
-xfs_buf_iorequest(
- xfs_buf_t *bp)
+xfs_buf_submit(
+ struct xfs_buf *bp)
{
- trace_xfs_buf_iorequest(bp, _RET_IP_);
+ trace_xfs_buf_submit(bp, _RET_IP_);
ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
+ ASSERT(bp->b_flags & XBF_ASYNC);
+
+ /* on shutdown we stale and complete the buffer immediately */
+ if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
+ xfs_buf_ioerror(bp, -EIO);
+ bp->b_flags &= ~XBF_DONE;
+ xfs_buf_stale(bp);
+ xfs_buf_ioend(bp);
+ return;
+ }
if (bp->b_flags & XBF_WRITE)
xfs_buf_wait_unpin(bp);
+
+ /* clear the internal error state to avoid spurious errors */
+ bp->b_io_error = 0;
+
+ /*
+ * The caller's reference is released during I/O completion.
+ * This occurs some time after the last b_io_remaining reference is
+ * released, so after we drop our IO reference we have to have some
+ * other reference to ensure the buffer doesn't go away from underneath
+ * us. Take a direct reference to ensure we have safe access to the
+ * buffer until we are finished with it.
+ */
xfs_buf_hold(bp);
/*
- * Set the count to 1 initially, this will stop an I/O
- * completion callout which happens before we have started
- * all the I/O from calling xfs_buf_ioend too early.
+ * Set the count to 1 initially, this will stop an I/O completion
+ * callout which happens before we have started all the I/O from calling
+ * xfs_buf_ioend too early.
*/
atomic_set(&bp->b_io_remaining, 1);
_xfs_buf_ioapply(bp);
+
/*
- * If _xfs_buf_ioapply failed, we'll get back here with
- * only the reference we took above. _xfs_buf_ioend will
- * drop it to zero, so we'd better not queue it for later,
- * or we'll free it before it's done.
+ * If _xfs_buf_ioapply failed, we can get back here with only the IO
+ * reference we took above. If we drop it to zero, run completion so
+ * that we don't return to the caller with completion still pending.
*/
- _xfs_buf_ioend(bp, bp->b_error ? 0 : 1);
+ if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
+ if (bp->b_error)
+ xfs_buf_ioend(bp);
+ else
+ xfs_buf_ioend_async(bp);
+ }
xfs_buf_rele(bp);
+ /* Note: it is not safe to reference bp now we've dropped our ref */
}
/*
- * Waits for I/O to complete on the buffer supplied. It returns immediately if
- * no I/O is pending or there is already a pending error on the buffer, in which
- * case nothing will ever complete. It returns the I/O error code, if any, or
- * 0 if there was no error.
+ * Synchronous buffer IO submission path, read or write.
*/
int
-xfs_buf_iowait(
- xfs_buf_t *bp)
+xfs_buf_submit_wait(
+ struct xfs_buf *bp)
{
- trace_xfs_buf_iowait(bp, _RET_IP_);
+ int error;
- if (!bp->b_error)
- wait_for_completion(&bp->b_iowait);
+ trace_xfs_buf_submit_wait(bp, _RET_IP_);
+
+ ASSERT(!(bp->b_flags & (_XBF_DELWRI_Q | XBF_ASYNC)));
+
+ if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
+ xfs_buf_ioerror(bp, -EIO);
+ xfs_buf_stale(bp);
+ bp->b_flags &= ~XBF_DONE;
+ return -EIO;
+ }
+
+ if (bp->b_flags & XBF_WRITE)
+ xfs_buf_wait_unpin(bp);
+
+ /* clear the internal error state to avoid spurious errors */
+ bp->b_io_error = 0;
+
+ /*
+ * For synchronous IO, the IO does not inherit the submitter's reference
+ * count, nor the buffer lock. Hence we cannot release the reference we
+ * are about to take until we've waited for all IO completion to occur,
+ * including any xfs_buf_ioend_async() work that may be pending.
+ */
+ xfs_buf_hold(bp);
+
+ /*
+ * Set the count to 1 initially, this will stop an I/O completion
+ * callout which happens before we have started all the I/O from calling
+ * xfs_buf_ioend too early.
+ */
+ atomic_set(&bp->b_io_remaining, 1);
+ _xfs_buf_ioapply(bp);
+
+ /*
+ * make sure we run completion synchronously if it raced with us and is
+ * already complete.
+ */
+ if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
+ xfs_buf_ioend(bp);
+ /* wait for completion before gathering the error from the buffer */
+ trace_xfs_buf_iowait(bp, _RET_IP_);
+ wait_for_completion(&bp->b_iowait);
trace_xfs_buf_iowait_done(bp, _RET_IP_);
- return bp->b_error;
+ error = bp->b_error;
+
+ /*
+ * all done now, we can release the hold that keeps the buffer
+ * referenced for the entire IO.
+ */
+ xfs_buf_rele(bp);
+ return error;
}
xfs_caddr_t
@@ -1813,13 +1805,19 @@ __xfs_buf_delwri_submit(
blk_start_plug(&plug);
list_for_each_entry_safe(bp, n, io_list, b_list) {
bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC | XBF_WRITE_FAIL);
- bp->b_flags |= XBF_WRITE;
+ bp->b_flags |= XBF_WRITE | XBF_ASYNC;
- if (!wait) {
- bp->b_flags |= XBF_ASYNC;
+ /*
+ * we do all IO submission async. This means if we need to wait
+ * for IO completion we need to take an extra reference so the
+ * buffer is still valid on the other side.
+ */
+ if (wait)
+ xfs_buf_hold(bp);
+ else
list_del_init(&bp->b_list);
- }
- xfs_bdstrat_cb(bp);
+
+ xfs_buf_submit(bp);
}
blk_finish_plug(&plug);
@@ -1866,7 +1864,10 @@ xfs_buf_delwri_submit(
bp = list_first_entry(&io_list, struct xfs_buf, b_list);
list_del_init(&bp->b_list);
- error2 = xfs_buf_iowait(bp);
+
+ /* locking the buffer will wait for async IO completion. */
+ xfs_buf_lock(bp);
+ error2 = bp->b_error;
xfs_buf_relse(bp);
if (!error)
error = error2;
@@ -1884,7 +1885,7 @@ xfs_buf_init(void)
goto out;
xfslogd_workqueue = alloc_workqueue("xfslogd",
- WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
+ WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 1);
if (!xfslogd_workqueue)
goto out_free_buf_zone;
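The buffer I/O rework above replaces xfs_buf_iorequest()/xfs_buf_iowait() with xfs_buf_submit() for asynchronous submission (lock and reference ownership pass to the I/O) and xfs_buf_submit_wait() for synchronous I/O (the caller keeps its reference and gets the error back). A thread-based toy model of that sync/async split, assuming POSIX threads and not mirroring the XFS code paths:

        #include <pthread.h>
        #include <stdio.h>

        struct demo_io { int error; };

        static void *demo_do_io(void *arg)
        {
                struct demo_io *io = arg;

                io->error = 0;                  /* pretend the I/O succeeded */
                return NULL;
        }

        /* Async: fire and forget; completion handling happens elsewhere. */
        static void demo_submit(struct demo_io *io, pthread_t *tid)
        {
                pthread_create(tid, NULL, demo_do_io, io);
        }

        /* Sync: submit, then wait and hand the error back to the caller. */
        static int demo_submit_wait(struct demo_io *io)
        {
                pthread_t tid;

                demo_submit(io, &tid);
                pthread_join(tid, NULL);
                return io->error;
        }

        int main(void)
        {
                struct demo_io io;

                printf("sync result: %d\n", demo_submit_wait(&io));
                return 0;
        }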
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index c753183900b3..82002c00af90 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -158,6 +158,7 @@ typedef struct xfs_buf {
struct list_head b_lru; /* lru list */
spinlock_t b_lock; /* internal state lock */
unsigned int b_state; /* internal state flags */
+ int b_io_error; /* internal IO error state */
wait_queue_head_t b_waiters; /* unpin waiters */
struct list_head b_list;
struct xfs_perag *b_pag; /* contains rbtree root */
@@ -268,9 +269,9 @@ int xfs_buf_associate_memory(struct xfs_buf *bp, void *mem, size_t length);
struct xfs_buf *xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks,
int flags);
-struct xfs_buf *xfs_buf_read_uncached(struct xfs_buftarg *target,
- xfs_daddr_t daddr, size_t numblks, int flags,
- const struct xfs_buf_ops *ops);
+int xfs_buf_read_uncached(struct xfs_buftarg *target, xfs_daddr_t daddr,
+ size_t numblks, int flags, struct xfs_buf **bpp,
+ const struct xfs_buf_ops *ops);
void xfs_buf_hold(struct xfs_buf *bp);
/* Releasing Buffers */
@@ -286,18 +287,16 @@ extern void xfs_buf_unlock(xfs_buf_t *);
/* Buffer Read and Write Routines */
extern int xfs_bwrite(struct xfs_buf *bp);
-extern void xfs_buf_ioend(xfs_buf_t *, int);
+extern void xfs_buf_ioend(struct xfs_buf *bp);
extern void xfs_buf_ioerror(xfs_buf_t *, int);
extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func);
-extern void xfs_buf_iorequest(xfs_buf_t *);
-extern int xfs_buf_iowait(xfs_buf_t *);
+extern void xfs_buf_submit(struct xfs_buf *bp);
+extern int xfs_buf_submit_wait(struct xfs_buf *bp);
extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *,
xfs_buf_rw_t);
#define xfs_buf_zero(bp, off, len) \
xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)
-extern int xfs_bioerror_relse(struct xfs_buf *);
-
/* Buffer Utility Routines */
extern xfs_caddr_t xfs_buf_offset(xfs_buf_t *, size_t);
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 76007deed31f..f15969543326 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -491,7 +491,7 @@ xfs_buf_item_unpin(
xfs_buf_ioerror(bp, -EIO);
XFS_BUF_UNDONE(bp);
xfs_buf_stale(bp);
- xfs_buf_ioend(bp, 0);
+ xfs_buf_ioend(bp);
}
}
@@ -501,7 +501,7 @@ xfs_buf_item_unpin(
* buffer being bad..
*/
-DEFINE_RATELIMIT_STATE(xfs_buf_write_fail_rl_state, 30 * HZ, 10);
+static DEFINE_RATELIMIT_STATE(xfs_buf_write_fail_rl_state, 30 * HZ, 10);
STATIC uint
xfs_buf_item_push(
@@ -1081,7 +1081,7 @@ xfs_buf_iodone_callbacks(
* a way to shut the filesystem down if the writes keep failing.
*
* In practice we'll shut the filesystem down soon as non-transient
- * erorrs tend to affect the whole device and a failing log write
+ * errors tend to affect the whole device and a failing log write
* will make us give up. But we really ought to do better here.
*/
if (XFS_BUF_ISASYNC(bp)) {
@@ -1094,7 +1094,7 @@ xfs_buf_iodone_callbacks(
if (!(bp->b_flags & (XBF_STALE|XBF_WRITE_FAIL))) {
bp->b_flags |= XBF_WRITE | XBF_ASYNC |
XBF_DONE | XBF_WRITE_FAIL;
- xfs_buf_iorequest(bp);
+ xfs_buf_submit(bp);
} else {
xfs_buf_relse(bp);
}
@@ -1115,7 +1115,7 @@ do_callbacks:
xfs_buf_do_callbacks(bp);
bp->b_fspriv = NULL;
bp->b_iodone = NULL;
- xfs_buf_ioend(bp, 0);
+ xfs_buf_ioend(bp);
}
/*
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index de5368c803f9..eb596b419942 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -983,7 +983,7 @@ xfs_vm_page_mkwrite(
/*
* This type is designed to indicate the type of offset we would like
- * to search from page cache for either xfs_seek_data() or xfs_seek_hole().
+ * to search from page cache for xfs_seek_hole_data().
*/
enum {
HOLE_OFF = 0,
@@ -1040,7 +1040,7 @@ xfs_lookup_buffer_offset(
/*
* This routine is called to find out and return a data or hole offset
* from the page cache for unwritten extents according to the desired
- * type for xfs_seek_data() or xfs_seek_hole().
+ * type for xfs_seek_hole_data().
*
* The argument offset is used to tell where we start to search from the
* page cache. Map is used to figure out the end points of the range to
@@ -1200,9 +1200,10 @@ out:
}
STATIC loff_t
-xfs_seek_data(
+xfs_seek_hole_data(
struct file *file,
- loff_t start)
+ loff_t start,
+ int whence)
{
struct inode *inode = file->f_mapping->host;
struct xfs_inode *ip = XFS_I(inode);
@@ -1214,6 +1215,9 @@ xfs_seek_data(
uint lock;
int error;
+ if (XFS_FORCED_SHUTDOWN(mp))
+ return -EIO;
+
lock = xfs_ilock_data_map_shared(ip);
isize = i_size_read(inode);
@@ -1228,6 +1232,7 @@ xfs_seek_data(
*/
fsbno = XFS_B_TO_FSBT(mp, start);
end = XFS_B_TO_FSB(mp, isize);
+
for (;;) {
struct xfs_bmbt_irec map[2];
int nmap = 2;
@@ -1248,29 +1253,48 @@ xfs_seek_data(
offset = max_t(loff_t, start,
XFS_FSB_TO_B(mp, map[i].br_startoff));
- /* Landed in a data extent */
- if (map[i].br_startblock == DELAYSTARTBLOCK ||
- (map[i].br_state == XFS_EXT_NORM &&
- !isnullstartblock(map[i].br_startblock)))
+ /* Landed in the hole we wanted? */
+ if (whence == SEEK_HOLE &&
+ map[i].br_startblock == HOLESTARTBLOCK)
+ goto out;
+
+ /* Landed in the data extent we wanted? */
+ if (whence == SEEK_DATA &&
+ (map[i].br_startblock == DELAYSTARTBLOCK ||
+ (map[i].br_state == XFS_EXT_NORM &&
+ !isnullstartblock(map[i].br_startblock))))
goto out;
/*
- * Landed in an unwritten extent, try to search data
- * from page cache.
+ * Landed in an unwritten extent, try to search
+ * for a hole or data in the page cache.
*/
if (map[i].br_state == XFS_EXT_UNWRITTEN) {
if (xfs_find_get_desired_pgoff(inode, &map[i],
- DATA_OFF, &offset))
+ whence == SEEK_HOLE ? HOLE_OFF : DATA_OFF,
+ &offset))
goto out;
}
}
/*
- * map[0] is hole or its an unwritten extent but
- * without data in page cache. Probably means that
- * we are reading after EOF if nothing in map[1].
+ * We only received one extent out of the two requested. This
+ * means we've hit EOF and didn't find what we are looking for.
*/
if (nmap == 1) {
+ /*
+ * If we were looking for a hole, set offset to
+ * the end of the file (i.e., there is an implicit
+ * hole at the end of any file).
+ */
+ if (whence == SEEK_HOLE) {
+ offset = isize;
+ break;
+ }
+ /*
+ * If we were looking for data, it's nowhere to be found
+ */
+ ASSERT(whence == SEEK_DATA);
error = -ENXIO;
goto out_unlock;
}
@@ -1279,125 +1303,30 @@ xfs_seek_data(
/*
* Nothing was found, proceed to the next round of search
- * if reading offset not beyond or hit EOF.
+ * if the next reading offset is not at or beyond EOF.
*/
fsbno = map[i - 1].br_startoff + map[i - 1].br_blockcount;
start = XFS_FSB_TO_B(mp, fsbno);
if (start >= isize) {
+ if (whence == SEEK_HOLE) {
+ offset = isize;
+ break;
+ }
+ ASSERT(whence == SEEK_DATA);
error = -ENXIO;
goto out_unlock;
}
}
out:
- offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
-
-out_unlock:
- xfs_iunlock(ip, lock);
-
- if (error)
- return error;
- return offset;
-}
-
-STATIC loff_t
-xfs_seek_hole(
- struct file *file,
- loff_t start)
-{
- struct inode *inode = file->f_mapping->host;
- struct xfs_inode *ip = XFS_I(inode);
- struct xfs_mount *mp = ip->i_mount;
- loff_t uninitialized_var(offset);
- xfs_fsize_t isize;
- xfs_fileoff_t fsbno;
- xfs_filblks_t end;
- uint lock;
- int error;
-
- if (XFS_FORCED_SHUTDOWN(mp))
- return -EIO;
-
- lock = xfs_ilock_data_map_shared(ip);
-
- isize = i_size_read(inode);
- if (start >= isize) {
- error = -ENXIO;
- goto out_unlock;
- }
-
- fsbno = XFS_B_TO_FSBT(mp, start);
- end = XFS_B_TO_FSB(mp, isize);
-
- for (;;) {
- struct xfs_bmbt_irec map[2];
- int nmap = 2;
- unsigned int i;
-
- error = xfs_bmapi_read(ip, fsbno, end - fsbno, map, &nmap,
- XFS_BMAPI_ENTIRE);
- if (error)
- goto out_unlock;
-
- /* No extents at given offset, must be beyond EOF */
- if (nmap == 0) {
- error = -ENXIO;
- goto out_unlock;
- }
-
- for (i = 0; i < nmap; i++) {
- offset = max_t(loff_t, start,
- XFS_FSB_TO_B(mp, map[i].br_startoff));
-
- /* Landed in a hole */
- if (map[i].br_startblock == HOLESTARTBLOCK)
- goto out;
-
- /*
- * Landed in an unwritten extent, try to search hole
- * from page cache.
- */
- if (map[i].br_state == XFS_EXT_UNWRITTEN) {
- if (xfs_find_get_desired_pgoff(inode, &map[i],
- HOLE_OFF, &offset))
- goto out;
- }
- }
-
- /*
- * map[0] contains data or its unwritten but contains
- * data in page cache, probably means that we are
- * reading after EOF. We should fix offset to point
- * to the end of the file(i.e., there is an implicit
- * hole at the end of any file).
- */
- if (nmap == 1) {
- offset = isize;
- break;
- }
-
- ASSERT(i > 1);
-
- /*
- * Both mappings contains data, proceed to the next round of
- * search if the current reading offset not beyond or hit EOF.
- */
- fsbno = map[i - 1].br_startoff + map[i - 1].br_blockcount;
- start = XFS_FSB_TO_B(mp, fsbno);
- if (start >= isize) {
- offset = isize;
- break;
- }
- }
-
-out:
/*
- * At this point, we must have found a hole. However, the returned
+ * If at this point we have found the hole we wanted, the returned
* offset may be bigger than the file size as it may be aligned to
- * page boundary for unwritten extents, we need to deal with this
+ * page boundary for unwritten extents. We need to deal with this
* situation in particular.
*/
- offset = min_t(loff_t, offset, isize);
+ if (whence == SEEK_HOLE)
+ offset = min_t(loff_t, offset, isize);
offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
out_unlock:
@@ -1412,17 +1341,16 @@ STATIC loff_t
xfs_file_llseek(
struct file *file,
loff_t offset,
- int origin)
+ int whence)
{
- switch (origin) {
+ switch (whence) {
case SEEK_END:
case SEEK_CUR:
case SEEK_SET:
- return generic_file_llseek(file, offset, origin);
- case SEEK_DATA:
- return xfs_seek_data(file, offset);
+ return generic_file_llseek(file, offset, whence);
case SEEK_HOLE:
- return xfs_seek_hole(file, offset);
+ case SEEK_DATA:
+ return xfs_seek_hole_data(file, offset, whence);
default:
return -EINVAL;
}
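
The llseek change above folds SEEK_HOLE and SEEK_DATA into one helper and leans on the convention that every file ends in an implicit hole at EOF. As a minimal userspace illustration of that convention (the file name is a placeholder; this sketch is not part of the patch):

    #define _GNU_SOURCE            /* for SEEK_HOLE */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("datafile", O_RDONLY);   /* any fully written file */
        off_t size, hole;

        if (fd < 0)
            return 1;
        size = lseek(fd, 0, SEEK_END);
        hole = lseek(fd, 0, SEEK_HOLE);        /* lands at EOF: the implicit hole */
        printf("size=%lld first hole=%lld\n",
               (long long)size, (long long)hole);
        close(fd);
        return 0;
    }

On a file with no holes both values are equal, which is exactly the "offset = isize" case handled in the hunks above.
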
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index f91de1ef05e1..c05ac8b70fa9 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -172,16 +172,11 @@ xfs_growfs_data_private(
if ((error = xfs_sb_validate_fsb_count(&mp->m_sb, nb)))
return error;
dpct = pct - mp->m_sb.sb_imax_pct;
- bp = xfs_buf_read_uncached(mp->m_ddev_targp,
+ error = xfs_buf_read_uncached(mp->m_ddev_targp,
XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1),
- XFS_FSS_TO_BB(mp, 1), 0, NULL);
- if (!bp)
- return -EIO;
- if (bp->b_error) {
- error = bp->b_error;
- xfs_buf_relse(bp);
+ XFS_FSS_TO_BB(mp, 1), 0, &bp, NULL);
+ if (error)
return error;
- }
xfs_buf_relse(bp);
new = nb; /* use new as a temporary here */
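
For context, this is the calling convention the conversion above (and the later xfs_mount.c, xfs_rtalloc.c and xfs_log_recover.c hunks) standardises on: xfs_buf_read_uncached() now returns a negative errno and hands the buffer back through an out parameter, so callers no longer poke at bp->b_error. A non-compiling kernel-side sketch of a converted call site (daddr stands in for whichever block address the caller computes):

    struct xfs_buf  *bp;
    int             error;

    /* read one uncached sector purely to prove the device covers it */
    error = xfs_buf_read_uncached(mp->m_ddev_targp, daddr,
                                  XFS_FSS_TO_BB(mp, 1), 0, &bp, NULL);
    if (error)
        return error;          /* no buffer is returned on failure */
    xfs_buf_relse(bp);
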
diff --git a/fs/xfs/xfs_globals.c b/fs/xfs/xfs_globals.c
index 5399ef222dd7..4d41b241298f 100644
--- a/fs/xfs/xfs_globals.c
+++ b/fs/xfs/xfs_globals.c
@@ -43,3 +43,7 @@ xfs_param_t xfs_params = {
.fstrm_timer = { 1, 30*100, 3600*100},
.eofb_timer = { 1, 300, 3600*24},
};
+
+struct xfs_globals xfs_globals = {
+ .log_recovery_delay = 0, /* no delay by default */
+};
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 981b2cf51985..b45f7b27b5df 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -33,7 +33,6 @@
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_util.h"
-#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index fea3c92fb3f0..8ed049d1e332 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -654,7 +654,7 @@ xfs_ialloc(
xfs_inode_t *ip;
uint flags;
int error;
- timespec_t tv;
+ struct timespec tv;
/*
* Call the space management code to pick
@@ -720,7 +720,7 @@ xfs_ialloc(
ip->i_d.di_nextents = 0;
ASSERT(ip->i_d.di_nblocks == 0);
- nanotime(&tv);
+ tv = current_fs_time(mp->m_super);
ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec;
ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec;
ip->i_d.di_atime = ip->i_d.di_mtime;
@@ -769,6 +769,8 @@ xfs_ialloc(
di_flags |= XFS_DIFLAG_EXTSZINHERIT;
ip->i_d.di_extsize = pip->i_d.di_extsize;
}
+ if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
+ di_flags |= XFS_DIFLAG_PROJINHERIT;
} else if (S_ISREG(mode)) {
if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
di_flags |= XFS_DIFLAG_REALTIME;
@@ -789,8 +791,6 @@ xfs_ialloc(
if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
xfs_inherit_nosymlinks)
di_flags |= XFS_DIFLAG_NOSYMLINKS;
- if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
- di_flags |= XFS_DIFLAG_PROJINHERIT;
if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
xfs_inherit_nodefrag)
di_flags |= XFS_DIFLAG_NODEFRAG;
@@ -1153,9 +1153,11 @@ xfs_create(
if (error)
goto out_trans_cancel;
- error = xfs_dir_canenter(tp, dp, name, resblks);
- if (error)
- goto out_trans_cancel;
+ if (!resblks) {
+ error = xfs_dir_canenter(tp, dp, name);
+ if (error)
+ goto out_trans_cancel;
+ }
/*
* A newly created regular or special file just has one directory
@@ -1421,9 +1423,11 @@ xfs_link(
goto error_return;
}
- error = xfs_dir_canenter(tp, tdp, target_name, resblks);
- if (error)
- goto error_return;
+ if (!resblks) {
+ error = xfs_dir_canenter(tp, tdp, target_name);
+ if (error)
+ goto error_return;
+ }
xfs_bmap_init(&free_list, &first_block);
@@ -2759,9 +2763,11 @@ xfs_rename(
* If there's no space reservation, check the entry will
* fit before actually inserting it.
*/
- error = xfs_dir_canenter(tp, target_dp, target_name, spaceres);
- if (error)
- goto error_return;
+ if (!spaceres) {
+ error = xfs_dir_canenter(tp, target_dp, target_name);
+ if (error)
+ goto error_return;
+ }
/*
* If target does not exist and the rename crosses
* directories, adjust the target directory link count
@@ -3056,7 +3062,7 @@ cluster_corrupt_out:
XFS_BUF_UNDONE(bp);
xfs_buf_stale(bp);
xfs_buf_ioerror(bp, -EIO);
- xfs_buf_ioend(bp, 0);
+ xfs_buf_ioend(bp);
} else {
xfs_buf_stale(bp);
xfs_buf_relse(bp);
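
The xfs_create(), xfs_link() and xfs_rename() hunks above all make the same change: the entry-fits check is only meaningful when the transaction has no block reservation, so xfs_dir_canenter() drops its reservation argument and the callers gate the call themselves. The common shape after conversion is (a schematic fragment, not a complete function):

    /* only probe the directory for space when nothing was reserved up front */
    if (!resblks) {
        error = xfs_dir_canenter(tp, dp, name);
        if (error)
            goto out_trans_cancel;
    }
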
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index c10e3fadd9af..9af2882e1f4c 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -102,7 +102,7 @@ xfs_new_eof(struct xfs_inode *ip, xfs_fsize_t new_size)
{
xfs_fsize_t i_size = i_size_read(VFS_I(ip));
- if (new_size > i_size)
+ if (new_size > i_size || new_size < 0)
new_size = i_size;
return new_size > ip->i_d.di_size ? new_size : 0;
}
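
The xfs_new_eof() hunk above adds a "new_size < 0" check because a write at a huge offset can make offset + count wrap past the top of the signed loff_t range. A small standalone illustration of the wrap the check defends against (the numbers are made up; the unsigned arithmetic just avoids undefined signed overflow, and the conversion back is the usual two's-complement wrap on Linux targets):

    #include <stdio.h>

    int main(void)
    {
        long long offset = 0x7ffffffffffff000LL;              /* near LLONG_MAX */
        long long count  = 0x2000;                            /* pushes past it */
        long long new_size =
            (long long)((unsigned long long)offset + count);  /* wraps negative */

        printf("new_size = %lld\n", new_size);
        return 0;
    }

With the extra check, such a wrapped (negative) size is clamped back to the current i_size instead of being treated as an extension.
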
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index de5a7be36e60..63de0b0acc32 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -615,7 +615,7 @@ xfs_iflush_done(
blip = bp->b_fspriv;
prev = NULL;
while (blip != NULL) {
- if (lip->li_cb != xfs_iflush_done) {
+ if (blip->li_cb != xfs_iflush_done) {
prev = blip;
blip = blip->li_bio_list;
continue;
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 3799695b9249..24c926b6fe85 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -968,8 +968,6 @@ xfs_set_diflags(
di_flags |= XFS_DIFLAG_NOATIME;
if (xflags & XFS_XFLAG_NODUMP)
di_flags |= XFS_DIFLAG_NODUMP;
- if (xflags & XFS_XFLAG_PROJINHERIT)
- di_flags |= XFS_DIFLAG_PROJINHERIT;
if (xflags & XFS_XFLAG_NODEFRAG)
di_flags |= XFS_DIFLAG_NODEFRAG;
if (xflags & XFS_XFLAG_FILESTREAM)
@@ -981,6 +979,8 @@ xfs_set_diflags(
di_flags |= XFS_DIFLAG_NOSYMLINKS;
if (xflags & XFS_XFLAG_EXTSZINHERIT)
di_flags |= XFS_DIFLAG_EXTSZINHERIT;
+ if (xflags & XFS_XFLAG_PROJINHERIT)
+ di_flags |= XFS_DIFLAG_PROJINHERIT;
} else if (S_ISREG(ip->i_d.di_mode)) {
if (xflags & XFS_XFLAG_REALTIME)
di_flags |= XFS_DIFLAG_REALTIME;
@@ -1231,13 +1231,25 @@ xfs_ioctl_setattr(
}
- if (mask & FSX_EXTSIZE)
- ip->i_d.di_extsize = fa->fsx_extsize >> mp->m_sb.sb_blocklog;
if (mask & FSX_XFLAGS) {
xfs_set_diflags(ip, fa->fsx_xflags);
xfs_diflags_to_linux(ip);
}
+ /*
+ * Only set the extent size hint if we've already determined that the
+ * extent size hint should be set on the inode. If no extent size flags
+ * are set on the inode then unconditionally clear the extent size hint.
+ */
+ if (mask & FSX_EXTSIZE) {
+ int extsize = 0;
+
+ if (ip->i_d.di_flags &
+ (XFS_DIFLAG_EXTSIZE | XFS_DIFLAG_EXTSZINHERIT))
+ extsize = fa->fsx_extsize >> mp->m_sb.sb_blocklog;
+ ip->i_d.di_extsize = extsize;
+ }
+
xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
@@ -1349,7 +1361,7 @@ xfs_ioc_setxflags(
STATIC int
xfs_getbmap_format(void **ap, struct getbmapx *bmv, int *full)
{
- struct getbmap __user *base = *ap;
+ struct getbmap __user *base = (struct getbmap __user *)*ap;
/* copy only getbmap portion (not getbmapx) */
if (copy_to_user(base, bmv, sizeof(struct getbmap)))
@@ -1380,7 +1392,7 @@ xfs_ioc_getbmap(
bmx.bmv_iflags |= BMV_IF_NO_DMAPI_READ;
error = xfs_getbmap(ip, &bmx, xfs_getbmap_format,
- (struct getbmap *)arg+1);
+ (__force struct getbmap *)arg+1);
if (error)
return error;
@@ -1393,7 +1405,7 @@ xfs_ioc_getbmap(
STATIC int
xfs_getbmapx_format(void **ap, struct getbmapx *bmv, int *full)
{
- struct getbmapx __user *base = *ap;
+ struct getbmapx __user *base = (struct getbmapx __user *)*ap;
if (copy_to_user(base, bmv, sizeof(struct getbmapx)))
return -EFAULT;
@@ -1420,7 +1432,7 @@ xfs_ioc_getbmapx(
return -EINVAL;
error = xfs_getbmap(ip, &bmx, xfs_getbmapx_format,
- (struct getbmapx *)arg+1);
+ (__force struct getbmapx *)arg+1);
if (error)
return error;
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index a554646ff141..94ce027e28e3 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -160,6 +160,7 @@ xfs_ioctl32_bstat_copyin(
get_user(bstat->bs_gen, &bstat32->bs_gen) ||
get_user(bstat->bs_projid_lo, &bstat32->bs_projid_lo) ||
get_user(bstat->bs_projid_hi, &bstat32->bs_projid_hi) ||
+ get_user(bstat->bs_forkoff, &bstat32->bs_forkoff) ||
get_user(bstat->bs_dmevmask, &bstat32->bs_dmevmask) ||
get_user(bstat->bs_dmstate, &bstat32->bs_dmstate) ||
get_user(bstat->bs_aextents, &bstat32->bs_aextents))
@@ -214,6 +215,7 @@ xfs_bulkstat_one_fmt_compat(
put_user(buffer->bs_gen, &p32->bs_gen) ||
put_user(buffer->bs_projid, &p32->bs_projid) ||
put_user(buffer->bs_projid_hi, &p32->bs_projid_hi) ||
+ put_user(buffer->bs_forkoff, &p32->bs_forkoff) ||
put_user(buffer->bs_dmevmask, &p32->bs_dmevmask) ||
put_user(buffer->bs_dmstate, &p32->bs_dmstate) ||
put_user(buffer->bs_aextents, &p32->bs_aextents))
diff --git a/fs/xfs/xfs_ioctl32.h b/fs/xfs/xfs_ioctl32.h
index 80f4060e8970..b1bb45444df8 100644
--- a/fs/xfs/xfs_ioctl32.h
+++ b/fs/xfs/xfs_ioctl32.h
@@ -67,8 +67,9 @@ typedef struct compat_xfs_bstat {
__u32 bs_gen; /* generation count */
__u16 bs_projid_lo; /* lower part of project id */
#define bs_projid bs_projid_lo /* (previously just bs_projid) */
+ __u16 bs_forkoff; /* inode fork offset in bytes */
__u16 bs_projid_hi; /* high part of project id */
- unsigned char bs_pad[12]; /* pad space, unused */
+ unsigned char bs_pad[10]; /* pad space, unused */
__u32 bs_dmevmask; /* DMIG event mask */
__u16 bs_dmstate; /* DMIG state info */
__u16 bs_aextents; /* attribute number of extents */
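
The struct edit above trades two bytes of padding for the new bs_forkoff field, which keeps the 32-bit compat layout the same size and therefore keeps the existing ioctl ABI intact. A quick standalone check of that kind of layout change (these structs only model the edited tail of compat_xfs_bstat; they are not the kernel definitions):

    #include <stdint.h>

    struct old_tail { uint16_t projid_hi; unsigned char pad[12]; };
    struct new_tail { uint16_t forkoff; uint16_t projid_hi; unsigned char pad[10]; };

    /* both tails are 14 bytes, so the overall structure size is unchanged */
    _Static_assert(sizeof(struct old_tail) == sizeof(struct new_tail),
                   "compat bstat tail must keep its size");

    int main(void) { return 0; }
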
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index e9c47b6f5e5a..afcf3c926565 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -404,8 +404,8 @@ xfs_quota_calc_throttle(
int shift = 0;
struct xfs_dquot *dq = xfs_inode_dquot(ip, type);
- /* over hi wmark, squash the prealloc completely */
- if (dq->q_res_bcount >= dq->q_prealloc_hi_wmark) {
+ /* no dq, or over hi wmark, squash the prealloc completely */
+ if (!dq || dq->q_res_bcount >= dq->q_prealloc_hi_wmark) {
*qblocks = 0;
*qfreesp = 0;
return;
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 72129493e9d3..ec6dcdc181ee 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -849,6 +849,36 @@ xfs_setattr_size(
return error;
truncate_setsize(inode, newsize);
+ /*
+ * The "we can't serialise against page faults" pain gets worse.
+ *
+ * If the file is mapped then we have to clean the page at the old EOF
+ * when extending the file. Extending the file can expose changes to the
+ * underlying page mapping (e.g. from beyond EOF to a hole or
+ * unwritten), and so on the next attempt to write to that page we need
+ * to remap it for write. i.e. we need .page_mkwrite() to be called.
+ * Hence we need to clean the page to clean the pte and so a new write
+ * fault will be triggered appropriately.
+ *
+ * If we do it before we change the inode size, then we can race with a
+ * page fault that maps the page with exactly the same problem. If we do
+ * it after we change the file size, then a new page fault can come in
+ * and allocate space before we've run the rest of the truncate
+ * transaction. That's kinda grotesque, but it's better than having data
+ * over a hole, and so that's the lesser evil that has been chosen here.
+ *
+ * The real solution, however, is to have some mechanism for locking out
+ * page faults while a truncate is in progress.
+ */
+ if (newsize > oldsize && mapping_mapped(VFS_I(ip)->i_mapping)) {
+ error = filemap_write_and_wait_range(
+ VFS_I(ip)->i_mapping,
+ round_down(oldsize, PAGE_CACHE_SIZE),
+ round_up(oldsize, PAGE_CACHE_SIZE) - 1);
+ if (error)
+ return error;
+ }
+
tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE);
error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
if (error)
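
The new pre-truncate flush above writes back only the page cache page that straddles the old EOF; that is what the round_down()/round_up() pair selects. With illustrative numbers (4096-byte pages and oldsize = 10000, neither taken from the patch):

    /* round_down(10000, 4096) = 8192, round_up(10000, 4096) - 1 = 12287 */
    long long page  = 4096;
    long long olds  = 10000;
    long long first = (olds / page) * page;                   /* 8192  */
    long long last  = ((olds + page - 1) / page) * page - 1;  /* 12287 */

so filemap_write_and_wait_range() covers exactly one page: the one whose tail beyond the old EOF must be clean before the inode size grows.
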
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index f71be9c68017..f1deb961a296 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -639,7 +639,8 @@ next_ag:
xfs_buf_relse(agbp);
agbp = NULL;
agino = 0;
- } while (++agno < mp->m_sb.sb_agcount);
+ agno++;
+ } while (agno < mp->m_sb.sb_agcount);
if (!error) {
if (bufidx) {
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index d10dc8f397c9..6a51619d8690 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -56,7 +56,6 @@ typedef __uint64_t __psunsigned_t;
#include "kmem.h"
#include "mrlock.h"
-#include "time.h"
#include "uuid.h"
#include <linux/semaphore.h>
@@ -179,6 +178,11 @@ typedef __uint64_t __psunsigned_t;
#define MAX(a,b) (max(a,b))
#define howmany(x, y) (((x)+((y)-1))/(y))
+static inline void delay(long ticks)
+{
+ schedule_timeout_uninterruptible(ticks);
+}
+
/*
* XFS wrapper structure for sysfs support. It depends on external data
* structures and is embedded in various internal data structures to implement
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index ca4fd5bd8522..fe88ef67f93a 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1678,7 +1678,7 @@ xlog_bdstrat(
if (iclog->ic_state & XLOG_STATE_IOERROR) {
xfs_buf_ioerror(bp, -EIO);
xfs_buf_stale(bp);
- xfs_buf_ioend(bp, 0);
+ xfs_buf_ioend(bp);
/*
* It would seem logical to return EIO here, but we rely on
* the log state machine to propagate I/O errors instead of
@@ -1688,7 +1688,7 @@ xlog_bdstrat(
return 0;
}
- xfs_buf_iorequest(bp);
+ xfs_buf_submit(bp);
return 0;
}
@@ -3867,18 +3867,17 @@ xlog_state_ioerror(
* This is called from xfs_force_shutdown, when we're forcibly
* shutting down the filesystem, typically because of an IO error.
* Our main objectives here are to make sure that:
- * a. the filesystem gets marked 'SHUTDOWN' for all interested
+ * a. if !logerror, flush the logs to disk. Anything modified
+ * after this is ignored.
+ * b. the filesystem gets marked 'SHUTDOWN' for all interested
* parties to find out, 'atomically'.
- * b. those who're sleeping on log reservations, pinned objects and
+ * c. those who're sleeping on log reservations, pinned objects and
* other resources get woken up, and be told the bad news.
- * c. nothing new gets queued up after (a) and (b) are done.
- * d. if !logerror, flush the iclogs to disk, then seal them off
- * for business.
+ * d. nothing new gets queued up after (b) and (c) are done.
*
- * Note: for delayed logging the !logerror case needs to flush the regions
- * held in memory out to the iclogs before flushing them to disk. This needs
- * to be done before the log is marked as shutdown, otherwise the flush to the
- * iclogs will fail.
+ * Note: for the !logerror case we need to flush the regions held in memory out
+ * to disk first. This needs to be done before the log is marked as shutdown,
+ * otherwise the iclog writes will fail.
*/
int
xfs_log_force_umount(
@@ -3910,16 +3909,16 @@ xfs_log_force_umount(
ASSERT(XLOG_FORCED_SHUTDOWN(log));
return 1;
}
- retval = 0;
/*
- * Flush the in memory commit item list before marking the log as
- * being shut down. We need to do it in this order to ensure all the
- * completed transactions are flushed to disk with the xfs_log_force()
- * call below.
+ * Flush all the completed transactions to disk before marking the log
+ * as shut down. We need to do it in this order to ensure that
+ * completed operations are safely on disk before we shut down, and that
+ * we don't have to issue any buffer IO after the shutdown flags are set
+ * to guarantee this.
*/
if (!logerror)
- xlog_cil_force(log);
+ _xfs_log_force(mp, XFS_LOG_SYNC, NULL);
/*
* mark the filesystem and the log as being in a shutdown state and wake
@@ -3931,18 +3930,11 @@ xfs_log_force_umount(
XFS_BUF_DONE(mp->m_sb_bp);
/*
- * This flag is sort of redundant because of the mount flag, but
- * it's good to maintain the separation between the log and the rest
- * of XFS.
+ * Mark the log and the iclogs with IO error flags to prevent any
+ * further log IO from being issued or completed.
*/
log->l_flags |= XLOG_IO_ERROR;
-
- /*
- * If we hit a log error, we want to mark all the iclogs IOERROR
- * while we're still holding the loglock.
- */
- if (logerror)
- retval = xlog_state_ioerror(log);
+ retval = xlog_state_ioerror(log);
spin_unlock(&log->l_icloglock);
/*
@@ -3955,19 +3947,6 @@ xfs_log_force_umount(
xlog_grant_head_wake_all(&log->l_reserve_head);
xlog_grant_head_wake_all(&log->l_write_head);
- if (!(log->l_iclog->ic_state & XLOG_STATE_IOERROR)) {
- ASSERT(!logerror);
- /*
- * Force the incore logs to disk before shutting the
- * log down completely.
- */
- _xfs_log_force(mp, XFS_LOG_SYNC, NULL);
-
- spin_lock(&log->l_icloglock);
- retval = xlog_state_ioerror(log);
- spin_unlock(&log->l_icloglock);
- }
-
/*
* Wake up everybody waiting on xfs_log_force. Wake the CIL push first
* as if the log writes were completed. The abort handling in the log
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index f6b79e5325dd..f506c457011e 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -463,12 +463,40 @@ xlog_cil_push(
spin_unlock(&cil->xc_push_lock);
goto out_skip;
}
- spin_unlock(&cil->xc_push_lock);
/* check for a previously pushed sequence */
- if (push_seq < cil->xc_ctx->sequence)
+ if (push_seq < cil->xc_ctx->sequence) {
+ spin_unlock(&cil->xc_push_lock);
goto out_skip;
+ }
+
+ /*
+ * We are now going to push this context, so add it to the committing
+ * list before we do anything else. This ensures that anyone waiting on
+ * this push can easily detect the difference between a "push in
+ * progress" and "CIL is empty, nothing to do".
+ *
+ * IOWs, a wait loop can now check for:
+ * the current sequence not being found on the committing list;
+ * an empty CIL; and
+ * an unchanged sequence number
+ * to detect a push that had nothing to do and therefore does not need
+ * waiting on. If the CIL is not empty, we get put on the committing
+ * list before emptying the CIL and bumping the sequence number. Hence
+ * an empty CIL and an unchanged sequence number means we jumped out
+ * above after doing nothing.
+ *
+ * Hence the waiter will either find the commit sequence on the
+ * committing list or the sequence number will be unchanged and the CIL
+ * still dirty. In that latter case, the push has not yet started, and
+ * so the waiter will have to continue trying to check the CIL
+ * committing list until it is found. In extreme cases of delay, the
+ * sequence may fully commit between the attempts the waiter makes to wait
+ * on the commit sequence.
+ */
+ list_add(&ctx->committing, &cil->xc_committing);
+ spin_unlock(&cil->xc_push_lock);
/*
* pull all the log vectors off the items in the CIL, and
@@ -532,7 +560,6 @@ xlog_cil_push(
*/
spin_lock(&cil->xc_push_lock);
cil->xc_current_sequence = new_ctx->sequence;
- list_add(&ctx->committing, &cil->xc_committing);
spin_unlock(&cil->xc_push_lock);
up_write(&cil->xc_ctx_lock);
@@ -855,13 +882,15 @@ restart:
* Hence by the time we have got here it our sequence may not have been
* pushed yet. This is true if the current sequence still matches the
* push sequence after the above wait loop and the CIL still contains
- * dirty objects.
+ * dirty objects. This is guaranteed by the push code first adding the
+ * context to the committing list before emptying the CIL.
*
- * When the push occurs, it will empty the CIL and atomically increment
- * the currect sequence past the push sequence and move it into the
- * committing list. Of course, if the CIL is clean at the time of the
- * push, it won't have pushed the CIL at all, so in that case we should
- * try the push for this sequence again from the start just in case.
+ * Hence if we don't find the context in the committing list and the
+ * current sequence number is unchanged then the CIL contents are
+ * significant. If the CIL is empty, it means there was nothing to push
+ * and that means there is nothing to wait for. If the CIL is not empty,
+ * it means we haven't yet started the push, because if it had started
+ * we would have found the context on the committing list.
*/
if (sequence == cil->xc_current_sequence &&
!list_empty(&cil->xc_cil)) {
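
With the reordering above, a waiter that cannot find its sequence on xc_committing can tell the remaining two cases apart without racing the push worker. A schematic of the waiter-side decision (not compilable on its own; names follow the hunk):

    /*
     * After scanning cil->xc_committing for `sequence`:
     *   found on the list                      -> push already started, sleep on it
     *   not found, CIL empty                   -> nothing was pending, done
     *   not found, CIL dirty, sequence same    -> push not started yet, go around again
     */
    if (sequence == cil->xc_current_sequence &&
        !list_empty(&cil->xc_cil)) {
        /* retry: the context will eventually appear on xc_committing */
    }
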
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 1fd5787add99..00cd7f3a8f59 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -193,12 +193,8 @@ xlog_bread_noalign(
bp->b_io_length = nbblks;
bp->b_error = 0;
- if (XFS_FORCED_SHUTDOWN(log->l_mp))
- return -EIO;
-
- xfs_buf_iorequest(bp);
- error = xfs_buf_iowait(bp);
- if (error)
+ error = xfs_buf_submit_wait(bp);
+ if (error && !XFS_FORCED_SHUTDOWN(log->l_mp))
xfs_buf_ioerror_alert(bp, __func__);
return error;
}
@@ -378,12 +374,14 @@ xlog_recover_iodone(
* We're not going to bother about retrying
* this during recovery. One strike!
*/
- xfs_buf_ioerror_alert(bp, __func__);
- xfs_force_shutdown(bp->b_target->bt_mount,
- SHUTDOWN_META_IO_ERROR);
+ if (!XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
+ xfs_buf_ioerror_alert(bp, __func__);
+ xfs_force_shutdown(bp->b_target->bt_mount,
+ SHUTDOWN_META_IO_ERROR);
+ }
}
bp->b_iodone = NULL;
- xfs_buf_ioend(bp, 0);
+ xfs_buf_ioend(bp);
}
/*
@@ -1445,160 +1443,6 @@ xlog_clear_stale_blocks(
******************************************************************************
*/
-STATIC xlog_recover_t *
-xlog_recover_find_tid(
- struct hlist_head *head,
- xlog_tid_t tid)
-{
- xlog_recover_t *trans;
-
- hlist_for_each_entry(trans, head, r_list) {
- if (trans->r_log_tid == tid)
- return trans;
- }
- return NULL;
-}
-
-STATIC void
-xlog_recover_new_tid(
- struct hlist_head *head,
- xlog_tid_t tid,
- xfs_lsn_t lsn)
-{
- xlog_recover_t *trans;
-
- trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP);
- trans->r_log_tid = tid;
- trans->r_lsn = lsn;
- INIT_LIST_HEAD(&trans->r_itemq);
-
- INIT_HLIST_NODE(&trans->r_list);
- hlist_add_head(&trans->r_list, head);
-}
-
-STATIC void
-xlog_recover_add_item(
- struct list_head *head)
-{
- xlog_recover_item_t *item;
-
- item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
- INIT_LIST_HEAD(&item->ri_list);
- list_add_tail(&item->ri_list, head);
-}
-
-STATIC int
-xlog_recover_add_to_cont_trans(
- struct xlog *log,
- struct xlog_recover *trans,
- xfs_caddr_t dp,
- int len)
-{
- xlog_recover_item_t *item;
- xfs_caddr_t ptr, old_ptr;
- int old_len;
-
- if (list_empty(&trans->r_itemq)) {
- /* finish copying rest of trans header */
- xlog_recover_add_item(&trans->r_itemq);
- ptr = (xfs_caddr_t) &trans->r_theader +
- sizeof(xfs_trans_header_t) - len;
- memcpy(ptr, dp, len); /* d, s, l */
- return 0;
- }
- /* take the tail entry */
- item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
-
- old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
- old_len = item->ri_buf[item->ri_cnt-1].i_len;
-
- ptr = kmem_realloc(old_ptr, len+old_len, old_len, KM_SLEEP);
- memcpy(&ptr[old_len], dp, len); /* d, s, l */
- item->ri_buf[item->ri_cnt-1].i_len += len;
- item->ri_buf[item->ri_cnt-1].i_addr = ptr;
- trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
- return 0;
-}
-
-/*
- * The next region to add is the start of a new region. It could be
- * a whole region or it could be the first part of a new region. Because
- * of this, the assumption here is that the type and size fields of all
- * format structures fit into the first 32 bits of the structure.
- *
- * This works because all regions must be 32 bit aligned. Therefore, we
- * either have both fields or we have neither field. In the case we have
- * neither field, the data part of the region is zero length. We only have
- * a log_op_header and can throw away the header since a new one will appear
- * later. If we have at least 4 bytes, then we can determine how many regions
- * will appear in the current log item.
- */
-STATIC int
-xlog_recover_add_to_trans(
- struct xlog *log,
- struct xlog_recover *trans,
- xfs_caddr_t dp,
- int len)
-{
- xfs_inode_log_format_t *in_f; /* any will do */
- xlog_recover_item_t *item;
- xfs_caddr_t ptr;
-
- if (!len)
- return 0;
- if (list_empty(&trans->r_itemq)) {
- /* we need to catch log corruptions here */
- if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
- xfs_warn(log->l_mp, "%s: bad header magic number",
- __func__);
- ASSERT(0);
- return -EIO;
- }
- if (len == sizeof(xfs_trans_header_t))
- xlog_recover_add_item(&trans->r_itemq);
- memcpy(&trans->r_theader, dp, len); /* d, s, l */
- return 0;
- }
-
- ptr = kmem_alloc(len, KM_SLEEP);
- memcpy(ptr, dp, len);
- in_f = (xfs_inode_log_format_t *)ptr;
-
- /* take the tail entry */
- item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
- if (item->ri_total != 0 &&
- item->ri_total == item->ri_cnt) {
- /* tail item is in use, get a new one */
- xlog_recover_add_item(&trans->r_itemq);
- item = list_entry(trans->r_itemq.prev,
- xlog_recover_item_t, ri_list);
- }
-
- if (item->ri_total == 0) { /* first region to be added */
- if (in_f->ilf_size == 0 ||
- in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
- xfs_warn(log->l_mp,
- "bad number of regions (%d) in inode log format",
- in_f->ilf_size);
- ASSERT(0);
- kmem_free(ptr);
- return -EIO;
- }
-
- item->ri_total = in_f->ilf_size;
- item->ri_buf =
- kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
- KM_SLEEP);
- }
- ASSERT(item->ri_total > item->ri_cnt);
- /* Description region is ri_buf[0] */
- item->ri_buf[item->ri_cnt].i_addr = ptr;
- item->ri_buf[item->ri_cnt].i_len = len;
- item->ri_cnt++;
- trace_xfs_log_recover_item_add(log, trans, item, 0);
- return 0;
-}
-
/*
* Sort the log items in the transaction.
*
@@ -3254,31 +3098,6 @@ xlog_recover_do_icreate_pass2(
return 0;
}
-/*
- * Free up any resources allocated by the transaction
- *
- * Remember that EFIs, EFDs, and IUNLINKs are handled later.
- */
-STATIC void
-xlog_recover_free_trans(
- struct xlog_recover *trans)
-{
- xlog_recover_item_t *item, *n;
- int i;
-
- list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
- /* Free the regions in the item. */
- list_del(&item->ri_list);
- for (i = 0; i < item->ri_cnt; i++)
- kmem_free(item->ri_buf[i].i_addr);
- /* Free the item itself */
- kmem_free(item->ri_buf);
- kmem_free(item);
- }
- /* Free the transaction recover structure */
- kmem_free(trans);
-}
-
STATIC void
xlog_recover_buffer_ra_pass2(
struct xlog *log,
@@ -3528,22 +3347,309 @@ out:
if (!list_empty(&done_list))
list_splice_init(&done_list, &trans->r_itemq);
- xlog_recover_free_trans(trans);
-
error2 = xfs_buf_delwri_submit(&buffer_list);
return error ? error : error2;
}
+STATIC void
+xlog_recover_add_item(
+ struct list_head *head)
+{
+ xlog_recover_item_t *item;
+
+ item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
+ INIT_LIST_HEAD(&item->ri_list);
+ list_add_tail(&item->ri_list, head);
+}
+
STATIC int
-xlog_recover_unmount_trans(
- struct xlog *log)
+xlog_recover_add_to_cont_trans(
+ struct xlog *log,
+ struct xlog_recover *trans,
+ xfs_caddr_t dp,
+ int len)
{
- /* Do nothing now */
- xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
+ xlog_recover_item_t *item;
+ xfs_caddr_t ptr, old_ptr;
+ int old_len;
+
+ if (list_empty(&trans->r_itemq)) {
+ /* finish copying rest of trans header */
+ xlog_recover_add_item(&trans->r_itemq);
+ ptr = (xfs_caddr_t) &trans->r_theader +
+ sizeof(xfs_trans_header_t) - len;
+ memcpy(ptr, dp, len);
+ return 0;
+ }
+ /* take the tail entry */
+ item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
+
+ old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
+ old_len = item->ri_buf[item->ri_cnt-1].i_len;
+
+ ptr = kmem_realloc(old_ptr, len+old_len, old_len, KM_SLEEP);
+ memcpy(&ptr[old_len], dp, len);
+ item->ri_buf[item->ri_cnt-1].i_len += len;
+ item->ri_buf[item->ri_cnt-1].i_addr = ptr;
+ trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
+ return 0;
+}
+
+/*
+ * The next region to add is the start of a new region. It could be
+ * a whole region or it could be the first part of a new region. Because
+ * of this, the assumption here is that the type and size fields of all
+ * format structures fit into the first 32 bits of the structure.
+ *
+ * This works because all regions must be 32 bit aligned. Therefore, we
+ * either have both fields or we have neither field. In the case we have
+ * neither field, the data part of the region is zero length. We only have
+ * a log_op_header and can throw away the header since a new one will appear
+ * later. If we have at least 4 bytes, then we can determine how many regions
+ * will appear in the current log item.
+ */
+STATIC int
+xlog_recover_add_to_trans(
+ struct xlog *log,
+ struct xlog_recover *trans,
+ xfs_caddr_t dp,
+ int len)
+{
+ xfs_inode_log_format_t *in_f; /* any will do */
+ xlog_recover_item_t *item;
+ xfs_caddr_t ptr;
+
+ if (!len)
+ return 0;
+ if (list_empty(&trans->r_itemq)) {
+ /* we need to catch log corruptions here */
+ if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
+ xfs_warn(log->l_mp, "%s: bad header magic number",
+ __func__);
+ ASSERT(0);
+ return -EIO;
+ }
+ if (len == sizeof(xfs_trans_header_t))
+ xlog_recover_add_item(&trans->r_itemq);
+ memcpy(&trans->r_theader, dp, len);
+ return 0;
+ }
+
+ ptr = kmem_alloc(len, KM_SLEEP);
+ memcpy(ptr, dp, len);
+ in_f = (xfs_inode_log_format_t *)ptr;
+
+ /* take the tail entry */
+ item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
+ if (item->ri_total != 0 &&
+ item->ri_total == item->ri_cnt) {
+ /* tail item is in use, get a new one */
+ xlog_recover_add_item(&trans->r_itemq);
+ item = list_entry(trans->r_itemq.prev,
+ xlog_recover_item_t, ri_list);
+ }
+
+ if (item->ri_total == 0) { /* first region to be added */
+ if (in_f->ilf_size == 0 ||
+ in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
+ xfs_warn(log->l_mp,
+ "bad number of regions (%d) in inode log format",
+ in_f->ilf_size);
+ ASSERT(0);
+ kmem_free(ptr);
+ return -EIO;
+ }
+
+ item->ri_total = in_f->ilf_size;
+ item->ri_buf =
+ kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
+ KM_SLEEP);
+ }
+ ASSERT(item->ri_total > item->ri_cnt);
+ /* Description region is ri_buf[0] */
+ item->ri_buf[item->ri_cnt].i_addr = ptr;
+ item->ri_buf[item->ri_cnt].i_len = len;
+ item->ri_cnt++;
+ trace_xfs_log_recover_item_add(log, trans, item, 0);
return 0;
}
/*
+ * Free up any resources allocated by the transaction
+ *
+ * Remember that EFIs, EFDs, and IUNLINKs are handled later.
+ */
+STATIC void
+xlog_recover_free_trans(
+ struct xlog_recover *trans)
+{
+ xlog_recover_item_t *item, *n;
+ int i;
+
+ list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
+ /* Free the regions in the item. */
+ list_del(&item->ri_list);
+ for (i = 0; i < item->ri_cnt; i++)
+ kmem_free(item->ri_buf[i].i_addr);
+ /* Free the item itself */
+ kmem_free(item->ri_buf);
+ kmem_free(item);
+ }
+ /* Free the transaction recover structure */
+ kmem_free(trans);
+}
+
+/*
+ * On error or completion, trans is freed.
+ */
+STATIC int
+xlog_recovery_process_trans(
+ struct xlog *log,
+ struct xlog_recover *trans,
+ xfs_caddr_t dp,
+ unsigned int len,
+ unsigned int flags,
+ int pass)
+{
+ int error = 0;
+ bool freeit = false;
+
+ /* mask off ophdr transaction container flags */
+ flags &= ~XLOG_END_TRANS;
+ if (flags & XLOG_WAS_CONT_TRANS)
+ flags &= ~XLOG_CONTINUE_TRANS;
+
+ /*
+ * Callees must not free the trans structure. We'll decide if we need to
+ * free it or not based on the operation being done and its result.
+ */
+ switch (flags) {
+ /* expected flag values */
+ case 0:
+ case XLOG_CONTINUE_TRANS:
+ error = xlog_recover_add_to_trans(log, trans, dp, len);
+ break;
+ case XLOG_WAS_CONT_TRANS:
+ error = xlog_recover_add_to_cont_trans(log, trans, dp, len);
+ break;
+ case XLOG_COMMIT_TRANS:
+ error = xlog_recover_commit_trans(log, trans, pass);
+ /* success or fail, we are now done with this transaction. */
+ freeit = true;
+ break;
+
+ /* unexpected flag values */
+ case XLOG_UNMOUNT_TRANS:
+ /* just skip trans */
+ xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
+ freeit = true;
+ break;
+ case XLOG_START_TRANS:
+ default:
+ xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags);
+ ASSERT(0);
+ error = -EIO;
+ break;
+ }
+ if (error || freeit)
+ xlog_recover_free_trans(trans);
+ return error;
+}
+
+/*
+ * Lookup the transaction recovery structure associated with the ID in the
+ * current ophdr. If the transaction doesn't exist and the start flag is set in
+ * the ophdr, then allocate a new transaction for future ID matches to find.
+ * Either way, return what we found during the lookup - an existing transaction
+ * or nothing.
+ */
+STATIC struct xlog_recover *
+xlog_recover_ophdr_to_trans(
+ struct hlist_head rhash[],
+ struct xlog_rec_header *rhead,
+ struct xlog_op_header *ohead)
+{
+ struct xlog_recover *trans;
+ xlog_tid_t tid;
+ struct hlist_head *rhp;
+
+ tid = be32_to_cpu(ohead->oh_tid);
+ rhp = &rhash[XLOG_RHASH(tid)];
+ hlist_for_each_entry(trans, rhp, r_list) {
+ if (trans->r_log_tid == tid)
+ return trans;
+ }
+
+ /*
+ * skip over non-start transaction headers - we could be
+ * processing slack space before the next transaction starts
+ */
+ if (!(ohead->oh_flags & XLOG_START_TRANS))
+ return NULL;
+
+ ASSERT(be32_to_cpu(ohead->oh_len) == 0);
+
+ /*
+ * This is a new transaction so allocate a new recovery container to
+ * hold the recovery ops that will follow.
+ */
+ trans = kmem_zalloc(sizeof(struct xlog_recover), KM_SLEEP);
+ trans->r_log_tid = tid;
+ trans->r_lsn = be64_to_cpu(rhead->h_lsn);
+ INIT_LIST_HEAD(&trans->r_itemq);
+ INIT_HLIST_NODE(&trans->r_list);
+ hlist_add_head(&trans->r_list, rhp);
+
+ /*
+ * Nothing more to do for this ophdr. Items to be added to this new
+ * transaction will be in subsequent ophdr containers.
+ */
+ return NULL;
+}
+
+STATIC int
+xlog_recover_process_ophdr(
+ struct xlog *log,
+ struct hlist_head rhash[],
+ struct xlog_rec_header *rhead,
+ struct xlog_op_header *ohead,
+ xfs_caddr_t dp,
+ xfs_caddr_t end,
+ int pass)
+{
+ struct xlog_recover *trans;
+ unsigned int len;
+
+ /* Do we understand who wrote this op? */
+ if (ohead->oh_clientid != XFS_TRANSACTION &&
+ ohead->oh_clientid != XFS_LOG) {
+ xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
+ __func__, ohead->oh_clientid);
+ ASSERT(0);
+ return -EIO;
+ }
+
+ /*
+ * Check the ophdr contains all the data it is supposed to contain.
+ */
+ len = be32_to_cpu(ohead->oh_len);
+ if (dp + len > end) {
+ xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len);
+ WARN_ON(1);
+ return -EIO;
+ }
+
+ trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead);
+ if (!trans) {
+ /* nothing to do, so skip over this ophdr */
+ return 0;
+ }
+
+ return xlog_recovery_process_trans(log, trans, dp, len,
+ ohead->oh_flags, pass);
+}
+
+/*
* There are two valid states of the r_state field. 0 indicates that the
* transaction structure is in a normal state. We have either seen the
* start of the transaction or the last operation we added was not a partial
@@ -3560,86 +3666,30 @@ xlog_recover_process_data(
xfs_caddr_t dp,
int pass)
{
- xfs_caddr_t lp;
+ struct xlog_op_header *ohead;
+ xfs_caddr_t end;
int num_logops;
- xlog_op_header_t *ohead;
- xlog_recover_t *trans;
- xlog_tid_t tid;
int error;
- unsigned long hash;
- uint flags;
- lp = dp + be32_to_cpu(rhead->h_len);
+ end = dp + be32_to_cpu(rhead->h_len);
num_logops = be32_to_cpu(rhead->h_num_logops);
/* check the log format matches our own - else we can't recover */
if (xlog_header_check_recover(log->l_mp, rhead))
return -EIO;
- while ((dp < lp) && num_logops) {
- ASSERT(dp + sizeof(xlog_op_header_t) <= lp);
- ohead = (xlog_op_header_t *)dp;
- dp += sizeof(xlog_op_header_t);
- if (ohead->oh_clientid != XFS_TRANSACTION &&
- ohead->oh_clientid != XFS_LOG) {
- xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
- __func__, ohead->oh_clientid);
- ASSERT(0);
- return -EIO;
- }
- tid = be32_to_cpu(ohead->oh_tid);
- hash = XLOG_RHASH(tid);
- trans = xlog_recover_find_tid(&rhash[hash], tid);
- if (trans == NULL) { /* not found; add new tid */
- if (ohead->oh_flags & XLOG_START_TRANS)
- xlog_recover_new_tid(&rhash[hash], tid,
- be64_to_cpu(rhead->h_lsn));
- } else {
- if (dp + be32_to_cpu(ohead->oh_len) > lp) {
- xfs_warn(log->l_mp, "%s: bad length 0x%x",
- __func__, be32_to_cpu(ohead->oh_len));
- WARN_ON(1);
- return -EIO;
- }
- flags = ohead->oh_flags & ~XLOG_END_TRANS;
- if (flags & XLOG_WAS_CONT_TRANS)
- flags &= ~XLOG_CONTINUE_TRANS;
- switch (flags) {
- case XLOG_COMMIT_TRANS:
- error = xlog_recover_commit_trans(log,
- trans, pass);
- break;
- case XLOG_UNMOUNT_TRANS:
- error = xlog_recover_unmount_trans(log);
- break;
- case XLOG_WAS_CONT_TRANS:
- error = xlog_recover_add_to_cont_trans(log,
- trans, dp,
- be32_to_cpu(ohead->oh_len));
- break;
- case XLOG_START_TRANS:
- xfs_warn(log->l_mp, "%s: bad transaction",
- __func__);
- ASSERT(0);
- error = -EIO;
- break;
- case 0:
- case XLOG_CONTINUE_TRANS:
- error = xlog_recover_add_to_trans(log, trans,
- dp, be32_to_cpu(ohead->oh_len));
- break;
- default:
- xfs_warn(log->l_mp, "%s: bad flag 0x%x",
- __func__, flags);
- ASSERT(0);
- error = -EIO;
- break;
- }
- if (error) {
- xlog_recover_free_trans(trans);
- return error;
- }
- }
+ while ((dp < end) && num_logops) {
+
+ ohead = (struct xlog_op_header *)dp;
+ dp += sizeof(*ohead);
+ ASSERT(dp <= end);
+
+ /* errors will abort recovery */
+ error = xlog_recover_process_ophdr(log, rhash, rhead, ohead,
+ dp, end, pass);
+ if (error)
+ return error;
+
dp += be32_to_cpu(ohead->oh_len);
num_logops--;
}
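
After the refactor, xlog_recover_process_data() is just a walk over op headers; each header is followed by oh_len payload bytes, and the per-ophdr helper decides what to do with them. A stripped-down sketch of that walk (process_one() is a placeholder for xlog_recover_process_ophdr(); this is not compilable kernel code):

    while (dp < end && num_logops--) {
        struct xlog_op_header *ohead = (struct xlog_op_header *)dp;

        dp += sizeof(*ohead);                 /* step over the header...  */
        error = process_one(ohead, dp);       /* ...hand off oh_len bytes */
        if (error)
            return error;
        dp += be32_to_cpu(ohead->oh_len);     /* ...and skip the payload  */
    }
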
@@ -4132,41 +4182,13 @@ xlog_do_recovery_pass(
}
memset(rhash, 0, sizeof(rhash));
- if (tail_blk <= head_blk) {
- for (blk_no = tail_blk; blk_no < head_blk; ) {
- error = xlog_bread(log, blk_no, hblks, hbp, &offset);
- if (error)
- goto bread_err2;
-
- rhead = (xlog_rec_header_t *)offset;
- error = xlog_valid_rec_header(log, rhead, blk_no);
- if (error)
- goto bread_err2;
-
- /* blocks in data section */
- bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
- error = xlog_bread(log, blk_no + hblks, bblks, dbp,
- &offset);
- if (error)
- goto bread_err2;
-
- error = xlog_unpack_data(rhead, offset, log);
- if (error)
- goto bread_err2;
-
- error = xlog_recover_process_data(log,
- rhash, rhead, offset, pass);
- if (error)
- goto bread_err2;
- blk_no += bblks + hblks;
- }
- } else {
+ blk_no = tail_blk;
+ if (tail_blk > head_blk) {
/*
* Perform recovery around the end of the physical log.
* When the head is not on the same cycle number as the tail,
- * we can't do a sequential recovery as above.
+ * we can't do a sequential recovery.
*/
- blk_no = tail_blk;
while (blk_no < log->l_logBBsize) {
/*
* Check for header wrapping around physical end-of-log
@@ -4280,34 +4302,35 @@ xlog_do_recovery_pass(
ASSERT(blk_no >= log->l_logBBsize);
blk_no -= log->l_logBBsize;
+ }
- /* read first part of physical log */
- while (blk_no < head_blk) {
- error = xlog_bread(log, blk_no, hblks, hbp, &offset);
- if (error)
- goto bread_err2;
+ /* read first part of physical log */
+ while (blk_no < head_blk) {
+ error = xlog_bread(log, blk_no, hblks, hbp, &offset);
+ if (error)
+ goto bread_err2;
- rhead = (xlog_rec_header_t *)offset;
- error = xlog_valid_rec_header(log, rhead, blk_no);
- if (error)
- goto bread_err2;
+ rhead = (xlog_rec_header_t *)offset;
+ error = xlog_valid_rec_header(log, rhead, blk_no);
+ if (error)
+ goto bread_err2;
- bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
- error = xlog_bread(log, blk_no+hblks, bblks, dbp,
- &offset);
- if (error)
- goto bread_err2;
+ /* blocks in data section */
+ bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
+ error = xlog_bread(log, blk_no+hblks, bblks, dbp,
+ &offset);
+ if (error)
+ goto bread_err2;
- error = xlog_unpack_data(rhead, offset, log);
- if (error)
- goto bread_err2;
+ error = xlog_unpack_data(rhead, offset, log);
+ if (error)
+ goto bread_err2;
- error = xlog_recover_process_data(log, rhash,
- rhead, offset, pass);
- if (error)
- goto bread_err2;
- blk_no += bblks + hblks;
- }
+ error = xlog_recover_process_data(log, rhash,
+ rhead, offset, pass);
+ if (error)
+ goto bread_err2;
+ blk_no += bblks + hblks;
}
bread_err2:
@@ -4427,16 +4450,12 @@ xlog_do_recover(
XFS_BUF_UNASYNC(bp);
bp->b_ops = &xfs_sb_buf_ops;
- if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
- xfs_buf_relse(bp);
- return -EIO;
- }
-
- xfs_buf_iorequest(bp);
- error = xfs_buf_iowait(bp);
+ error = xfs_buf_submit_wait(bp);
if (error) {
- xfs_buf_ioerror_alert(bp, __func__);
- ASSERT(0);
+ if (!XFS_FORCED_SHUTDOWN(log->l_mp)) {
+ xfs_buf_ioerror_alert(bp, __func__);
+ ASSERT(0);
+ }
xfs_buf_relse(bp);
return error;
}
@@ -4509,6 +4528,18 @@ xlog_recover(
return -EINVAL;
}
+ /*
+ * Delay log recovery if the debug hook is set. This is debug
+ * instrumentation to coordinate simulation of I/O failures with
+ * log recovery.
+ */
+ if (xfs_globals.log_recovery_delay) {
+ xfs_notice(log->l_mp,
+ "Delaying log recovery for %d seconds.",
+ xfs_globals.log_recovery_delay);
+ msleep(xfs_globals.log_recovery_delay * 1000);
+ }
+
xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
log->l_mp->m_logname ? log->l_mp->m_logname
: "internal");
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index fbf0384a466f..51435dbce9c4 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -61,8 +61,6 @@ static DEFINE_MUTEX(xfs_uuid_table_mutex);
static int xfs_uuid_table_size;
static uuid_t *xfs_uuid_table;
-extern struct kset *xfs_kset;
-
/*
* See if the UUID is unique among mounted XFS filesystems.
* Mount fails if UUID is nil or a FS with the same UUID is already mounted.
@@ -302,21 +300,15 @@ xfs_readsb(
* access to the superblock.
*/
reread:
- bp = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR,
- BTOBB(sector_size), 0, buf_ops);
- if (!bp) {
- if (loud)
- xfs_warn(mp, "SB buffer read failed");
- return -EIO;
- }
- if (bp->b_error) {
- error = bp->b_error;
+ error = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR,
+ BTOBB(sector_size), 0, &bp, buf_ops);
+ if (error) {
if (loud)
xfs_warn(mp, "SB validate failed with error %d.", error);
/* bad CRC means corrupted metadata */
if (error == -EFSBADCRC)
error = -EFSCORRUPTED;
- goto release_buf;
+ return error;
}
/*
@@ -546,40 +538,43 @@ xfs_set_inoalignment(xfs_mount_t *mp)
* Check that the data (and log if separate) is an ok size.
*/
STATIC int
-xfs_check_sizes(xfs_mount_t *mp)
+xfs_check_sizes(
+ struct xfs_mount *mp)
{
- xfs_buf_t *bp;
+ struct xfs_buf *bp;
xfs_daddr_t d;
+ int error;
d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) {
xfs_warn(mp, "filesystem size mismatch detected");
return -EFBIG;
}
- bp = xfs_buf_read_uncached(mp->m_ddev_targp,
+ error = xfs_buf_read_uncached(mp->m_ddev_targp,
d - XFS_FSS_TO_BB(mp, 1),
- XFS_FSS_TO_BB(mp, 1), 0, NULL);
- if (!bp) {
+ XFS_FSS_TO_BB(mp, 1), 0, &bp, NULL);
+ if (error) {
xfs_warn(mp, "last sector read failed");
- return -EIO;
+ return error;
}
xfs_buf_relse(bp);
- if (mp->m_logdev_targp != mp->m_ddev_targp) {
- d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
- if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
- xfs_warn(mp, "log size mismatch detected");
- return -EFBIG;
- }
- bp = xfs_buf_read_uncached(mp->m_logdev_targp,
+ if (mp->m_logdev_targp == mp->m_ddev_targp)
+ return 0;
+
+ d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
+ if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
+ xfs_warn(mp, "log size mismatch detected");
+ return -EFBIG;
+ }
+ error = xfs_buf_read_uncached(mp->m_logdev_targp,
d - XFS_FSB_TO_BB(mp, 1),
- XFS_FSB_TO_BB(mp, 1), 0, NULL);
- if (!bp) {
- xfs_warn(mp, "log device read failed");
- return -EIO;
- }
- xfs_buf_relse(bp);
+ XFS_FSB_TO_BB(mp, 1), 0, &bp, NULL);
+ if (error) {
+ xfs_warn(mp, "log device read failed");
+ return error;
}
+ xfs_buf_relse(bp);
return 0;
}
@@ -729,7 +724,6 @@ xfs_mountfs(
xfs_set_maxicount(mp);
- mp->m_kobj.kobject.kset = xfs_kset;
error = xfs_sysfs_init(&mp->m_kobj, &xfs_mp_ktype, NULL, mp->m_fsname);
if (error)
goto out;
diff --git a/fs/xfs/xfs_mru_cache.c b/fs/xfs/xfs_mru_cache.c
index 1eb6f3df698c..30ecca3037e3 100644
--- a/fs/xfs/xfs_mru_cache.c
+++ b/fs/xfs/xfs_mru_cache.c
@@ -304,7 +304,8 @@ _xfs_mru_cache_reap(
int
xfs_mru_cache_init(void)
{
- xfs_mru_reap_wq = alloc_workqueue("xfs_mru_cache", WQ_MEM_RECLAIM, 1);
+ xfs_mru_reap_wq = alloc_workqueue("xfs_mru_cache",
+ WQ_MEM_RECLAIM|WQ_FREEZABLE, 1);
if (!xfs_mru_reap_wq)
return -ENOMEM;
return 0;
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 10232102b4a6..d68f23021af3 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -434,6 +434,7 @@ xfs_qm_dquot_isolate(
struct list_head *item,
spinlock_t *lru_lock,
void *arg)
+ __releases(lru_lock) __acquires(lru_lock)
{
struct xfs_dquot *dqp = container_of(item,
struct xfs_dquot, q_lru);
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 909e143b87ae..e1175ea9b551 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -46,7 +46,7 @@
* Keeps track of a current summary block, so we don't keep reading
* it from the buffer cache.
*/
-STATIC int /* error */
+static int
xfs_rtget_summary(
xfs_mount_t *mp, /* file system mount structure */
xfs_trans_t *tp, /* transaction pointer */
@@ -56,60 +56,9 @@ xfs_rtget_summary(
xfs_fsblock_t *rsb, /* in/out: summary block number */
xfs_suminfo_t *sum) /* out: summary info for this block */
{
- xfs_buf_t *bp; /* buffer for summary block */
- int error; /* error value */
- xfs_fsblock_t sb; /* summary fsblock */
- int so; /* index into the summary file */
- xfs_suminfo_t *sp; /* pointer to returned data */
-
- /*
- * Compute entry number in the summary file.
- */
- so = XFS_SUMOFFS(mp, log, bbno);
- /*
- * Compute the block number in the summary file.
- */
- sb = XFS_SUMOFFSTOBLOCK(mp, so);
- /*
- * If we have an old buffer, and the block number matches, use that.
- */
- if (rbpp && *rbpp && *rsb == sb)
- bp = *rbpp;
- /*
- * Otherwise we have to get the buffer.
- */
- else {
- /*
- * If there was an old one, get rid of it first.
- */
- if (rbpp && *rbpp)
- xfs_trans_brelse(tp, *rbpp);
- error = xfs_rtbuf_get(mp, tp, sb, 1, &bp);
- if (error) {
- return error;
- }
- /*
- * Remember this buffer and block for the next call.
- */
- if (rbpp) {
- *rbpp = bp;
- *rsb = sb;
- }
- }
- /*
- * Point to the summary information & copy it out.
- */
- sp = XFS_SUMPTR(mp, bp, so);
- *sum = *sp;
- /*
- * Drop the buffer if we're not asked to remember it.
- */
- if (!rbpp)
- xfs_trans_brelse(tp, bp);
- return 0;
+ return xfs_rtmodify_summary_int(mp, tp, log, bbno, 0, rbpp, rsb, sum);
}
-
/*
* Return whether there are any free extents in the size range given
* by low and high, for the bitmap block bbno.
@@ -972,16 +921,11 @@ xfs_growfs_rt(
/*
* Read in the last block of the device, make sure it exists.
*/
- bp = xfs_buf_read_uncached(mp->m_rtdev_targp,
+ error = xfs_buf_read_uncached(mp->m_rtdev_targp,
XFS_FSB_TO_BB(mp, nrblocks - 1),
- XFS_FSB_TO_BB(mp, 1), 0, NULL);
- if (!bp)
- return -EIO;
- if (bp->b_error) {
- error = bp->b_error;
- xfs_buf_relse(bp);
+ XFS_FSB_TO_BB(mp, 1), 0, &bp, NULL);
+ if (error)
return error;
- }
xfs_buf_relse(bp);
/*
@@ -1235,11 +1179,12 @@ xfs_rtallocate_extent(
*/
int /* error */
xfs_rtmount_init(
- xfs_mount_t *mp) /* file system mount structure */
+ struct xfs_mount *mp) /* file system mount structure */
{
- xfs_buf_t *bp; /* buffer for last block of subvolume */
- xfs_daddr_t d; /* address of last block of subvolume */
- xfs_sb_t *sbp; /* filesystem superblock copy in mount */
+ struct xfs_buf *bp; /* buffer for last block of subvolume */
+ struct xfs_sb *sbp; /* filesystem superblock copy in mount */
+ xfs_daddr_t d; /* address of last block of subvolume */
+ int error;
sbp = &mp->m_sb;
if (sbp->sb_rblocks == 0)
@@ -1265,14 +1210,12 @@ xfs_rtmount_init(
(unsigned long long) mp->m_sb.sb_rblocks);
return -EFBIG;
}
- bp = xfs_buf_read_uncached(mp->m_rtdev_targp,
+ error = xfs_buf_read_uncached(mp->m_rtdev_targp,
d - XFS_FSB_TO_BB(mp, 1),
- XFS_FSB_TO_BB(mp, 1), 0, NULL);
- if (!bp || bp->b_error) {
+ XFS_FSB_TO_BB(mp, 1), 0, &bp, NULL);
+ if (error) {
xfs_warn(mp, "realtime device size check failed");
- if (bp)
- xfs_buf_relse(bp);
- return -EIO;
+ return error;
}
xfs_buf_relse(bp);
return 0;
diff --git a/fs/xfs/xfs_rtalloc.h b/fs/xfs/xfs_rtalloc.h
index c642795324af..76c0a4a9bb17 100644
--- a/fs/xfs/xfs_rtalloc.h
+++ b/fs/xfs/xfs_rtalloc.h
@@ -111,6 +111,10 @@ int xfs_rtfind_forw(struct xfs_mount *mp, struct xfs_trans *tp,
xfs_rtblock_t *rtblock);
int xfs_rtmodify_range(struct xfs_mount *mp, struct xfs_trans *tp,
xfs_rtblock_t start, xfs_extlen_t len, int val);
+int xfs_rtmodify_summary_int(struct xfs_mount *mp, struct xfs_trans *tp,
+ int log, xfs_rtblock_t bbno, int delta,
+ xfs_buf_t **rbpp, xfs_fsblock_t *rsb,
+ xfs_suminfo_t *sum);
int xfs_rtmodify_summary(struct xfs_mount *mp, struct xfs_trans *tp, int log,
xfs_rtblock_t bbno, int delta, xfs_buf_t **rbpp,
xfs_fsblock_t *rsb);
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index b194652033cd..9f622feda6a4 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -47,6 +47,7 @@
#include "xfs_dinode.h"
#include "xfs_filestream.h"
#include "xfs_quota.h"
+#include "xfs_sysfs.h"
#include <linux/namei.h>
#include <linux/init.h>
@@ -61,7 +62,11 @@
static const struct super_operations xfs_super_operations;
static kmem_zone_t *xfs_ioend_zone;
mempool_t *xfs_ioend_pool;
-struct kset *xfs_kset;
+
+static struct kset *xfs_kset; /* top-level xfs sysfs dir */
+#ifdef DEBUG
+static struct xfs_kobj xfs_dbg_kobj; /* global debug sysfs attrs */
+#endif
#define MNTOPT_LOGBUFS "logbufs" /* number of XFS log buffers */
#define MNTOPT_LOGBSIZE "logbsize" /* size of XFS log buffers */
@@ -838,32 +843,32 @@ xfs_init_mount_workqueues(
struct xfs_mount *mp)
{
mp->m_data_workqueue = alloc_workqueue("xfs-data/%s",
- WQ_MEM_RECLAIM, 0, mp->m_fsname);
+ WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
if (!mp->m_data_workqueue)
goto out;
mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
- WQ_MEM_RECLAIM, 0, mp->m_fsname);
+ WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
if (!mp->m_unwritten_workqueue)
goto out_destroy_data_iodone_queue;
mp->m_cil_workqueue = alloc_workqueue("xfs-cil/%s",
- WQ_MEM_RECLAIM, 0, mp->m_fsname);
+ WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
if (!mp->m_cil_workqueue)
goto out_destroy_unwritten;
mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
- 0, 0, mp->m_fsname);
+ WQ_FREEZABLE, 0, mp->m_fsname);
if (!mp->m_reclaim_workqueue)
goto out_destroy_cil;
mp->m_log_workqueue = alloc_workqueue("xfs-log/%s",
- 0, 0, mp->m_fsname);
+ WQ_FREEZABLE, 0, mp->m_fsname);
if (!mp->m_log_workqueue)
goto out_destroy_reclaim;
mp->m_eofblocks_workqueue = alloc_workqueue("xfs-eofblocks/%s",
- 0, 0, mp->m_fsname);
+ WQ_FREEZABLE, 0, mp->m_fsname);
if (!mp->m_eofblocks_workqueue)
goto out_destroy_log;
@@ -1406,6 +1411,7 @@ xfs_fs_fill_super(
atomic_set(&mp->m_active_trans, 0);
INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
INIT_DELAYED_WORK(&mp->m_eofblocks_work, xfs_eofblocks_worker);
+ mp->m_kobj.kobject.kset = xfs_kset;
mp->m_super = sb;
sb->s_fs_info = mp;
@@ -1715,7 +1721,8 @@ xfs_init_workqueues(void)
* AGs in all the filesystems mounted. Hence use the default large
* max_active value for this workqueue.
*/
- xfs_alloc_wq = alloc_workqueue("xfsalloc", WQ_MEM_RECLAIM, 0);
+ xfs_alloc_wq = alloc_workqueue("xfsalloc",
+ WQ_MEM_RECLAIM|WQ_FREEZABLE, 0);
if (!xfs_alloc_wq)
return -ENOMEM;
@@ -1768,9 +1775,16 @@ init_xfs_fs(void)
goto out_sysctl_unregister;;
}
- error = xfs_qm_init();
+#ifdef DEBUG
+ xfs_dbg_kobj.kobject.kset = xfs_kset;
+ error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
if (error)
goto out_kset_unregister;
+#endif
+
+ error = xfs_qm_init();
+ if (error)
+ goto out_remove_kobj;
error = register_filesystem(&xfs_fs_type);
if (error)
@@ -1779,7 +1793,11 @@ init_xfs_fs(void)
out_qm_exit:
xfs_qm_exit();
+ out_remove_kobj:
+#ifdef DEBUG
+ xfs_sysfs_del(&xfs_dbg_kobj);
out_kset_unregister:
+#endif
kset_unregister(xfs_kset);
out_sysctl_unregister:
xfs_sysctl_unregister();
@@ -1802,6 +1820,9 @@ exit_xfs_fs(void)
{
xfs_qm_exit();
unregister_filesystem(&xfs_fs_type);
+#ifdef DEBUG
+ xfs_sysfs_del(&xfs_dbg_kobj);
+#endif
kset_unregister(xfs_kset);
xfs_sysctl_unregister();
xfs_cleanup_procfs();
diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c
index 6a944a2cd36f..02ae62a998e0 100644
--- a/fs/xfs/xfs_symlink.c
+++ b/fs/xfs/xfs_symlink.c
@@ -269,9 +269,11 @@ xfs_symlink(
/*
* Check for ability to enter directory entry, if no space reserved.
*/
- error = xfs_dir_canenter(tp, dp, link_name, resblks);
- if (error)
- goto error_return;
+ if (!resblks) {
+ error = xfs_dir_canenter(tp, dp, link_name);
+ if (error)
+ goto error_return;
+ }
/*
* Initialize the bmap freelist prior to calling either
* bmapi or the directory create code.
diff --git a/fs/xfs/xfs_sysctl.h b/fs/xfs/xfs_sysctl.h
index bd8e157c20ef..ffef45375754 100644
--- a/fs/xfs/xfs_sysctl.h
+++ b/fs/xfs/xfs_sysctl.h
@@ -92,6 +92,11 @@ enum {
extern xfs_param_t xfs_params;
+struct xfs_globals {
+ int log_recovery_delay; /* log recovery delay (secs) */
+};
+extern struct xfs_globals xfs_globals;
+
#ifdef CONFIG_SYSCTL
extern int xfs_sysctl_register(void);
extern void xfs_sysctl_unregister(void);
diff --git a/fs/xfs/xfs_sysfs.c b/fs/xfs/xfs_sysfs.c
index 9835139ce1ec..aa03670851d8 100644
--- a/fs/xfs/xfs_sysfs.c
+++ b/fs/xfs/xfs_sysfs.c
@@ -51,6 +51,80 @@ struct kobj_type xfs_mp_ktype = {
.release = xfs_sysfs_release,
};
+#ifdef DEBUG
+/* debug */
+
+STATIC ssize_t
+log_recovery_delay_store(
+ const char *buf,
+ size_t count,
+ void *data)
+{
+ int ret;
+ int val;
+
+ ret = kstrtoint(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ if (val < 0 || val > 60)
+ return -EINVAL;
+
+ xfs_globals.log_recovery_delay = val;
+
+ return count;
+}
+
+STATIC ssize_t
+log_recovery_delay_show(
+ char *buf,
+ void *data)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.log_recovery_delay);
+}
+XFS_SYSFS_ATTR_RW(log_recovery_delay);
+
+static struct attribute *xfs_dbg_attrs[] = {
+ ATTR_LIST(log_recovery_delay),
+ NULL,
+};
+
+STATIC ssize_t
+xfs_dbg_show(
+ struct kobject *kobject,
+ struct attribute *attr,
+ char *buf)
+{
+ struct xfs_sysfs_attr *xfs_attr = to_attr(attr);
+
+ return xfs_attr->show ? xfs_attr->show(buf, NULL) : 0;
+}
+
+STATIC ssize_t
+xfs_dbg_store(
+ struct kobject *kobject,
+ struct attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct xfs_sysfs_attr *xfs_attr = to_attr(attr);
+
+ return xfs_attr->store ? xfs_attr->store(buf, count, NULL) : 0;
+}
+
+static struct sysfs_ops xfs_dbg_ops = {
+ .show = xfs_dbg_show,
+ .store = xfs_dbg_store,
+};
+
+struct kobj_type xfs_dbg_ktype = {
+ .release = xfs_sysfs_release,
+ .sysfs_ops = &xfs_dbg_ops,
+ .default_attrs = xfs_dbg_attrs,
+};
+
+#endif /* DEBUG */
+
/* xlog */
STATIC ssize_t
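
Assuming the top-level xfs kset lives at /sys/fs/xfs (as it does in mainline) and given that the debug kobject is named "debug", the new attribute appears as /sys/fs/xfs/debug/log_recovery_delay on DEBUG builds. A small userspace sketch for driving it from a test harness (path and delay value are assumptions of this example, not mandated by the patch):

    #include <stdio.h>

    int main(void)
    {
        /* ask the next log recovery to pause for 10 seconds */
        FILE *f = fopen("/sys/fs/xfs/debug/log_recovery_delay", "w");

        if (!f)
            return 1;        /* attribute only exists on CONFIG_XFS_DEBUG kernels */
        fprintf(f, "10\n");
        return fclose(f) ? 1 : 0;
    }
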
diff --git a/fs/xfs/xfs_sysfs.h b/fs/xfs/xfs_sysfs.h
index 54a2091183c0..240eee35f342 100644
--- a/fs/xfs/xfs_sysfs.h
+++ b/fs/xfs/xfs_sysfs.h
@@ -20,6 +20,7 @@
#define __XFS_SYSFS_H__
extern struct kobj_type xfs_mp_ktype; /* xfs_mount */
+extern struct kobj_type xfs_dbg_ktype; /* debug */
extern struct kobj_type xfs_log_ktype; /* xlog */
static inline struct xfs_kobj *
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 152f82782630..51372e34d988 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -349,7 +349,8 @@ DEFINE_BUF_EVENT(xfs_buf_free);
DEFINE_BUF_EVENT(xfs_buf_hold);
DEFINE_BUF_EVENT(xfs_buf_rele);
DEFINE_BUF_EVENT(xfs_buf_iodone);
-DEFINE_BUF_EVENT(xfs_buf_iorequest);
+DEFINE_BUF_EVENT(xfs_buf_submit);
+DEFINE_BUF_EVENT(xfs_buf_submit_wait);
DEFINE_BUF_EVENT(xfs_buf_bawrite);
DEFINE_BUF_EVENT(xfs_buf_lock);
DEFINE_BUF_EVENT(xfs_buf_lock_done);
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 96c898e7ac9a..e2b2216b1635 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -318,20 +318,10 @@ xfs_trans_read_buf_map(
XFS_BUF_READ(bp);
bp->b_ops = ops;
- /*
- * XXX(hch): clean up the error handling here to be less
- * of a mess..
- */
- if (XFS_FORCED_SHUTDOWN(mp)) {
- trace_xfs_bdstrat_shut(bp, _RET_IP_);
- xfs_bioerror_relse(bp);
- } else {
- xfs_buf_iorequest(bp);
- }
-
- error = xfs_buf_iowait(bp);
+ error = xfs_buf_submit_wait(bp);
if (error) {
- xfs_buf_ioerror_alert(bp, __func__);
+ if (!XFS_FORCED_SHUTDOWN(mp))
+ xfs_buf_ioerror_alert(bp, __func__);
xfs_buf_relse(bp);
/*
* We can gracefully recover from most read
diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c
index 50c3f5614288..cdb4d86520e1 100644
--- a/fs/xfs/xfs_trans_inode.c
+++ b/fs/xfs/xfs_trans_inode.c
@@ -70,7 +70,7 @@ xfs_trans_ichgtime(
int flags)
{
struct inode *inode = VFS_I(ip);
- timespec_t tv;
+ struct timespec tv;
ASSERT(tp);
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 9c79e7603459..1973ad2b13f4 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -18,14 +18,100 @@
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
+/*
+ * atomic_$op() - $op integer to atomic variable
+ * @i: integer value to $op
+ * @v: pointer to the atomic variable
+ *
+ * Atomically $ops @i to @v. Does not strictly guarantee a memory-barrier, use
+ * smp_mb__{before,after}_atomic().
+ */
+
+/*
+ * atomic_$op_return() - $op integer to atomic variable and returns the result
+ * @i: integer value to $op
+ * @v: pointer to the atomic variable
+ *
+ * Atomically $ops @i to @v. Does imply a full memory barrier.
+ */
+
#ifdef CONFIG_SMP
-/* Force people to define core atomics */
-# if !defined(atomic_add_return) || !defined(atomic_sub_return) || \
- !defined(atomic_clear_mask) || !defined(atomic_set_mask)
-# error "SMP requires a little arch-specific magic"
-# endif
+
+/* we can build all atomic primitives from cmpxchg */
+
+#define ATOMIC_OP(op, c_op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ int c, old; \
+ \
+ c = v->counter; \
+ while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
+ c = old; \
+}
+
+#define ATOMIC_OP_RETURN(op, c_op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ int c, old; \
+ \
+ c = v->counter; \
+ while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
+ c = old; \
+ \
+ return c c_op i; \
+}
+
+#else
+
+#include <linux/irqflags.h>
+
+#define ATOMIC_OP(op, c_op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ v->counter = v->counter c_op i; \
+ raw_local_irq_restore(flags); \
+}
+
+#define ATOMIC_OP_RETURN(op, c_op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ unsigned long flags; \
+ int ret; \
+ \
+ raw_local_irq_save(flags); \
+ ret = (v->counter = v->counter c_op i); \
+ raw_local_irq_restore(flags); \
+ \
+ return ret; \
+}
+
+#endif /* CONFIG_SMP */
+
+#ifndef atomic_add_return
+ATOMIC_OP_RETURN(add, +)
+#endif
+
+#ifndef atomic_sub_return
+ATOMIC_OP_RETURN(sub, -)
+#endif
+
+#ifndef atomic_clear_mask
+ATOMIC_OP(and, &)
+#define atomic_clear_mask(i, v) atomic_and(~(i), (v))
#endif
+#ifndef atomic_set_mask
+#define CONFIG_ARCH_HAS_ATOMIC_OR
+ATOMIC_OP(or, |)
+#define atomic_set_mask(i, v) atomic_or((i), (v))
+#endif
+
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc..
@@ -33,8 +119,6 @@
#define ATOMIC_INIT(i) { (i) }
-#ifdef __KERNEL__
-
/**
* atomic_read - read atomic variable
* @v: pointer of type atomic_t
@@ -42,7 +126,7 @@
* Atomically reads the value of @v.
*/
#ifndef atomic_read
-#define atomic_read(v) (*(volatile int *)&(v)->counter)
+#define atomic_read(v) ACCESS_ONCE((v)->counter)
#endif
/**
@@ -56,52 +140,6 @@
#include <linux/irqflags.h>
-/**
- * atomic_add_return - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v and returns the result
- */
-#ifndef atomic_add_return
-static inline int atomic_add_return(int i, atomic_t *v)
-{
- unsigned long flags;
- int temp;
-
- raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
- temp = v->counter;
- temp += i;
- v->counter = temp;
- raw_local_irq_restore(flags);
-
- return temp;
-}
-#endif
-
-/**
- * atomic_sub_return - subtract integer from atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns the result
- */
-#ifndef atomic_sub_return
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
- unsigned long flags;
- int temp;
-
- raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
- temp = v->counter;
- temp -= i;
- v->counter = temp;
- raw_local_irq_restore(flags);
-
- return temp;
-}
-#endif
-
static inline int atomic_add_negative(int i, atomic_t *v)
{
return atomic_add_return(i, v) < 0;
@@ -139,49 +177,11 @@ static inline void atomic_dec(atomic_t *v)
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
- int c, old;
- c = atomic_read(v);
- while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
- c = old;
- return c;
-}
-
-/**
- * atomic_clear_mask - Atomically clear bits in atomic variable
- * @mask: Mask of the bits to be cleared
- * @v: pointer of type atomic_t
- *
- * Atomically clears the bits set in @mask from @v
- */
-#ifndef atomic_clear_mask
-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
-{
- unsigned long flags;
-
- mask = ~mask;
- raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */
- v->counter &= mask;
- raw_local_irq_restore(flags);
+ int c, old;
+ c = atomic_read(v);
+ while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
+ c = old;
+ return c;
}
-#endif
-
-/**
- * atomic_set_mask - Atomically set bits in atomic variable
- * @mask: Mask of the bits to be set
- * @v: pointer of type atomic_t
- *
- * Atomically sets the bits set in @mask in @v
- */
-#ifndef atomic_set_mask
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
- unsigned long flags;
-
- raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */
- v->counter |= mask;
- raw_local_irq_restore(flags);
-}
-#endif
-#endif /* __KERNEL__ */
#endif /* __ASM_GENERIC_ATOMIC_H */
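With the rewrite above, every SMP atomic in the generic header is built from cmpxchg(): read the counter, attempt the compare-and-swap, and retry with the returned value until it succeeds. A standalone userspace sketch of that loop, assuming the GCC/Clang __sync builtins rather than the kernel's cmpxchg():

#include <stdio.h>

typedef struct { int counter; } atomic_t;

static inline int atomic_add_return(int i, atomic_t *v)
{
	int c, old;

	c = v->counter;
	/* retry until no other thread changed the counter in between */
	while ((old = __sync_val_compare_and_swap(&v->counter, c, c + i)) != c)
		c = old;

	return c + i;
}

int main(void)
{
	atomic_t v = { .counter = 40 };

	printf("%d\n", atomic_add_return(2, &v));	/* 42 */
	return 0;
}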
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
index b18ce4f9ee3d..30ad9c86cebb 100644
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -20,10 +20,22 @@ typedef struct {
extern long long atomic64_read(const atomic64_t *v);
extern void atomic64_set(atomic64_t *v, long long i);
-extern void atomic64_add(long long a, atomic64_t *v);
-extern long long atomic64_add_return(long long a, atomic64_t *v);
-extern void atomic64_sub(long long a, atomic64_t *v);
-extern long long atomic64_sub_return(long long a, atomic64_t *v);
+
+#define ATOMIC64_OP(op) \
+extern void atomic64_##op(long long a, atomic64_t *v);
+
+#define ATOMIC64_OP_RETURN(op) \
+extern long long atomic64_##op##_return(long long a, atomic64_t *v);
+
+#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
+
+ATOMIC64_OPS(add)
+ATOMIC64_OPS(sub)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
+
extern long long atomic64_dec_if_positive(atomic64_t *v);
extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n);
extern long long atomic64_xchg(atomic64_t *v, long long new);
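The atomic64.h change replaces four hand-written extern declarations with ATOMIC64_OP()/ATOMIC64_OP_RETURN() generators. The sketch below only demonstrates the ##-pasting trick used to stamp out such a family; the bodies are plain, non-atomic C and are not the kernel implementation:

#include <stdio.h>

typedef struct { long long counter; } atomic64_t;

#define ATOMIC64_OP_RETURN(op, c_op)					\
static long long atomic64_##op##_return(long long a, atomic64_t *v)	\
{									\
	return v->counter = v->counter c_op a;				\
}

ATOMIC64_OP_RETURN(add, +)
ATOMIC64_OP_RETURN(sub, -)

int main(void)
{
	atomic64_t v = { .counter = 10 };

	printf("%lld\n", atomic64_add_return(5, &v));	/* 15 */
	printf("%lld\n", atomic64_sub_return(3, &v));	/* 12 */
	return 0;
}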
diff --git a/include/asm-generic/cputime_jiffies.h b/include/asm-generic/cputime_jiffies.h
index d5cb78f53986..fe386fc6e85e 100644
--- a/include/asm-generic/cputime_jiffies.h
+++ b/include/asm-generic/cputime_jiffies.h
@@ -3,6 +3,8 @@
typedef unsigned long __nocast cputime_t;
+#define cmpxchg_cputime(ptr, old, new) cmpxchg(ptr, old, new)
+
#define cputime_one_jiffy jiffies_to_cputime(1)
#define cputime_to_jiffies(__ct) (__force unsigned long)(__ct)
#define cputime_to_scaled(__ct) (__ct)
diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h
index 4e817606c549..0419485891f2 100644
--- a/include/asm-generic/cputime_nsecs.h
+++ b/include/asm-generic/cputime_nsecs.h
@@ -21,6 +21,8 @@
typedef u64 __nocast cputime_t;
typedef u64 __nocast cputime64_t;
+#define cmpxchg_cputime(ptr, old, new) cmpxchg64(ptr, old, new)
+
#define cputime_one_jiffy jiffies_to_cputime(1)
#define cputime_div(__ct, divisor) div_u64((__force u64)__ct, divisor)
diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
index a9fd248f5d48..3378dcf4c31e 100644
--- a/include/asm-generic/dma-mapping-common.h
+++ b/include/asm-generic/dma-mapping-common.h
@@ -214,14 +214,6 @@ dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
-static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
- DEFINE_DMA_ATTRS(attrs);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
- return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
-}
-
int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr, size_t size);
diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h
index 0d164c6af539..54add2069901 100644
--- a/include/crypto/public_key.h
+++ b/include/crypto/public_key.h
@@ -15,6 +15,7 @@
#define _LINUX_PUBLIC_KEY_H
#include <linux/mpi.h>
+#include <keys/asymmetric-type.h>
#include <crypto/hash_info.h>
enum pkey_algo {
@@ -98,8 +99,9 @@ struct key;
extern int verify_signature(const struct key *key,
const struct public_key_signature *sig);
+struct asymmetric_key_id;
extern struct key *x509_request_asymmetric_key(struct key *keyring,
- const char *issuer,
- const char *key_id);
+ const struct asymmetric_key_id *kid,
+ bool partial);
#endif /* _LINUX_PUBLIC_KEY_H */
diff --git a/include/dt-bindings/sound/cs35l32.h b/include/dt-bindings/sound/cs35l32.h
new file mode 100644
index 000000000000..0c6d6a3c15a2
--- /dev/null
+++ b/include/dt-bindings/sound/cs35l32.h
@@ -0,0 +1,26 @@
+#ifndef __DT_CS35L32_H
+#define __DT_CS35L32_H
+
+#define CS35L32_BOOST_MGR_AUTO 0
+#define CS35L32_BOOST_MGR_AUTO_AUDIO 1
+#define CS35L32_BOOST_MGR_BYPASS 2
+#define CS35L32_BOOST_MGR_FIXED 3
+
+#define CS35L32_DATA_CFG_LR_VP 0
+#define CS35L32_DATA_CFG_LR_STAT 1
+#define CS35L32_DATA_CFG_LR 2
+#define CS35L32_DATA_CFG_LR_VPSTAT 3
+
+#define CS35L32_BATT_THRESH_3_1V 0
+#define CS35L32_BATT_THRESH_3_2V 1
+#define CS35L32_BATT_THRESH_3_3V 2
+#define CS35L32_BATT_THRESH_3_4V 3
+
+#define CS35L32_BATT_RECOV_3_1V 0
+#define CS35L32_BATT_RECOV_3_2V 1
+#define CS35L32_BATT_RECOV_3_3V 2
+#define CS35L32_BATT_RECOV_3_4V 3
+#define CS35L32_BATT_RECOV_3_5V 4
+#define CS35L32_BATT_RECOV_3_6V 5
+
+#endif /* __DT_CS35L32_H */
diff --git a/include/keys/asymmetric-type.h b/include/keys/asymmetric-type.h
index 7dd473496180..c0754abb2f56 100644
--- a/include/keys/asymmetric-type.h
+++ b/include/keys/asymmetric-type.h
@@ -19,6 +19,47 @@
extern struct key_type key_type_asymmetric;
/*
+ * Identifiers for an asymmetric key ID. We have three ways of looking up a
+ * key derived from an X.509 certificate:
+ *
+ * (1) Serial Number & Issuer. Non-optional. This is the only valid way to
+ * map a PKCS#7 signature to an X.509 certificate.
+ *
+ * (2) Issuer & Subject Unique IDs. Optional. These were the original way to
+ * match X.509 certificates, but have fallen into disuse in favour of (3).
+ *
+ * (3) Auth & Subject Key Identifiers. Optional. SKIDs are only provided on
+ * CA keys that are intended to sign other keys, so don't appear in end
+ * user certificates unless forced.
+ *
+ * We could also support a PGP key identifier, which is just a SHA1 sum of the
+ * public key and certain parameters, but since we don't support PGP keys at
+ * the moment, we shall ignore those.
+ *
+ * What we actually do is provide a place where binary identifiers can be
+ * stashed and then compare against them when checking for an id match.
+ */
+struct asymmetric_key_id {
+ unsigned short len;
+ unsigned char data[];
+};
+
+struct asymmetric_key_ids {
+ void *id[2];
+};
+
+extern bool asymmetric_key_id_same(const struct asymmetric_key_id *kid1,
+ const struct asymmetric_key_id *kid2);
+
+extern bool asymmetric_key_id_partial(const struct asymmetric_key_id *kid1,
+ const struct asymmetric_key_id *kid2);
+
+extern struct asymmetric_key_id *asymmetric_key_generate_id(const void *val_1,
+ size_t len_1,
+ const void *val_2,
+ size_t len_2);
+
+/*
* The payload is at the discretion of the subtype.
*/
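struct asymmetric_key_id above stores an arbitrary-length binary identifier via a flexible array member, and the exported helpers allocate and compare such blobs. A userspace sketch under those assumptions (key_id_generate()/key_id_same() are illustrative stand-ins, not the kernel helpers):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct key_id {
	unsigned short len;
	unsigned char data[];		/* flexible array member */
};

static struct key_id *key_id_generate(const void *val, size_t len)
{
	struct key_id *kid = malloc(sizeof(*kid) + len);

	if (!kid)
		return NULL;
	kid->len = (unsigned short)len;
	memcpy(kid->data, val, len);
	return kid;
}

static bool key_id_same(const struct key_id *a, const struct key_id *b)
{
	return a && b && a->len == b->len &&
	       memcmp(a->data, b->data, a->len) == 0;
}

int main(void)
{
	struct key_id *a = key_id_generate("\x01\x02\x03", 3);
	struct key_id *b = key_id_generate("\x01\x02\x03", 3);

	printf("%s\n", key_id_same(a, b) ? "match" : "no match");
	free(a);
	free(b);
	return 0;
}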
diff --git a/include/keys/user-type.h b/include/keys/user-type.h
index 3ab1873a4bfa..cebefb069c44 100644
--- a/include/keys/user-type.h
+++ b/include/keys/user-type.h
@@ -40,7 +40,6 @@ struct key_preparsed_payload;
extern int user_preparse(struct key_preparsed_payload *prep);
extern void user_free_preparse(struct key_preparsed_payload *prep);
extern int user_update(struct key *key, struct key_preparsed_payload *prep);
-extern int user_match(const struct key *key, const void *criterion);
extern void user_revoke(struct key *key);
extern void user_destroy(struct key *key);
extern void user_describe(const struct key *user, struct seq_file *m);
diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h
index 4c7a4b2104bf..91b77f8d495d 100644
--- a/include/linux/atmel-mci.h
+++ b/include/linux/atmel-mci.h
@@ -1,6 +1,8 @@
#ifndef __LINUX_ATMEL_MCI_H
#define __LINUX_ATMEL_MCI_H
+#include <linux/types.h>
+
#define ATMCI_MAX_NR_SLOTS 2
/**
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index fef3a809e7cf..5b08a8540ecf 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -3,42 +3,6 @@
#define _LINUX_ATOMIC_H
#include <asm/atomic.h>
-/*
- * Provide __deprecated wrappers for the new interface, avoid flag day changes.
- * We need the ugly external functions to break header recursion hell.
- */
-#ifndef smp_mb__before_atomic_inc
-static inline void __deprecated smp_mb__before_atomic_inc(void)
-{
- extern void __smp_mb__before_atomic(void);
- __smp_mb__before_atomic();
-}
-#endif
-
-#ifndef smp_mb__after_atomic_inc
-static inline void __deprecated smp_mb__after_atomic_inc(void)
-{
- extern void __smp_mb__after_atomic(void);
- __smp_mb__after_atomic();
-}
-#endif
-
-#ifndef smp_mb__before_atomic_dec
-static inline void __deprecated smp_mb__before_atomic_dec(void)
-{
- extern void __smp_mb__before_atomic(void);
- __smp_mb__before_atomic();
-}
-#endif
-
-#ifndef smp_mb__after_atomic_dec
-static inline void __deprecated smp_mb__after_atomic_dec(void)
-{
- extern void __smp_mb__after_atomic(void);
- __smp_mb__after_atomic();
-}
-#endif
-
/**
* atomic_add_unless - add unless the number is already a given value
* @v: pointer of type atomic_t
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index cbc5833fb221..be5fd38bd5a0 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -32,26 +32,6 @@ extern unsigned long __sw_hweight64(__u64 w);
*/
#include <asm/bitops.h>
-/*
- * Provide __deprecated wrappers for the new interface, avoid flag day changes.
- * We need the ugly external functions to break header recursion hell.
- */
-#ifndef smp_mb__before_clear_bit
-static inline void __deprecated smp_mb__before_clear_bit(void)
-{
- extern void __smp_mb__before_atomic(void);
- __smp_mb__before_atomic();
-}
-#endif
-
-#ifndef smp_mb__after_clear_bit
-static inline void __deprecated smp_mb__after_clear_bit(void)
-{
- extern void __smp_mb__after_atomic(void);
- __smp_mb__after_atomic();
-}
-#endif
-
#define for_each_set_bit(bit, addr, size) \
for ((bit) = find_first_bit((addr), (size)); \
(bit) < (size); \
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 95978ad7fcdd..b2d9a43012b2 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -213,6 +213,7 @@ extern struct bus_type cpu_subsys;
extern void cpu_hotplug_begin(void);
extern void cpu_hotplug_done(void);
extern void get_online_cpus(void);
+extern bool try_get_online_cpus(void);
extern void put_online_cpus(void);
extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
@@ -230,6 +231,7 @@ int cpu_down(unsigned int cpu);
static inline void cpu_hotplug_begin(void) {}
static inline void cpu_hotplug_done(void) {}
#define get_online_cpus() do { } while (0)
+#define try_get_online_cpus() true
#define put_online_cpus() do { } while (0)
#define cpu_hotplug_disable() do { } while (0)
#define cpu_hotplug_enable() do { } while (0)
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 75a227cc7ce2..b2a2a08523bf 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -11,7 +11,6 @@
#include <linux/rcupdate.h>
#include <linux/lockref.h>
-struct nameidata;
struct path;
struct vfsmount;
@@ -226,11 +225,6 @@ struct dentry_operations {
extern seqlock_t rename_lock;
-static inline int dname_external(const struct dentry *dentry)
-{
- return dentry->d_name.name != dentry->d_iname;
-}
-
/*
* These are the low-level FS interfaces to the dcache..
*/
@@ -254,7 +248,7 @@ extern struct dentry * d_obtain_root(struct inode *);
extern void shrink_dcache_sb(struct super_block *);
extern void shrink_dcache_parent(struct dentry *);
extern void shrink_dcache_for_umount(struct super_block *);
-extern int d_invalidate(struct dentry *);
+extern void d_invalidate(struct dentry *);
/* only used at mount-time */
extern struct dentry * d_make_root(struct inode *);
@@ -269,7 +263,6 @@ extern void d_prune_aliases(struct inode *);
/* test whether we have any submounts in a subdir tree */
extern int have_submounts(struct dentry *);
-extern int check_submounts_and_drop(struct dentry *);
/*
* This adds the entry to the hash queues.
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 931b70986272..d5d388160f42 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -263,6 +263,32 @@ struct dma_attrs;
#define dma_unmap_sg_attrs(dev, sgl, nents, dir, attrs) \
dma_unmap_sg(dev, sgl, nents, dir)
+#else
+static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
+ dma_addr_t *dma_addr, gfp_t gfp)
+{
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ return dma_alloc_attrs(dev, size, dma_addr, gfp, &attrs);
+}
+
+static inline void dma_free_writecombine(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_addr)
+{
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ return dma_free_attrs(dev, size, cpu_addr, dma_addr, &attrs);
+}
+
+static inline int dma_mmap_writecombine(struct device *dev,
+ struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size)
+{
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
+}
#endif /* CONFIG_HAVE_DMA_ATTRS */
#ifdef CONFIG_NEED_DMA_MAP_STATE
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 94187721ad41..ab4f1a10da20 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -851,13 +851,7 @@ static inline struct file *get_file(struct file *f)
*/
#define FILE_LOCK_DEFERRED 1
-/*
- * The POSIX file lock owner is determined by
- * the "struct files_struct" in the thread group
- * (or NULL for no owner - BSD locks).
- *
- * Lockd stuffs a "host" pointer into this.
- */
+/* legacy typedef, should eventually be removed */
typedef void *fl_owner_t;
struct file_lock_operations {
@@ -868,10 +862,13 @@ struct file_lock_operations {
struct lock_manager_operations {
int (*lm_compare_owner)(struct file_lock *, struct file_lock *);
unsigned long (*lm_owner_key)(struct file_lock *);
+ void (*lm_get_owner)(struct file_lock *, struct file_lock *);
+ void (*lm_put_owner)(struct file_lock *);
void (*lm_notify)(struct file_lock *); /* unblock callback */
- int (*lm_grant)(struct file_lock *, struct file_lock *, int);
- void (*lm_break)(struct file_lock *);
- int (*lm_change)(struct file_lock **, int);
+ int (*lm_grant)(struct file_lock *, int);
+ bool (*lm_break)(struct file_lock *);
+ int (*lm_change)(struct file_lock **, int, struct list_head *);
+ void (*lm_setup)(struct file_lock *, void **);
};
struct lock_manager {
@@ -966,7 +963,7 @@ void locks_free_lock(struct file_lock *fl);
extern void locks_init_lock(struct file_lock *);
extern struct file_lock * locks_alloc_lock(void);
extern void locks_copy_lock(struct file_lock *, struct file_lock *);
-extern void __locks_copy_lock(struct file_lock *, const struct file_lock *);
+extern void locks_copy_conflock(struct file_lock *, struct file_lock *);
extern void locks_remove_posix(struct file *, fl_owner_t);
extern void locks_remove_file(struct file *);
extern void locks_release_private(struct file_lock *);
@@ -980,11 +977,9 @@ extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl);
extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
extern void lease_get_mtime(struct inode *, struct timespec *time);
-extern int generic_setlease(struct file *, long, struct file_lock **);
-extern int vfs_setlease(struct file *, long, struct file_lock **);
-extern int lease_modify(struct file_lock **, int);
-extern int lock_may_read(struct inode *, loff_t start, unsigned long count);
-extern int lock_may_write(struct inode *, loff_t start, unsigned long count);
+extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
+extern int vfs_setlease(struct file *, long, struct file_lock **, void **);
+extern int lease_modify(struct file_lock **, int, struct list_head *);
#else /* !CONFIG_FILE_LOCKING */
static inline int fcntl_getlk(struct file *file, unsigned int cmd,
struct flock __user *user)
@@ -1013,12 +1008,12 @@ static inline int fcntl_setlk64(unsigned int fd, struct file *file,
#endif
static inline int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
{
- return 0;
+ return -EINVAL;
}
static inline int fcntl_getlease(struct file *filp)
{
- return 0;
+ return F_UNLCK;
}
static inline void locks_init_lock(struct file_lock *fl)
@@ -1026,7 +1021,7 @@ static inline void locks_init_lock(struct file_lock *fl)
return;
}
-static inline void __locks_copy_lock(struct file_lock *new, struct file_lock *fl)
+static inline void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
{
return;
}
@@ -1100,33 +1095,22 @@ static inline void lease_get_mtime(struct inode *inode, struct timespec *time)
}
static inline int generic_setlease(struct file *filp, long arg,
- struct file_lock **flp)
+ struct file_lock **flp, void **priv)
{
return -EINVAL;
}
static inline int vfs_setlease(struct file *filp, long arg,
- struct file_lock **lease)
+ struct file_lock **lease, void **priv)
{
return -EINVAL;
}
-static inline int lease_modify(struct file_lock **before, int arg)
+static inline int lease_modify(struct file_lock **before, int arg,
+ struct list_head *dispose)
{
return -EINVAL;
}
-
-static inline int lock_may_read(struct inode *inode, loff_t start,
- unsigned long len)
-{
- return 1;
-}
-
-static inline int lock_may_write(struct inode *inode, loff_t start,
- unsigned long len)
-{
- return 1;
-}
#endif /* !CONFIG_FILE_LOCKING */
@@ -1151,8 +1135,8 @@ extern void fasync_free(struct fasync_struct *);
/* can be called from interrupts */
extern void kill_fasync(struct fasync_struct **, int, int);
-extern int __f_setown(struct file *filp, struct pid *, enum pid_type, int force);
-extern int f_setown(struct file *filp, unsigned long arg, int force);
+extern void __f_setown(struct file *filp, struct pid *, enum pid_type, int force);
+extern void f_setown(struct file *filp, unsigned long arg, int force);
extern void f_delown(struct file *filp);
extern pid_t f_getown(struct file *filp);
extern int send_sigurg(struct fown_struct *fown);
@@ -1506,7 +1490,7 @@ struct file_operations {
int (*flock) (struct file *, int, struct file_lock *);
ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
- int (*setlease)(struct file *, long, struct file_lock **);
+ int (*setlease)(struct file *, long, struct file_lock **, void **);
long (*fallocate)(struct file *file, int mode, loff_t offset,
loff_t len);
int (*show_fdinfo)(struct seq_file *m, struct file *f);
@@ -1855,7 +1839,8 @@ extern struct vfsmount *kern_mount_data(struct file_system_type *, void *data);
extern void kern_unmount(struct vfsmount *mnt);
extern int may_umount_tree(struct vfsmount *);
extern int may_umount(struct vfsmount *);
-extern long do_mount(const char *, const char *, const char *, unsigned long, void *);
+extern long do_mount(const char *, const char __user *,
+ const char *, unsigned long, void *);
extern struct vfsmount *collect_mounts(struct path *);
extern void drop_collected_mounts(struct vfsmount *);
extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
@@ -1874,7 +1859,7 @@ extern int current_umask(void);
extern void ihold(struct inode * inode);
extern void iput(struct inode *);
-static inline struct inode *file_inode(struct file *f)
+static inline struct inode *file_inode(const struct file *f)
{
return f->f_inode;
}
@@ -2611,6 +2596,7 @@ extern int simple_write_end(struct file *file, struct address_space *mapping,
struct page *page, void *fsdata);
extern int always_delete_dentry(const struct dentry *);
extern struct inode *alloc_anon_inode(struct super_block *);
+extern int simple_nosetlease(struct file *, long, struct file_lock **, void **);
extern const struct dentry_operations simple_dentry_operations;
extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags);
diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h
index f49ddb1b2273..84d60cb841b1 100644
--- a/include/linux/fsl_ifc.h
+++ b/include/linux/fsl_ifc.h
@@ -781,13 +781,13 @@ struct fsl_ifc_regs {
__be32 amask;
u32 res4[0x2];
} amask_cs[FSL_IFC_BANK_COUNT];
- u32 res5[0x17];
+ u32 res5[0x18];
struct {
- __be32 csor_ext;
__be32 csor;
+ __be32 csor_ext;
u32 res6;
} csor_cs[FSL_IFC_BANK_COUNT];
- u32 res7[0x19];
+ u32 res7[0x18];
struct {
__be32 ftim[4];
u32 res8[0x8];
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index f0b0edbf55a9..662697babd48 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -56,6 +56,8 @@ struct ftrace_ops;
typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *regs);
+ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
+
/*
* FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
* set in the flags member.
@@ -89,6 +91,9 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
* INITIALIZED - The ftrace_ops has already been initialized (first use time
* register_ftrace_function() is called, it will initialized the ops)
* DELETED - The ops are being deleted, do not let them be registered again.
+ * ADDING - The ops is in the process of being added.
+ * REMOVING - The ops is in the process of being removed.
+ * MODIFYING - The ops is in the process of changing its filter functions.
*/
enum {
FTRACE_OPS_FL_ENABLED = 1 << 0,
@@ -100,6 +105,9 @@ enum {
FTRACE_OPS_FL_STUB = 1 << 6,
FTRACE_OPS_FL_INITIALIZED = 1 << 7,
FTRACE_OPS_FL_DELETED = 1 << 8,
+ FTRACE_OPS_FL_ADDING = 1 << 9,
+ FTRACE_OPS_FL_REMOVING = 1 << 10,
+ FTRACE_OPS_FL_MODIFYING = 1 << 11,
};
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -132,7 +140,7 @@ struct ftrace_ops {
int nr_trampolines;
struct ftrace_ops_hash local_hash;
struct ftrace_ops_hash *func_hash;
- struct ftrace_hash *tramp_hash;
+ struct ftrace_ops_hash old_hash;
unsigned long trampoline;
#endif
};
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 7cf5e9b32550..120ccc53fcb7 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -15,7 +15,7 @@ struct linux_binprm;
#ifdef CONFIG_IMA
extern int ima_bprm_check(struct linux_binprm *bprm);
-extern int ima_file_check(struct file *file, int mask);
+extern int ima_file_check(struct file *file, int mask, int opened);
extern void ima_file_free(struct file *file);
extern int ima_file_mmap(struct file *file, unsigned long prot);
extern int ima_module_check(struct file *file);
@@ -27,7 +27,7 @@ static inline int ima_bprm_check(struct linux_binprm *bprm)
return 0;
}
-static inline int ima_file_check(struct file *file, int mask)
+static inline int ima_file_check(struct file *file, int mask, int opened)
{
return 0;
}
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 2bb4c4f3531a..77fc43f8fb72 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -111,12 +111,21 @@ extern struct group_info init_groups;
#ifdef CONFIG_PREEMPT_RCU
#define INIT_TASK_RCU_PREEMPT(tsk) \
.rcu_read_lock_nesting = 0, \
- .rcu_read_unlock_special = 0, \
+ .rcu_read_unlock_special.s = 0, \
.rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry), \
INIT_TASK_RCU_TREE_PREEMPT()
#else
#define INIT_TASK_RCU_PREEMPT(tsk)
#endif
+#ifdef CONFIG_TASKS_RCU
+#define INIT_TASK_RCU_TASKS(tsk) \
+ .rcu_tasks_holdout = false, \
+ .rcu_tasks_holdout_list = \
+ LIST_HEAD_INIT(tsk.rcu_tasks_holdout_list), \
+ .rcu_tasks_idle_cpu = -1,
+#else
+#define INIT_TASK_RCU_TASKS(tsk)
+#endif
extern struct cred init_cred;
@@ -224,6 +233,7 @@ extern struct task_group root_task_group;
INIT_FTRACE_GRAPH \
INIT_TRACE_RECURSION \
INIT_TASK_RCU_PREEMPT(tsk) \
+ INIT_TASK_RCU_TASKS(tsk) \
INIT_CPUSET_SEQ(tsk) \
INIT_RT_MUTEXES(tsk) \
INIT_VTIME(tsk) \
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 20f9a527922a..7b02bcc85b9e 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -80,6 +80,7 @@ enum iommu_attr {
DOMAIN_ATTR_FSL_PAMU_STASH,
DOMAIN_ATTR_FSL_PAMU_ENABLE,
DOMAIN_ATTR_FSL_PAMUV1,
+ DOMAIN_ATTR_NESTING, /* two stages of translation */
DOMAIN_ATTR_MAX,
};
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 784304b222b3..98f923b6a0ea 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -8,28 +8,28 @@
* Copyright (C) 2011-2012 Peter Zijlstra <pzijlstr@redhat.com>
*
* Jump labels provide an interface to generate dynamic branches using
- * self-modifying code. Assuming toolchain and architecture support the result
- * of a "if (static_key_false(&key))" statement is a unconditional branch (which
+ * self-modifying code. Assuming toolchain and architecture support, the result
+ * of a "if (static_key_false(&key))" statement is an unconditional branch (which
* defaults to false - and the true block is placed out of line).
*
* However at runtime we can change the branch target using
* static_key_slow_{inc,dec}(). These function as a 'reference' count on the key
- * object and for as long as there are references all branches referring to
+ * object, and for as long as there are references all branches referring to
* that particular key will point to the (out of line) true block.
*
- * Since this relies on modifying code the static_key_slow_{inc,dec}() functions
+ * Since this relies on modifying code, the static_key_slow_{inc,dec}() functions
* must be considered absolute slow paths (machine wide synchronization etc.).
- * OTOH, since the affected branches are unconditional their runtime overhead
+ * OTOH, since the affected branches are unconditional, their runtime overhead
* will be absolutely minimal, esp. in the default (off) case where the total
* effect is a single NOP of appropriate size. The on case will patch in a jump
* to the out-of-line block.
*
- * When the control is directly exposed to userspace it is prudent to delay the
+ * When the control is directly exposed to userspace, it is prudent to delay the
* decrement to avoid high frequency code modifications which can (and do)
* cause significant performance degradation. Struct static_key_deferred and
* static_key_slow_dec_deferred() provide for this.
*
- * Lacking toolchain and or architecture support, it falls back to a simple
+ * Lacking toolchain and/or architecture support, jump labels fall back to a simple
* conditional branch.
*
* struct static_key my_key = STATIC_KEY_INIT_TRUE;
@@ -43,8 +43,7 @@
*
* Not initializing the key (static data is initialized to 0s anyway) is the
* same as using STATIC_KEY_INIT_FALSE.
- *
-*/
+ */
#include <linux/types.h>
#include <linux/compiler.h>
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index e9e420b6d931..35c8ffb0136f 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -496,6 +496,7 @@ static inline char *hex_byte_pack_upper(char *buf, u8 byte)
extern int hex_to_bin(char ch);
extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
+extern char *bin2hex(char *dst, const void *src, size_t count);
bool mac_pton(const char *s, u8 *mac);
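bin2hex() is declared above as the inverse of hex2bin(): it expands binary data into lowercase hex and returns the advanced destination pointer. A userspace sketch of those assumed semantics (not the kernel's lib/hexdump.c code):

#include <stddef.h>
#include <stdio.h>

static const char hex_asc[] = "0123456789abcdef";

static char *bin2hex(char *dst, const void *src, size_t count)
{
	const unsigned char *s = src;

	while (count--) {
		*dst++ = hex_asc[*s >> 4];
		*dst++ = hex_asc[*s & 0x0f];
		s++;
	}
	return dst;
}

int main(void)
{
	char buf[7] = { 0 };

	bin2hex(buf, "\xde\xad\xbe", 3);
	printf("%s\n", buf);		/* deadbe */
	return 0;
}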
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index 44792ee649de..ff9f1d394235 100644
--- a/include/linux/key-type.h
+++ b/include/linux/key-type.h
@@ -53,6 +53,24 @@ typedef int (*request_key_actor_t)(struct key_construction *key,
const char *op, void *aux);
/*
+ * Preparsed matching criterion.
+ */
+struct key_match_data {
+ /* Comparison function, defaults to exact description match, but can be
+ * overridden by type->match_preparse(). Should return true if a match
+ * is found and false if not.
+ */
+ bool (*cmp)(const struct key *key,
+ const struct key_match_data *match_data);
+
+ const void *raw_data; /* Raw match data */
+ void *preparsed; /* For ->match_preparse() to stash stuff */
+ unsigned lookup_type; /* Type of lookup for this search. */
+#define KEYRING_SEARCH_LOOKUP_DIRECT 0x0000 /* Direct lookup by description. */
+#define KEYRING_SEARCH_LOOKUP_ITERATE 0x0001 /* Iterative search. */
+};
+
+/*
* kernel managed key type definition
*/
struct key_type {
@@ -65,11 +83,6 @@ struct key_type {
*/
size_t def_datalen;
- /* Default key search algorithm. */
- unsigned def_lookup_type;
-#define KEYRING_SEARCH_LOOKUP_DIRECT 0x0000 /* Direct lookup by description. */
-#define KEYRING_SEARCH_LOOKUP_ITERATE 0x0001 /* Iterative search. */
-
/* vet a description */
int (*vet_description)(const char *description);
@@ -96,8 +109,15 @@ struct key_type {
*/
int (*update)(struct key *key, struct key_preparsed_payload *prep);
- /* match a key against a description */
- int (*match)(const struct key *key, const void *desc);
+ /* Preparse the data supplied to ->match() (optional). The
+ * data to be preparsed can be found in match_data->raw_data.
+ * The lookup type can also be set by this function.
+ */
+ int (*match_preparse)(struct key_match_data *match_data);
+
+ /* Free preparsed match data (optional). This should be supplied if
+ * ->match_preparse() is supplied. */
+ void (*match_free)(struct key_match_data *match_data);
/* clear some of the data from a key on revokation (optional)
* - the key's semaphore will be write-locked by the caller
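struct key_match_data above turns key matching into a pluggable comparator: the search core calls ->cmp(), which defaults to an exact description match but can be swapped by a type's ->match_preparse(). A userspace sketch of that idea (struct key, cmp_exact() and cmp_prefix() are illustrative, not the keyring code):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct key {
	const char *description;
};

struct key_match_data {
	bool (*cmp)(const struct key *key,
		    const struct key_match_data *match_data);
	const void *raw_data;
};

static bool cmp_exact(const struct key *key,
		      const struct key_match_data *match_data)
{
	return strcmp(key->description, match_data->raw_data) == 0;
}

static bool cmp_prefix(const struct key *key,
		       const struct key_match_data *match_data)
{
	const char *prefix = match_data->raw_data;

	return strncmp(key->description, prefix, strlen(prefix)) == 0;
}

int main(void)
{
	struct key k = { .description = "id:12345678" };
	struct key_match_data md = { .cmp = cmp_exact, .raw_data = "id:1234" };

	printf("exact:  %d\n", md.cmp(&k, &md));	/* 0 */
	md.cmp = cmp_prefix;	/* what a ->match_preparse() might install */
	printf("prefix: %d\n", md.cmp(&k, &md));	/* 1 */
	return 0;
}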
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 219d79627c05..ff82a32871b5 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -178,7 +178,6 @@ struct nlm_block {
unsigned char b_granted; /* VFS granted lock */
struct nlm_file * b_file; /* file in question */
struct cache_req * b_cache_req; /* deferred request handling */
- struct file_lock * b_fl; /* set for GETLK */
struct cache_deferred_req * b_deferred_req;
unsigned int b_flags; /* block flags */
#define B_QUEUED 1 /* lock queued */
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 64c7425afbce..74ab23176e9b 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -4,7 +4,7 @@
* Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
*
- * see Documentation/lockdep-design.txt for more details.
+ * see Documentation/locking/lockdep-design.txt for more details.
*/
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H
@@ -510,6 +510,7 @@ static inline void print_irqtrace_events(struct task_struct *curr)
#define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
+#define lock_map_acquire_tryread(l) lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l) lock_release(l, 1, _THIS_IP_)
#ifdef CONFIG_PROVE_LOCKING
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 8f6f2e91e7ae..57388171610d 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -5,6 +5,7 @@
#include <linux/fb.h>
#include <linux/io.h>
#include <linux/jiffies.h>
+#include <linux/mmc/card.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -83,6 +84,27 @@
*/
#define TMIO_MMC_HAVE_HIGH_REG (1 << 6)
+/*
+ * Some controllers have a register that controls whether CMD12 is
+ * issued automatically
+ */
+#define TMIO_MMC_HAVE_CMD12_CTRL (1 << 7)
+
+/*
+ * Some controllers need the SDIO status reserved bits set to 1
+ */
+#define TMIO_MMC_SDIO_STATUS_QUIRK (1 << 8)
+
+/*
+ * Some controllers have DMA enable/disable register
+ */
+#define TMIO_MMC_HAVE_CTL_DMA_REG (1 << 9)
+
+/*
+ * Some controllers allow setting the SDx actual clock
+ */
+#define TMIO_MMC_CLK_ACTUAL (1 << 10)
+
int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base);
int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base);
void tmio_core_mmc_pwr(void __iomem *cnf, int shift, int state);
@@ -96,6 +118,7 @@ struct tmio_mmc_dma {
int slave_id_tx;
int slave_id_rx;
int alignment_shift;
+ dma_addr_t dma_rx_offset;
bool (*filter)(struct dma_chan *chan, void *arg);
};
@@ -120,6 +143,8 @@ struct tmio_mmc_data {
/* clock management callbacks */
int (*clk_enable)(struct platform_device *pdev, unsigned int *f);
void (*clk_disable)(struct platform_device *pdev);
+ int (*multi_io_quirk)(struct mmc_card *card,
+ unsigned int direction, int blk_size);
};
/*
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 2e5b194b9b19..53d33dee70e1 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -37,6 +37,7 @@
/* struct phy_device dev_flags definitions */
#define MICREL_PHY_50MHZ_CLK 0x00000001
+#define MICREL_PHY_25MHZ_CLK 0x00000002
#define MICREL_KSZ9021_EXTREG_CTRL 0xB
#define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index d424b9de3aff..b0692d28f8e6 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -42,7 +42,8 @@ struct mmc_csd {
unsigned int read_partial:1,
read_misalign:1,
write_partial:1,
- write_misalign:1;
+ write_misalign:1,
+ dsr_imp:1;
};
struct mmc_ext_csd {
@@ -74,7 +75,7 @@ struct mmc_ext_csd {
unsigned int sec_trim_mult; /* Secure trim multiplier */
unsigned int sec_erase_mult; /* Secure erase multiplier */
unsigned int trim_timeout; /* In milliseconds */
- bool enhanced_area_en; /* enable bit */
+ bool partition_setting_completed; /* enable bit */
unsigned long long enhanced_area_offset; /* Units: Byte */
unsigned int enhanced_area_size; /* Units: KB */
unsigned int cache_size; /* Units: KB */
@@ -214,11 +215,12 @@ enum mmc_blk_status {
};
/* The number of MMC physical partitions. These consist of:
- * boot partitions (2), general purpose partitions (4) in MMC v4.4.
+ * boot partitions (2), general purpose partitions (4) and
+ * RPMB partition (1) in MMC v4.4.
*/
#define MMC_NUM_BOOT_PARTITION 2
#define MMC_NUM_GP_PARTITION 4
-#define MMC_NUM_PHY_PARTITION 6
+#define MMC_NUM_PHY_PARTITION 7
#define MAX_MMC_PART_NAME_LEN 20
/*
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index 29ce014ab421..001366927cf4 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -26,6 +26,8 @@ enum dw_mci_state {
STATE_DATA_BUSY,
STATE_SENDING_STOP,
STATE_DATA_ERROR,
+ STATE_SENDING_CMD11,
+ STATE_WAITING_CMD11_DONE,
};
enum {
@@ -188,7 +190,7 @@ struct dw_mci {
/* Workaround flags */
u32 quirks;
- struct regulator *vmmc; /* Power regulator */
+ bool vqmmc_enabled;
unsigned long irq_flags; /* IRQ flags */
int irq;
};
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 7960424d0bc0..df0c15396bbf 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -42,6 +42,7 @@ struct mmc_ios {
#define MMC_POWER_OFF 0
#define MMC_POWER_UP 1
#define MMC_POWER_ON 2
+#define MMC_POWER_UNDEFINED 3
unsigned char bus_width; /* data bus width */
@@ -139,6 +140,13 @@ struct mmc_host_ops {
int (*select_drive_strength)(unsigned int max_dtr, int host_drv, int card_drv);
void (*hw_reset)(struct mmc_host *host);
void (*card_event)(struct mmc_host *host);
+
+ /*
+ * Optional callback to support controllers with HW issues for multiple
+ * I/O. Returns the number of supported blocks for the request.
+ */
+ int (*multi_io_quirk)(struct mmc_card *card,
+ unsigned int direction, int blk_size);
};
struct mmc_card;
@@ -265,7 +273,6 @@ struct mmc_host {
#define MMC_CAP2_BOOTPART_NOACC (1 << 0) /* Boot partition no access */
#define MMC_CAP2_FULL_PWR_CYCLE (1 << 2) /* Can do full power cycle */
-#define MMC_CAP2_NO_MULTI_READ (1 << 3) /* Multiblock reads don't work */
#define MMC_CAP2_HS200_1_8V_SDR (1 << 5) /* can support */
#define MMC_CAP2_HS200_1_2V_SDR (1 << 6) /* can support */
#define MMC_CAP2_HS200 (MMC_CAP2_HS200_1_8V_SDR | \
@@ -365,6 +372,9 @@ struct mmc_host {
unsigned int slotno; /* used for sdio acpi binding */
+ int dsr_req; /* DSR value is valid */
+ u32 dsr; /* optional driver stage (DSR) value */
+
unsigned long private[0] ____cacheline_aligned;
};
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 64ec963ed347..1cd00b3a75b9 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -53,6 +53,11 @@
#define MMC_SEND_TUNING_BLOCK 19 /* adtc R1 */
#define MMC_SEND_TUNING_BLOCK_HS200 21 /* adtc R1 */
+#define MMC_TUNING_BLK_PATTERN_4BIT_SIZE 64
+#define MMC_TUNING_BLK_PATTERN_8BIT_SIZE 128
+extern const u8 tuning_blk_pattern_4bit[MMC_TUNING_BLK_PATTERN_4BIT_SIZE];
+extern const u8 tuning_blk_pattern_8bit[MMC_TUNING_BLK_PATTERN_8BIT_SIZE];
+
/* class 3 */
#define MMC_WRITE_DAT_UNTIL_STOP 20 /* adtc [31:0] data addr R1 */
@@ -281,6 +286,7 @@ struct _mmc_csd {
#define EXT_CSD_EXP_EVENTS_CTRL 56 /* R/W, 2 bytes */
#define EXT_CSD_DATA_SECTOR_SIZE 61 /* R */
#define EXT_CSD_GP_SIZE_MULT 143 /* R/W */
+#define EXT_CSD_PARTITION_SETTING_COMPLETED 155 /* R/W */
#define EXT_CSD_PARTITION_ATTRIBUTE 156 /* R/W */
#define EXT_CSD_PARTITION_SUPPORT 160 /* RO */
#define EXT_CSD_HPI_MGMT 161 /* R/W */
@@ -349,6 +355,7 @@ struct _mmc_csd {
#define EXT_CSD_PART_CONFIG_ACC_RPMB (0x3)
#define EXT_CSD_PART_CONFIG_ACC_GP0 (0x4)
+#define EXT_CSD_PART_SETTING_COMPLETED (0x1)
#define EXT_CSD_PART_SUPPORT_PART_EN (0x1)
#define EXT_CSD_CMD_SET_NORMAL (1<<0)
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
index 09ebe57d5ce9..dba793e3a331 100644
--- a/include/linux/mmc/sdhci.h
+++ b/include/linux/mmc/sdhci.h
@@ -98,6 +98,8 @@ struct sdhci_host {
#define SDHCI_QUIRK2_BROKEN_HS200 (1<<6)
/* Controller does not support DDR50 */
#define SDHCI_QUIRK2_BROKEN_DDR50 (1<<7)
+/* Stop command (CMD12) can set Transfer Complete when not using MMC_RSP_BUSY */
+#define SDHCI_QUIRK2_STOP_WITH_TC (1<<8)
int irq; /* Device IRQ */
void __iomem *ioaddr; /* Mapped address */
@@ -146,6 +148,7 @@ struct sdhci_host {
struct mmc_command *cmd; /* Current command */
struct mmc_data *data; /* Current data request */
unsigned int data_early:1; /* Data finished before cmd */
+ unsigned int busy_handle:1; /* Handling the order of Busy-end */
struct sg_mapping_iter sg_miter; /* SG state for PIO */
unsigned int blocks; /* remaining PIO blocks */
diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h
index d2433381e828..e56fa24c9322 100644
--- a/include/linux/mmc/slot-gpio.h
+++ b/include/linux/mmc/slot-gpio.h
@@ -24,7 +24,10 @@ void mmc_gpio_free_cd(struct mmc_host *host);
int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
unsigned int idx, bool override_active_level,
- unsigned int debounce);
+ unsigned int debounce, bool *gpio_invert);
+int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
+ unsigned int idx, bool override_active_level,
+ unsigned int debounce, bool *gpio_invert);
void mmc_gpiod_free_cd(struct mmc_host *host);
void mmc_gpiod_request_cd_irq(struct mmc_host *host);
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 8d5535c58cc2..cc31498fc526 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -52,7 +52,7 @@ struct mutex {
atomic_t count;
spinlock_t wait_lock;
struct list_head wait_list;
-#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
+#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
struct task_struct *owner;
#endif
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
@@ -133,7 +133,7 @@ static inline int mutex_is_locked(struct mutex *lock)
/*
* See kernel/locking/mutex.c for detailed documentation of these APIs.
- * Also see Documentation/mutex-design.txt.
+ * Also see Documentation/locking/mutex-design.txt.
*/
#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
diff --git a/include/linux/of.h b/include/linux/of.h
index 6c4363b8ddc3..6545e7aec7bb 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -863,4 +863,7 @@ static inline int of_changeset_update_property(struct of_changeset *ocs,
}
#endif
+/* CONFIG_OF_RESOLVE api */
+extern int of_resolve_phandles(struct device_node *tree);
+
#endif /* _LINUX_OF_H */
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index da9e6f753196..24f97bf74266 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2538,6 +2538,7 @@
#define PCI_DEVICE_ID_INTEL_EESSC 0x0008
#define PCI_DEVICE_ID_INTEL_SNB_IMC 0x0100
#define PCI_DEVICE_ID_INTEL_IVB_IMC 0x0154
+#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC 0x0150
#define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00
#define PCI_DEVICE_ID_INTEL_PXHD_0 0x0320
#define PCI_DEVICE_ID_INTEL_PXHD_1 0x0321
@@ -2820,7 +2821,22 @@
#define PCI_DEVICE_ID_INTEL_UNC_R2PCIE 0x3c43
#define PCI_DEVICE_ID_INTEL_UNC_R3QPI0 0x3c44
#define PCI_DEVICE_ID_INTEL_UNC_R3QPI1 0x3c45
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS 0x3c71 /* 15.1 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR0 0x3c72 /* 16.2 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR1 0x3c73 /* 16.3 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR2 0x3c76 /* 16.6 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR3 0x3c77 /* 16.7 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0 0x3ca0 /* 14.0 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA 0x3ca8 /* 15.0 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0 0x3caa /* 15.2 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1 0x3cab /* 15.3 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2 0x3cac /* 15.4 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3 0x3cad /* 15.5 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO 0x3cb8 /* 17.0 */
#define PCI_DEVICE_ID_INTEL_JAKETOWN_UBOX 0x3ce0
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0 0x3cf4 /* 12.6 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_BR 0x3cf5 /* 13.6 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1 0x3cf6 /* 12.7 */
#define PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f
#define PCI_DEVICE_ID_INTEL_5100_16 0x65f0
#define PCI_DEVICE_ID_INTEL_5100_19 0x65f3
@@ -2862,6 +2878,7 @@
#define PCI_DEVICE_ID_INTEL_82372FB_1 0x7601
#define PCI_DEVICE_ID_INTEL_SCH_LPC 0x8119
#define PCI_DEVICE_ID_INTEL_SCH_IDE 0x811a
+#define PCI_DEVICE_ID_INTEL_E6XX_CU 0x8183
#define PCI_DEVICE_ID_INTEL_ITC_LPC 0x8186
#define PCI_DEVICE_ID_INTEL_82454GX 0x84c4
#define PCI_DEVICE_ID_INTEL_82450GX 0x84c5
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 707617a8c0f6..893a0d07986f 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -52,6 +52,7 @@ struct perf_guest_info_callbacks {
#include <linux/atomic.h>
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
+#include <linux/workqueue.h>
#include <asm/local.h>
struct perf_callchain_entry {
@@ -268,6 +269,7 @@ struct pmu {
* enum perf_event_active_state - the states of a event
*/
enum perf_event_active_state {
+ PERF_EVENT_STATE_EXIT = -3,
PERF_EVENT_STATE_ERROR = -2,
PERF_EVENT_STATE_OFF = -1,
PERF_EVENT_STATE_INACTIVE = 0,
@@ -507,6 +509,9 @@ struct perf_event_context {
int nr_cgroups; /* cgroup evts */
int nr_branch_stack; /* branch_stack evt */
struct rcu_head rcu_head;
+
+ struct delayed_work orphans_remove;
+ bool orphans_remove_sched;
};
/*
@@ -604,6 +609,13 @@ struct perf_sample_data {
u64 txn;
};
+/* default value for data source */
+#define PERF_MEM_NA (PERF_MEM_S(OP, NA) |\
+ PERF_MEM_S(LVL, NA) |\
+ PERF_MEM_S(SNOOP, NA) |\
+ PERF_MEM_S(LOCK, NA) |\
+ PERF_MEM_S(TLB, NA))
+
static inline void perf_sample_data_init(struct perf_sample_data *data,
u64 addr, u64 period)
{
@@ -616,7 +628,7 @@ static inline void perf_sample_data_init(struct perf_sample_data *data,
data->regs_user.regs = NULL;
data->stack_user_size = 0;
data->weight = 0;
- data->data_src.val = 0;
+ data->data_src.val = PERF_MEM_NA;
data->txn = 0;
}
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index d231aa17b1d7..a4a819ffb2d1 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -47,14 +47,12 @@
#include <asm/barrier.h>
extern int rcu_expedited; /* for sysctl */
-#ifdef CONFIG_RCU_TORTURE_TEST
-extern int rcutorture_runnable; /* for sysctl */
-#endif /* #ifdef CONFIG_RCU_TORTURE_TEST */
enum rcutorture_type {
RCU_FLAVOR,
RCU_BH_FLAVOR,
RCU_SCHED_FLAVOR,
+ RCU_TASKS_FLAVOR,
SRCU_FLAVOR,
INVALID_RCU_FLAVOR
};
@@ -197,6 +195,28 @@ void call_rcu_sched(struct rcu_head *head,
void synchronize_sched(void);
+/**
+ * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual callback function to be invoked after the grace period
+ *
+ * The callback function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed. call_rcu_tasks() assumes
+ * that the read-side critical sections end at a voluntary context
+ * switch (not a preemption!), entry into idle, or transition to usermode
+ * execution. As such, there are no read-side primitives analogous to
+ * rcu_read_lock() and rcu_read_unlock() because this primitive is intended
+ * to determine that all tasks have passed through a safe state, not so
+ * much for data-structure synchronization.
+ *
+ * See the description of call_rcu() for more detailed information on
+ * memory ordering guarantees.
+ */
+void call_rcu_tasks(struct rcu_head *head, void (*func)(struct rcu_head *head));
+void synchronize_rcu_tasks(void);
+void rcu_barrier_tasks(void);
+
#ifdef CONFIG_PREEMPT_RCU
void __rcu_read_lock(void);
@@ -238,8 +258,8 @@ static inline int rcu_preempt_depth(void)
/* Internal to kernel */
void rcu_init(void);
-void rcu_sched_qs(int cpu);
-void rcu_bh_qs(int cpu);
+void rcu_sched_qs(void);
+void rcu_bh_qs(void);
void rcu_check_callbacks(int cpu, int user);
struct notifier_block;
void rcu_idle_enter(void);
@@ -269,6 +289,14 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev,
struct task_struct *next) { }
#endif /* CONFIG_RCU_USER_QS */
+#ifdef CONFIG_RCU_NOCB_CPU
+void rcu_init_nohz(void);
+#else /* #ifdef CONFIG_RCU_NOCB_CPU */
+static inline void rcu_init_nohz(void)
+{
+}
+#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
+
/**
* RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
* @a: Code that RCU needs to pay attention to.
@@ -294,6 +322,36 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev,
rcu_irq_exit(); \
} while (0)
+/*
+ * Note a voluntary context switch for RCU-tasks benefit. This is a
+ * macro rather than an inline function to avoid #include hell.
+ */
+#ifdef CONFIG_TASKS_RCU
+#define TASKS_RCU(x) x
+extern struct srcu_struct tasks_rcu_exit_srcu;
+#define rcu_note_voluntary_context_switch(t) \
+ do { \
+ if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
+ ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
+ } while (0)
+#else /* #ifdef CONFIG_TASKS_RCU */
+#define TASKS_RCU(x) do { } while (0)
+#define rcu_note_voluntary_context_switch(t) do { } while (0)
+#endif /* #else #ifdef CONFIG_TASKS_RCU */
+
+/**
+ * cond_resched_rcu_qs - Report potential quiescent states to RCU
+ *
+ * This macro resembles cond_resched(), except that it is defined to
+ * report potential quiescent states to RCU-tasks even if the cond_resched()
+ * machinery were to be shut off, as some advocate for PREEMPT kernels.
+ */
+#define cond_resched_rcu_qs() \
+do { \
+ rcu_note_voluntary_context_switch(current); \
+ cond_resched(); \
+} while (0)
+
#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP)
bool __rcu_is_watching(void);
#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
@@ -349,7 +407,7 @@ bool rcu_lockdep_current_cpu_online(void);
#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
static inline bool rcu_lockdep_current_cpu_online(void)
{
- return 1;
+ return true;
}
#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
@@ -371,41 +429,7 @@ extern struct lockdep_map rcu_sched_lock_map;
extern struct lockdep_map rcu_callback_map;
int debug_lockdep_rcu_enabled(void);
-/**
- * rcu_read_lock_held() - might we be in RCU read-side critical section?
- *
- * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
- * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC,
- * this assumes we are in an RCU read-side critical section unless it can
- * prove otherwise. This is useful for debug checks in functions that
- * require that they be called within an RCU read-side critical section.
- *
- * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
- * and while lockdep is disabled.
- *
- * Note that rcu_read_lock() and the matching rcu_read_unlock() must
- * occur in the same context, for example, it is illegal to invoke
- * rcu_read_unlock() in process context if the matching rcu_read_lock()
- * was invoked from within an irq handler.
- *
- * Note that rcu_read_lock() is disallowed if the CPU is either idle or
- * offline from an RCU perspective, so check for those as well.
- */
-static inline int rcu_read_lock_held(void)
-{
- if (!debug_lockdep_rcu_enabled())
- return 1;
- if (!rcu_is_watching())
- return 0;
- if (!rcu_lockdep_current_cpu_online())
- return 0;
- return lock_is_held(&rcu_lock_map);
-}
-
-/*
- * rcu_read_lock_bh_held() is defined out of line to avoid #include-file
- * hell.
- */
+int rcu_read_lock_held(void);
int rcu_read_lock_bh_held(void);
/**
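rcu_note_voluntary_context_switch() above deliberately tests the holdout flag before writing it, so the common no-holdout case never dirties the task's cache line. A userspace sketch of that test-before-store pattern, using the same ACCESS_ONCE() volatile-cast idiom:

#include <stdbool.h>
#include <stdio.h>

#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

struct task {
	bool rcu_tasks_holdout;
};

static void note_voluntary_context_switch(struct task *t)
{
	/* write only when needed; the fast path is a single read */
	if (ACCESS_ONCE(t->rcu_tasks_holdout))
		ACCESS_ONCE(t->rcu_tasks_holdout) = false;
}

int main(void)
{
	struct task t = { .rcu_tasks_holdout = true };

	note_voluntary_context_switch(&t);
	printf("%d\n", t.rcu_tasks_holdout);	/* 0 */
	return 0;
}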
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index d40a6a451330..38cc5b1e252d 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -80,7 +80,7 @@ static inline void kfree_call_rcu(struct rcu_head *head,
static inline void rcu_note_context_switch(int cpu)
{
- rcu_sched_qs(cpu);
+ rcu_sched_qs();
}
/*
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index 48bf152761c7..67fc8fcdc4b0 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -38,6 +38,9 @@ extern int reboot_force;
extern int register_reboot_notifier(struct notifier_block *);
extern int unregister_reboot_notifier(struct notifier_block *);
+extern int register_restart_handler(struct notifier_block *);
+extern int unregister_restart_handler(struct notifier_block *);
+extern void do_kernel_restart(char *cmd);
/*
* Architecture-specific implementations of sys_reboot commands.
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 035d3c57fc8a..8f498cdde280 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -149,7 +149,7 @@ extern void downgrade_write(struct rw_semaphore *sem);
* static then another method for expressing nested locking is
* the explicit definition of lock class keys and the use of
* lockdep_set_class() at lock initialization time.
- * See Documentation/lockdep-design.txt for more details.)
+ * See Documentation/locking/lockdep-design.txt for more details.)
*/
extern void down_read_nested(struct rw_semaphore *sem, int subclass);
extern void down_write_nested(struct rw_semaphore *sem, int subclass);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5e63ba59258c..5e344bbe63ec 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -57,6 +57,7 @@ struct sched_param {
#include <linux/llist.h>
#include <linux/uidgid.h>
#include <linux/gfp.h>
+#include <linux/magic.h>
#include <asm/processor.h>
@@ -646,6 +647,7 @@ struct signal_struct {
* Live threads maintain their own counters and add to these
* in __exit_signal, except for the group leader.
*/
+ seqlock_t stats_lock;
cputime_t utime, stime, cutime, cstime;
cputime_t gtime;
cputime_t cgtime;
@@ -1024,6 +1026,7 @@ struct sched_domain_topology_level {
extern struct sched_domain_topology_level *sched_domain_topology;
extern void set_sched_topology(struct sched_domain_topology_level *tl);
+extern void wake_up_if_idle(int cpu);
#ifdef CONFIG_SCHED_DEBUG
# define SD_INIT_NAME(type) .name = #type
@@ -1213,6 +1216,13 @@ struct sched_dl_entity {
struct hrtimer dl_timer;
};
+union rcu_special {
+ struct {
+ bool blocked;
+ bool need_qs;
+ } b;
+ short s;
+};
struct rcu_node;
enum perf_event_task_context {
@@ -1265,12 +1275,18 @@ struct task_struct {
#ifdef CONFIG_PREEMPT_RCU
int rcu_read_lock_nesting;
- char rcu_read_unlock_special;
+ union rcu_special rcu_read_unlock_special;
struct list_head rcu_node_entry;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TREE_PREEMPT_RCU
struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
+#ifdef CONFIG_TASKS_RCU
+ unsigned long rcu_tasks_nvcsw;
+ bool rcu_tasks_holdout;
+ struct list_head rcu_tasks_holdout_list;
+ int rcu_tasks_idle_cpu;
+#endif /* #ifdef CONFIG_TASKS_RCU */
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
struct sched_info sched_info;
@@ -2014,29 +2030,21 @@ extern void task_clear_jobctl_trapping(struct task_struct *task);
extern void task_clear_jobctl_pending(struct task_struct *task,
unsigned int mask);
-#ifdef CONFIG_PREEMPT_RCU
-
-#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
-#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
-
static inline void rcu_copy_process(struct task_struct *p)
{
+#ifdef CONFIG_PREEMPT_RCU
p->rcu_read_lock_nesting = 0;
- p->rcu_read_unlock_special = 0;
-#ifdef CONFIG_TREE_PREEMPT_RCU
+ p->rcu_read_unlock_special.s = 0;
p->rcu_blocked_node = NULL;
-#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
INIT_LIST_HEAD(&p->rcu_node_entry);
+#endif /* #ifdef CONFIG_PREEMPT_RCU */
+#ifdef CONFIG_TASKS_RCU
+ p->rcu_tasks_holdout = false;
+ INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
+ p->rcu_tasks_idle_cpu = -1;
+#endif /* #ifdef CONFIG_TASKS_RCU */
}
-#else
-
-static inline void rcu_copy_process(struct task_struct *p)
-{
-}
-
-#endif
-
static inline void tsk_restore_flags(struct task_struct *task,
unsigned long orig_flags, unsigned long flags)
{
@@ -2642,6 +2650,8 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
}
#endif
+#define task_stack_end_corrupted(task) \
+ (*(end_of_stack(task)) != STACK_END_MAGIC)
static inline int object_is_on_stack(void *obj)
{
@@ -2664,6 +2674,7 @@ static inline unsigned long stack_not_used(struct task_struct *p)
return (unsigned long)n - (unsigned long)end_of_stack(p);
}
#endif
+extern void set_task_stack_end_magic(struct task_struct *tsk);
/* set thread flags in other task's structures
* - see asm/thread_info.h for TIF_xxxx flags available
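A hedged sketch (not part of the patch) showing how the two stack-canary additions above are meant to pair up; the check mirrors what a scheduler debug path could do:

        #include <linux/magic.h>
        #include <linux/sched.h>

        /* at fork/boot time, plant STACK_END_MAGIC at the stack end */
        static void prepare_task_stack(struct task_struct *tsk)
        {
                set_task_stack_end_magic(tsk);
        }

        /* later (e.g. on every schedule()), detect an overwrite cheaply */
        static void check_task_stack(struct task_struct *tsk)
        {
                if (task_stack_end_corrupted(tsk))
                        panic("kernel stack of %s overran its end\n", tsk->comm);
        }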
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index 5d586a45a319..a19ddacdac30 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -27,19 +27,23 @@ struct seccomp {
struct seccomp_filter *filter;
};
-extern int __secure_computing(int);
-static inline int secure_computing(int this_syscall)
+#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
+extern int __secure_computing(void);
+static inline int secure_computing(void)
{
if (unlikely(test_thread_flag(TIF_SECCOMP)))
- return __secure_computing(this_syscall);
+ return __secure_computing();
return 0;
}
-/* A wrapper for architectures supporting only SECCOMP_MODE_STRICT. */
-static inline void secure_computing_strict(int this_syscall)
-{
- BUG_ON(secure_computing(this_syscall) != 0);
-}
+#define SECCOMP_PHASE1_OK 0
+#define SECCOMP_PHASE1_SKIP 1
+
+extern u32 seccomp_phase1(struct seccomp_data *sd);
+int seccomp_phase2(u32 phase1_result);
+#else
+extern void secure_computing_strict(int this_syscall);
+#endif
extern long prctl_get_seccomp(void);
extern long prctl_set_seccomp(unsigned long, char __user *);
@@ -56,8 +60,11 @@ static inline int seccomp_mode(struct seccomp *s)
struct seccomp { };
struct seccomp_filter { };
-static inline int secure_computing(int this_syscall) { return 0; }
+#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
+static inline int secure_computing(void) { return 0; }
+#else
static inline void secure_computing_strict(int this_syscall) { return; }
+#endif
static inline long prctl_get_seccomp(void)
{
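A hedged sketch (not part of the patch) of how an architecture's syscall-entry slow path might consume the split phase1/phase2 API above; arch_seccomp_entry() and the -1 "skip this syscall" return convention are illustrative assumptions:

        static long arch_seccomp_entry(struct seccomp_data *sd, long syscall_nr)
        {
                u32 phase1 = seccomp_phase1(sd);        /* lockless fast path */

                if (phase1 == SECCOMP_PHASE1_OK)
                        return syscall_nr;              /* allowed, run the syscall */
                if (phase1 == SECCOMP_PHASE1_SKIP)
                        return -1;                      /* denied, skip the syscall */

                /* anything else goes to the slow path (tracing, errno, ...) */
                return seccomp_phase2(phase1) ? -1 : syscall_nr;
        }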
diff --git a/include/linux/security.h b/include/linux/security.h
index 623f90e5f38d..ba96471c11ba 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -1559,7 +1559,7 @@ struct security_operations {
int (*file_lock) (struct file *file, unsigned int cmd);
int (*file_fcntl) (struct file *file, unsigned int cmd,
unsigned long arg);
- int (*file_set_fowner) (struct file *file);
+ void (*file_set_fowner) (struct file *file);
int (*file_send_sigiotask) (struct task_struct *tsk,
struct fown_struct *fown, int sig);
int (*file_receive) (struct file *file);
@@ -1834,7 +1834,7 @@ int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot,
unsigned long prot);
int security_file_lock(struct file *file, unsigned int cmd);
int security_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg);
-int security_file_set_fowner(struct file *file);
+void security_file_set_fowner(struct file *file);
int security_file_send_sigiotask(struct task_struct *tsk,
struct fown_struct *fown, int sig);
int security_file_receive(struct file *file);
@@ -2108,7 +2108,7 @@ static inline int security_dentry_init_security(struct dentry *dentry,
static inline int security_inode_init_security(struct inode *inode,
struct inode *dir,
const struct qstr *qstr,
- const initxattrs initxattrs,
+ const initxattrs xattrs,
void *fs_data)
{
return 0;
@@ -2312,9 +2312,9 @@ static inline int security_file_fcntl(struct file *file, unsigned int cmd,
return 0;
}
-static inline int security_file_set_fowner(struct file *file)
+static inline void security_file_set_fowner(struct file *file)
{
- return 0;
+ return;
}
static inline int security_file_send_sigiotask(struct task_struct *tsk,
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index cc359636cfa3..f5df8f687b4d 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -456,4 +456,23 @@ read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
spin_unlock_irqrestore(&sl->lock, flags);
}
+static inline unsigned long
+read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
+{
+ unsigned long flags = 0;
+
+ if (!(*seq & 1)) /* Even */
+ *seq = read_seqbegin(lock);
+ else /* Odd */
+ read_seqlock_excl_irqsave(lock, flags);
+
+ return flags;
+}
+
+static inline void
+done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
+{
+ if (seq & 1)
+ read_sequnlock_excl_irqrestore(lock, flags);
+}
#endif /* __LINUX_SEQLOCK_H */
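A hedged usage sketch (not part of the patch): the lockless-then-locked retry idiom these _irqsave variants extend, in the style of the existing read_seqbegin_or_lock()/need_seqretry()/done_seqretry() pattern; the data being sampled is illustrative:

        static u64 sample_counters(seqlock_t *lock, const u64 *a, const u64 *b)
        {
                unsigned long flags;
                int seq, nextseq = 0;   /* even: first pass is lockless */
                u64 sum;

                do {
                        seq = nextseq;
                        flags = read_seqbegin_or_lock_irqsave(lock, &seq);
                        sum = *a + *b;  /* read the seqlock-protected data */
                        nextseq = 1;    /* odd: if we must retry, take the lock */
                } while (need_seqretry(lock, seq));
                done_seqretry_irqrestore(lock, seq, flags);

                return sum;
        }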
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 34347f26be9b..93dff5fff524 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -100,6 +100,7 @@ int smp_call_function_any(const struct cpumask *mask,
smp_call_func_t func, void *info, int wait);
void kick_all_cpus_sync(void);
+void wake_up_all_idle_cpus(void);
/*
* Generic and arch helpers
@@ -148,6 +149,7 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
}
static inline void kick_all_cpus_sync(void) { }
+static inline void wake_up_all_idle_cpus(void) { }
#endif /* !SMP */
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 3f2867ff0ced..262ba4ef9a8e 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -197,7 +197,13 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
_raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
} while (0)
#else
-# define raw_spin_lock_nested(lock, subclass) _raw_spin_lock(lock)
+/*
+ * Always evaluate the 'subclass' argument to avoid that the compiler
+ * warns about set-but-not-used variables when building with
+ * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
+ */
+# define raw_spin_lock_nested(lock, subclass) \
+ _raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock)
#endif
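A hedged illustration (not part of the patch) of the warning the comment above refers to: with the old definition, 'nest' below vanished at preprocessing time when CONFIG_DEBUG_LOCK_ALLOC=n, so W=1 builds flagged it as set-but-not-used; the new definition always evaluates it:

        static void lock_both(raw_spinlock_t *outer, raw_spinlock_t *inner)
        {
                int nest = SINGLE_DEPTH_NESTING;        /* only consumed by lockdep */

                raw_spin_lock(outer);
                raw_spin_lock_nested(inner, nest);      /* 'nest' is now always evaluated */
                raw_spin_unlock(inner);
                raw_spin_unlock(outer);
        }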
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 595ee86f5e0d..eda850ca757a 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -108,7 +108,7 @@ extern struct tick_sched *tick_get_tick_sched(int cpu);
extern void tick_irq_enter(void);
extern int tick_oneshot_mode_active(void);
# ifndef arch_needs_cpu
-# define arch_needs_cpu(cpu) (0)
+# define arch_needs_cpu() (0)
# endif
# else
static inline void tick_clock_notify(void) { }
diff --git a/include/linux/torture.h b/include/linux/torture.h
index 5ca58fcbaf1b..7759fc3c622d 100644
--- a/include/linux/torture.h
+++ b/include/linux/torture.h
@@ -51,7 +51,7 @@
/* Definitions for online/offline exerciser. */
int torture_onoff_init(long ooholdoff, long oointerval);
-char *torture_onoff_stats(char *page);
+void torture_onoff_stats(void);
bool torture_onoff_failures(void);
/* Low-rider random number generator. */
@@ -77,7 +77,8 @@ int torture_stutter_init(int s);
/* Initialization and cleanup. */
bool torture_init_begin(char *ttype, bool v, int *runnable);
void torture_init_end(void);
-bool torture_cleanup(void);
+bool torture_cleanup_begin(void);
+void torture_cleanup_end(void);
bool torture_must_stop(void);
bool torture_must_stop_irq(void);
void torture_kthread_stopping(char *title);
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index b1293f15f592..e08e21e5f601 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -157,6 +157,12 @@ extern void syscall_unregfunc(void);
* Make sure the alignment of the structure in the __tracepoints section will
* not add unwanted padding between the beginning of the section and the
* structure. Force alignment to the same alignment as the section start.
+ *
+ * When lockdep is enabled, we make sure to always do the RCU portions of
+ * the tracepoint code, regardless of whether tracing is on or we match the
+ * condition. This lets us find RCU issues triggered with tracepoints even
+ * when this tracepoint is off. This code has no purpose other than poking
+ * RCU a bit.
*/
#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
extern struct tracepoint __tracepoint_##name; \
@@ -167,6 +173,11 @@ extern void syscall_unregfunc(void);
TP_PROTO(data_proto), \
TP_ARGS(data_args), \
TP_CONDITION(cond),,); \
+ if (IS_ENABLED(CONFIG_LOCKDEP)) { \
+ rcu_read_lock_sched_notrace(); \
+ rcu_dereference_sched(__tracepoint_##name.funcs);\
+ rcu_read_unlock_sched_notrace(); \
+ } \
} \
__DECLARE_TRACE_RCU(name, PARAMS(proto), PARAMS(args), \
PARAMS(cond), PARAMS(data_proto), PARAMS(data_args)) \
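A hedged illustration (not part of the patch) of the class of bug the unconditional RCU dereference above lets lockdep catch: firing a tracepoint from a context RCU is not watching now warns even while the event is disabled; trace_my_event() is a hypothetical tracepoint:

        static void idle_notrace_bug(void)
        {
                rcu_idle_enter();       /* RCU stops watching this CPU */
                trace_my_event(0);      /* lockdep reports suspicious RCU usage here
                                         * even though nobody enabled the event */
                rcu_idle_exit();
        }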
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 290fbf0b6b8a..9b1581414cd4 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -80,6 +80,9 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i);
+size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i);
+size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
+size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov,
unsigned long nr_segs, size_t count);
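A hedged sketch (not part of the patch) of a ->read_iter()-style consumer of the newly declared helpers; dev_buf and dev_len are illustrative device state:

        static char dev_buf[PAGE_SIZE];
        static size_t dev_len;

        static ssize_t my_read_iter(struct kiocb *iocb, struct iov_iter *to)
        {
                size_t want = min(dev_len, iov_iter_count(to));
                size_t done = copy_to_iter(dev_buf, want, to);

                /* a short copy with data pending means a faulting user buffer */
                return done ? done : (want ? -EFAULT : 0);
        }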
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 80115bf88671..e4a8eb9312ea 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -281,9 +281,11 @@ do { \
* wake_up() has to be called after changing any variable that could
* change the result of the wait condition.
*
- * The function returns 0 if the @timeout elapsed, or the remaining
- * jiffies (at least 1) if the @condition evaluated to %true before
- * the @timeout elapsed.
+ * Returns:
+ * 0 if the @condition evaluated to %false after the @timeout elapsed,
+ * 1 if the @condition evaluated to %true after the @timeout elapsed,
+ * or the remaining jiffies (at least 1) if the @condition evaluated
+ * to %true before the @timeout elapsed.
*/
#define wait_event_timeout(wq, condition, timeout) \
({ \
@@ -364,9 +366,11 @@ do { \
* change the result of the wait condition.
*
* Returns:
- * 0 if the @timeout elapsed, -%ERESTARTSYS if it was interrupted by
- * a signal, or the remaining jiffies (at least 1) if the @condition
- * evaluated to %true before the @timeout elapsed.
+ * 0 if the @condition evaluated to %false after the @timeout elapsed,
+ * 1 if the @condition evaluated to %true after the @timeout elapsed,
+ * the remaining jiffies (at least 1) if the @condition evaluated
+ * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
+ * interrupted by a signal.
*/
#define wait_event_interruptible_timeout(wq, condition, timeout) \
({ \
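A hedged usage sketch (not part of the patch) spelling out the return-value convention documented above; the wait queue, condition and 100ms timeout are illustrative:

        static int wait_for_done(wait_queue_head_t *wq, bool *done)
        {
                long ret = wait_event_interruptible_timeout(*wq, *done,
                                                            msecs_to_jiffies(100));

                if (ret == 0)
                        return -ETIMEDOUT;      /* condition still false after timeout */
                if (ret < 0)
                        return ret;             /* -ERESTARTSYS: interrupted by a signal */
                return 0;                       /* >= 1: condition became true */
        }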
diff --git a/include/media/davinci/dm644x_ccdc.h b/include/media/davinci/dm644x_ccdc.h
index 852e96c4bb46..984fb79031de 100644
--- a/include/media/davinci/dm644x_ccdc.h
+++ b/include/media/davinci/dm644x_ccdc.h
@@ -114,7 +114,7 @@ struct ccdc_fault_pixel {
/* Number of fault pixel */
unsigned short fp_num;
/* Address of fault pixel table */
- unsigned int fpc_table_addr;
+ unsigned long fpc_table_addr;
};
/* Structure for CCDC configuration parameters for raw capture mode passed
diff --git a/include/media/omap3isp.h b/include/media/omap3isp.h
index c9d06d9f7e6e..398279dd1922 100644
--- a/include/media/omap3isp.h
+++ b/include/media/omap3isp.h
@@ -57,6 +57,8 @@ enum {
* 0 - Active high, 1 - Active low
* @vs_pol: Vertical synchronization polarity
* 0 - Active high, 1 - Active low
+ * @fld_pol: Field signal polarity
+ * 0 - Positive, 1 - Negative
* @data_pol: Data polarity
* 0 - Normal, 1 - One's complement
*/
@@ -65,6 +67,7 @@ struct isp_parallel_platform_data {
unsigned int clk_pol:1;
unsigned int hs_pol:1;
unsigned int vs_pol:1;
+ unsigned int fld_pol:1;
unsigned int data_pol:1;
};
diff --git a/include/media/rc-map.h b/include/media/rc-map.h
index 80f951890b4c..e7a1514075ec 100644
--- a/include/media/rc-map.h
+++ b/include/media/rc-map.h
@@ -135,6 +135,7 @@ void rc_map_init(void);
#define RC_MAP_DM1105_NEC "rc-dm1105-nec"
#define RC_MAP_DNTV_LIVE_DVBT_PRO "rc-dntv-live-dvbt-pro"
#define RC_MAP_DNTV_LIVE_DVB_T "rc-dntv-live-dvb-t"
+#define RC_MAP_DVBSKY "rc-dvbsky"
#define RC_MAP_EMPTY "rc-empty"
#define RC_MAP_EM_TERRATEC "rc-em-terratec"
#define RC_MAP_ENCORE_ENLTV2 "rc-encore-enltv2"
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 2fefcf491aa8..6ef2d01197da 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -356,8 +356,8 @@ struct v4l2_fh;
* @buf_struct_size: size of the driver-specific buffer structure;
* "0" indicates the driver doesn't want to use a custom buffer
* structure type, so sizeof(struct vb2_buffer) will is used
- * @timestamp_flags: Timestamp flags; V4L2_BUF_FLAGS_TIMESTAMP_* and
- * V4L2_BUF_FLAGS_TSTAMP_SRC_*
+ * @timestamp_flags: Timestamp flags; V4L2_BUF_FLAG_TIMESTAMP_* and
+ * V4L2_BUF_FLAG_TSTAMP_SRC_*
* @gfp_flags: additional gfp flags used when allocating the buffers.
* Typically this is 0, but it may be e.g. GFP_DMA or __GFP_DMA32
* to force the buffer allocation to a specific memory zone.
@@ -366,6 +366,7 @@ struct v4l2_fh;
* cannot be started unless at least this number of buffers
* have been queued into the driver.
*
+ * @mmap_lock: private mutex used when buffers are allocated/freed/mmapped
* @memory: current memory type used
* @bufs: videobuf buffer structures
* @num_buffers: number of allocated/used buffers
@@ -402,6 +403,7 @@ struct vb2_queue {
u32 min_buffers_needed;
/* private: internal use only */
+ struct mutex mmap_lock;
enum v4l2_memory memory;
struct vb2_buffer *bufs[VIDEO_MAX_FRAME];
unsigned int num_buffers;
@@ -592,6 +594,15 @@ vb2_plane_size(struct vb2_buffer *vb, unsigned int plane_no)
return 0;
}
+/**
+ * vb2_start_streaming_called() - return streaming status of driver
+ * @q: videobuf queue
+ */
+static inline bool vb2_start_streaming_called(struct vb2_queue *q)
+{
+ return q->start_streaming_called;
+}
+
/*
* The following functions are not part of the vb2 core API, but are simple
* helper functions that you can use in your struct v4l2_file_operations,
diff --git a/include/misc/cxl.h b/include/misc/cxl.h
new file mode 100644
index 000000000000..975cc7861f18
--- /dev/null
+++ b/include/misc/cxl.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2014 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _MISC_CXL_H
+#define _MISC_CXL_H
+
+#ifdef CONFIG_CXL_BASE
+
+#define CXL_IRQ_RANGES 4
+
+struct cxl_irq_ranges {
+ irq_hw_number_t offset[CXL_IRQ_RANGES];
+ irq_hw_number_t range[CXL_IRQ_RANGES];
+};
+
+extern atomic_t cxl_use_count;
+
+static inline bool cxl_ctx_in_use(void)
+{
+ return (atomic_read(&cxl_use_count) != 0);
+}
+
+static inline void cxl_ctx_get(void)
+{
+ atomic_inc(&cxl_use_count);
+}
+
+static inline void cxl_ctx_put(void)
+{
+ atomic_dec(&cxl_use_count);
+}
+
+void cxl_slbia(struct mm_struct *mm);
+
+#else /* CONFIG_CXL_BASE */
+
+static inline bool cxl_ctx_in_use(void) { return false; }
+static inline void cxl_slbia(struct mm_struct *mm) {}
+
+#endif /* CONFIG_CXL_BASE */
+
+#endif
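A hedged sketch (not part of the patch) of the intended bracketing around the global cxl context count; the attach/detach helpers and the caller of cxl_slbia() are placeholders:

        /* driver side: account a context for its whole lifetime */
        static void attach_context(void)
        {
                cxl_ctx_get();          /* atomic_inc(&cxl_use_count) */
                /* ... set up and run the AFU context ... */
        }

        static void detach_context(void)
        {
                /* ... tear down the AFU context ... */
                cxl_ctx_put();          /* atomic_dec(&cxl_use_count) */
        }

        /* core side: only do extra invalidation work while contexts exist */
        static void maybe_notify_cxl(struct mm_struct *mm)
        {
                if (cxl_ctx_in_use())
                        cxl_slbia(mm);
        }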
diff --git a/include/net/netfilter/ipv6/nf_reject.h b/include/net/netfilter/ipv6/nf_reject.h
index 7a10cfcd8e33..48e18810a9be 100644
--- a/include/net/netfilter/ipv6/nf_reject.h
+++ b/include/net/netfilter/ipv6/nf_reject.h
@@ -1,11 +1,7 @@
#ifndef _IPV6_NF_REJECT_H
#define _IPV6_NF_REJECT_H
-#include <net/ipv6.h>
-#include <net/ip6_route.h>
-#include <net/ip6_fib.h>
-#include <net/ip6_checksum.h>
-#include <linux/netfilter_ipv6.h>
+#include <linux/icmpv6.h>
static inline void
nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code,
@@ -17,155 +13,6 @@ nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code,
icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
}
-/* Send RST reply */
-static void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
-{
- struct sk_buff *nskb;
- struct tcphdr otcph, *tcph;
- unsigned int otcplen, hh_len;
- int tcphoff, needs_ack;
- const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
- struct ipv6hdr *ip6h;
-#define DEFAULT_TOS_VALUE 0x0U
- const __u8 tclass = DEFAULT_TOS_VALUE;
- struct dst_entry *dst = NULL;
- u8 proto;
- __be16 frag_off;
- struct flowi6 fl6;
-
- if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) ||
- (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) {
- pr_debug("addr is not unicast.\n");
- return;
- }
-
- proto = oip6h->nexthdr;
- tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto, &frag_off);
-
- if ((tcphoff < 0) || (tcphoff > oldskb->len)) {
- pr_debug("Cannot get TCP header.\n");
- return;
- }
-
- otcplen = oldskb->len - tcphoff;
-
- /* IP header checks: fragment, too short. */
- if (proto != IPPROTO_TCP || otcplen < sizeof(struct tcphdr)) {
- pr_debug("proto(%d) != IPPROTO_TCP, "
- "or too short. otcplen = %d\n",
- proto, otcplen);
- return;
- }
-
- if (skb_copy_bits(oldskb, tcphoff, &otcph, sizeof(struct tcphdr)))
- BUG();
-
- /* No RST for RST. */
- if (otcph.rst) {
- pr_debug("RST is set\n");
- return;
- }
-
- /* Check checksum. */
- if (nf_ip6_checksum(oldskb, hook, tcphoff, IPPROTO_TCP)) {
- pr_debug("TCP checksum is invalid\n");
- return;
- }
-
- memset(&fl6, 0, sizeof(fl6));
- fl6.flowi6_proto = IPPROTO_TCP;
- fl6.saddr = oip6h->daddr;
- fl6.daddr = oip6h->saddr;
- fl6.fl6_sport = otcph.dest;
- fl6.fl6_dport = otcph.source;
- security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
- dst = ip6_route_output(net, NULL, &fl6);
- if (dst == NULL || dst->error) {
- dst_release(dst);
- return;
- }
- dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
- if (IS_ERR(dst))
- return;
-
- hh_len = (dst->dev->hard_header_len + 15)&~15;
- nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr)
- + sizeof(struct tcphdr) + dst->trailer_len,
- GFP_ATOMIC);
-
- if (!nskb) {
- net_dbg_ratelimited("cannot alloc skb\n");
- dst_release(dst);
- return;
- }
-
- skb_dst_set(nskb, dst);
-
- skb_reserve(nskb, hh_len + dst->header_len);
-
- skb_put(nskb, sizeof(struct ipv6hdr));
- skb_reset_network_header(nskb);
- ip6h = ipv6_hdr(nskb);
- ip6_flow_hdr(ip6h, tclass, 0);
- ip6h->hop_limit = ip6_dst_hoplimit(dst);
- ip6h->nexthdr = IPPROTO_TCP;
- ip6h->saddr = oip6h->daddr;
- ip6h->daddr = oip6h->saddr;
-
- skb_reset_transport_header(nskb);
- tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
- /* Truncate to length (no data) */
- tcph->doff = sizeof(struct tcphdr)/4;
- tcph->source = otcph.dest;
- tcph->dest = otcph.source;
-
- if (otcph.ack) {
- needs_ack = 0;
- tcph->seq = otcph.ack_seq;
- tcph->ack_seq = 0;
- } else {
- needs_ack = 1;
- tcph->ack_seq = htonl(ntohl(otcph.seq) + otcph.syn + otcph.fin
- + otcplen - (otcph.doff<<2));
- tcph->seq = 0;
- }
-
- /* Reset flags */
- ((u_int8_t *)tcph)[13] = 0;
- tcph->rst = 1;
- tcph->ack = needs_ack;
- tcph->window = 0;
- tcph->urg_ptr = 0;
- tcph->check = 0;
-
- /* Adjust TCP checksum */
- tcph->check = csum_ipv6_magic(&ipv6_hdr(nskb)->saddr,
- &ipv6_hdr(nskb)->daddr,
- sizeof(struct tcphdr), IPPROTO_TCP,
- csum_partial(tcph,
- sizeof(struct tcphdr), 0));
-
- nf_ct_attach(nskb, oldskb);
-
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
- /* If we use ip6_local_out for bridged traffic, the MAC source on
- * the RST will be ours, instead of the destination's. This confuses
- * some routers/firewalls, and they drop the packet. So we need to
- * build the eth header using the original destination's MAC as the
- * source, and send the RST packet directly.
- */
- if (oldskb->nf_bridge) {
- struct ethhdr *oeth = eth_hdr(oldskb);
- nskb->dev = oldskb->nf_bridge->physindev;
- nskb->protocol = htons(ETH_P_IPV6);
- ip6h->payload_len = htons(sizeof(struct tcphdr));
- if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol),
- oeth->h_source, oeth->h_dest, nskb->len) < 0)
- return;
- dev_queue_xmit(nskb);
- } else
-#endif
- ip6_local_out(nskb);
-}
+void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook);
#endif /* _IPV6_NF_REJECT_H */
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 6f3e10ca0e32..e862497f7556 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -183,6 +183,7 @@ struct snd_pcm_ops {
#define SNDRV_PCM_FMTBIT_G723_40_1B _SNDRV_PCM_FMTBIT(G723_40_1B)
#define SNDRV_PCM_FMTBIT_DSD_U8 _SNDRV_PCM_FMTBIT(DSD_U8)
#define SNDRV_PCM_FMTBIT_DSD_U16_LE _SNDRV_PCM_FMTBIT(DSD_U16_LE)
+#define SNDRV_PCM_FMTBIT_DSD_U32_LE _SNDRV_PCM_FMTBIT(DSD_U32_LE)
#ifdef SNDRV_LITTLE_ENDIAN
#define SNDRV_PCM_FMTBIT_S16 SNDRV_PCM_FMTBIT_S16_LE
@@ -365,6 +366,7 @@ struct snd_pcm_runtime {
struct snd_pcm_group { /* keep linked substreams */
spinlock_t lock;
+ struct mutex mutex;
struct list_head substreams;
int count;
};
@@ -460,6 +462,7 @@ struct snd_pcm {
void (*private_free) (struct snd_pcm *pcm);
struct device *dev; /* actual hw device this belongs to */
bool internal; /* pcm is for internal use only */
+ bool nonatomic; /* whole PCM operations are in non-atomic context */
#if defined(CONFIG_SND_PCM_OSS) || defined(CONFIG_SND_PCM_OSS_MODULE)
struct snd_pcm_oss oss;
#endif
@@ -492,8 +495,6 @@ int snd_pcm_notify(struct snd_pcm_notify *notify, int nfree);
* Native I/O
*/
-extern rwlock_t snd_pcm_link_rwlock;
-
int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info);
int snd_pcm_info_user(struct snd_pcm_substream *substream,
struct snd_pcm_info __user *info);
@@ -537,41 +538,18 @@ static inline int snd_pcm_stream_linked(struct snd_pcm_substream *substream)
return substream->group != &substream->self_group;
}
-static inline void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
-{
- read_lock(&snd_pcm_link_rwlock);
- spin_lock(&substream->self_group.lock);
-}
-
-static inline void snd_pcm_stream_unlock(struct snd_pcm_substream *substream)
-{
- spin_unlock(&substream->self_group.lock);
- read_unlock(&snd_pcm_link_rwlock);
-}
-
-static inline void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
-{
- read_lock_irq(&snd_pcm_link_rwlock);
- spin_lock(&substream->self_group.lock);
-}
-
-static inline void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream)
-{
- spin_unlock(&substream->self_group.lock);
- read_unlock_irq(&snd_pcm_link_rwlock);
-}
-
-#define snd_pcm_stream_lock_irqsave(substream, flags) \
-do { \
- read_lock_irqsave(&snd_pcm_link_rwlock, (flags)); \
- spin_lock(&substream->self_group.lock); \
-} while (0)
-
-#define snd_pcm_stream_unlock_irqrestore(substream, flags) \
-do { \
- spin_unlock(&substream->self_group.lock); \
- read_unlock_irqrestore(&snd_pcm_link_rwlock, (flags)); \
-} while (0)
+void snd_pcm_stream_lock(struct snd_pcm_substream *substream);
+void snd_pcm_stream_unlock(struct snd_pcm_substream *substream);
+void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream);
+void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream);
+unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream);
+#define snd_pcm_stream_lock_irqsave(substream, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = _snd_pcm_stream_lock_irqsave(substream); \
+ } while (0)
+void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
+ unsigned long flags);
#define snd_pcm_group_for_each_entry(s, substream) \
list_for_each_entry(s, &substream->group->substreams, link_list)
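A hedged usage sketch (not part of the patch): the driver-visible calling convention is unchanged by the move out of line, but 'flags' must be an unsigned long or the typecheck() in the new macro breaks the build:

        static void update_runtime(struct snd_pcm_substream *substream)
        {
                unsigned long flags;    /* typecheck() insists on unsigned long */

                snd_pcm_stream_lock_irqsave(substream, flags);
                /* ... update substream->runtime state under the stream lock ... */
                snd_pcm_stream_unlock_irqrestore(substream, flags);
        }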
diff --git a/include/sound/rt5645.h b/include/sound/rt5645.h
index 1de744c242f6..a5352712194b 100644
--- a/include/sound/rt5645.h
+++ b/include/sound/rt5645.h
@@ -20,6 +20,9 @@ struct rt5645_platform_data {
/* 0 = IN2N; 1 = GPIO5; 2 = GPIO11 */
unsigned int dmic2_data_pin;
/* 0 = IN2P; 1 = GPIO6; 2 = GPIO10; 3 = GPIO12 */
+
+ unsigned int hp_det_gpio;
+ bool gpio_hp_det_active_high;
};
#endif
diff --git a/include/sound/rt5677.h b/include/sound/rt5677.h
index 3da14313bcfc..082670e3a353 100644
--- a/include/sound/rt5677.h
+++ b/include/sound/rt5677.h
@@ -12,10 +12,21 @@
#ifndef __LINUX_SND_RT5677_H
#define __LINUX_SND_RT5677_H
+enum rt5677_dmic2_clk {
+ RT5677_DMIC_CLK1 = 0,
+ RT5677_DMIC_CLK2 = 1,
+};
+
+
struct rt5677_platform_data {
- /* IN1 IN2 can optionally be differential */
+ /* IN1/IN2/LOUT1/LOUT2/LOUT3 can optionally be differential */
bool in1_diff;
bool in2_diff;
+ bool lout1_diff;
+ bool lout2_diff;
+ bool lout3_diff;
+ /* DMIC2 clock source selection */
+ enum rt5677_dmic2_clk dmic2_clk_pin;
};
#endif
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index aac04ff84eea..3a4d7da67b8d 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -432,6 +432,7 @@ int snd_soc_dapm_force_enable_pin_unlocked(struct snd_soc_dapm_context *dapm,
int snd_soc_dapm_ignore_suspend(struct snd_soc_dapm_context *dapm,
const char *pin);
void snd_soc_dapm_auto_nc_pins(struct snd_soc_card *card);
+unsigned int dapm_kcontrol_get_value(const struct snd_kcontrol *kcontrol);
/* Mostly internal - should not normally be used */
void dapm_mark_io_dirty(struct snd_soc_dapm_context *dapm);
@@ -587,13 +588,13 @@ struct snd_soc_dapm_context {
enum snd_soc_bias_level suspend_bias_level;
struct delayed_work delayed_work;
unsigned int idle_bias_off:1; /* Use BIAS_OFF instead of STANDBY */
-
+ /* Go to BIAS_OFF in suspend if the DAPM context is idle */
+ unsigned int suspend_bias_off:1;
void (*seq_notifier)(struct snd_soc_dapm_context *,
enum snd_soc_dapm_type, int);
struct device *dev; /* from parent - for debug */
struct snd_soc_component *component; /* parent component */
- struct snd_soc_codec *codec; /* parent codec */
struct snd_soc_card *card; /* parent card */
/* used during DAPM updates */
diff --git a/include/sound/soc.h b/include/sound/soc.h
index c83a334dd00f..7ba7130037a0 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -690,6 +690,17 @@ struct snd_soc_compr_ops {
struct snd_soc_component_driver {
const char *name;
+ /* Default control and setup, added after probe() is run */
+ const struct snd_kcontrol_new *controls;
+ unsigned int num_controls;
+ const struct snd_soc_dapm_widget *dapm_widgets;
+ unsigned int num_dapm_widgets;
+ const struct snd_soc_dapm_route *dapm_routes;
+ unsigned int num_dapm_routes;
+
+ int (*probe)(struct snd_soc_component *);
+ void (*remove)(struct snd_soc_component *);
+
/* DT */
int (*of_xlate_dai_name)(struct snd_soc_component *component,
struct of_phandle_args *args,
@@ -697,6 +708,10 @@ struct snd_soc_component_driver {
void (*seq_notifier)(struct snd_soc_component *, enum snd_soc_dapm_type,
int subseq);
int (*stream_event)(struct snd_soc_component *, int event);
+
+ /* probe ordering - for components with runtime dependencies */
+ int probe_order;
+ int remove_order;
};
struct snd_soc_component {
@@ -710,6 +725,7 @@ struct snd_soc_component {
unsigned int ignore_pmdown_time:1; /* pmdown_time is ignored at stop */
unsigned int registered_as_component:1;
+ unsigned int probed:1;
struct list_head list;
@@ -728,9 +744,35 @@ struct snd_soc_component {
struct mutex io_mutex;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_root;
+#endif
+
+ /*
+ * DO NOT use any of the fields below in drivers, they are temporary and
+ * are going to be removed again soon. If you use them in driver code the
+ * driver will be marked as BROKEN when these fields are removed.
+ */
+
/* Don't use these, use snd_soc_component_get_dapm() */
struct snd_soc_dapm_context dapm;
struct snd_soc_dapm_context *dapm_ptr;
+
+ const struct snd_kcontrol_new *controls;
+ unsigned int num_controls;
+ const struct snd_soc_dapm_widget *dapm_widgets;
+ unsigned int num_dapm_widgets;
+ const struct snd_soc_dapm_route *dapm_routes;
+ unsigned int num_dapm_routes;
+ struct snd_soc_codec *codec;
+
+ int (*probe)(struct snd_soc_component *);
+ void (*remove)(struct snd_soc_component *);
+
+#ifdef CONFIG_DEBUG_FS
+ void (*init_debugfs)(struct snd_soc_component *component);
+ const char *debugfs_prefix;
+#endif
};
/* SoC Audio Codec device */
@@ -746,11 +788,9 @@ struct snd_soc_codec {
struct snd_ac97 *ac97; /* for ad-hoc ac97 devices */
unsigned int cache_bypass:1; /* Suppress access to the cache */
unsigned int suspended:1; /* Codec is in suspend PM state */
- unsigned int probed:1; /* Codec has been probed */
unsigned int ac97_registered:1; /* Codec has been AC97 registered */
unsigned int ac97_created:1; /* Codec has been created by SoC */
unsigned int cache_init:1; /* codec cache has been initialized */
- u32 cache_only; /* Suppress writes to hardware */
u32 cache_sync; /* Cache needs to be synced to hardware */
/* codec IO */
@@ -766,7 +806,6 @@ struct snd_soc_codec {
struct snd_soc_dapm_context dapm;
#ifdef CONFIG_DEBUG_FS
- struct dentry *debugfs_codec_root;
struct dentry *debugfs_reg;
#endif
};
@@ -808,15 +847,12 @@ struct snd_soc_codec_driver {
int (*set_bias_level)(struct snd_soc_codec *,
enum snd_soc_bias_level level);
bool idle_bias_off;
+ bool suspend_bias_off;
void (*seq_notifier)(struct snd_soc_dapm_context *,
enum snd_soc_dapm_type, int);
bool ignore_pmdown_time; /* Doesn't benefit from pmdown delay */
-
- /* probe ordering - for components with runtime dependencies */
- int probe_order;
- int remove_order;
};
/* SoC platform interface */
@@ -832,14 +868,6 @@ struct snd_soc_platform_driver {
int (*pcm_new)(struct snd_soc_pcm_runtime *);
void (*pcm_free)(struct snd_pcm *);
- /* Default control and setup, added after probe() is run */
- const struct snd_kcontrol_new *controls;
- int num_controls;
- const struct snd_soc_dapm_widget *dapm_widgets;
- int num_dapm_widgets;
- const struct snd_soc_dapm_route *dapm_routes;
- int num_dapm_routes;
-
/*
* For platform caused delay reporting.
* Optional.
@@ -853,13 +881,6 @@ struct snd_soc_platform_driver {
/* platform stream compress ops */
const struct snd_compr_ops *compr_ops;
- /* probe ordering - for components with runtime dependencies */
- int probe_order;
- int remove_order;
-
- /* platform IO - used for platform DAPM */
- unsigned int (*read)(struct snd_soc_platform *, unsigned int);
- int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
int (*bespoke_trigger)(struct snd_pcm_substream *, int);
};
@@ -874,15 +895,10 @@ struct snd_soc_platform {
const struct snd_soc_platform_driver *driver;
unsigned int suspended:1; /* platform is suspended */
- unsigned int probed:1;
struct list_head list;
struct snd_soc_component component;
-
-#ifdef CONFIG_DEBUG_FS
- struct dentry *debugfs_platform_root;
-#endif
};
struct snd_soc_dai_link {
@@ -897,7 +913,7 @@ struct snd_soc_dai_link {
* only for codec to codec links, or systems using device tree.
*/
const char *cpu_name;
- const struct device_node *cpu_of_node;
+ struct device_node *cpu_of_node;
/*
* You MAY specify the DAI name of the CPU DAI. If this information is
* omitted, the CPU-side DAI is matched using .cpu_name/.cpu_of_node
@@ -909,7 +925,7 @@ struct snd_soc_dai_link {
* DT/OF node, but not both.
*/
const char *codec_name;
- const struct device_node *codec_of_node;
+ struct device_node *codec_of_node;
/* You MUST specify the DAI name within the codec */
const char *codec_dai_name;
@@ -922,7 +938,7 @@ struct snd_soc_dai_link {
* do not need a platform.
*/
const char *platform_name;
- const struct device_node *platform_of_node;
+ struct device_node *platform_of_node;
int be_id; /* optional ID for machine driver BE identification */
const struct snd_soc_pcm_stream *params;
@@ -994,7 +1010,7 @@ struct snd_soc_aux_dev {
const struct device_node *codec_of_node;
/* codec/machine specific init - e.g. add machine controls */
- int (*init)(struct snd_soc_dapm_context *dapm);
+ int (*init)(struct snd_soc_component *component);
};
/* SoC card */
@@ -1112,6 +1128,7 @@ struct snd_soc_pcm_runtime {
struct snd_soc_platform *platform;
struct snd_soc_dai *codec_dai;
struct snd_soc_dai *cpu_dai;
+ struct snd_soc_component *component; /* Only valid for AUX dev rtds */
struct snd_soc_dai **codec_dais;
unsigned int num_codecs;
@@ -1260,9 +1277,6 @@ void snd_soc_component_async_complete(struct snd_soc_component *component);
int snd_soc_component_test_bits(struct snd_soc_component *component,
unsigned int reg, unsigned int mask, unsigned int value);
-int snd_soc_component_init_io(struct snd_soc_component *component,
- struct regmap *regmap);
-
/* device driver data */
static inline void snd_soc_card_set_drvdata(struct snd_soc_card *card,
@@ -1276,26 +1290,37 @@ static inline void *snd_soc_card_get_drvdata(struct snd_soc_card *card)
return card->drvdata;
}
+static inline void snd_soc_component_set_drvdata(struct snd_soc_component *c,
+ void *data)
+{
+ dev_set_drvdata(c->dev, data);
+}
+
+static inline void *snd_soc_component_get_drvdata(struct snd_soc_component *c)
+{
+ return dev_get_drvdata(c->dev);
+}
+
static inline void snd_soc_codec_set_drvdata(struct snd_soc_codec *codec,
void *data)
{
- dev_set_drvdata(codec->dev, data);
+ snd_soc_component_set_drvdata(&codec->component, data);
}
static inline void *snd_soc_codec_get_drvdata(struct snd_soc_codec *codec)
{
- return dev_get_drvdata(codec->dev);
+ return snd_soc_component_get_drvdata(&codec->component);
}
static inline void snd_soc_platform_set_drvdata(struct snd_soc_platform *platform,
void *data)
{
- dev_set_drvdata(platform->dev, data);
+ snd_soc_component_set_drvdata(&platform->component, data);
}
static inline void *snd_soc_platform_get_drvdata(struct snd_soc_platform *platform)
{
- return dev_get_drvdata(platform->dev);
+ return snd_soc_component_get_drvdata(&platform->component);
}
static inline void snd_soc_pcm_set_drvdata(struct snd_soc_pcm_runtime *rtd,
diff --git a/include/sound/vx_core.h b/include/sound/vx_core.h
index f634f8f85db5..cae9c9d4ef22 100644
--- a/include/sound/vx_core.h
+++ b/include/sound/vx_core.h
@@ -80,8 +80,6 @@ struct vx_pipe {
unsigned int references; /* an output pipe may be used for monitoring and/or playback */
struct vx_pipe *monitoring_pipe; /* pointer to the monitoring pipe (capture pipe only)*/
-
- struct tasklet_struct start_tq;
};
struct vx_core;
@@ -165,9 +163,7 @@ struct vx_core {
struct snd_vx_hardware *hw;
struct snd_vx_ops *ops;
- spinlock_t lock;
- spinlock_t irq_lock;
- struct tasklet_struct tq;
+ struct mutex lock;
unsigned int chip_status;
unsigned int pcm_running;
@@ -223,6 +219,7 @@ void snd_vx_free_firmware(struct vx_core *chip);
* interrupt handler; exported for pcmcia
*/
irqreturn_t snd_vx_irq_handler(int irq, void *dev);
+irqreturn_t snd_vx_threaded_irq_handler(int irq, void *dev);
/*
* lowlevel functions
diff --git a/include/trace/events/asoc.h b/include/trace/events/asoc.h
index 0194a641e4e2..b04ee7e5a466 100644
--- a/include/trace/events/asoc.h
+++ b/include/trace/events/asoc.h
@@ -175,7 +175,7 @@ TRACE_EVENT(snd_soc_dapm_output_path,
__entry->path_sink = (long)path->sink;
),
- TP_printk("%c%s -> %s -> %s\n",
+ TP_printk("%c%s -> %s -> %s",
(int) __entry->path_sink &&
(int) __entry->path_connect ? '*' : ' ',
__get_str(wname), __get_str(pname), __get_str(psname))
@@ -204,7 +204,7 @@ TRACE_EVENT(snd_soc_dapm_input_path,
__entry->path_source = (long)path->source;
),
- TP_printk("%c%s <- %s <- %s\n",
+ TP_printk("%c%s <- %s <- %s",
(int) __entry->path_source &&
(int) __entry->path_connect ? '*' : ' ',
__get_str(wname), __get_str(pname), __get_str(psname))
@@ -226,7 +226,7 @@ TRACE_EVENT(snd_soc_dapm_connected,
__entry->stream = stream;
),
- TP_printk("%s: found %d paths\n",
+ TP_printk("%s: found %d paths",
__entry->stream ? "capture" : "playback", __entry->paths)
);
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 4ee4e30d26d9..1faecea101f3 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -23,6 +23,7 @@ struct map_lookup;
struct extent_buffer;
struct btrfs_work;
struct __btrfs_workqueue;
+struct btrfs_qgroup_operation;
#define show_ref_type(type) \
__print_symbolic(type, \
@@ -157,12 +158,13 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict,
#define show_map_flags(flag) \
__print_flags(flag, "|", \
- { EXTENT_FLAG_PINNED, "PINNED" }, \
- { EXTENT_FLAG_COMPRESSED, "COMPRESSED" }, \
- { EXTENT_FLAG_VACANCY, "VACANCY" }, \
- { EXTENT_FLAG_PREALLOC, "PREALLOC" }, \
- { EXTENT_FLAG_LOGGING, "LOGGING" }, \
- { EXTENT_FLAG_FILLING, "FILLING" })
+ { (1 << EXTENT_FLAG_PINNED), "PINNED" },\
+ { (1 << EXTENT_FLAG_COMPRESSED), "COMPRESSED" },\
+ { (1 << EXTENT_FLAG_VACANCY), "VACANCY" },\
+ { (1 << EXTENT_FLAG_PREALLOC), "PREALLOC" },\
+ { (1 << EXTENT_FLAG_LOGGING), "LOGGING" },\
+ { (1 << EXTENT_FLAG_FILLING), "FILLING" },\
+ { (1 << EXTENT_FLAG_FS_MAPPING), "FS_MAPPING" })
TRACE_EVENT_CONDITION(btrfs_get_extent,
@@ -996,6 +998,7 @@ DECLARE_EVENT_CLASS(btrfs__work,
__field( void *, func )
__field( void *, ordered_func )
__field( void *, ordered_free )
+ __field( void *, normal_work )
),
TP_fast_assign(
@@ -1004,11 +1007,13 @@ DECLARE_EVENT_CLASS(btrfs__work,
__entry->func = work->func;
__entry->ordered_func = work->ordered_func;
__entry->ordered_free = work->ordered_free;
+ __entry->normal_work = &work->normal_work;
),
- TP_printk("work=%p, wq=%p, func=%p, ordered_func=%p, ordered_free=%p",
- __entry->work, __entry->wq, __entry->func,
- __entry->ordered_func, __entry->ordered_free)
+ TP_printk("work=%p (normal_work=%p), wq=%p, func=%pf, ordered_func=%p,"
+ " ordered_free=%p",
+ __entry->work, __entry->normal_work, __entry->wq,
+ __entry->func, __entry->ordered_func, __entry->ordered_free)
);
/* For situiations that the work is freed */
@@ -1043,13 +1048,6 @@ DEFINE_EVENT(btrfs__work, btrfs_work_sched,
TP_ARGS(work)
);
-DEFINE_EVENT(btrfs__work, btrfs_normal_work_done,
-
- TP_PROTO(struct btrfs_work *work),
-
- TP_ARGS(work)
-);
-
DEFINE_EVENT(btrfs__work__done, btrfs_all_work_done,
TP_PROTO(struct btrfs_work *work),
@@ -1119,6 +1117,61 @@ DEFINE_EVENT(btrfs__workqueue_done, btrfs_workqueue_destroy,
TP_ARGS(wq)
);
+#define show_oper_type(type) \
+ __print_symbolic(type, \
+ { BTRFS_QGROUP_OPER_ADD_EXCL, "OPER_ADD_EXCL" }, \
+ { BTRFS_QGROUP_OPER_ADD_SHARED, "OPER_ADD_SHARED" }, \
+ { BTRFS_QGROUP_OPER_SUB_EXCL, "OPER_SUB_EXCL" }, \
+ { BTRFS_QGROUP_OPER_SUB_SHARED, "OPER_SUB_SHARED" })
+
+DECLARE_EVENT_CLASS(btrfs_qgroup_oper,
+
+ TP_PROTO(struct btrfs_qgroup_operation *oper),
+
+ TP_ARGS(oper),
+
+ TP_STRUCT__entry(
+ __field( u64, ref_root )
+ __field( u64, bytenr )
+ __field( u64, num_bytes )
+ __field( u64, seq )
+ __field( int, type )
+ __field( u64, elem_seq )
+ ),
+
+ TP_fast_assign(
+ __entry->ref_root = oper->ref_root;
+ __entry->bytenr = oper->bytenr,
+ __entry->num_bytes = oper->num_bytes;
+ __entry->seq = oper->seq;
+ __entry->type = oper->type;
+ __entry->elem_seq = oper->elem.seq;
+ ),
+
+ TP_printk("ref_root = %llu, bytenr = %llu, num_bytes = %llu, "
+ "seq = %llu, elem.seq = %llu, type = %s",
+ (unsigned long long)__entry->ref_root,
+ (unsigned long long)__entry->bytenr,
+ (unsigned long long)__entry->num_bytes,
+ (unsigned long long)__entry->seq,
+ (unsigned long long)__entry->elem_seq,
+ show_oper_type(__entry->type))
+);
+
+DEFINE_EVENT(btrfs_qgroup_oper, btrfs_qgroup_account,
+
+ TP_PROTO(struct btrfs_qgroup_operation *oper),
+
+ TP_ARGS(oper)
+);
+
+DEFINE_EVENT(btrfs_qgroup_oper, btrfs_qgroup_record_ref,
+
+ TP_PROTO(struct btrfs_qgroup_operation *oper),
+
+ TP_ARGS(oper)
+);
+
#endif /* _TRACE_BTRFS_H */
/* This part must be outside protection */
diff --git a/include/trace/events/filelock.h b/include/trace/events/filelock.h
index 59d11c22f076..a0d008070962 100644
--- a/include/trace/events/filelock.h
+++ b/include/trace/events/filelock.h
@@ -53,15 +53,15 @@ DECLARE_EVENT_CLASS(filelock_lease,
),
TP_fast_assign(
- __entry->fl = fl;
+ __entry->fl = fl ? fl : NULL;
__entry->s_dev = inode->i_sb->s_dev;
__entry->i_ino = inode->i_ino;
- __entry->fl_next = fl->fl_next;
- __entry->fl_owner = fl->fl_owner;
- __entry->fl_flags = fl->fl_flags;
- __entry->fl_type = fl->fl_type;
- __entry->fl_break_time = fl->fl_break_time;
- __entry->fl_downgrade_time = fl->fl_downgrade_time;
+ __entry->fl_next = fl ? fl->fl_next : NULL;
+ __entry->fl_owner = fl ? fl->fl_owner : NULL;
+ __entry->fl_flags = fl ? fl->fl_flags : 0;
+ __entry->fl_type = fl ? fl->fl_type : 0;
+ __entry->fl_break_time = fl ? fl->fl_break_time : 0;
+ __entry->fl_downgrade_time = fl ? fl->fl_downgrade_time : 0;
),
TP_printk("fl=0x%p dev=0x%x:0x%x ino=0x%lx fl_next=0x%p fl_owner=0x%p fl_flags=%s fl_type=%s fl_break_time=%lu fl_downgrade_time=%lu",
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index aca382266411..9b56f37148cf 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -180,9 +180,12 @@ TRACE_EVENT(rcu_grace_period_init,
* argument is a string as follows:
*
* "WakeEmpty": Wake rcuo kthread, first CB to empty list.
+ * "WakeEmptyIsDeferred": Wake rcuo kthread later, first CB to empty list.
* "WakeOvf": Wake rcuo kthread, CB list is huge.
+ * "WakeOvfIsDeferred": Wake rcuo kthread later, CB list is huge.
* "WakeNot": Don't wake rcuo kthread.
* "WakeNotPoll": Don't wake rcuo kthread because it is polling.
+ * "DeferredWake": Carried out the "IsDeferred" wakeup.
* "Poll": Start of new polling cycle for rcu_nocb_poll.
* "Sleep": Sleep waiting for CBs for !rcu_nocb_poll.
* "WokeEmpty": rcuo kthread woke to find empty list.
diff --git a/include/uapi/Kbuild b/include/uapi/Kbuild
index 81d2106287fe..245aa6e05e6a 100644
--- a/include/uapi/Kbuild
+++ b/include/uapi/Kbuild
@@ -12,3 +12,4 @@ header-y += video/
header-y += drm/
header-y += xen/
header-y += scsi/
+header-y += misc/
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 70e150ebc6c9..3cc8e1c2b996 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -355,6 +355,7 @@ header-y += serio.h
header-y += shm.h
header-y += signal.h
header-y += signalfd.h
+header-y += smiapp.h
header-y += snmp.h
header-y += sock_diag.h
header-y += socket.h
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index ef6103bf1f9b..ea9bf2561b9e 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -391,6 +391,8 @@ typedef struct elf64_shdr {
#define NT_S390_LAST_BREAK 0x306 /* s390 breaking event address */
#define NT_S390_SYSTEM_CALL 0x307 /* s390 system call restart data */
#define NT_S390_TDB 0x308 /* s390 transaction diagnostic block */
+#define NT_S390_VXRS_LOW 0x309 /* s390 vector registers 0-15 upper half */
+#define NT_S390_VXRS_HIGH 0x30a /* s390 vector registers 16-31 */
#define NT_ARM_VFP 0x400 /* ARM VFP/NEON registers */
#define NT_ARM_TLS 0x401 /* ARM TLS register */
#define NT_ARM_HW_BREAK 0x402 /* ARM hardware breakpoint registers */
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index c26df6787fb0..f31fe7b660a5 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -774,7 +774,7 @@ enum nft_reject_inet_code {
NFT_REJECT_ICMPX_ADMIN_PROHIBITED,
__NFT_REJECT_ICMPX_MAX
};
-#define NFT_REJECT_ICMPX_MAX (__NFT_REJECT_ICMPX_MAX + 1)
+#define NFT_REJECT_ICMPX_MAX (__NFT_REJECT_ICMPX_MAX - 1)
/**
* enum nft_reject_attributes - nf_tables reject expression netlink attributes
diff --git a/include/uapi/linux/smiapp.h b/include/uapi/linux/smiapp.h
new file mode 100644
index 000000000000..53938f4412ee
--- /dev/null
+++ b/include/uapi/linux/smiapp.h
@@ -0,0 +1,29 @@
+/*
+ * include/uapi/linux/smiapp.h
+ *
+ * Generic driver for SMIA/SMIA++ compliant camera modules
+ *
+ * Copyright (C) 2014 Intel Corporation
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ */
+
+#ifndef __UAPI_LINUX_SMIAPP_H_
+#define __UAPI_LINUX_SMIAPP_H_
+
+#define V4L2_SMIAPP_TEST_PATTERN_MODE_DISABLED 0
+#define V4L2_SMIAPP_TEST_PATTERN_MODE_SOLID_COLOUR 1
+#define V4L2_SMIAPP_TEST_PATTERN_MODE_COLOUR_BARS 2
+#define V4L2_SMIAPP_TEST_PATTERN_MODE_COLOUR_BARS_GREY 3
+#define V4L2_SMIAPP_TEST_PATTERN_MODE_PN9 4
+
+#endif /* __UAPI_LINUX_SMIAPP_H_ */
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index e946e43fb8d5..661f119a51b8 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -746,6 +746,8 @@ enum v4l2_auto_focus_range {
V4L2_AUTO_FOCUS_RANGE_INFINITY = 3,
};
+#define V4L2_CID_PAN_SPEED (V4L2_CID_CAMERA_CLASS_BASE+32)
+#define V4L2_CID_TILT_SPEED (V4L2_CID_CAMERA_CLASS_BASE+33)
/* FM Modulator class control IDs */
@@ -865,6 +867,10 @@ enum v4l2_jpeg_chroma_subsampling {
#define V4L2_CID_VBLANK (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 1)
#define V4L2_CID_HBLANK (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 2)
#define V4L2_CID_ANALOGUE_GAIN (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 3)
+#define V4L2_CID_TEST_PATTERN_RED (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 4)
+#define V4L2_CID_TEST_PATTERN_GREENR (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 5)
+#define V4L2_CID_TEST_PATTERN_BLUE (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 6)
+#define V4L2_CID_TEST_PATTERN_GREENB (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 7)
/* Image processing controls */
diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h
index 6c8f159e416e..6a0764c89fcb 100644
--- a/include/uapi/linux/v4l2-dv-timings.h
+++ b/include/uapi/linux/v4l2-dv-timings.h
@@ -21,17 +21,8 @@
#ifndef _V4L2_DV_TIMINGS_H
#define _V4L2_DV_TIMINGS_H
-#if __GNUC__ < 4 || (__GNUC__ == 4 && (__GNUC_MINOR__ < 6))
-/* Sadly gcc versions older than 4.6 have a bug in how they initialize
- anonymous unions where they require additional curly brackets.
- This violates the C1x standard. This workaround adds the curly brackets
- if needed. */
#define V4L2_INIT_BT_TIMINGS(_width, args...) \
{ .bt = { _width , ## args } }
-#else
-#define V4L2_INIT_BT_TIMINGS(_width, args...) \
- .bt = { _width , ## args }
-#endif
/* CEA-861-E timings (i.e. standard HDTV timings) */
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index 6612974c64bf..29715d27548f 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -33,6 +33,9 @@
/* Check if EEH is supported */
#define VFIO_EEH 5
+/* Two-stage IOMMU */
+#define VFIO_TYPE1_NESTING_IOMMU 6 /* Implies v2 */
+
/*
* The IOCTL interface is designed for extensibility by embedding the
* structure length (argsz) and flags into structures passed between
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 778a3298fb34..1c2f84fd4d99 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -79,6 +79,7 @@
/* Four-character-code (FOURCC) */
#define v4l2_fourcc(a, b, c, d)\
((__u32)(a) | ((__u32)(b) << 8) | ((__u32)(c) << 16) | ((__u32)(d) << 24))
+#define v4l2_fourcc_be(a, b, c, d) (v4l2_fourcc(a, b, c, d) | (1 << 31))
/*
* E N U M S
@@ -307,6 +308,8 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_XRGB555 v4l2_fourcc('X', 'R', '1', '5') /* 16 XRGB-1-5-5-5 */
#define V4L2_PIX_FMT_RGB565 v4l2_fourcc('R', 'G', 'B', 'P') /* 16 RGB-5-6-5 */
#define V4L2_PIX_FMT_RGB555X v4l2_fourcc('R', 'G', 'B', 'Q') /* 16 RGB-5-5-5 BE */
+#define V4L2_PIX_FMT_ARGB555X v4l2_fourcc_be('A', 'R', '1', '5') /* 16 ARGB-5-5-5 BE */
+#define V4L2_PIX_FMT_XRGB555X v4l2_fourcc_be('X', 'R', '1', '5') /* 16 XRGB-5-5-5 BE */
#define V4L2_PIX_FMT_RGB565X v4l2_fourcc('R', 'G', 'B', 'R') /* 16 RGB-5-6-5 BE */
#define V4L2_PIX_FMT_BGR666 v4l2_fourcc('B', 'G', 'R', 'H') /* 18 BGR-6-6-6 */
#define V4L2_PIX_FMT_BGR24 v4l2_fourcc('B', 'G', 'R', '3') /* 24 BGR-8-8-8 */
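A hedged, userspace-compilable illustration (not part of the patch) of what v4l2_fourcc_be() does: the big-endian variant is the same four characters with bit 31 set; the macros are re-declared locally for the demo:

        #include <stdint.h>
        #include <stdio.h>

        #define demo_fourcc(a, b, c, d) \
                ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
                 ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))
        #define demo_fourcc_be(a, b, c, d) (demo_fourcc(a, b, c, d) | (1U << 31))

        int main(void)
        {
                /* ARGB555 little-endian vs. the new big-endian variant */
                printf("AR15 LE = 0x%08x, AR15 BE = 0x%08x\n",
                       demo_fourcc('A', 'R', '1', '5'),
                       demo_fourcc_be('A', 'R', '1', '5'));
                return 0;
        }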
@@ -1285,11 +1288,11 @@ struct v4l2_ext_control {
union {
__s32 value;
__s64 value64;
- char *string;
- __u8 *p_u8;
- __u16 *p_u16;
- __u32 *p_u32;
- void *ptr;
+ char __user *string;
+ __u8 __user *p_u8;
+ __u16 __user *p_u16;
+ __u32 __user *p_u32;
+ void __user *ptr;
};
} __attribute__ ((packed));
diff --git a/include/uapi/misc/Kbuild b/include/uapi/misc/Kbuild
new file mode 100644
index 000000000000..e96cae7d58c9
--- /dev/null
+++ b/include/uapi/misc/Kbuild
@@ -0,0 +1,2 @@
+# misc Header export list
+header-y += cxl.h
diff --git a/include/uapi/misc/cxl.h b/include/uapi/misc/cxl.h
new file mode 100644
index 000000000000..cd6d789b73ec
--- /dev/null
+++ b/include/uapi/misc/cxl.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2014 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _UAPI_MISC_CXL_H
+#define _UAPI_MISC_CXL_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+
+struct cxl_ioctl_start_work {
+ __u64 flags;
+ __u64 work_element_descriptor;
+ __u64 amr;
+ __s16 num_interrupts;
+ __s16 reserved1;
+ __s32 reserved2;
+ __u64 reserved3;
+ __u64 reserved4;
+ __u64 reserved5;
+ __u64 reserved6;
+};
+
+#define CXL_START_WORK_AMR 0x0000000000000001ULL
+#define CXL_START_WORK_NUM_IRQS 0x0000000000000002ULL
+#define CXL_START_WORK_ALL (CXL_START_WORK_AMR |\
+ CXL_START_WORK_NUM_IRQS)
+
+/* ioctl numbers */
+#define CXL_MAGIC 0xCA
+#define CXL_IOCTL_START_WORK _IOW(CXL_MAGIC, 0x00, struct cxl_ioctl_start_work)
+#define CXL_IOCTL_GET_PROCESS_ELEMENT _IOR(CXL_MAGIC, 0x01, __u32)
+
+#define CXL_READ_MIN_SIZE 0x1000 /* 4K */
+
+/* Events from read() */
+enum cxl_event_type {
+ CXL_EVENT_RESERVED = 0,
+ CXL_EVENT_AFU_INTERRUPT = 1,
+ CXL_EVENT_DATA_STORAGE = 2,
+ CXL_EVENT_AFU_ERROR = 3,
+};
+
+struct cxl_event_header {
+ __u16 type;
+ __u16 size;
+ __u16 process_element;
+ __u16 reserved1;
+};
+
+struct cxl_event_afu_interrupt {
+ __u16 flags;
+ __u16 irq; /* Raised AFU interrupt number */
+ __u32 reserved1;
+};
+
+struct cxl_event_data_storage {
+ __u16 flags;
+ __u16 reserved1;
+ __u32 reserved2;
+ __u64 addr;
+ __u64 dsisr;
+ __u64 reserved3;
+};
+
+struct cxl_event_afu_error {
+ __u16 flags;
+ __u16 reserved1;
+ __u32 reserved2;
+ __u64 error;
+};
+
+struct cxl_event {
+ struct cxl_event_header header;
+ union {
+ struct cxl_event_afu_interrupt irq;
+ struct cxl_event_data_storage fault;
+ struct cxl_event_afu_error afu_error;
+ };
+};
+
+#endif /* _UAPI_MISC_CXL_H */
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index 32168f7ffce3..6ee586728df9 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -219,7 +219,8 @@ typedef int __bitwise snd_pcm_format_t;
#define SNDRV_PCM_FORMAT_G723_40_1B ((__force snd_pcm_format_t) 47) /* 1 sample in 1 byte */
#define SNDRV_PCM_FORMAT_DSD_U8 ((__force snd_pcm_format_t) 48) /* DSD, 1-byte samples DSD (x8) */
#define SNDRV_PCM_FORMAT_DSD_U16_LE ((__force snd_pcm_format_t) 49) /* DSD, 2-byte samples DSD (x16), little endian */
-#define SNDRV_PCM_FORMAT_LAST SNDRV_PCM_FORMAT_DSD_U16_LE
+#define SNDRV_PCM_FORMAT_DSD_U32_LE ((__force snd_pcm_format_t) 50) /* DSD, 4-byte samples DSD (x32), little endian */
+#define SNDRV_PCM_FORMAT_LAST SNDRV_PCM_FORMAT_DSD_U32_LE
#ifdef SNDRV_LITTLE_ENDIAN
#define SNDRV_PCM_FORMAT_S16 SNDRV_PCM_FORMAT_S16_LE
diff --git a/include/xen/events.h b/include/xen/events.h
index 8bee7a75e850..5321cd9636e6 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -28,6 +28,8 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
unsigned long irqflags,
const char *devname,
void *dev_id);
+int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
+ unsigned int remote_port);
int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
unsigned int remote_port,
irq_handler_t handler,
diff --git a/include/xen/interface/elfnote.h b/include/xen/interface/elfnote.h
index 6f4eae328ca7..f90b03454659 100644
--- a/include/xen/interface/elfnote.h
+++ b/include/xen/interface/elfnote.h
@@ -3,6 +3,24 @@
*
* Definitions used for the Xen ELF notes.
*
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
* Copyright (c) 2006, Ian Campbell, XenSource Ltd.
*/
@@ -18,12 +36,13 @@
*
* LEGACY indicated the fields in the legacy __xen_guest string which
* this a note type replaces.
+ *
+ * String values (for non-legacy) are NULL terminated ASCII, also known
+ * as ASCIZ type.
*/
/*
* NAME=VALUE pair (string).
- *
- * LEGACY: FEATURES and PAE
*/
#define XEN_ELFNOTE_INFO 0
@@ -137,10 +156,30 @@
/*
* Whether or not the guest supports cooperative suspend cancellation.
+ * This is a numeric value.
+ *
+ * Default is 0
*/
#define XEN_ELFNOTE_SUSPEND_CANCEL 14
/*
+ * The (non-default) location the initial phys-to-machine map should be
+ * placed at by the hypervisor (Dom0) or the tools (DomU).
+ * The kernel must be prepared for this mapping to be established using
+ * large pages, despite such otherwise not being available to guests.
+ * The kernel must also be able to handle the page table pages used for
+ * this mapping not being accessible through the initial mapping.
+ * (Only x86-64 supports this at present.)
+ */
+#define XEN_ELFNOTE_INIT_P2M 15
+
+/*
+ * Whether or not the guest can deal with being passed an initrd not
+ * mapped through its initial page tables.
+ */
+#define XEN_ELFNOTE_MOD_START_PFN 16
+
+/*
* The features supported by this kernel (numeric).
*
* Other than XEN_ELFNOTE_FEATURES on pre-4.2 Xen, this note allows a
@@ -153,6 +192,11 @@
*/
#define XEN_ELFNOTE_SUPPORTED_FEATURES 17
+/*
+ * The number of the highest elfnote defined.
+ */
+#define XEN_ELFNOTE_MAX XEN_ELFNOTE_SUPPORTED_FEATURES
+
#endif /* __XEN_PUBLIC_ELFNOTE_H__ */
/*
diff --git a/include/xen/interface/io/vscsiif.h b/include/xen/interface/io/vscsiif.h
new file mode 100644
index 000000000000..d07d7aca8d1c
--- /dev/null
+++ b/include/xen/interface/io/vscsiif.h
@@ -0,0 +1,229 @@
+/******************************************************************************
+ * vscsiif.h
+ *
+ * Based on the blkif.h code.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright(c) FUJITSU Limited 2008.
+ */
+
+#ifndef __XEN__PUBLIC_IO_SCSI_H__
+#define __XEN__PUBLIC_IO_SCSI_H__
+
+#include "ring.h"
+#include "../grant_table.h"
+
+/*
+ * Feature and Parameter Negotiation
+ * =================================
+ * The two halves of a Xen pvSCSI driver utilize nodes within the XenStore to
+ * communicate capabilities and to negotiate operating parameters. This
+ * section enumerates these nodes which reside in the respective front and
+ * backend portions of the XenStore, following the XenBus convention.
+ *
+ * Any specified default value is in effect if the corresponding XenBus node
+ * is not present in the XenStore.
+ *
+ * XenStore nodes in sections marked "PRIVATE" are solely for use by the
+ * driver side whose XenBus tree contains them.
+ *
+ *****************************************************************************
+ * Backend XenBus Nodes
+ *****************************************************************************
+ *
+ *------------------ Backend Device Identification (PRIVATE) ------------------
+ *
+ * p-devname
+ * Values: string
+ *
+ * A free-form string used to identify the physical device (e.g. a disk name).
+ *
+ * p-dev
+ * Values: string
+ *
+ * A string specifying the backend device: either a 4-tuple "h:c:t:l"
+ * (host, controller, target, lun, all integers), or a WWN (e.g.
+ * "naa.60014054ac780582").
+ *
+ * v-dev
+ * Values: string
+ *
+ * A string specifying the frontend device in the form of a 4-tuple "h:c:t:l"
+ * (host, controller, target, lun, all integers).
+ *
+ *--------------------------------- Features ---------------------------------
+ *
+ * feature-sg-grant
+ * Values: unsigned [VSCSIIF_SG_TABLESIZE...65535]
+ * Default Value: 0
+ *
+ * Specifies the maximum number of scatter/gather elements in grant pages
+ * supported. If not set, the backend supports up to VSCSIIF_SG_TABLESIZE
+ * SG elements specified directly in the request.
+ *
+ *****************************************************************************
+ * Frontend XenBus Nodes
+ *****************************************************************************
+ *
+ *----------------------- Request Transport Parameters -----------------------
+ *
+ * event-channel
+ * Values: unsigned
+ *
+ * The identifier of the Xen event channel used to signal activity
+ * in the ring buffer.
+ *
+ * ring-ref
+ * Values: unsigned
+ *
+ * The Xen grant reference granting permission for the backend to map
+ * the sole page in a single page sized ring buffer.
+ *
+ * protocol
+ * Values: string (XEN_IO_PROTO_ABI_*)
+ * Default Value: XEN_IO_PROTO_ABI_NATIVE
+ *
+ * The machine ABI rules governing the format of all ring request and
+ * response structures.
+ */
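
As a concrete illustration of the negotiation described above, the sketch below shows how a frontend might probe the optional "feature-sg-grant" backend node, falling back to the documented default of 0 when the node is absent. It is illustrative only and not part of this patch; xenbus_scanf() and the xenbus_device fields used here are assumptions drawn from the wider xenbus API rather than from this header.

/* Illustrative sketch only: query the backend's "feature-sg-grant" node. */
static unsigned int vscsi_max_sg_grant(struct xenbus_device *dev)
{
	unsigned int max_grants = 0;

	/* Absent node => documented default of 0, i.e. no indirect SG support. */
	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg-grant",
			 "%u", &max_grants) != 1)
		max_grants = 0;

	return max_grants;
}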
+
+/* Requests from the frontend to the backend */
+
+/*
+ * Request a SCSI operation specified via a CDB in vscsiif_request.cmnd.
+ * The target is specified via channel, id and lun.
+ *
+ * The operation to be performed is specified via a CDB in cmnd[], the length
+ * of the CDB is in cmd_len. sc_data_direction specifies the direction of data
+ * (to the device, from the device, or none at all).
+ *
+ * If data is to be transferred to or from the device, the buffer(s) in
+ * guest memory are specified via one or more scsiif_request_segment
+ * descriptors, each specifying a memory page via a grant_ref_t, an offset into
+ * the page and the length of the area in that page. All scsiif_request_segment
+ * areas concatenated form the resulting data buffer used by the operation.
+ * If the number of scsiif_request_segment areas is not too large (less than
+ * or equal to VSCSIIF_SG_TABLESIZE), the areas can be specified directly in
+ * the seg[] array, and the number of valid scsiif_request_segment elements is
+ * to be set in nr_segments.
+ *
+ * If "feature-sg-grant" in the Xenstore is set it is possible to specify more
+ * than VSCSIIF_SG_TABLESIZE scsiif_request_segment elements via indirection.
+ * The maximum number of allowed scsiif_request_segment elements is the value
+ * of the "feature-sg-grant" entry from Xenstore. When using indirection the
+ * seg[] array doesn't contain specifications of the data buffers, but
+ * references to scsiif_request_segment arrays, which in turn reference the
+ * data buffers. While nr_segments holds the number of populated seg[] entries
+ * (plus the set VSCSIIF_SG_GRANT bit), the number of scsiif_request_segment
+ * elements referencing the target data buffers is calculated from the lengths
+ * of the seg[] elements (the sum of all valid seg[].length divided by the
+ * size of one scsiif_request_segment structure).
+ */
+#define VSCSIIF_ACT_SCSI_CDB 1
+
+/*
+ * Request abort of a running operation for the specified target given by
+ * channel, id, lun and the operation's rqid in ref_rqid.
+ */
+#define VSCSIIF_ACT_SCSI_ABORT 2
+
+/*
+ * Request a device reset of the specified target (channel and id).
+ */
+#define VSCSIIF_ACT_SCSI_RESET 3
+
+/*
+ * Preset scatter/gather elements for a subsequent request. Deprecated.
+ * The define is kept only to avoid reuse of the value "4" for other actions.
+ */
+#define VSCSIIF_ACT_SCSI_SG_PRESET 4
+
+/*
+ * Maximum scatter/gather segments per request.
+ *
+ * To balance fitting at least 16 "vscsiif_request" structures on one page
+ * (4096 bytes) against the number of scatter/gather elements needed, 26 was
+ * chosen as the table size.
+ *
+ * If "feature-sg-grant" is set, more scatter/gather elements can be specified
+ * by placing them in one or more (up to VSCSIIF_SG_TABLESIZE) granted pages.
+ * In this case the vscsiif_request seg elements don't contain references to
+ * the user data, but to the SG elements referencing the user data.
+ */
+#define VSCSIIF_SG_TABLESIZE 26
+
+/*
+ * Based on Linux kernel 2.6.18; still valid.
+ * Changing these values requires supporting multiple protocols via the rings,
+ * as "old clients" will blindly use these values and the resulting structure
+ * sizes.
+ */
+#define VSCSIIF_MAX_COMMAND_SIZE 16
+#define VSCSIIF_SENSE_BUFFERSIZE 96
+
+struct scsiif_request_segment {
+ grant_ref_t gref;
+ uint16_t offset;
+ uint16_t length;
+};
+
+#define VSCSIIF_SG_PER_PAGE (PAGE_SIZE / sizeof(struct scsiif_request_segment))
+
+/* Size of one request is 252 bytes */
+struct vscsiif_request {
+ uint16_t rqid; /* private guest value, echoed in resp */
+ uint8_t act; /* command between backend and frontend */
+ uint8_t cmd_len; /* valid CDB bytes */
+
+ uint8_t cmnd[VSCSIIF_MAX_COMMAND_SIZE]; /* the CDB */
+ uint16_t timeout_per_command; /* deprecated */
+ uint16_t channel, id, lun; /* (virtual) device specification */
+ uint16_t ref_rqid; /* command abort reference */
+ uint8_t sc_data_direction; /* for DMA_TO_DEVICE(1)
+ DMA_FROM_DEVICE(2)
+ DMA_NONE(3) requests */
+ uint8_t nr_segments; /* Number of pieces of scatter-gather */
+/*
+ * flag in nr_segments: SG elements via grant page
+ *
+ * If VSCSIIF_SG_GRANT is set, the low 7 bits of nr_segments specify the number
+ * of grant pages containing SG elements. Usable only if "feature-sg-grant" is set.
+ */
+#define VSCSIIF_SG_GRANT 0x80
+
+ struct scsiif_request_segment seg[VSCSIIF_SG_TABLESIZE];
+ uint32_t reserved[3];
+};
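
To make the direct/indirect distinction concrete, here is a minimal sketch of how a backend might decode nr_segments and derive the number of scsiif_request_segment elements, following the rules documented in the VSCSIIF_ACT_SCSI_CDB comment above. It is illustrative only; the helper name and the "req" parameter are local to this example.

/* Illustrative sketch only: number of SG elements described by a request. */
static unsigned int vscsi_nr_sg_elements(const struct vscsiif_request *req)
{
	unsigned int i, nr_pages, total_len = 0;

	if (!(req->nr_segments & VSCSIIF_SG_GRANT))
		return req->nr_segments;	/* direct SG descriptors in seg[] */

	/* Indirect case: each seg[] entry describes a granted page of SG elements. */
	nr_pages = req->nr_segments & ~VSCSIIF_SG_GRANT;
	for (i = 0; i < nr_pages; i++)
		total_len += req->seg[i].length;

	return total_len / sizeof(struct scsiif_request_segment);
}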
+
+/* Size of one response is 252 bytes */
+struct vscsiif_response {
+ uint16_t rqid; /* identifies request */
+ uint8_t padding;
+ uint8_t sense_len;
+ uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE];
+ int32_t rslt;
+ uint32_t residual_len; /* request bufflen minus the value
+ returned by the physical device */
+ uint32_t reserved[36];
+};
+
+DEFINE_RING_TYPES(vscsiif, struct vscsiif_request, struct vscsiif_response);
+
+#endif /*__XEN__PUBLIC_IO_SCSI_H__*/
diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h
index de082130ba4b..f68719f405af 100644
--- a/include/xen/interface/xen.h
+++ b/include/xen/interface/xen.h
@@ -3,6 +3,24 @@
*
* Guest OS interface to Xen.
*
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
* Copyright (c) 2004, K A Fraser
*/
@@ -73,13 +91,23 @@
* VIRTUAL INTERRUPTS
*
* Virtual interrupts that a guest OS may receive from Xen.
+ * In the side comments, 'V.' denotes a per-VCPU VIRQ while 'G.' denotes a
+ * global VIRQ. The former can be bound once per VCPU and cannot be re-bound.
+ * The latter can be allocated only once per guest: they must initially be
+ * allocated to VCPU0 but can subsequently be re-bound.
*/
-#define VIRQ_TIMER 0 /* Timebase update, and/or requested timeout. */
-#define VIRQ_DEBUG 1 /* Request guest to dump debug info. */
-#define VIRQ_CONSOLE 2 /* (DOM0) Bytes received on emergency console. */
-#define VIRQ_DOM_EXC 3 /* (DOM0) Exceptional event for some domain. */
-#define VIRQ_DEBUGGER 6 /* (DOM0) A domain has paused for debugging. */
-#define VIRQ_PCPU_STATE 9 /* (DOM0) PCPU state changed */
+#define VIRQ_TIMER 0 /* V. Timebase update, and/or requested timeout. */
+#define VIRQ_DEBUG 1 /* V. Request guest to dump debug info. */
+#define VIRQ_CONSOLE 2 /* G. (DOM0) Bytes received on emergency console. */
+#define VIRQ_DOM_EXC 3 /* G. (DOM0) Exceptional event for some domain. */
+#define VIRQ_TBUF 4 /* G. (DOM0) Trace buffer has records available. */
+#define VIRQ_DEBUGGER 6 /* G. (DOM0) A domain has paused for debugging. */
+#define VIRQ_XENOPROF 7 /* V. XenOprofile interrupt: new sample available */
+#define VIRQ_CON_RING 8 /* G. (DOM0) Bytes received on console */
+#define VIRQ_PCPU_STATE 9 /* G. (DOM0) PCPU state changed */
+#define VIRQ_MEM_EVENT 10 /* G. (DOM0) A memory event has occurred */
+#define VIRQ_XC_RESERVED 11 /* G. Reserved for XenClient */
+#define VIRQ_ENOMEM 12 /* G. (DOM0) Low on heap memory */
/* Architecture-specific VIRQ definitions. */
#define VIRQ_ARCH_0 16
@@ -92,24 +120,68 @@
#define VIRQ_ARCH_7 23
#define NR_VIRQS 24
+
/*
- * MMU-UPDATE REQUESTS
- *
- * HYPERVISOR_mmu_update() accepts a list of (ptr, val) pairs.
- * A foreigndom (FD) can be specified (or DOMID_SELF for none).
- * Where the FD has some effect, it is described below.
- * ptr[1:0] specifies the appropriate MMU_* command.
+ * enum neg_errnoval HYPERVISOR_mmu_update(const struct mmu_update reqs[],
+ * unsigned count, unsigned *done_out,
+ * unsigned foreigndom)
+ * @reqs is an array of mmu_update_t structures ((ptr, val) pairs).
+ * @count is the length of the above array.
+ * @done_out is an output parameter indicating the number of completed operations
+ * @foreigndom[15:0]: FD, the expected owner of data pages referenced in this
+ * hypercall invocation. Can be DOMID_SELF.
+ * @foreigndom[31:16]: PFD, the expected owner of pagetable pages referenced
+ * in this hypercall invocation. The value of this field
+ * (x) encodes the PFD as follows:
+ * x == 0 => PFD == DOMID_SELF
+ * x != 0 => PFD == x - 1
*
+ * Sub-commands: ptr[1:0] specifies the appropriate MMU_* command.
+ * -------------
* ptr[1:0] == MMU_NORMAL_PT_UPDATE:
- * Updates an entry in a page table. If updating an L1 table, and the new
- * table entry is valid/present, the mapped frame must belong to the FD, if
- * an FD has been specified. If attempting to map an I/O page then the
- * caller assumes the privilege of the FD.
+ * Updates an entry in a page table belonging to PFD. If updating an L1 table,
+ * and the new table entry is valid/present, the mapped frame must belong to
+ * FD. If attempting to map an I/O page then the caller assumes the privilege
+ * of the FD.
* FD == DOMID_IO: Permit /only/ I/O mappings, at the priv level of the caller.
* FD == DOMID_XEN: Map restricted areas of Xen's heap space.
* ptr[:2] -- Machine address of the page-table entry to modify.
* val -- Value to write.
*
+ * There are also certain implicit requirements when using this hypercall. The
+ * pages that make up a pagetable must be mapped read-only in the guest.
+ * This prevents uncontrolled guest updates to the pagetable. Xen strictly
+ * enforces this, and will disallow any pagetable update which would end up
+ * mapping a pagetable page RW, and will disallow using any writable page as a
+ * pagetable. In practice it means that when constructing a page table for a
+ * process, thread, etc., we MUST be very diligent in following these rules:
+ * 1). Start with top-level page (PGD or in Xen language: L4). Fill out
+ * the entries.
+ * 2). Keep on going, filling out the upper (PUD or L3), and middle (PMD
+ * or L2).
+ * 3). Start filling out the PTE table (L1) with the PTE entries. Once
+ * done, make sure to set each of those entries to RO (so writeable bit
+ * is unset). Once that has been completed, set the PMD (L2) for this
+ * PTE table as RO.
+ * 4). When completed with all of the PMD (L2) entries, and all of them have
+ * been set to RO, make sure to set the PUD (L3) RO as well. Do the same
+ * operation on PGD (L4) pagetable entries that have a PUD (L3) entry.
+ * 5). Now before you can use those pages (so setting the cr3), you MUST also
+ * pin them so that the hypervisor can verify the entries. This is done
+ * via the HYPERVISOR_mmuext_op(MMUEXT_PIN_L4_TABLE, guest physical frame
+ * number of the PGD (L4)). At this point the HYPERVISOR_mmuext_op(
+ * MMUEXT_NEW_BASEPTR, guest physical frame number of the PGD (L4)) can be
+ * issued.
+ * For 32-bit guests, the L4 is not used (as there are fewer pagetable
+ * levels), so use the L3 instead.
+ * At this point the pagetables can be modified using the MMU_NORMAL_PT_UPDATE
+ * hypercall. If so desired, the OS can also try to write to a PTE
+ * and be trapped by the hypervisor (as the PTE entry is RO).
+ *
+ * To deallocate the pages, the operations are the reverse of the steps
+ * mentioned above. The argument is MMUEXT_UNPIN_TABLE for all levels and the
+ * pagetable MUST not be in use (meaning that the cr3 is not set to it).
+ *
* ptr[1:0] == MMU_MACHPHYS_UPDATE:
* Updates an entry in the machine->pseudo-physical mapping table.
* ptr[:2] -- Machine address within the frame whose mapping to modify.
@@ -119,6 +191,72 @@
* ptr[1:0] == MMU_PT_UPDATE_PRESERVE_AD:
* As MMU_NORMAL_PT_UPDATE above, but A/D bits currently in the PTE are ORed
* with those in @val.
+ *
+ * @val is usually the machine frame number along with some attributes.
+ * The attributes by default follow the architecture-defined bits. That is,
+ * if this is an x86-64 machine using the four-level page table layout, the layout
+ * of val is:
+ * - 63 if set means No execute (NX)
+ * - 46-13 the machine frame number
+ * - 12 available for guest
+ * - 11 available for guest
+ * - 10 available for guest
+ * - 9 available for guest
+ * - 8 global
+ * - 7 PAT (PSE is disabled, must use hypercall to make 4MB or 2MB pages)
+ * - 6 dirty
+ * - 5 accessed
+ * - 4 page cache disabled (PCD)
+ * - 3 page write through
+ * - 2 userspace accessible
+ * - 1 writeable
+ * - 0 present
+ *
+ * The one bit that does not fit with the default layout is PAGE_PSE
+ * (also called PAGE_PAT). The MMUEXT_[UN]MARK_SUPER commands of
+ * HYPERVISOR_mmuext_op serve as the mechanism to make a pagetable entry map a
+ * 4MB (or 2MB) superpage instead of using the PAGE_PSE bit.
+ *
+ * The reason that the PAGE_PSE (bit 7) is not utilized is that Xen uses it
+ * as the Page Attribute Table (PAT) bit - for details please
+ * refer to Intel SDM 10.12. The PAT allows setting the caching attributes of
+ * pages instead of using MTRRs.
+ *
+ * The PAT MSR is as follows (it is a 64-bit value, each entry is 8 bits):
+ * PAT4 PAT0
+ * +-----+-----+----+----+----+-----+----+----+
+ * | UC | UC- | WC | WB | UC | UC- | WC | WB | <= Linux
+ * +-----+-----+----+----+----+-----+----+----+
+ * | UC | UC- | WT | WB | UC | UC- | WT | WB | <= BIOS (default when machine boots)
+ * +-----+-----+----+----+----+-----+----+----+
+ * | rsv | rsv | WP | WC | UC | UC- | WT | WB | <= Xen
+ * +-----+-----+----+----+----+-----+----+----+
+ *
+ * The index into this table is formed from bit 7, bit 4, and bit 3 of the
+ * val entry:
+ *
+ * PAT/PSE (bit 7) ... PCD (bit 4) ... PWT (bit 3).
+ *
+ * If all three bits are off, then PAT0 is used. If only bit 3 is on,
+ * then PAT1 is used; if bits 3 and 4 are on, then PAT2; and so on.
+ *
+ * As you can see, Linux's WC entry (PAT1) corresponds to PAT4 under Xen. This
+ * means that a guest following Linux's PAT setup which wants to set Write
+ * Combined on pages MUST use the PAT4 entry, i.e. have bit 7 (PAGE_PAT)
+ * set. For example, Linux only uses PAT0, PAT1, and PAT2 for
+ * caching as:
+ *
+ * WB = none (so PAT0)
+ * WC = PWT (bit 3 on)
+ * UC = PWT | PCD (bits 3 and 4 are on).
+ *
+ * To make it work with Xen, the guest needs to translate the WC bit as
+ * follows:
+ *
+ * PWT (so bit 3 on) --> PAT (so bit 7 is on) and clear bit 3
+ *
+ * And to translate back:
+ *
+ * PAT (bit 7 on) --> PWT (bit 3 on) and clear bit 7.
*/
#define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */
#define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */
@@ -127,7 +265,12 @@
/*
* MMU EXTENDED OPERATIONS
*
- * HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures.
+ * enum neg_errnoval HYPERVISOR_mmuext_op(mmuext_op_t uops[],
+ * unsigned int count,
+ * unsigned int *pdone,
+ * unsigned int foreigndom)
+ */
+/* HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures.
* A foreigndom (FD) can be specified (or DOMID_SELF for none).
* Where the FD has some effect, it is described below.
*
@@ -164,9 +307,23 @@
* cmd: MMUEXT_FLUSH_CACHE
* No additional arguments. Writes back and flushes cache contents.
*
+ * cmd: MMUEXT_FLUSH_CACHE_GLOBAL
+ * No additional arguments. Writes back and flushes cache contents
+ * on all CPUs in the system.
+ *
* cmd: MMUEXT_SET_LDT
* linear_addr: Linear address of LDT base (NB. must be page-aligned).
* nr_ents: Number of entries in LDT.
+ *
+ * cmd: MMUEXT_CLEAR_PAGE
+ * mfn: Machine frame number to be cleared.
+ *
+ * cmd: MMUEXT_COPY_PAGE
+ * mfn: Machine frame number of the destination page.
+ * src_mfn: Machine frame number of the source page.
+ *
+ * cmd: MMUEXT_[UN]MARK_SUPER
+ * mfn: Machine frame number of head of superpage to be [un]marked.
*/
#define MMUEXT_PIN_L1_TABLE 0
#define MMUEXT_PIN_L2_TABLE 1
@@ -183,12 +340,18 @@
#define MMUEXT_FLUSH_CACHE 12
#define MMUEXT_SET_LDT 13
#define MMUEXT_NEW_USER_BASEPTR 15
+#define MMUEXT_CLEAR_PAGE 16
+#define MMUEXT_COPY_PAGE 17
+#define MMUEXT_FLUSH_CACHE_GLOBAL 18
+#define MMUEXT_MARK_SUPER 19
+#define MMUEXT_UNMARK_SUPER 20
#ifndef __ASSEMBLY__
struct mmuext_op {
unsigned int cmd;
union {
- /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR */
+ /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR
+ * CLEAR_PAGE, COPY_PAGE, [UN]MARK_SUPER */
xen_pfn_t mfn;
/* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */
unsigned long linear_addr;
@@ -198,6 +361,8 @@ struct mmuext_op {
unsigned int nr_ents;
/* TLB_FLUSH_MULTI, INVLPG_MULTI */
void *vcpumask;
+ /* COPY_PAGE */
+ xen_pfn_t src_mfn;
} arg2;
};
DEFINE_GUEST_HANDLE_STRUCT(mmuext_op);
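
For the newly documented MMUEXT_COPY_PAGE command, a caller fills in arg1.mfn with the destination frame and arg2.src_mfn with the source frame. The sketch below is illustrative only; the HYPERVISOR_mmuext_op() wrapper is assumed from the architecture's hypercall headers rather than defined here.

/* Illustrative sketch only: issue a single MMUEXT_COPY_PAGE operation. */
static int ex_copy_page(xen_pfn_t dst_mfn, xen_pfn_t src_mfn)
{
	struct mmuext_op op = {
		.cmd          = MMUEXT_COPY_PAGE,
		.arg1.mfn     = dst_mfn,	/* destination frame */
		.arg2.src_mfn = src_mfn,	/* source frame */
	};

	/* One op, no "done" count wanted, act on our own pages. */
	return HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF);
}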
@@ -225,10 +390,23 @@ DEFINE_GUEST_HANDLE_STRUCT(mmuext_op);
*/
#define VMASST_CMD_enable 0
#define VMASST_CMD_disable 1
+
+/* x86/32 guests: simulate full 4GB segment limits. */
#define VMASST_TYPE_4gb_segments 0
+
+/* x86/32 guests: trap (vector 15) whenever the above vmassist is used. */
#define VMASST_TYPE_4gb_segments_notify 1
+
+/*
+ * x86 guests: support writes to bottom-level PTEs.
+ * NB1. Page-directory entries cannot be written.
+ * NB2. Guest must continue to remove all writable mappings of PTEs.
+ */
#define VMASST_TYPE_writable_pagetables 2
+
+/* x86/PAE guests: support PDPTs above 4GB. */
#define VMASST_TYPE_pae_extended_cr3 3
+
#define MAX_VMASST_TYPE 3
#ifndef __ASSEMBLY__
@@ -260,6 +438,15 @@ typedef uint16_t domid_t;
*/
#define DOMID_XEN (0x7FF2U)
+/* DOMID_COW is used as the owner of sharable pages */
+#define DOMID_COW (0x7FF3U)
+
+/* DOMID_INVALID is used to identify pages with unknown owner. */
+#define DOMID_INVALID (0x7FF4U)
+
+/* Idle domain. */
+#define DOMID_IDLE (0x7FFFU)
+
/*
* Send an array of these to HYPERVISOR_mmu_update().
* NB. The fields are natural pointer/address size for this architecture.
@@ -272,7 +459,9 @@ DEFINE_GUEST_HANDLE_STRUCT(mmu_update);
/*
* Send an array of these to HYPERVISOR_multicall().
- * NB. The fields are natural register size for this architecture.
+ * NB. The fields are logically the natural register size for this
+ * architecture. In cases where xen_ulong_t is larger than this,
+ * any unused bits in the upper portion must be zero.
*/
struct multicall_entry {
xen_ulong_t op;
@@ -442,8 +631,48 @@ struct start_info {
unsigned long mod_start; /* VIRTUAL address of pre-loaded module. */
unsigned long mod_len; /* Size (bytes) of pre-loaded module. */
int8_t cmd_line[MAX_GUEST_CMDLINE];
+ /* The pfn range here covers both page table and p->m table frames. */
+ unsigned long first_p2m_pfn;/* 1st pfn forming initial P->M table. */
+ unsigned long nr_p2m_frames;/* # of pfns forming initial P->M table. */
};
+/* These flags are passed in the 'flags' field of start_info_t. */
+#define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */
+#define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */
+#define SIF_MULTIBOOT_MOD (1<<2) /* Is mod_start a multiboot module? */
+#define SIF_MOD_START_PFN (1<<3) /* Is mod_start a PFN? */
+#define SIF_PM_MASK (0xFF<<8) /* reserve 1 byte for xen-pm options */
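
The new SIF_MOD_START_PFN flag changes how start_info->mod_start is to be interpreted. The sketch below is an illustrative guess at how a PV guest might distinguish the two cases; the helper is hypothetical and not part of this patch.

/* Illustrative sketch only: decode mod_start according to SIF_MOD_START_PFN. */
static void ex_locate_module(const struct start_info *si,
			     unsigned long *vaddr, unsigned long *pfn)
{
	if (si->flags & SIF_MOD_START_PFN) {
		/* mod_start carries a PFN; the guest must map it itself. */
		*pfn   = si->mod_start;
		*vaddr = 0;
	} else {
		/* mod_start is already a virtual address in the initial mapping. */
		*vaddr = si->mod_start;
		*pfn   = 0;
	}
}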
+
+/*
+ * A multiboot module is a package containing modules very similar to a
+ * multiboot module array. The only differences are:
+ * - the array of module descriptors is by convention simply at the beginning
+ * of the multiboot module,
+ * - addresses in the module descriptors are based on the beginning of the
+ * multiboot module,
+ * - the number of modules is determined by a termination descriptor that has
+ * mod_start == 0.
+ *
+ * This permits both building it statically and referencing it in a
+ * configuration file, and lets the PV guest easily rebase the addresses to
+ * virtual addresses while at the same time counting the number of modules.
+ */
+struct xen_multiboot_mod_list {
+ /* Address of first byte of the module */
+ uint32_t mod_start;
+ /* Address of last byte of the module (inclusive) */
+ uint32_t mod_end;
+ /* Address of zero-terminated command line */
+ uint32_t cmdline;
+ /* Unused, must be zero */
+ uint32_t pad;
+};
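
Because the module list is terminated by a descriptor with mod_start == 0, a guest can size it with a simple scan, as in this illustrative sketch (the helper name is local to this example):

/* Illustrative sketch only: count entries in a multiboot module list. */
static unsigned int ex_count_multiboot_mods(const struct xen_multiboot_mod_list *list)
{
	unsigned int n = 0;

	while (list[n].mod_start != 0)
		n++;

	return n;
}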
+/*
+ * The console structure in start_info.console.dom0
+ *
+ * This structure includes a variety of information required to
+ * have a working VGA/VESA console.
+ */
struct dom0_vga_console_info {
uint8_t video_type;
#define XEN_VGATYPE_TEXT_MODE_3 0x03
@@ -484,11 +713,6 @@ struct dom0_vga_console_info {
} u;
};
-/* These flags are passed in the 'flags' field of start_info_t. */
-#define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */
-#define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */
-#define SIF_PM_MASK (0xFF<<8) /* reserve 1 byte for xen-pm options */
-
typedef uint64_t cpumap_t;
typedef uint8_t xen_domain_handle_t[16];
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index 0324c6d340c1..b78f21caf55a 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -86,6 +86,7 @@ struct xenbus_device_id
/* A xenbus driver. */
struct xenbus_driver {
+ const char *name; /* defaults to ids[0].devicetype */
const struct xenbus_device_id *ids;
int (*probe)(struct xenbus_device *dev,
const struct xenbus_device_id *id);
@@ -100,20 +101,22 @@ struct xenbus_driver {
int (*is_ready)(struct xenbus_device *dev);
};
-#define DEFINE_XENBUS_DRIVER(var, drvname, methods...) \
-struct xenbus_driver var ## _driver = { \
- .driver.name = drvname + 0 ?: var ## _ids->devicetype, \
- .driver.owner = THIS_MODULE, \
- .ids = var ## _ids, ## methods \
-}
-
static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv)
{
return container_of(drv, struct xenbus_driver, driver);
}
-int __must_check xenbus_register_frontend(struct xenbus_driver *);
-int __must_check xenbus_register_backend(struct xenbus_driver *);
+int __must_check __xenbus_register_frontend(struct xenbus_driver *drv,
+ struct module *owner,
+ const char *mod_name);
+int __must_check __xenbus_register_backend(struct xenbus_driver *drv,
+ struct module *owner,
+ const char *mod_name);
+
+#define xenbus_register_frontend(drv) \
+ __xenbus_register_frontend(drv, THIS_MODULE, KBUILD_MODNAME);
+#define xenbus_register_backend(drv) \
+ __xenbus_register_backend(drv, THIS_MODULE, KBUILD_MODNAME);
void xenbus_unregister_driver(struct xenbus_driver *drv);
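
With the reworked registration interface, a driver no longer fills in .driver.name or .driver.owner by hand: the xenbus_register_frontend()/_backend() wrappers now pass THIS_MODULE and KBUILD_MODNAME, and the name defaults to ids[0].devicetype. The sketch below shows the resulting boilerplate for a hypothetical "demofront" frontend; it is illustrative only and not part of this patch.

/* Illustrative sketch only: registering a hypothetical frontend driver. */
static const struct xenbus_device_id demofront_ids[] = {
	{ "demo" },
	{ "" }
};

static int demofront_probe(struct xenbus_device *dev,
			   const struct xenbus_device_id *id)
{
	return 0;	/* a real driver would set up rings, event channels, ... */
}

static struct xenbus_driver demofront_driver = {
	.ids   = demofront_ids,
	.probe = demofront_probe,
};

static int __init demofront_init(void)
{
	return xenbus_register_frontend(&demofront_driver);
}
module_init(demofront_init);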
diff --git a/init/Kconfig b/init/Kconfig
index d2355812ba48..1c505e090422 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -507,6 +507,16 @@ config PREEMPT_RCU
This option enables preemptible-RCU code that is common between
TREE_PREEMPT_RCU and, in the old days, TINY_PREEMPT_RCU.
+config TASKS_RCU
+ bool "Task-based RCU implementation using voluntary context switch"
+ default n
+ help
+ This option enables a task-based RCU implementation that uses
+ only voluntary context switch (not preemption!), idle, and
+ user-mode execution as quiescent states.
+
+ If unsure, say N.
+
config RCU_STALL_COMMON
def_bool ( TREE_RCU || TREE_PREEMPT_RCU || RCU_TRACE )
help
@@ -737,7 +747,7 @@ choice
config RCU_NOCB_CPU_NONE
bool "No build_forced no-CBs CPUs"
- depends on RCU_NOCB_CPU && !NO_HZ_FULL_ALL
+ depends on RCU_NOCB_CPU
help
This option does not force any of the CPUs to be no-CBs CPUs.
Only CPUs designated by the rcu_nocbs= boot parameter will be
@@ -751,7 +761,7 @@ config RCU_NOCB_CPU_NONE
config RCU_NOCB_CPU_ZERO
bool "CPU 0 is a build_forced no-CBs CPU"
- depends on RCU_NOCB_CPU && !NO_HZ_FULL_ALL
+ depends on RCU_NOCB_CPU
help
This option forces CPU 0 to be a no-CBs CPU, so that its RCU
callbacks are invoked by a per-CPU kthread whose name begins
diff --git a/init/main.c b/init/main.c
index 8af2f1abfe38..89ec862da2d4 100644
--- a/init/main.c
+++ b/init/main.c
@@ -508,6 +508,7 @@ asmlinkage __visible void __init start_kernel(void)
* lockdep hash:
*/
lockdep_init();
+ set_task_stack_end_magic(&init_task);
smp_setup_processor_id();
debug_objects_early_init();
@@ -583,6 +584,7 @@ asmlinkage __visible void __init start_kernel(void)
early_irq_init();
init_IRQ();
tick_init();
+ rcu_init_nohz();
init_timers();
hrtimers_init();
softirq_init();
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 81e2a388a0f6..356450f09c1f 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -79,6 +79,8 @@ static struct {
/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
+#define cpuhp_lock_acquire_tryread() \
+ lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map)
@@ -91,10 +93,22 @@ void get_online_cpus(void)
mutex_lock(&cpu_hotplug.lock);
cpu_hotplug.refcount++;
mutex_unlock(&cpu_hotplug.lock);
-
}
EXPORT_SYMBOL_GPL(get_online_cpus);
+bool try_get_online_cpus(void)
+{
+ if (cpu_hotplug.active_writer == current)
+ return true;
+ if (!mutex_trylock(&cpu_hotplug.lock))
+ return false;
+ cpuhp_lock_acquire_tryread();
+ cpu_hotplug.refcount++;
+ mutex_unlock(&cpu_hotplug.lock);
+ return true;
+}
+EXPORT_SYMBOL_GPL(try_get_online_cpus);
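
The intended calling pattern for the new try_get_online_cpus() mirrors get_online_cpus()/put_online_cpus(), except that the caller must be prepared for failure instead of blocking. An illustrative sketch (helper name local to this example):

/* Illustrative sketch only: non-blocking read-side CPU hotplug protection. */
static void ex_scan_online_cpus(void)
{
	int cpu;

	if (!try_get_online_cpus())
		return;			/* hotplug in progress, try again later */

	for_each_online_cpu(cpu)
		pr_debug("cpu %d is online\n", cpu);

	put_online_cpus();
}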
+
void put_online_cpus(void)
{
if (cpu_hotplug.active_writer == current)
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index 97b67df8fbfe..f2a88de87a49 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -52,7 +52,7 @@ static void release_callchain_buffers(void)
struct callchain_cpus_entries *entries;
entries = callchain_cpus_entries;
- rcu_assign_pointer(callchain_cpus_entries, NULL);
+ RCU_INIT_POINTER(callchain_cpus_entries, NULL);
call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index b1c663593f5c..094df8c0742d 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -47,6 +47,8 @@
#include <asm/irq_regs.h>
+static struct workqueue_struct *perf_wq;
+
struct remote_function_call {
struct task_struct *p;
int (*func)(void *info);
@@ -120,6 +122,13 @@ static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
return data.ret;
}
+#define EVENT_OWNER_KERNEL ((void *) -1)
+
+static bool is_kernel_event(struct perf_event *event)
+{
+ return event->owner == EVENT_OWNER_KERNEL;
+}
+
#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
PERF_FLAG_FD_OUTPUT |\
PERF_FLAG_PID_CGROUP |\
@@ -897,13 +906,23 @@ static void put_ctx(struct perf_event_context *ctx)
}
}
-static void unclone_ctx(struct perf_event_context *ctx)
+/*
+ * This must be done under the ctx->lock, so as to serialize against
+ * context_equiv(); therefore we cannot call put_ctx() here, since that might
+ * end up taking scheduler-related locks, and ctx->lock nests inside those.
+ */
+static __must_check struct perf_event_context *
+unclone_ctx(struct perf_event_context *ctx)
{
- if (ctx->parent_ctx) {
- put_ctx(ctx->parent_ctx);
+ struct perf_event_context *parent_ctx = ctx->parent_ctx;
+
+ lockdep_assert_held(&ctx->lock);
+
+ if (parent_ctx)
ctx->parent_ctx = NULL;
- }
ctx->generation++;
+
+ return parent_ctx;
}
static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
@@ -1370,6 +1389,45 @@ out:
perf_event__header_size(tmp);
}
+/*
+ * User event without the task.
+ */
+static bool is_orphaned_event(struct perf_event *event)
+{
+ return event && !is_kernel_event(event) && !event->owner;
+}
+
+/*
+ * Event has a parent, but the parent's task has finished and the parent
+ * stays alive only because its children hold a reference to it.
+ */
+static bool is_orphaned_child(struct perf_event *event)
+{
+ return is_orphaned_event(event->parent);
+}
+
+static void orphans_remove_work(struct work_struct *work);
+
+static void schedule_orphans_remove(struct perf_event_context *ctx)
+{
+ if (!ctx->task || ctx->orphans_remove_sched || !perf_wq)
+ return;
+
+ if (queue_delayed_work(perf_wq, &ctx->orphans_remove, 1)) {
+ get_ctx(ctx);
+ ctx->orphans_remove_sched = true;
+ }
+}
+
+static int __init perf_workqueue_init(void)
+{
+ perf_wq = create_singlethread_workqueue("perf");
+ WARN(!perf_wq, "failed to create perf workqueue\n");
+ return perf_wq ? 0 : -1;
+}
+
+core_initcall(perf_workqueue_init);
+
static inline int
event_filter_match(struct perf_event *event)
{
@@ -1419,6 +1477,9 @@ event_sched_out(struct perf_event *event,
if (event->attr.exclusive || !cpuctx->active_oncpu)
cpuctx->exclusive = 0;
+ if (is_orphaned_child(event))
+ schedule_orphans_remove(ctx);
+
perf_pmu_enable(event->pmu);
}
@@ -1726,6 +1787,9 @@ event_sched_in(struct perf_event *event,
if (event->attr.exclusive)
cpuctx->exclusive = 1;
+ if (is_orphaned_child(event))
+ schedule_orphans_remove(ctx);
+
out:
perf_pmu_enable(event->pmu);
@@ -2205,6 +2269,9 @@ static void ctx_sched_out(struct perf_event_context *ctx,
static int context_equiv(struct perf_event_context *ctx1,
struct perf_event_context *ctx2)
{
+ lockdep_assert_held(&ctx1->lock);
+ lockdep_assert_held(&ctx2->lock);
+
/* Pinning disables the swap optimization */
if (ctx1->pin_count || ctx2->pin_count)
return 0;
@@ -2326,7 +2393,7 @@ static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
next_parent = rcu_dereference(next_ctx->parent_ctx);
/* If neither context have a parent context; they cannot be clones. */
- if (!parent || !next_parent)
+ if (!parent && !next_parent)
goto unlock;
if (next_parent == ctx || next_ctx == parent || next_parent == parent) {
@@ -2938,6 +3005,7 @@ static int event_enable_on_exec(struct perf_event *event,
*/
static void perf_event_enable_on_exec(struct perf_event_context *ctx)
{
+ struct perf_event_context *clone_ctx = NULL;
struct perf_event *event;
unsigned long flags;
int enabled = 0;
@@ -2969,7 +3037,7 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
* Unclone this context if we enabled any event.
*/
if (enabled)
- unclone_ctx(ctx);
+ clone_ctx = unclone_ctx(ctx);
raw_spin_unlock(&ctx->lock);
@@ -2979,6 +3047,9 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
perf_event_context_sched_in(ctx, ctx->task);
out:
local_irq_restore(flags);
+
+ if (clone_ctx)
+ put_ctx(clone_ctx);
}
void perf_event_exec(void)
@@ -3073,6 +3144,7 @@ static void __perf_event_init_context(struct perf_event_context *ctx)
INIT_LIST_HEAD(&ctx->flexible_groups);
INIT_LIST_HEAD(&ctx->event_list);
atomic_set(&ctx->refcount, 1);
+ INIT_DELAYED_WORK(&ctx->orphans_remove, orphans_remove_work);
}
static struct perf_event_context *
@@ -3130,7 +3202,7 @@ errout:
static struct perf_event_context *
find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
{
- struct perf_event_context *ctx;
+ struct perf_event_context *ctx, *clone_ctx = NULL;
struct perf_cpu_context *cpuctx;
unsigned long flags;
int ctxn, err;
@@ -3164,9 +3236,12 @@ find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
retry:
ctx = perf_lock_task_context(task, ctxn, &flags);
if (ctx) {
- unclone_ctx(ctx);
+ clone_ctx = unclone_ctx(ctx);
++ctx->pin_count;
raw_spin_unlock_irqrestore(&ctx->lock, flags);
+
+ if (clone_ctx)
+ put_ctx(clone_ctx);
} else {
ctx = alloc_perf_context(pmu, task);
err = -ENOMEM;
@@ -3318,16 +3393,12 @@ static void free_event(struct perf_event *event)
}
/*
- * Called when the last reference to the file is gone.
+ * Remove user event from the owner task.
*/
-static void put_event(struct perf_event *event)
+static void perf_remove_from_owner(struct perf_event *event)
{
- struct perf_event_context *ctx = event->ctx;
struct task_struct *owner;
- if (!atomic_long_dec_and_test(&event->refcount))
- return;
-
rcu_read_lock();
owner = ACCESS_ONCE(event->owner);
/*
@@ -3360,6 +3431,20 @@ static void put_event(struct perf_event *event)
mutex_unlock(&owner->perf_event_mutex);
put_task_struct(owner);
}
+}
+
+/*
+ * Called when the last reference to the file is gone.
+ */
+static void put_event(struct perf_event *event)
+{
+ struct perf_event_context *ctx = event->ctx;
+
+ if (!atomic_long_dec_and_test(&event->refcount))
+ return;
+
+ if (!is_kernel_event(event))
+ perf_remove_from_owner(event);
WARN_ON_ONCE(ctx->parent_ctx);
/*
@@ -3394,6 +3479,42 @@ static int perf_release(struct inode *inode, struct file *file)
return 0;
}
+/*
+ * Remove all orphaned events from the context.
+ */
+static void orphans_remove_work(struct work_struct *work)
+{
+ struct perf_event_context *ctx;
+ struct perf_event *event, *tmp;
+
+ ctx = container_of(work, struct perf_event_context,
+ orphans_remove.work);
+
+ mutex_lock(&ctx->mutex);
+ list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) {
+ struct perf_event *parent_event = event->parent;
+
+ if (!is_orphaned_child(event))
+ continue;
+
+ perf_remove_from_context(event, true);
+
+ mutex_lock(&parent_event->child_mutex);
+ list_del_init(&event->child_list);
+ mutex_unlock(&parent_event->child_mutex);
+
+ free_event(event);
+ put_event(parent_event);
+ }
+
+ raw_spin_lock_irq(&ctx->lock);
+ ctx->orphans_remove_sched = false;
+ raw_spin_unlock_irq(&ctx->lock);
+ mutex_unlock(&ctx->mutex);
+
+ put_ctx(ctx);
+}
+
u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
{
struct perf_event *child;
@@ -3491,6 +3612,19 @@ static int perf_event_read_one(struct perf_event *event,
return n * sizeof(u64);
}
+static bool is_event_hup(struct perf_event *event)
+{
+ bool no_children;
+
+ if (event->state != PERF_EVENT_STATE_EXIT)
+ return false;
+
+ mutex_lock(&event->child_mutex);
+ no_children = list_empty(&event->child_list);
+ mutex_unlock(&event->child_mutex);
+ return no_children;
+}
+
/*
* Read the performance event - simple non blocking version for now
*/
@@ -3532,7 +3666,12 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
{
struct perf_event *event = file->private_data;
struct ring_buffer *rb;
- unsigned int events = POLL_HUP;
+ unsigned int events = POLLHUP;
+
+ poll_wait(file, &event->waitq, wait);
+
+ if (is_event_hup(event))
+ return events;
/*
* Pin the event->rb by taking event->mmap_mutex; otherwise
@@ -3543,9 +3682,6 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
if (rb)
events = atomic_xchg(&rb->poll, 0);
mutex_unlock(&event->mmap_mutex);
-
- poll_wait(file, &event->waitq, wait);
-
return events;
}
@@ -5809,7 +5945,7 @@ static void swevent_hlist_release(struct swevent_htable *swhash)
if (!hlist)
return;
- rcu_assign_pointer(swhash->swevent_hlist, NULL);
+ RCU_INIT_POINTER(swhash->swevent_hlist, NULL);
kfree_rcu(hlist, rcu_head);
}
@@ -7392,6 +7528,9 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
goto err;
}
+ /* Mark owner so we can distinguish it from user events. */
+ event->owner = EVENT_OWNER_KERNEL;
+
account_event(event);
ctx = find_get_context(event->pmu, task, cpu);
@@ -7479,6 +7618,12 @@ static void sync_child_event(struct perf_event *child_event,
mutex_unlock(&parent_event->child_mutex);
/*
+ * Make sure the user/parent gets notified that we just
+ * lost one event.
+ */
+ perf_event_wakeup(parent_event);
+
+ /*
* Release the parent event, if this was the last
* reference to it.
*/
@@ -7512,13 +7657,16 @@ __perf_event_exit_task(struct perf_event *child_event,
if (child_event->parent) {
sync_child_event(child_event, child);
free_event(child_event);
+ } else {
+ child_event->state = PERF_EVENT_STATE_EXIT;
+ perf_event_wakeup(child_event);
}
}
static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
{
struct perf_event *child_event, *next;
- struct perf_event_context *child_ctx, *parent_ctx;
+ struct perf_event_context *child_ctx, *clone_ctx = NULL;
unsigned long flags;
if (likely(!child->perf_event_ctxp[ctxn])) {
@@ -7545,28 +7693,16 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
child->perf_event_ctxp[ctxn] = NULL;
/*
- * In order to avoid freeing: child_ctx->parent_ctx->task
- * under perf_event_context::lock, grab another reference.
- */
- parent_ctx = child_ctx->parent_ctx;
- if (parent_ctx)
- get_ctx(parent_ctx);
-
- /*
* If this context is a clone; unclone it so it can't get
* swapped to another process while we're removing all
* the events from it.
*/
- unclone_ctx(child_ctx);
+ clone_ctx = unclone_ctx(child_ctx);
update_context_time(child_ctx);
raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
- /*
- * Now that we no longer hold perf_event_context::lock, drop
- * our extra child_ctx->parent_ctx reference.
- */
- if (parent_ctx)
- put_ctx(parent_ctx);
+ if (clone_ctx)
+ put_ctx(clone_ctx);
/*
* Report the task dead after unscheduling the events so that we
@@ -7695,6 +7831,7 @@ inherit_event(struct perf_event *parent_event,
struct perf_event *group_leader,
struct perf_event_context *child_ctx)
{
+ enum perf_event_active_state parent_state = parent_event->state;
struct perf_event *child_event;
unsigned long flags;
@@ -7715,7 +7852,8 @@ inherit_event(struct perf_event *parent_event,
if (IS_ERR(child_event))
return child_event;
- if (!atomic_long_inc_not_zero(&parent_event->refcount)) {
+ if (is_orphaned_event(parent_event) ||
+ !atomic_long_inc_not_zero(&parent_event->refcount)) {
free_event(child_event);
return NULL;
}
@@ -7727,7 +7865,7 @@ inherit_event(struct perf_event *parent_event,
* not its attr.disabled bit. We hold the parent's mutex,
* so we won't race with perf_event_{en, dis}able_family.
*/
- if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
+ if (parent_state >= PERF_EVENT_STATE_INACTIVE)
child_event->state = PERF_EVENT_STATE_INACTIVE;
else
child_event->state = PERF_EVENT_STATE_OFF;
diff --git a/kernel/exit.c b/kernel/exit.c
index 32c58f7433a3..5d30019ff953 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -115,32 +115,33 @@ static void __exit_signal(struct task_struct *tsk)
if (tsk == sig->curr_target)
sig->curr_target = next_thread(tsk);
- /*
- * Accumulate here the counters for all threads but the
- * group leader as they die, so they can be added into
- * the process-wide totals when those are taken.
- * The group leader stays around as a zombie as long
- * as there are other threads. When it gets reaped,
- * the exit.c code will add its counts into these totals.
- * We won't ever get here for the group leader, since it
- * will have been the last reference on the signal_struct.
- */
- task_cputime(tsk, &utime, &stime);
- sig->utime += utime;
- sig->stime += stime;
- sig->gtime += task_gtime(tsk);
- sig->min_flt += tsk->min_flt;
- sig->maj_flt += tsk->maj_flt;
- sig->nvcsw += tsk->nvcsw;
- sig->nivcsw += tsk->nivcsw;
- sig->inblock += task_io_get_inblock(tsk);
- sig->oublock += task_io_get_oublock(tsk);
- task_io_accounting_add(&sig->ioac, &tsk->ioac);
- sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
}
+ /*
+ * Accumulate here the counters for all threads but the group leader
+ * as they die, so they can be added into the process-wide totals
+ * when those are taken. The group leader stays around as a zombie as
+ * long as there are other threads. When it gets reaped, the exit.c
+ * code will add its counts into these totals. We won't ever get here
+ * for the group leader, since it will have been the last reference on
+ * the signal_struct.
+ */
+ task_cputime(tsk, &utime, &stime);
+ write_seqlock(&sig->stats_lock);
+ sig->utime += utime;
+ sig->stime += stime;
+ sig->gtime += task_gtime(tsk);
+ sig->min_flt += tsk->min_flt;
+ sig->maj_flt += tsk->maj_flt;
+ sig->nvcsw += tsk->nvcsw;
+ sig->nivcsw += tsk->nivcsw;
+ sig->inblock += task_io_get_inblock(tsk);
+ sig->oublock += task_io_get_oublock(tsk);
+ task_io_accounting_add(&sig->ioac, &tsk->ioac);
+ sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
sig->nr_threads--;
__unhash_process(tsk, group_dead);
+ write_sequnlock(&sig->stats_lock);
/*
* Do this under ->siglock, we can race with another thread
@@ -667,6 +668,7 @@ void do_exit(long code)
{
struct task_struct *tsk = current;
int group_dead;
+ TASKS_RCU(int tasks_rcu_i);
profile_task_exit(tsk);
@@ -775,6 +777,7 @@ void do_exit(long code)
*/
flush_ptrace_hw_breakpoint(tsk);
+ TASKS_RCU(tasks_rcu_i = __srcu_read_lock(&tasks_rcu_exit_srcu));
exit_notify(tsk, group_dead);
proc_exit_connector(tsk);
#ifdef CONFIG_NUMA
@@ -814,6 +817,7 @@ void do_exit(long code)
if (tsk->nr_dirtied)
__this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
exit_rcu();
+ TASKS_RCU(__srcu_read_unlock(&tasks_rcu_exit_srcu, tasks_rcu_i));
/*
* The setting of TASK_RUNNING by try_to_wake_up() may be delayed
@@ -1043,6 +1047,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
spin_lock_irq(&p->real_parent->sighand->siglock);
psig = p->real_parent->signal;
sig = p->signal;
+ write_seqlock(&psig->stats_lock);
psig->cutime += tgutime + sig->cutime;
psig->cstime += tgstime + sig->cstime;
psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime;
@@ -1065,6 +1070,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
psig->cmaxrss = maxrss;
task_io_accounting_add(&psig->ioac, &p->ioac);
task_io_accounting_add(&psig->ioac, &sig->ioac);
+ write_sequnlock(&psig->stats_lock);
spin_unlock_irq(&p->real_parent->sighand->siglock);
}
diff --git a/kernel/fork.c b/kernel/fork.c
index 8c162d102740..9b7d746d6d62 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -294,11 +294,18 @@ int __weak arch_dup_task_struct(struct task_struct *dst,
return 0;
}
+void set_task_stack_end_magic(struct task_struct *tsk)
+{
+ unsigned long *stackend;
+
+ stackend = end_of_stack(tsk);
+ *stackend = STACK_END_MAGIC; /* for overflow detection */
+}
+
static struct task_struct *dup_task_struct(struct task_struct *orig)
{
struct task_struct *tsk;
struct thread_info *ti;
- unsigned long *stackend;
int node = tsk_fork_get_node(orig);
int err;
@@ -328,8 +335,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
setup_thread_stack(tsk, orig);
clear_user_return_notifier(tsk);
clear_tsk_need_resched(tsk);
- stackend = end_of_stack(tsk);
- *stackend = STACK_END_MAGIC; /* for overflow detection */
+ set_task_stack_end_magic(tsk);
#ifdef CONFIG_CC_STACKPROTECTOR
tsk->stack_canary = get_random_int();
@@ -1067,6 +1073,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
sig->curr_target = tsk;
init_sigpending(&sig->shared_pending);
INIT_LIST_HEAD(&sig->posix_timers);
+ seqlock_init(&sig->stats_lock);
hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
sig->real_timer.function = it_real_fn;
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index 0955b885d0dc..ec8cce259779 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -20,30 +20,20 @@
* Author: Paul E. McKenney <paulmck@us.ibm.com>
* Based on kernel/rcu/torture.c.
*/
-#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
-#include <linux/err.h>
#include <linux/spinlock.h>
+#include <linux/rwlock.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
-#include <linux/bitops.h>
-#include <linux/completion.h>
#include <linux/moduleparam.h>
-#include <linux/percpu.h>
-#include <linux/notifier.h>
-#include <linux/reboot.h>
-#include <linux/freezer.h>
-#include <linux/cpu.h>
#include <linux/delay.h>
-#include <linux/stat.h>
#include <linux/slab.h>
-#include <linux/trace_clock.h>
-#include <asm/byteorder.h>
#include <linux/torture.h>
MODULE_LICENSE("GPL");
@@ -51,6 +41,8 @@ MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");
torture_param(int, nwriters_stress, -1,
"Number of write-locking stress-test threads");
+torture_param(int, nreaders_stress, -1,
+ "Number of read-locking stress-test threads");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
"Time between CPU hotplugs (s), 0=disable");
@@ -66,30 +58,28 @@ torture_param(bool, verbose, true,
static char *torture_type = "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
- "Type of lock to torture (spin_lock, spin_lock_irq, ...)");
-
-static atomic_t n_lock_torture_errors;
+ "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");
static struct task_struct *stats_task;
static struct task_struct **writer_tasks;
+static struct task_struct **reader_tasks;
-static int nrealwriters_stress;
static bool lock_is_write_held;
+static bool lock_is_read_held;
-struct lock_writer_stress_stats {
- long n_write_lock_fail;
- long n_write_lock_acquired;
+struct lock_stress_stats {
+ long n_lock_fail;
+ long n_lock_acquired;
};
-static struct lock_writer_stress_stats *lwsa;
#if defined(MODULE)
#define LOCKTORTURE_RUNNABLE_INIT 1
#else
#define LOCKTORTURE_RUNNABLE_INIT 0
#endif
-int locktorture_runnable = LOCKTORTURE_RUNNABLE_INIT;
-module_param(locktorture_runnable, int, 0444);
-MODULE_PARM_DESC(locktorture_runnable, "Start locktorture at module init");
+int torture_runnable = LOCKTORTURE_RUNNABLE_INIT;
+module_param(torture_runnable, int, 0444);
+MODULE_PARM_DESC(torture_runnable, "Start locktorture at module init");
/* Forward reference. */
static void lock_torture_cleanup(void);
@@ -102,12 +92,25 @@ struct lock_torture_ops {
int (*writelock)(void);
void (*write_delay)(struct torture_random_state *trsp);
void (*writeunlock)(void);
+ int (*readlock)(void);
+ void (*read_delay)(struct torture_random_state *trsp);
+ void (*readunlock)(void);
unsigned long flags;
const char *name;
};
-static struct lock_torture_ops *cur_ops;
-
+struct lock_torture_cxt {
+ int nrealwriters_stress;
+ int nrealreaders_stress;
+ bool debug_lock;
+ atomic_t n_lock_torture_errors;
+ struct lock_torture_ops *cur_ops;
+ struct lock_stress_stats *lwsa; /* writer statistics */
+ struct lock_stress_stats *lrsa; /* reader statistics */
+};
+static struct lock_torture_cxt cxt = { 0, 0, false,
+ ATOMIC_INIT(0),
+ NULL, NULL};
/*
* Definitions for lock torture testing.
*/
@@ -123,10 +126,10 @@ static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
/* We want a long delay occasionally to force massive contention. */
if (!(torture_random(trsp) %
- (nrealwriters_stress * 2000 * longdelay_us)))
+ (cxt.nrealwriters_stress * 2000 * longdelay_us)))
mdelay(longdelay_us);
#ifdef CONFIG_PREEMPT
- if (!(torture_random(trsp) % (nrealwriters_stress * 20000)))
+ if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
preempt_schedule(); /* Allow test to be preempted. */
#endif
}
@@ -140,6 +143,9 @@ static struct lock_torture_ops lock_busted_ops = {
.writelock = torture_lock_busted_write_lock,
.write_delay = torture_lock_busted_write_delay,
.writeunlock = torture_lock_busted_write_unlock,
+ .readlock = NULL,
+ .read_delay = NULL,
+ .readunlock = NULL,
.name = "lock_busted"
};
@@ -160,13 +166,13 @@ static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
* we want a long delay occasionally to force massive contention.
*/
if (!(torture_random(trsp) %
- (nrealwriters_stress * 2000 * longdelay_us)))
+ (cxt.nrealwriters_stress * 2000 * longdelay_us)))
mdelay(longdelay_us);
if (!(torture_random(trsp) %
- (nrealwriters_stress * 2 * shortdelay_us)))
+ (cxt.nrealwriters_stress * 2 * shortdelay_us)))
udelay(shortdelay_us);
#ifdef CONFIG_PREEMPT
- if (!(torture_random(trsp) % (nrealwriters_stress * 20000)))
+ if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
preempt_schedule(); /* Allow test to be preempted. */
#endif
}
@@ -180,39 +186,253 @@ static struct lock_torture_ops spin_lock_ops = {
.writelock = torture_spin_lock_write_lock,
.write_delay = torture_spin_lock_write_delay,
.writeunlock = torture_spin_lock_write_unlock,
+ .readlock = NULL,
+ .read_delay = NULL,
+ .readunlock = NULL,
.name = "spin_lock"
};
static int torture_spin_lock_write_lock_irq(void)
-__acquires(torture_spinlock_irq)
+__acquires(torture_spinlock)
{
unsigned long flags;
spin_lock_irqsave(&torture_spinlock, flags);
- cur_ops->flags = flags;
+ cxt.cur_ops->flags = flags;
return 0;
}
static void torture_lock_spin_write_unlock_irq(void)
__releases(torture_spinlock)
{
- spin_unlock_irqrestore(&torture_spinlock, cur_ops->flags);
+ spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
}
static struct lock_torture_ops spin_lock_irq_ops = {
.writelock = torture_spin_lock_write_lock_irq,
.write_delay = torture_spin_lock_write_delay,
.writeunlock = torture_lock_spin_write_unlock_irq,
+ .readlock = NULL,
+ .read_delay = NULL,
+ .readunlock = NULL,
.name = "spin_lock_irq"
};
+static DEFINE_RWLOCK(torture_rwlock);
+
+static int torture_rwlock_write_lock(void) __acquires(torture_rwlock)
+{
+ write_lock(&torture_rwlock);
+ return 0;
+}
+
+static void torture_rwlock_write_delay(struct torture_random_state *trsp)
+{
+ const unsigned long shortdelay_us = 2;
+ const unsigned long longdelay_ms = 100;
+
+ /* We want a short delay mostly to emulate likely code, and
+ * we want a long delay occasionally to force massive contention.
+ */
+ if (!(torture_random(trsp) %
+ (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
+ mdelay(longdelay_ms);
+ else
+ udelay(shortdelay_us);
+}
+
+static void torture_rwlock_write_unlock(void) __releases(torture_rwlock)
+{
+ write_unlock(&torture_rwlock);
+}
+
+static int torture_rwlock_read_lock(void) __acquires(torture_rwlock)
+{
+ read_lock(&torture_rwlock);
+ return 0;
+}
+
+static void torture_rwlock_read_delay(struct torture_random_state *trsp)
+{
+ const unsigned long shortdelay_us = 10;
+ const unsigned long longdelay_ms = 100;
+
+ /* We want a short delay mostly to emulate likely code, and
+ * we want a long delay occasionally to force massive contention.
+ */
+ if (!(torture_random(trsp) %
+ (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
+ mdelay(longdelay_ms);
+ else
+ udelay(shortdelay_us);
+}
+
+static void torture_rwlock_read_unlock(void) __releases(torture_rwlock)
+{
+ read_unlock(&torture_rwlock);
+}
+
+static struct lock_torture_ops rw_lock_ops = {
+ .writelock = torture_rwlock_write_lock,
+ .write_delay = torture_rwlock_write_delay,
+ .writeunlock = torture_rwlock_write_unlock,
+ .readlock = torture_rwlock_read_lock,
+ .read_delay = torture_rwlock_read_delay,
+ .readunlock = torture_rwlock_read_unlock,
+ .name = "rw_lock"
+};
+
+static int torture_rwlock_write_lock_irq(void) __acquires(torture_rwlock)
+{
+ unsigned long flags;
+
+ write_lock_irqsave(&torture_rwlock, flags);
+ cxt.cur_ops->flags = flags;
+ return 0;
+}
+
+static void torture_rwlock_write_unlock_irq(void)
+__releases(torture_rwlock)
+{
+ write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
+}
+
+static int torture_rwlock_read_lock_irq(void) __acquires(torture_rwlock)
+{
+ unsigned long flags;
+
+ read_lock_irqsave(&torture_rwlock, flags);
+ cxt.cur_ops->flags = flags;
+ return 0;
+}
+
+static void torture_rwlock_read_unlock_irq(void)
+__releases(torture_rwlock)
+{
+ read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
+}
+
+static struct lock_torture_ops rw_lock_irq_ops = {
+ .writelock = torture_rwlock_write_lock_irq,
+ .write_delay = torture_rwlock_write_delay,
+ .writeunlock = torture_rwlock_write_unlock_irq,
+ .readlock = torture_rwlock_read_lock_irq,
+ .read_delay = torture_rwlock_read_delay,
+ .readunlock = torture_rwlock_read_unlock_irq,
+ .name = "rw_lock_irq"
+};
+
+static DEFINE_MUTEX(torture_mutex);
+
+static int torture_mutex_lock(void) __acquires(torture_mutex)
+{
+ mutex_lock(&torture_mutex);
+ return 0;
+}
+
+static void torture_mutex_delay(struct torture_random_state *trsp)
+{
+ const unsigned long longdelay_ms = 100;
+
+ /* We want a long delay occasionally to force massive contention. */
+ if (!(torture_random(trsp) %
+ (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
+ mdelay(longdelay_ms * 5);
+ else
+ mdelay(longdelay_ms / 5);
+#ifdef CONFIG_PREEMPT
+ if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
+ preempt_schedule(); /* Allow test to be preempted. */
+#endif
+}
+
+static void torture_mutex_unlock(void) __releases(torture_mutex)
+{
+ mutex_unlock(&torture_mutex);
+}
+
+static struct lock_torture_ops mutex_lock_ops = {
+ .writelock = torture_mutex_lock,
+ .write_delay = torture_mutex_delay,
+ .writeunlock = torture_mutex_unlock,
+ .readlock = NULL,
+ .read_delay = NULL,
+ .readunlock = NULL,
+ .name = "mutex_lock"
+};
+
+static DECLARE_RWSEM(torture_rwsem);
+static int torture_rwsem_down_write(void) __acquires(torture_rwsem)
+{
+ down_write(&torture_rwsem);
+ return 0;
+}
+
+static void torture_rwsem_write_delay(struct torture_random_state *trsp)
+{
+ const unsigned long longdelay_ms = 100;
+
+ /* We want a long delay occasionally to force massive contention. */
+ if (!(torture_random(trsp) %
+ (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
+ mdelay(longdelay_ms * 10);
+ else
+ mdelay(longdelay_ms / 10);
+#ifdef CONFIG_PREEMPT
+ if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
+ preempt_schedule(); /* Allow test to be preempted. */
+#endif
+}
+
+static void torture_rwsem_up_write(void) __releases(torture_rwsem)
+{
+ up_write(&torture_rwsem);
+}
+
+static int torture_rwsem_down_read(void) __acquires(torture_rwsem)
+{
+ down_read(&torture_rwsem);
+ return 0;
+}
+
+static void torture_rwsem_read_delay(struct torture_random_state *trsp)
+{
+ const unsigned long longdelay_ms = 100;
+
+ /* We want a long delay occasionally to force massive contention. */
+ if (!(torture_random(trsp) %
+ (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
+ mdelay(longdelay_ms * 2);
+ else
+ mdelay(longdelay_ms / 2);
+#ifdef CONFIG_PREEMPT
+ if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
+ preempt_schedule(); /* Allow test to be preempted. */
+#endif
+}
+
+static void torture_rwsem_up_read(void) __releases(torture_rwsem)
+{
+ up_read(&torture_rwsem);
+}
+
+static struct lock_torture_ops rwsem_lock_ops = {
+ .writelock = torture_rwsem_down_write,
+ .write_delay = torture_rwsem_write_delay,
+ .writeunlock = torture_rwsem_up_write,
+ .readlock = torture_rwsem_down_read,
+ .read_delay = torture_rwsem_read_delay,
+ .readunlock = torture_rwsem_up_read,
+ .name = "rwsem_lock"
+};
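
Adding a new lock type to the rewritten module amounts to providing another lock_torture_ops instance: exclusive primitives leave the three read hooks NULL, exactly as lock_busted, spin_lock and mutex_lock do above, while reader-capable primitives fill them in as rwlock and rwsem do. The sketch below shows the shape for a hypothetical plain-semaphore type; it is illustrative only, is not part of this patch, and would additionally need an entry in the module's table of available ops.

/* Illustrative sketch only: ops table for a write-only (exclusive) primitive. */
static DEFINE_SEMAPHORE(torture_sema);

static int torture_sema_down(void) __acquires(torture_sema)
{
	down(&torture_sema);
	return 0;
}

static void torture_sema_up(void) __releases(torture_sema)
{
	up(&torture_sema);
}

static struct lock_torture_ops sema_lock_ops = {
	.writelock	= torture_sema_down,
	.write_delay	= torture_mutex_delay,	/* reuse the mutex delay pattern */
	.writeunlock	= torture_sema_up,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "sema_lock"
};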
+
/*
* Lock torture writer kthread. Repeatedly acquires and releases
* the lock, checking for duplicate acquisitions.
*/
static int lock_torture_writer(void *arg)
{
- struct lock_writer_stress_stats *lwsp = arg;
+ struct lock_stress_stats *lwsp = arg;
static DEFINE_TORTURE_RANDOM(rand);
VERBOSE_TOROUT_STRING("lock_torture_writer task started");
@@ -221,14 +441,19 @@ static int lock_torture_writer(void *arg)
do {
if ((torture_random(&rand) & 0xfffff) == 0)
schedule_timeout_uninterruptible(1);
- cur_ops->writelock();
+
+ cxt.cur_ops->writelock();
if (WARN_ON_ONCE(lock_is_write_held))
- lwsp->n_write_lock_fail++;
+ lwsp->n_lock_fail++;
lock_is_write_held = 1;
- lwsp->n_write_lock_acquired++;
- cur_ops->write_delay(&rand);
+ if (WARN_ON_ONCE(lock_is_read_held))
+ lwsp->n_lock_fail++; /* rare, but... */
+
+ lwsp->n_lock_acquired++;
+ cxt.cur_ops->write_delay(&rand);
lock_is_write_held = 0;
- cur_ops->writeunlock();
+ cxt.cur_ops->writeunlock();
+
stutter_wait("lock_torture_writer");
} while (!torture_must_stop());
torture_kthread_stopping("lock_torture_writer");
@@ -236,32 +461,66 @@ static int lock_torture_writer(void *arg)
}
/*
+ * Lock torture reader kthread. Repeatedly acquires and releases
+ * the reader lock.
+ */
+static int lock_torture_reader(void *arg)
+{
+ struct lock_stress_stats *lrsp = arg;
+ static DEFINE_TORTURE_RANDOM(rand);
+
+ VERBOSE_TOROUT_STRING("lock_torture_reader task started");
+ set_user_nice(current, MAX_NICE);
+
+ do {
+ if ((torture_random(&rand) & 0xfffff) == 0)
+ schedule_timeout_uninterruptible(1);
+
+ cxt.cur_ops->readlock();
+ lock_is_read_held = 1;
+ if (WARN_ON_ONCE(lock_is_write_held))
+ lrsp->n_lock_fail++; /* rare, but... */
+
+ lrsp->n_lock_acquired++;
+ cxt.cur_ops->read_delay(&rand);
+ lock_is_read_held = 0;
+ cxt.cur_ops->readunlock();
+
+ stutter_wait("lock_torture_reader");
+ } while (!torture_must_stop());
+ torture_kthread_stopping("lock_torture_reader");
+ return 0;
+}
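
The reader loop relies on the same cross-check as the writer: each side sets a "held" flag while it owns the lock and complains if it ever observes the other side's flag, which a correct reader-writer lock makes impossible. A self-contained pthread sketch of that invariant check (illustrative, not the kernel code):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static int write_held;          /* only touched under the write lock */
static long n_fail;             /* only touched by the reader */

static void *writer(void *arg)
{
        (void)arg;
        for (int i = 0; i < 100000; i++) {
                pthread_rwlock_wrlock(&lock);
                write_held = 1;
                write_held = 0;
                pthread_rwlock_unlock(&lock);
        }
        return NULL;
}

static void *reader(void *arg)
{
        (void)arg;
        for (int i = 0; i < 100000; i++) {
                pthread_rwlock_rdlock(&lock);
                if (write_held)         /* must never be seen under the read lock */
                        n_fail++;
                pthread_rwlock_unlock(&lock);
        }
        return NULL;
}

int main(void)
{
        pthread_t w, r;

        pthread_create(&w, NULL, writer, NULL);
        pthread_create(&r, NULL, reader, NULL);
        pthread_join(w, NULL);
        pthread_join(r, NULL);
        printf("reader-observed failures: %ld\n", n_fail);
        return 0;
}
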
+
+/*
* Create an lock-torture-statistics message in the specified buffer.
*/
-static void lock_torture_printk(char *page)
+static void __torture_print_stats(char *page,
+ struct lock_stress_stats *statp, bool write)
{
bool fail = 0;
- int i;
+ int i, n_stress;
long max = 0;
- long min = lwsa[0].n_write_lock_acquired;
+ long min = statp[0].n_lock_acquired;
long long sum = 0;
- for (i = 0; i < nrealwriters_stress; i++) {
- if (lwsa[i].n_write_lock_fail)
+ n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
+ for (i = 0; i < n_stress; i++) {
+ if (statp[i].n_lock_fail)
fail = true;
- sum += lwsa[i].n_write_lock_acquired;
- if (max < lwsa[i].n_write_lock_fail)
- max = lwsa[i].n_write_lock_fail;
- if (min > lwsa[i].n_write_lock_fail)
- min = lwsa[i].n_write_lock_fail;
+ sum += statp[i].n_lock_acquired;
+ if (max < statp[i].n_lock_acquired)
+ max = statp[i].n_lock_acquired;
+ if (min > statp[i].n_lock_acquired)
+ min = statp[i].n_lock_acquired;
}
- page += sprintf(page, "%s%s ", torture_type, TORTURE_FLAG);
page += sprintf(page,
- "Writes: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n",
+ "%s: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n",
+ write ? "Writes" : "Reads ",
sum, max, min, max / 2 > min ? "???" : "",
fail, fail ? "!!!" : "");
if (fail)
- atomic_inc(&n_lock_torture_errors);
+ atomic_inc(&cxt.n_lock_torture_errors);
}
/*
@@ -274,18 +533,35 @@ static void lock_torture_printk(char *page)
*/
static void lock_torture_stats_print(void)
{
- int size = nrealwriters_stress * 200 + 8192;
+ int size = cxt.nrealwriters_stress * 200 + 8192;
char *buf;
+ if (cxt.cur_ops->readlock)
+ size += cxt.nrealreaders_stress * 200 + 8192;
+
buf = kmalloc(size, GFP_KERNEL);
if (!buf) {
pr_err("lock_torture_stats_print: Out of memory, need: %d",
size);
return;
}
- lock_torture_printk(buf);
+
+ __torture_print_stats(buf, cxt.lwsa, true);
pr_alert("%s", buf);
kfree(buf);
+
+ if (cxt.cur_ops->readlock) {
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf) {
+ pr_err("lock_torture_stats_print: Out of memory, need: %d",
+ size);
+ return;
+ }
+
+ __torture_print_stats(buf, cxt.lrsa, false);
+ pr_alert("%s", buf);
+ kfree(buf);
+ }
}
/*
@@ -312,9 +588,10 @@ lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
const char *tag)
{
pr_alert("%s" TORTURE_FLAG
- "--- %s: nwriters_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
- torture_type, tag, nrealwriters_stress, stat_interval, verbose,
- shuffle_interval, stutter, shutdown_secs,
+ "--- %s%s: nwriters_stress=%d nreaders_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
+ torture_type, tag, cxt.debug_lock ? " [debug]": "",
+ cxt.nrealwriters_stress, cxt.nrealreaders_stress, stat_interval,
+ verbose, shuffle_interval, stutter, shutdown_secs,
onoff_interval, onoff_holdoff);
}
@@ -322,46 +599,59 @@ static void lock_torture_cleanup(void)
{
int i;
- if (torture_cleanup())
+ if (torture_cleanup_begin())
return;
if (writer_tasks) {
- for (i = 0; i < nrealwriters_stress; i++)
+ for (i = 0; i < cxt.nrealwriters_stress; i++)
torture_stop_kthread(lock_torture_writer,
writer_tasks[i]);
kfree(writer_tasks);
writer_tasks = NULL;
}
+ if (reader_tasks) {
+ for (i = 0; i < cxt.nrealreaders_stress; i++)
+ torture_stop_kthread(lock_torture_reader,
+ reader_tasks[i]);
+ kfree(reader_tasks);
+ reader_tasks = NULL;
+ }
+
torture_stop_kthread(lock_torture_stats, stats_task);
lock_torture_stats_print(); /* -After- the stats thread is stopped! */
- if (atomic_read(&n_lock_torture_errors))
- lock_torture_print_module_parms(cur_ops,
+ if (atomic_read(&cxt.n_lock_torture_errors))
+ lock_torture_print_module_parms(cxt.cur_ops,
"End of test: FAILURE");
else if (torture_onoff_failures())
- lock_torture_print_module_parms(cur_ops,
+ lock_torture_print_module_parms(cxt.cur_ops,
"End of test: LOCK_HOTPLUG");
else
- lock_torture_print_module_parms(cur_ops,
+ lock_torture_print_module_parms(cxt.cur_ops,
"End of test: SUCCESS");
+ torture_cleanup_end();
}
static int __init lock_torture_init(void)
{
- int i;
+ int i, j;
int firsterr = 0;
static struct lock_torture_ops *torture_ops[] = {
- &lock_busted_ops, &spin_lock_ops, &spin_lock_irq_ops,
+ &lock_busted_ops,
+ &spin_lock_ops, &spin_lock_irq_ops,
+ &rw_lock_ops, &rw_lock_irq_ops,
+ &mutex_lock_ops,
+ &rwsem_lock_ops,
};
- if (!torture_init_begin(torture_type, verbose, &locktorture_runnable))
+ if (!torture_init_begin(torture_type, verbose, &torture_runnable))
return -EBUSY;
/* Process args and tell the world that the torturer is on the job. */
for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
- cur_ops = torture_ops[i];
- if (strcmp(torture_type, cur_ops->name) == 0)
+ cxt.cur_ops = torture_ops[i];
+ if (strcmp(torture_type, cxt.cur_ops->name) == 0)
break;
}
if (i == ARRAY_SIZE(torture_ops)) {
@@ -374,31 +664,69 @@ static int __init lock_torture_init(void)
torture_init_end();
return -EINVAL;
}
- if (cur_ops->init)
- cur_ops->init(); /* no "goto unwind" prior to this point!!! */
+ if (cxt.cur_ops->init)
+ cxt.cur_ops->init(); /* no "goto unwind" prior to this point!!! */
if (nwriters_stress >= 0)
- nrealwriters_stress = nwriters_stress;
+ cxt.nrealwriters_stress = nwriters_stress;
else
- nrealwriters_stress = 2 * num_online_cpus();
- lock_torture_print_module_parms(cur_ops, "Start of test");
+ cxt.nrealwriters_stress = 2 * num_online_cpus();
+
+#ifdef CONFIG_DEBUG_MUTEXES
+ if (strncmp(torture_type, "mutex", 5) == 0)
+ cxt.debug_lock = true;
+#endif
+#ifdef CONFIG_DEBUG_SPINLOCK
+ if ((strncmp(torture_type, "spin", 4) == 0) ||
+ (strncmp(torture_type, "rw_lock", 7) == 0))
+ cxt.debug_lock = true;
+#endif
/* Initialize the statistics so that each run gets its own numbers. */
lock_is_write_held = 0;
- lwsa = kmalloc(sizeof(*lwsa) * nrealwriters_stress, GFP_KERNEL);
- if (lwsa == NULL) {
- VERBOSE_TOROUT_STRING("lwsa: Out of memory");
+ cxt.lwsa = kmalloc(sizeof(*cxt.lwsa) * cxt.nrealwriters_stress, GFP_KERNEL);
+ if (cxt.lwsa == NULL) {
+ VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
firsterr = -ENOMEM;
goto unwind;
}
- for (i = 0; i < nrealwriters_stress; i++) {
- lwsa[i].n_write_lock_fail = 0;
- lwsa[i].n_write_lock_acquired = 0;
+ for (i = 0; i < cxt.nrealwriters_stress; i++) {
+ cxt.lwsa[i].n_lock_fail = 0;
+ cxt.lwsa[i].n_lock_acquired = 0;
}
- /* Start up the kthreads. */
+ if (cxt.cur_ops->readlock) {
+ if (nreaders_stress >= 0)
+ cxt.nrealreaders_stress = nreaders_stress;
+ else {
+ /*
+ * By default, distribute readers and writers evenly, while
+ * still running the same total number of threads as the
+ * writer-only locks do by default.
+ */
+ if (nwriters_stress < 0) /* user doesn't care */
+ cxt.nrealwriters_stress = num_online_cpus();
+ cxt.nrealreaders_stress = cxt.nrealwriters_stress;
+ }
+
+ lock_is_read_held = 0;
+ cxt.lrsa = kmalloc(sizeof(*cxt.lrsa) * cxt.nrealreaders_stress, GFP_KERNEL);
+ if (cxt.lrsa == NULL) {
+ VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
+ firsterr = -ENOMEM;
+ kfree(cxt.lwsa);
+ goto unwind;
+ }
+
+ for (i = 0; i < cxt.nrealreaders_stress; i++) {
+ cxt.lrsa[i].n_lock_fail = 0;
+ cxt.lrsa[i].n_lock_acquired = 0;
+ }
+ }
+ lock_torture_print_module_parms(cxt.cur_ops, "Start of test");
+ /* Prepare torture context. */
if (onoff_interval > 0) {
firsterr = torture_onoff_init(onoff_holdoff * HZ,
onoff_interval * HZ);
@@ -422,18 +750,51 @@ static int __init lock_torture_init(void)
goto unwind;
}
- writer_tasks = kzalloc(nrealwriters_stress * sizeof(writer_tasks[0]),
+ writer_tasks = kzalloc(cxt.nrealwriters_stress * sizeof(writer_tasks[0]),
GFP_KERNEL);
if (writer_tasks == NULL) {
VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory");
firsterr = -ENOMEM;
goto unwind;
}
- for (i = 0; i < nrealwriters_stress; i++) {
- firsterr = torture_create_kthread(lock_torture_writer, &lwsa[i],
+
+ if (cxt.cur_ops->readlock) {
+ reader_tasks = kzalloc(cxt.nrealreaders_stress * sizeof(reader_tasks[0]),
+ GFP_KERNEL);
+ if (reader_tasks == NULL) {
+ VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory");
+ firsterr = -ENOMEM;
+ goto unwind;
+ }
+ }
+
+ /*
+ * Create the kthreads and start torturing (oh, those poor little locks).
+ *
+ * TODO: Note that we interleave writers with readers, giving writers a
+ * slight advantage by creating their kthreads first. This could be
+ * changed for very specific needs, or the policy could even be made
+ * user-selectable, if ever wanted.
+ */
+ for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
+ j < cxt.nrealreaders_stress; i++, j++) {
+ if (i >= cxt.nrealwriters_stress)
+ goto create_reader;
+
+ /* Create writer. */
+ firsterr = torture_create_kthread(lock_torture_writer, &cxt.lwsa[i],
writer_tasks[i]);
if (firsterr)
goto unwind;
+
+ create_reader:
+ if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
+ continue;
+ /* Create reader. */
+ firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
+ reader_tasks[j]);
+ if (firsterr)
+ goto unwind;
}
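
The creation loop above walks two indices at once so that writer and reader kthreads come up interleaved, with the writer of each pair created first; whichever side runs out of slots simply stops being created. Reduced to its control flow (hypothetical counts, printf() in place of kthread creation):

#include <stdio.h>

int main(void)
{
        int nwriters = 3, nreaders = 5;

        for (int i = 0, j = 0; i < nwriters || j < nreaders; i++, j++) {
                if (i < nwriters)
                        printf("create writer %d\n", i);
                if (j < nreaders)
                        printf("create reader %d\n", j);
        }
        return 0;
}
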
if (stat_interval > 0) {
firsterr = torture_create_kthread(lock_torture_stats, NULL,
diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
index 23e89c5930e9..4d60986fcbee 100644
--- a/kernel/locking/mcs_spinlock.h
+++ b/kernel/locking/mcs_spinlock.h
@@ -56,9 +56,6 @@ do { \
* If the lock has already been acquired, then this will proceed to spin
* on this node->locked until the previous lock holder sets the node->locked
* in mcs_spin_unlock().
- *
- * We don't inline mcs_spin_lock() so that perf can correctly account for the
- * time spent in this lock function.
*/
static inline
void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index ae712b25e492..dadbf88c22c4 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -15,7 +15,7 @@
* by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
* and Sven Dietrich.
*
- * Also see Documentation/mutex-design.txt.
+ * Also see Documentation/locking/mutex-design.txt.
*/
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
@@ -106,6 +106,92 @@ void __sched mutex_lock(struct mutex *lock)
EXPORT_SYMBOL(mutex_lock);
#endif
+static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
+ struct ww_acquire_ctx *ww_ctx)
+{
+#ifdef CONFIG_DEBUG_MUTEXES
+ /*
+ * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
+ * but released with a normal mutex_unlock in this call.
+ *
+ * This should never happen, always use ww_mutex_unlock.
+ */
+ DEBUG_LOCKS_WARN_ON(ww->ctx);
+
+ /*
+ * Not quite done after calling ww_acquire_done() ?
+ */
+ DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
+
+ if (ww_ctx->contending_lock) {
+ /*
+ * After -EDEADLK you tried to
+ * acquire a different ww_mutex? Bad!
+ */
+ DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
+
+ /*
+ * You called ww_mutex_lock after receiving -EDEADLK,
+ * but 'forgot' to unlock everything else first?
+ */
+ DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
+ ww_ctx->contending_lock = NULL;
+ }
+
+ /*
+ * Naughty, using a different class will lead to undefined behavior!
+ */
+ DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
+#endif
+ ww_ctx->acquired++;
+}
+
+/*
+ * After acquiring the lock via the fastpath, or when we lost out in the
+ * contested slowpath, set ctx and wake up any waiters so they can recheck.
+ *
+ * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
+ * as the fastpath and opportunistic spinning are disabled in that case.
+ */
+static __always_inline void
+ww_mutex_set_context_fastpath(struct ww_mutex *lock,
+ struct ww_acquire_ctx *ctx)
+{
+ unsigned long flags;
+ struct mutex_waiter *cur;
+
+ ww_mutex_lock_acquired(lock, ctx);
+
+ lock->ctx = ctx;
+
+ /*
+ * The lock->ctx update should be visible on all cores before
+ * the atomic read is done, otherwise contended waiters might be
+ * missed. Contended waiters will either see ww_ctx == NULL
+ * and keep spinning, or they will acquire wait_lock, add
+ * themselves to the wait list and sleep.
+ */
+ smp_mb(); /* ^^^ */
+
+ /*
+ * Check if lock is contended, if not there is nobody to wake up
+ */
+ if (likely(atomic_read(&lock->base.count) == 0))
+ return;
+
+ /*
+ * Uh oh, we raced in fastpath, wake up everyone in this case,
+ * so they can see the new lock->ctx.
+ */
+ spin_lock_mutex(&lock->base.wait_lock, flags);
+ list_for_each_entry(cur, &lock->base.wait_list, list) {
+ debug_mutex_wake_waiter(&lock->base, cur);
+ wake_up_process(cur->task);
+ }
+ spin_unlock_mutex(&lock->base.wait_lock, flags);
+}
+
+
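
The smp_mb() in ww_mutex_set_context_fastpath() orders the lock->ctx store against the later read of the mutex count, so a waiter that queued itself just before that read cannot be missed. A hedged C11-atomics sketch of that publish-then-check shape (invented names; not the kernel implementation):

#include <stdatomic.h>
#include <stdbool.h>

struct demo_lock {
        _Atomic(void *) ctx;
        atomic_int nwaiters;
};

/* Publish ctx, then check for waiters; the fence keeps the check from
 * being ordered before the publish (the role smp_mb() plays above). */
bool publish_ctx_and_check(struct demo_lock *l, void *ctx)
{
        atomic_store_explicit(&l->ctx, ctx, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);
        return atomic_load_explicit(&l->nwaiters, memory_order_relaxed) != 0;
}
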
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
* In order to avoid a stampede of mutex spinners from acquiring the mutex
@@ -180,6 +266,129 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
*/
return retval;
}
+
+/*
+ * Atomically try to take the lock when it is available
+ */
+static inline bool mutex_try_to_acquire(struct mutex *lock)
+{
+ return !mutex_is_locked(lock) &&
+ (atomic_cmpxchg(&lock->count, 1, 0) == 1);
+}
+
+/*
+ * Optimistic spinning.
+ *
+ * We try to spin for acquisition when we find that the lock owner
+ * is currently running on a (different) CPU and while we don't
+ * need to reschedule. The rationale is that if the lock owner is
+ * running, it is likely to release the lock soon.
+ *
+ * Since this needs the lock owner, and this mutex implementation
+ * doesn't track the owner atomically in the lock field, we need to
+ * track it non-atomically.
+ *
+ * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
+ * to serialize everything.
+ *
+ * The mutex spinners are queued up using MCS lock so that only one
+ * spinner can compete for the mutex. However, if mutex spinning isn't
+ * going to happen, there is no point in going through the lock/unlock
+ * overhead.
+ *
+ * Returns true when the lock was taken, otherwise false, indicating
+ * that we need to jump to the slowpath and sleep.
+ */
+static bool mutex_optimistic_spin(struct mutex *lock,
+ struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
+{
+ struct task_struct *task = current;
+
+ if (!mutex_can_spin_on_owner(lock))
+ goto done;
+
+ if (!osq_lock(&lock->osq))
+ goto done;
+
+ while (true) {
+ struct task_struct *owner;
+
+ if (use_ww_ctx && ww_ctx->acquired > 0) {
+ struct ww_mutex *ww;
+
+ ww = container_of(lock, struct ww_mutex, base);
+ /*
+ * If ww->ctx is set the contents are undefined; only by
+ * acquiring wait_lock is there a guarantee that they are
+ * valid when read.
+ *
+ * As such, when deadlock detection needs to be
+ * performed the optimistic spinning cannot be done.
+ */
+ if (ACCESS_ONCE(ww->ctx))
+ break;
+ }
+
+ /*
+ * If there's an owner, wait for it to either
+ * release the lock or go to sleep.
+ */
+ owner = ACCESS_ONCE(lock->owner);
+ if (owner && !mutex_spin_on_owner(lock, owner))
+ break;
+
+ /* Try to acquire the mutex if it is unlocked. */
+ if (mutex_try_to_acquire(lock)) {
+ lock_acquired(&lock->dep_map, ip);
+
+ if (use_ww_ctx) {
+ struct ww_mutex *ww;
+ ww = container_of(lock, struct ww_mutex, base);
+
+ ww_mutex_set_context_fastpath(ww, ww_ctx);
+ }
+
+ mutex_set_owner(lock);
+ osq_unlock(&lock->osq);
+ return true;
+ }
+
+ /*
+ * When there's no owner, we might have preempted between the
+ * owner acquiring the lock and setting the owner field. If
+ * we're an RT task, that will live-lock because we won't let
+ * the owner complete.
+ */
+ if (!owner && (need_resched() || rt_task(task)))
+ break;
+
+ /*
+ * The cpu_relax() call is a compiler barrier which forces
+ * everything in this loop to be re-loaded. We don't need
+ * memory barriers as we'll eventually observe the right
+ * values at the cost of a few extra spins.
+ */
+ cpu_relax_lowlatency();
+ }
+
+ osq_unlock(&lock->osq);
+done:
+ /*
+ * If we fell out of the spin path because of need_resched(),
+ * reschedule now, before we try-lock the mutex. This avoids getting
+ * scheduled out right after we obtained the mutex.
+ */
+ if (need_resched())
+ schedule_preempt_disabled();
+
+ return false;
+}
+#else
+static bool mutex_optimistic_spin(struct mutex *lock,
+ struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
+{
+ return false;
+}
#endif
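
mutex_try_to_acquire() above is the classic "test, then test-and-set": read the lock word cheaply and only attempt the cmpxchg when it can possibly succeed, which keeps failing CPUs from bouncing the cache line. A runnable C11 sketch of a bounded optimistic spin built on that idea (illustrative only; the kernel additionally spins only while the lock owner is running):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* 1 == unlocked, 0 == locked, mirroring the mutex count convention above. */
static atomic_int count = 1;

static bool try_to_acquire(void)
{
        int expected = 1;

        /* Cheap load first; only issue the CAS when it can actually succeed. */
        return atomic_load(&count) == 1 &&
               atomic_compare_exchange_strong(&count, &expected, 0);
}

static bool optimistic_spin(int max_spins)
{
        for (int i = 0; i < max_spins; i++) {
                if (try_to_acquire())
                        return true;    /* got the lock without sleeping */
                /* the kernel also gives up once the owner stops running */
        }
        return false;                   /* caller falls back to the slowpath */
}

int main(void)
{
        printf("acquired: %d\n", optimistic_spin(100));
        atomic_store(&count, 1);        /* release */
        return 0;
}
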
__visible __used noinline
@@ -277,91 +486,6 @@ __mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
return 0;
}
-static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
- struct ww_acquire_ctx *ww_ctx)
-{
-#ifdef CONFIG_DEBUG_MUTEXES
- /*
- * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
- * but released with a normal mutex_unlock in this call.
- *
- * This should never happen, always use ww_mutex_unlock.
- */
- DEBUG_LOCKS_WARN_ON(ww->ctx);
-
- /*
- * Not quite done after calling ww_acquire_done() ?
- */
- DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
-
- if (ww_ctx->contending_lock) {
- /*
- * After -EDEADLK you tried to
- * acquire a different ww_mutex? Bad!
- */
- DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
-
- /*
- * You called ww_mutex_lock after receiving -EDEADLK,
- * but 'forgot' to unlock everything else first?
- */
- DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
- ww_ctx->contending_lock = NULL;
- }
-
- /*
- * Naughty, using a different class will lead to undefined behavior!
- */
- DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
-#endif
- ww_ctx->acquired++;
-}
-
-/*
- * after acquiring lock with fastpath or when we lost out in contested
- * slowpath, set ctx and wake up any waiters so they can recheck.
- *
- * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
- * as the fastpath and opportunistic spinning are disabled in that case.
- */
-static __always_inline void
-ww_mutex_set_context_fastpath(struct ww_mutex *lock,
- struct ww_acquire_ctx *ctx)
-{
- unsigned long flags;
- struct mutex_waiter *cur;
-
- ww_mutex_lock_acquired(lock, ctx);
-
- lock->ctx = ctx;
-
- /*
- * The lock->ctx update should be visible on all cores before
- * the atomic read is done, otherwise contended waiters might be
- * missed. The contended waiters will either see ww_ctx == NULL
- * and keep spinning, or it will acquire wait_lock, add itself
- * to waiter list and sleep.
- */
- smp_mb(); /* ^^^ */
-
- /*
- * Check if lock is contended, if not there is nobody to wake up
- */
- if (likely(atomic_read(&lock->base.count) == 0))
- return;
-
- /*
- * Uh oh, we raced in fastpath, wake up everyone in this case,
- * so they can see the new lock->ctx.
- */
- spin_lock_mutex(&lock->base.wait_lock, flags);
- list_for_each_entry(cur, &lock->base.wait_list, list) {
- debug_mutex_wake_waiter(&lock->base, cur);
- wake_up_process(cur->task);
- }
- spin_unlock_mutex(&lock->base.wait_lock, flags);
-}
-
/*
* Lock a mutex (possibly interruptible), slowpath:
*/
@@ -378,104 +502,12 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
preempt_disable();
mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
-#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
- /*
- * Optimistic spinning.
- *
- * We try to spin for acquisition when we find that the lock owner
- * is currently running on a (different) CPU and while we don't
- * need to reschedule. The rationale is that if the lock owner is
- * running, it is likely to release the lock soon.
- *
- * Since this needs the lock owner, and this mutex implementation
- * doesn't track the owner atomically in the lock field, we need to
- * track it non-atomically.
- *
- * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
- * to serialize everything.
- *
- * The mutex spinners are queued up using MCS lock so that only one
- * spinner can compete for the mutex. However, if mutex spinning isn't
- * going to happen, there is no point in going through the lock/unlock
- * overhead.
- */
- if (!mutex_can_spin_on_owner(lock))
- goto slowpath;
-
- if (!osq_lock(&lock->osq))
- goto slowpath;
-
- for (;;) {
- struct task_struct *owner;
-
- if (use_ww_ctx && ww_ctx->acquired > 0) {
- struct ww_mutex *ww;
-
- ww = container_of(lock, struct ww_mutex, base);
- /*
- * If ww->ctx is set the contents are undefined, only
- * by acquiring wait_lock there is a guarantee that
- * they are not invalid when reading.
- *
- * As such, when deadlock detection needs to be
- * performed the optimistic spinning cannot be done.
- */
- if (ACCESS_ONCE(ww->ctx))
- break;
- }
-
- /*
- * If there's an owner, wait for it to either
- * release the lock or go to sleep.
- */
- owner = ACCESS_ONCE(lock->owner);
- if (owner && !mutex_spin_on_owner(lock, owner))
- break;
-
- /* Try to acquire the mutex if it is unlocked. */
- if (!mutex_is_locked(lock) &&
- (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
- lock_acquired(&lock->dep_map, ip);
- if (use_ww_ctx) {
- struct ww_mutex *ww;
- ww = container_of(lock, struct ww_mutex, base);
-
- ww_mutex_set_context_fastpath(ww, ww_ctx);
- }
-
- mutex_set_owner(lock);
- osq_unlock(&lock->osq);
- preempt_enable();
- return 0;
- }
-
- /*
- * When there's no owner, we might have preempted between the
- * owner acquiring the lock and setting the owner field. If
- * we're an RT task that will live-lock because we won't let
- * the owner complete.
- */
- if (!owner && (need_resched() || rt_task(task)))
- break;
-
- /*
- * The cpu_relax() call is a compiler barrier which forces
- * everything in this loop to be re-loaded. We don't need
- * memory barriers as we'll eventually observe the right
- * values at the cost of a few extra spins.
- */
- cpu_relax_lowlatency();
+ if (mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx)) {
+ /* got the lock, yay! */
+ preempt_enable();
+ return 0;
}
- osq_unlock(&lock->osq);
-slowpath:
- /*
- * If we fell out of the spin path because of need_resched(),
- * reschedule now, before we try-lock the mutex. This avoids getting
- * scheduled out right after we obtained the mutex.
- */
- if (need_resched())
- schedule_preempt_disabled();
-#endif
+
spin_lock_mutex(&lock->wait_lock, flags);
/*
@@ -679,15 +711,21 @@ EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
* Release the lock, slowpath:
*/
static inline void
-__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
+__mutex_unlock_common_slowpath(struct mutex *lock, int nested)
{
- struct mutex *lock = container_of(lock_count, struct mutex, count);
unsigned long flags;
/*
- * some architectures leave the lock unlocked in the fastpath failure
+ * As a performance measure, release the lock before doing the
+ * wakeup-related duties that follow. This allows other tasks to acquire
+ * the lock sooner, while this unlock call still handles its cleanups.
+ * This can be done as we do not enforce strict equivalence between the
+ * mutex counter and wait_list.
+ *
+ *
+ * Some architectures leave the lock unlocked in the fastpath failure
* case, others need to leave it locked. In the latter case we have to
- * unlock it here
+ * unlock it here - as the lock counter is currently 0 or negative.
*/
if (__mutex_slowpath_needs_to_unlock())
atomic_set(&lock->count, 1);
@@ -716,7 +754,9 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
__visible void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
- __mutex_unlock_common_slowpath(lock_count, 1);
+ struct mutex *lock = container_of(lock_count, struct mutex, count);
+
+ __mutex_unlock_common_slowpath(lock, 1);
}
#ifndef CONFIG_DEBUG_LOCK_ALLOC
diff --git a/kernel/locking/mutex.h b/kernel/locking/mutex.h
index 4115fbf83b12..5cda397607f2 100644
--- a/kernel/locking/mutex.h
+++ b/kernel/locking/mutex.h
@@ -16,7 +16,7 @@
#define mutex_remove_waiter(lock, waiter, ti) \
__list_del((waiter)->list.prev, (waiter)->list.next)
-#ifdef CONFIG_SMP
+#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
static inline void mutex_set_owner(struct mutex *lock)
{
lock->owner = current;
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index a0ea2a141b3b..7c98873a3077 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -8,7 +8,7 @@
* Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
* Copyright (C) 2006 Esben Nielsen
*
- * See Documentation/rt-mutex-design.txt for details.
+ * See Documentation/locking/rt-mutex-design.txt for details.
*/
#include <linux/spinlock.h>
#include <linux/export.h>
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index d6203faf2eb1..7628c3fc37ca 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -246,19 +246,22 @@ struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
return sem;
}
+EXPORT_SYMBOL(rwsem_down_read_failed);
static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
{
- if (!(count & RWSEM_ACTIVE_MASK)) {
- /* try acquiring the write lock */
- if (sem->count == RWSEM_WAITING_BIAS &&
- cmpxchg(&sem->count, RWSEM_WAITING_BIAS,
- RWSEM_ACTIVE_WRITE_BIAS) == RWSEM_WAITING_BIAS) {
- if (!list_is_singular(&sem->wait_list))
- rwsem_atomic_update(RWSEM_WAITING_BIAS, sem);
- return true;
- }
+ /*
+ * Try acquiring the write lock. Check count first in order
+ * to avoid unnecessary (and expensive) cmpxchg() operations.
+ */
+ if (count == RWSEM_WAITING_BIAS &&
+ cmpxchg(&sem->count, RWSEM_WAITING_BIAS,
+ RWSEM_ACTIVE_WRITE_BIAS) == RWSEM_WAITING_BIAS) {
+ if (!list_is_singular(&sem->wait_list))
+ rwsem_atomic_update(RWSEM_WAITING_BIAS, sem);
+ return true;
}
+
return false;
}
@@ -465,6 +468,7 @@ struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
return sem;
}
+EXPORT_SYMBOL(rwsem_down_write_failed);
/*
* handle waking up a waiter on the semaphore
@@ -485,6 +489,7 @@ struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
return sem;
}
+EXPORT_SYMBOL(rwsem_wake);
/*
* downgrade a write lock into a read lock
@@ -506,8 +511,4 @@ struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
return sem;
}
-
-EXPORT_SYMBOL(rwsem_down_read_failed);
-EXPORT_SYMBOL(rwsem_down_write_failed);
-EXPORT_SYMBOL(rwsem_wake);
EXPORT_SYMBOL(rwsem_downgrade_wake);
diff --git a/kernel/locking/semaphore.c b/kernel/locking/semaphore.c
index 6815171a4fff..b8120abe594b 100644
--- a/kernel/locking/semaphore.c
+++ b/kernel/locking/semaphore.c
@@ -36,7 +36,7 @@
static noinline void __down(struct semaphore *sem);
static noinline int __down_interruptible(struct semaphore *sem);
static noinline int __down_killable(struct semaphore *sem);
-static noinline int __down_timeout(struct semaphore *sem, long jiffies);
+static noinline int __down_timeout(struct semaphore *sem, long timeout);
static noinline void __up(struct semaphore *sem);
/**
@@ -145,14 +145,14 @@ EXPORT_SYMBOL(down_trylock);
/**
* down_timeout - acquire the semaphore within a specified time
* @sem: the semaphore to be acquired
- * @jiffies: how long to wait before failing
+ * @timeout: how long to wait before failing
*
* Attempts to acquire the semaphore. If no more tasks are allowed to
* acquire the semaphore, calling this function will put the task to sleep.
* If the semaphore is not released within the specified number of jiffies,
* this function returns -ETIME. It returns 0 if the semaphore was acquired.
*/
-int down_timeout(struct semaphore *sem, long jiffies)
+int down_timeout(struct semaphore *sem, long timeout)
{
unsigned long flags;
int result = 0;
@@ -161,7 +161,7 @@ int down_timeout(struct semaphore *sem, long jiffies)
if (likely(sem->count > 0))
sem->count--;
else
- result = __down_timeout(sem, jiffies);
+ result = __down_timeout(sem, timeout);
raw_spin_unlock_irqrestore(&sem->lock, flags);
return result;
@@ -248,9 +248,9 @@ static noinline int __sched __down_killable(struct semaphore *sem)
return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
}
-static noinline int __sched __down_timeout(struct semaphore *sem, long jiffies)
+static noinline int __sched __down_timeout(struct semaphore *sem, long timeout)
{
- return __down_common(sem, TASK_UNINTERRUPTIBLE, jiffies);
+ return __down_common(sem, TASK_UNINTERRUPTIBLE, timeout);
}
static noinline void __sched __up(struct semaphore *sem)
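
The down_timeout() cleanup above only renames a misleading parameter, but the operation itself, acquiring a counting semaphore or giving up after a timeout, has a direct userspace counterpart in sem_timedwait(), which expects an absolute deadline. A hedged POSIX sketch, including the relative-to-absolute conversion (illustrative; the kernel variant returns -ETIME instead):

#include <errno.h>
#include <semaphore.h>
#include <stdio.h>
#include <time.h>

/* Acquire 'sem' or give up after timeout_ms milliseconds. */
static int down_timeout_ms(sem_t *sem, long timeout_ms)
{
        struct timespec ts;

        clock_gettime(CLOCK_REALTIME, &ts);     /* sem_timedwait() wants an absolute deadline */
        ts.tv_sec  += timeout_ms / 1000;
        ts.tv_nsec += (timeout_ms % 1000) * 1000000L;
        if (ts.tv_nsec >= 1000000000L) {
                ts.tv_sec++;
                ts.tv_nsec -= 1000000000L;
        }
        if (sem_timedwait(sem, &ts) == 0)
                return 0;
        return -errno;                          /* -ETIMEDOUT on timeout */
}

int main(void)
{
        sem_t sem;

        sem_init(&sem, 0, 0);                   /* count 0: the wait below must time out */
        printf("result: %d\n", down_timeout_ms(&sem, 100));
        sem_destroy(&sem);
        return 0;
}
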
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 1ce770687ea8..7a6e69441f75 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -519,14 +519,13 @@ struct devkmsg_user {
char buf[8192];
};
-static ssize_t devkmsg_writev(struct kiocb *iocb, const struct iovec *iv,
- unsigned long count, loff_t pos)
+static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from)
{
char *buf, *line;
int i;
int level = default_message_loglevel;
int facility = 1; /* LOG_USER */
- size_t len = iov_length(iv, count);
+ size_t len = iocb->ki_nbytes;
ssize_t ret = len;
if (len > LOG_LINE_MAX)
@@ -535,13 +534,10 @@ static ssize_t devkmsg_writev(struct kiocb *iocb, const struct iovec *iv,
if (buf == NULL)
return -ENOMEM;
- line = buf;
- for (i = 0; i < count; i++) {
- if (copy_from_user(line, iv[i].iov_base, iv[i].iov_len)) {
- ret = -EFAULT;
- goto out;
- }
- line += iv[i].iov_len;
+ buf[len] = '\0';
+ if (copy_from_iter(buf, len, from) != len) {
+ kfree(buf);
+ return -EFAULT;
}
/*
@@ -567,10 +563,8 @@ static ssize_t devkmsg_writev(struct kiocb *iocb, const struct iovec *iv,
line = endp;
}
}
- line[len] = '\0';
printk_emit(facility, level, NULL, 0, "%s", line);
-out:
kfree(buf);
return ret;
}
@@ -802,7 +796,7 @@ static int devkmsg_release(struct inode *inode, struct file *file)
const struct file_operations kmsg_fops = {
.open = devkmsg_open,
.read = devkmsg_read,
- .aio_write = devkmsg_writev,
+ .write_iter = devkmsg_write,
.llseek = devkmsg_llseek,
.poll = devkmsg_poll,
.release = devkmsg_release,
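
The devkmsg conversion above replaces a hand-rolled loop over an iovec with a single copy_from_iter() call. For readers unfamiliar with the old shape, this userspace sketch gathers a scatter/gather list into one flat buffer, which is essentially what the removed loop did segment by segment (purely illustrative):

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

/* Copy every iovec segment, in order, into one contiguous buffer. */
static size_t gather_iov(char *dst, const struct iovec *iov, int iovcnt)
{
        size_t off = 0;

        for (int i = 0; i < iovcnt; i++) {
                memcpy(dst + off, iov[i].iov_base, iov[i].iov_len);
                off += iov[i].iov_len;
        }
        return off;
}

int main(void)
{
        char a[] = "hello, ", b[] = "world";
        struct iovec iov[2] = {
                { .iov_base = a, .iov_len = sizeof(a) - 1 },
                { .iov_base = b, .iov_len = sizeof(b) - 1 },
        };
        char buf[32];

        buf[gather_iov(buf, iov, 2)] = '\0';
        printf("%s\n", buf);
        return 0;
}
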
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 948a7693748e..240fa9094f83 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -49,11 +49,19 @@
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
+#include <linux/vmalloc.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
+torture_param(int, cbflood_inter_holdoff, HZ,
+ "Holdoff between floods (jiffies)");
+torture_param(int, cbflood_intra_holdoff, 1,
+ "Holdoff between bursts (jiffies)");
+torture_param(int, cbflood_n_burst, 3, "# bursts in flood, zero to disable");
+torture_param(int, cbflood_n_per_burst, 20000,
+ "# callbacks per burst in flood");
torture_param(int, fqs_duration, 0,
"Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
@@ -96,10 +104,12 @@ module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, ...)");
static int nrealreaders;
+static int ncbflooders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
+static struct task_struct **cbflood_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
@@ -138,6 +148,7 @@ static long n_rcu_torture_boosts;
static long n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes;
+static atomic_long_t n_cbfloods;
static struct list_head rcu_torture_removed;
static int rcu_torture_writer_state;
@@ -157,9 +168,9 @@ static int rcu_torture_writer_state;
#else
#define RCUTORTURE_RUNNABLE_INIT 0
#endif
-int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;
-module_param(rcutorture_runnable, int, 0444);
-MODULE_PARM_DESC(rcutorture_runnable, "Start rcutorture at boot");
+static int torture_runnable = RCUTORTURE_RUNNABLE_INIT;
+module_param(torture_runnable, int, 0444);
+MODULE_PARM_DESC(torture_runnable, "Start rcutorture at boot");
#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
@@ -182,7 +193,7 @@ static u64 notrace rcu_trace_clock_local(void)
#endif /* #else #ifdef CONFIG_RCU_TRACE */
static unsigned long boost_starttime; /* jiffies of next boost test start. */
-DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */
+static DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */
/* and boost task create/destroy. */
static atomic_t barrier_cbs_count; /* Barrier callbacks registered. */
static bool barrier_phase; /* Test phase. */
@@ -242,7 +253,7 @@ struct rcu_torture_ops {
void (*call)(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
void (*cb_barrier)(void);
void (*fqs)(void);
- void (*stats)(char *page);
+ void (*stats)(void);
int irq_capable;
int can_boost;
const char *name;
@@ -525,21 +536,21 @@ static void srcu_torture_barrier(void)
srcu_barrier(&srcu_ctl);
}
-static void srcu_torture_stats(char *page)
+static void srcu_torture_stats(void)
{
int cpu;
int idx = srcu_ctl.completed & 0x1;
- page += sprintf(page, "%s%s per-CPU(idx=%d):",
- torture_type, TORTURE_FLAG, idx);
+ pr_alert("%s%s per-CPU(idx=%d):",
+ torture_type, TORTURE_FLAG, idx);
for_each_possible_cpu(cpu) {
long c0, c1;
c0 = (long)per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx];
c1 = (long)per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx];
- page += sprintf(page, " %d(%ld,%ld)", cpu, c0, c1);
+ pr_cont(" %d(%ld,%ld)", cpu, c0, c1);
}
- sprintf(page, "\n");
+ pr_cont("\n");
}
static void srcu_torture_synchronize_expedited(void)
@@ -601,6 +612,52 @@ static struct rcu_torture_ops sched_ops = {
.name = "sched"
};
+#ifdef CONFIG_TASKS_RCU
+
+/*
+ * Definitions for RCU-tasks torture testing.
+ */
+
+static int tasks_torture_read_lock(void)
+{
+ return 0;
+}
+
+static void tasks_torture_read_unlock(int idx)
+{
+}
+
+static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
+{
+ call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
+}
+
+static struct rcu_torture_ops tasks_ops = {
+ .ttype = RCU_TASKS_FLAVOR,
+ .init = rcu_sync_torture_init,
+ .readlock = tasks_torture_read_lock,
+ .read_delay = rcu_read_delay, /* just reuse rcu's version. */
+ .readunlock = tasks_torture_read_unlock,
+ .completed = rcu_no_completed,
+ .deferred_free = rcu_tasks_torture_deferred_free,
+ .sync = synchronize_rcu_tasks,
+ .exp_sync = synchronize_rcu_tasks,
+ .call = call_rcu_tasks,
+ .cb_barrier = rcu_barrier_tasks,
+ .fqs = NULL,
+ .stats = NULL,
+ .irq_capable = 1,
+ .name = "tasks"
+};
+
+#define RCUTORTURE_TASKS_OPS &tasks_ops,
+
+#else /* #ifdef CONFIG_TASKS_RCU */
+
+#define RCUTORTURE_TASKS_OPS
+
+#endif /* #else #ifdef CONFIG_TASKS_RCU */
+
/*
* RCU torture priority-boost testing. Runs one real-time thread per
* CPU for moderate bursts, repeatedly registering RCU callbacks and
@@ -667,7 +724,7 @@ static int rcu_torture_boost(void *arg)
}
call_rcu_time = jiffies;
}
- cond_resched();
+ cond_resched_rcu_qs();
stutter_wait("rcu_torture_boost");
if (torture_must_stop())
goto checkwait;
@@ -707,6 +764,58 @@ checkwait: stutter_wait("rcu_torture_boost");
return 0;
}
+static void rcu_torture_cbflood_cb(struct rcu_head *rhp)
+{
+}
+
+/*
+ * RCU torture callback-flood kthread. Repeatedly induces bursts of calls
+ * to call_rcu() or an analogous primitive, increasing the probability
+ * that callback-overflow corner cases occur.
+ */
+static int
+rcu_torture_cbflood(void *arg)
+{
+ int err = 1;
+ int i;
+ int j;
+ struct rcu_head *rhp;
+
+ if (cbflood_n_per_burst > 0 &&
+ cbflood_inter_holdoff > 0 &&
+ cbflood_intra_holdoff > 0 &&
+ cur_ops->call &&
+ cur_ops->cb_barrier) {
+ rhp = vmalloc(sizeof(*rhp) *
+ cbflood_n_burst * cbflood_n_per_burst);
+ err = !rhp;
+ }
+ if (err) {
+ VERBOSE_TOROUT_STRING("rcu_torture_cbflood disabled: Bad args or OOM");
+ while (!torture_must_stop())
+ schedule_timeout_interruptible(HZ);
+ return 0;
+ }
+ VERBOSE_TOROUT_STRING("rcu_torture_cbflood task started");
+ do {
+ schedule_timeout_interruptible(cbflood_inter_holdoff);
+ atomic_long_inc(&n_cbfloods);
+ WARN_ON(signal_pending(current));
+ for (i = 0; i < cbflood_n_burst; i++) {
+ for (j = 0; j < cbflood_n_per_burst; j++) {
+ cur_ops->call(&rhp[i * cbflood_n_per_burst + j],
+ rcu_torture_cbflood_cb);
+ }
+ schedule_timeout_interruptible(cbflood_intra_holdoff);
+ WARN_ON(signal_pending(current));
+ }
+ cur_ops->cb_barrier();
+ stutter_wait("rcu_torture_cbflood");
+ } while (!torture_must_stop());
+ torture_kthread_stopping("rcu_torture_cbflood");
+ return 0;
+}
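
The callback flood above posts cbflood_n_burst bursts of cbflood_n_per_burst callbacks out of one flat allocation, indexing it as a two-dimensional array, with a holdoff sleep between bursts and a barrier at the end. A toy sketch of that burst structure with no-op callbacks (tiny counts, no sleeping; purely illustrative):

#include <stdio.h>
#include <stdlib.h>

struct cb {
        void (*func)(struct cb *cb);
};

static void noop_cb(struct cb *cb) { (void)cb; }

int main(void)
{
        const int n_burst = 3, n_per_burst = 4; /* tiny stand-ins for the module parameters */
        struct cb *cbs = malloc(sizeof(*cbs) * n_burst * n_per_burst);

        if (!cbs)
                return 1;
        for (int i = 0; i < n_burst; i++) {
                for (int j = 0; j < n_per_burst; j++) {
                        struct cb *cb = &cbs[i * n_per_burst + j];  /* flat 2-D indexing */

                        cb->func = noop_cb;
                        cb->func(cb);           /* "post" the callback */
                }
                printf("burst %d posted (%d callbacks)\n", i, n_per_burst);
                /* the kernel sleeps cbflood_intra_holdoff jiffies here, then barriers */
        }
        free(cbs);
        return 0;
}
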
+
/*
* RCU torture force-quiescent-state kthread. Repeatedly induces
* bursts of calls to force_quiescent_state(), increasing the probability
@@ -1019,7 +1128,7 @@ rcu_torture_reader(void *arg)
__this_cpu_inc(rcu_torture_batch[completed]);
preempt_enable();
cur_ops->readunlock(idx);
- cond_resched();
+ cond_resched_rcu_qs();
stutter_wait("rcu_torture_reader");
} while (!torture_must_stop());
if (irqreader && cur_ops->irq_capable) {
@@ -1031,10 +1140,15 @@ rcu_torture_reader(void *arg)
}
/*
- * Create an RCU-torture statistics message in the specified buffer.
+ * Print torture statistics. Caller must ensure that there is only
+ * one call to this function at a given time!!! This is normally
+ * accomplished by relying on the module system to only have one copy
+ * of the module loaded, and then by giving the rcu_torture_stats
+ * kthread full control (or the init/cleanup functions when rcu_torture_stats
+ * thread is not running).
*/
static void
-rcu_torture_printk(char *page)
+rcu_torture_stats_print(void)
{
int cpu;
int i;
@@ -1052,55 +1166,61 @@ rcu_torture_printk(char *page)
if (pipesummary[i] != 0)
break;
}
- page += sprintf(page, "%s%s ", torture_type, TORTURE_FLAG);
- page += sprintf(page,
- "rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
- rcu_torture_current,
- rcu_torture_current_version,
- list_empty(&rcu_torture_freelist),
- atomic_read(&n_rcu_torture_alloc),
- atomic_read(&n_rcu_torture_alloc_fail),
- atomic_read(&n_rcu_torture_free));
- page += sprintf(page, "rtmbe: %d rtbke: %ld rtbre: %ld ",
- atomic_read(&n_rcu_torture_mberror),
- n_rcu_torture_boost_ktrerror,
- n_rcu_torture_boost_rterror);
- page += sprintf(page, "rtbf: %ld rtb: %ld nt: %ld ",
- n_rcu_torture_boost_failure,
- n_rcu_torture_boosts,
- n_rcu_torture_timers);
- page = torture_onoff_stats(page);
- page += sprintf(page, "barrier: %ld/%ld:%ld",
- n_barrier_successes,
- n_barrier_attempts,
- n_rcu_torture_barrier_error);
- page += sprintf(page, "\n%s%s ", torture_type, TORTURE_FLAG);
+
+ pr_alert("%s%s ", torture_type, TORTURE_FLAG);
+ pr_cont("rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
+ rcu_torture_current,
+ rcu_torture_current_version,
+ list_empty(&rcu_torture_freelist),
+ atomic_read(&n_rcu_torture_alloc),
+ atomic_read(&n_rcu_torture_alloc_fail),
+ atomic_read(&n_rcu_torture_free));
+ pr_cont("rtmbe: %d rtbke: %ld rtbre: %ld ",
+ atomic_read(&n_rcu_torture_mberror),
+ n_rcu_torture_boost_ktrerror,
+ n_rcu_torture_boost_rterror);
+ pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
+ n_rcu_torture_boost_failure,
+ n_rcu_torture_boosts,
+ n_rcu_torture_timers);
+ torture_onoff_stats();
+ pr_cont("barrier: %ld/%ld:%ld ",
+ n_barrier_successes,
+ n_barrier_attempts,
+ n_rcu_torture_barrier_error);
+ pr_cont("cbflood: %ld\n", atomic_long_read(&n_cbfloods));
+
+ pr_alert("%s%s ", torture_type, TORTURE_FLAG);
if (atomic_read(&n_rcu_torture_mberror) != 0 ||
n_rcu_torture_barrier_error != 0 ||
n_rcu_torture_boost_ktrerror != 0 ||
n_rcu_torture_boost_rterror != 0 ||
n_rcu_torture_boost_failure != 0 ||
i > 1) {
- page += sprintf(page, "!!! ");
+ pr_cont("%s", "!!! ");
atomic_inc(&n_rcu_torture_error);
WARN_ON_ONCE(1);
}
- page += sprintf(page, "Reader Pipe: ");
+ pr_cont("Reader Pipe: ");
for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
- page += sprintf(page, " %ld", pipesummary[i]);
- page += sprintf(page, "\n%s%s ", torture_type, TORTURE_FLAG);
- page += sprintf(page, "Reader Batch: ");
+ pr_cont(" %ld", pipesummary[i]);
+ pr_cont("\n");
+
+ pr_alert("%s%s ", torture_type, TORTURE_FLAG);
+ pr_cont("Reader Batch: ");
for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
- page += sprintf(page, " %ld", batchsummary[i]);
- page += sprintf(page, "\n%s%s ", torture_type, TORTURE_FLAG);
- page += sprintf(page, "Free-Block Circulation: ");
+ pr_cont(" %ld", batchsummary[i]);
+ pr_cont("\n");
+
+ pr_alert("%s%s ", torture_type, TORTURE_FLAG);
+ pr_cont("Free-Block Circulation: ");
for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
- page += sprintf(page, " %d",
- atomic_read(&rcu_torture_wcount[i]));
+ pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
}
- page += sprintf(page, "\n");
+ pr_cont("\n");
+
if (cur_ops->stats)
- cur_ops->stats(page);
+ cur_ops->stats();
if (rtcv_snap == rcu_torture_current_version &&
rcu_torture_current != NULL) {
int __maybe_unused flags;
@@ -1109,10 +1229,9 @@ rcu_torture_printk(char *page)
rcutorture_get_gp_data(cur_ops->ttype,
&flags, &gpnum, &completed);
- page += sprintf(page,
- "??? Writer stall state %d g%lu c%lu f%#x\n",
- rcu_torture_writer_state,
- gpnum, completed, flags);
+ pr_alert("??? Writer stall state %d g%lu c%lu f%#x\n",
+ rcu_torture_writer_state,
+ gpnum, completed, flags);
show_rcu_gp_kthreads();
rcutorture_trace_dump();
}
@@ -1120,30 +1239,6 @@ rcu_torture_printk(char *page)
}
/*
- * Print torture statistics. Caller must ensure that there is only
- * one call to this function at a given time!!! This is normally
- * accomplished by relying on the module system to only have one copy
- * of the module loaded, and then by giving the rcu_torture_stats
- * kthread full control (or the init/cleanup functions when rcu_torture_stats
- * thread is not running).
- */
-static void
-rcu_torture_stats_print(void)
-{
- int size = nr_cpu_ids * 200 + 8192;
- char *buf;
-
- buf = kmalloc(size, GFP_KERNEL);
- if (!buf) {
- pr_err("rcu-torture: Out of memory, need: %d", size);
- return;
- }
- rcu_torture_printk(buf);
- pr_alert("%s", buf);
- kfree(buf);
-}
-
-/*
* Periodically prints torture statistics, if periodic statistics printing
* was specified via the stat_interval module parameter.
*/
@@ -1295,7 +1390,8 @@ static int rcu_torture_barrier_cbs(void *arg)
if (atomic_dec_and_test(&barrier_cbs_count))
wake_up(&barrier_wq);
} while (!torture_must_stop());
- cur_ops->cb_barrier();
+ if (cur_ops->cb_barrier != NULL)
+ cur_ops->cb_barrier();
destroy_rcu_head_on_stack(&rcu);
torture_kthread_stopping("rcu_torture_barrier_cbs");
return 0;
@@ -1418,7 +1514,7 @@ rcu_torture_cleanup(void)
int i;
rcutorture_record_test_transition();
- if (torture_cleanup()) {
+ if (torture_cleanup_begin()) {
if (cur_ops->cb_barrier != NULL)
cur_ops->cb_barrier();
return;
@@ -1447,6 +1543,8 @@ rcu_torture_cleanup(void)
torture_stop_kthread(rcu_torture_stats, stats_task);
torture_stop_kthread(rcu_torture_fqs, fqs_task);
+ for (i = 0; i < ncbflooders; i++)
+ torture_stop_kthread(rcu_torture_cbflood, cbflood_task[i]);
if ((test_boost == 1 && cur_ops->can_boost) ||
test_boost == 2) {
unregister_cpu_notifier(&rcutorture_cpu_nb);
@@ -1468,6 +1566,7 @@ rcu_torture_cleanup(void)
"End of test: RCU_HOTPLUG");
else
rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
+ torture_cleanup_end();
}
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
@@ -1534,9 +1633,10 @@ rcu_torture_init(void)
int firsterr = 0;
static struct rcu_torture_ops *torture_ops[] = {
&rcu_ops, &rcu_bh_ops, &rcu_busted_ops, &srcu_ops, &sched_ops,
+ RCUTORTURE_TASKS_OPS
};
- if (!torture_init_begin(torture_type, verbose, &rcutorture_runnable))
+ if (!torture_init_begin(torture_type, verbose, &torture_runnable))
return -EBUSY;
/* Process args and tell the world that the torturer is on the job. */
@@ -1693,6 +1793,24 @@ rcu_torture_init(void)
goto unwind;
if (object_debug)
rcu_test_debug_objects();
+ if (cbflood_n_burst > 0) {
+ /* Create the cbflood threads */
+ ncbflooders = (num_online_cpus() + 3) / 4;
+ cbflood_task = kcalloc(ncbflooders, sizeof(*cbflood_task),
+ GFP_KERNEL);
+ if (!cbflood_task) {
+ VERBOSE_TOROUT_ERRSTRING("out of memory");
+ firsterr = -ENOMEM;
+ goto unwind;
+ }
+ for (i = 0; i < ncbflooders; i++) {
+ firsterr = torture_create_kthread(rcu_torture_cbflood,
+ NULL,
+ cbflood_task[i]);
+ if (firsterr)
+ goto unwind;
+ }
+ }
rcutorture_record_test_transition();
torture_init_end();
return 0;
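
The init path sizes the flooder pool as (num_online_cpus() + 3) / 4, i.e. one flooder per four CPUs rounded up, so even a single-CPU box gets one. The arithmetic, spelled out as a tiny worked example:

#include <stdio.h>

int main(void)
{
        /* (n + 3) / 4 is ceil(n / 4) for positive n: one flooder per four CPUs. */
        for (int cpus = 1; cpus <= 9; cpus++)
                printf("%d CPU(s) -> %d cbflood kthread(s)\n", cpus, (cpus + 3) / 4);
        return 0;
}
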
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index d9efcc13008c..c0623fc47125 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -51,7 +51,7 @@ static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
#include "tiny_plugin.h"
-/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */
+/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcu/tree.c. */
static void rcu_idle_enter_common(long long newval)
{
if (newval) {
@@ -62,7 +62,7 @@ static void rcu_idle_enter_common(long long newval)
}
RCU_TRACE(trace_rcu_dyntick(TPS("Start"),
rcu_dynticks_nesting, newval));
- if (!is_idle_task(current)) {
+ if (IS_ENABLED(CONFIG_RCU_TRACE) && !is_idle_task(current)) {
struct task_struct *idle __maybe_unused = idle_task(smp_processor_id());
RCU_TRACE(trace_rcu_dyntick(TPS("Entry error: not idle task"),
@@ -72,7 +72,7 @@ static void rcu_idle_enter_common(long long newval)
current->pid, current->comm,
idle->pid, idle->comm); /* must be idle task! */
}
- rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */
+ rcu_sched_qs(); /* implies rcu_bh_qs() */
barrier();
rcu_dynticks_nesting = newval;
}
@@ -114,7 +114,7 @@ void rcu_irq_exit(void)
}
EXPORT_SYMBOL_GPL(rcu_irq_exit);
-/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcutree.c. */
+/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcu/tree.c. */
static void rcu_idle_exit_common(long long oldval)
{
if (oldval) {
@@ -123,7 +123,7 @@ static void rcu_idle_exit_common(long long oldval)
return;
}
RCU_TRACE(trace_rcu_dyntick(TPS("End"), oldval, rcu_dynticks_nesting));
- if (!is_idle_task(current)) {
+ if (IS_ENABLED(CONFIG_RCU_TRACE) && !is_idle_task(current)) {
struct task_struct *idle __maybe_unused = idle_task(smp_processor_id());
RCU_TRACE(trace_rcu_dyntick(TPS("Exit error: not idle task"),
@@ -217,7 +217,7 @@ static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
* are at it, given that any rcu quiescent state is also an rcu_bh
* quiescent state. Use "+" instead of "||" to defeat short circuiting.
*/
-void rcu_sched_qs(int cpu)
+void rcu_sched_qs(void)
{
unsigned long flags;
@@ -231,7 +231,7 @@ void rcu_sched_qs(int cpu)
/*
* Record an rcu_bh quiescent state.
*/
-void rcu_bh_qs(int cpu)
+void rcu_bh_qs(void)
{
unsigned long flags;
@@ -251,9 +251,11 @@ void rcu_check_callbacks(int cpu, int user)
{
RCU_TRACE(check_cpu_stalls());
if (user || rcu_is_cpu_rrupt_from_idle())
- rcu_sched_qs(cpu);
+ rcu_sched_qs();
else if (!in_softirq())
- rcu_bh_qs(cpu);
+ rcu_bh_qs();
+ if (user)
+ rcu_note_voluntary_context_switch(current);
}
/*
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 1b70cb6fbe3c..133e47223095 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -79,9 +79,18 @@ static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
* the tracing userspace tools to be able to decipher the string
* address to the matching string.
*/
-#define RCU_STATE_INITIALIZER(sname, sabbr, cr) \
+#ifdef CONFIG_TRACING
+# define DEFINE_RCU_TPS(sname) \
static char sname##_varname[] = #sname; \
-static const char *tp_##sname##_varname __used __tracepoint_string = sname##_varname; \
+static const char *tp_##sname##_varname __used __tracepoint_string = sname##_varname;
+# define RCU_STATE_NAME(sname) sname##_varname
+#else
+# define DEFINE_RCU_TPS(sname)
+# define RCU_STATE_NAME(sname) __stringify(sname)
+#endif
+
+#define RCU_STATE_INITIALIZER(sname, sabbr, cr) \
+DEFINE_RCU_TPS(sname) \
struct rcu_state sname##_state = { \
.level = { &sname##_state.node[0] }, \
.call = cr, \
@@ -93,7 +102,7 @@ struct rcu_state sname##_state = { \
.orphan_donetail = &sname##_state.orphan_donelist, \
.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
.onoff_mutex = __MUTEX_INITIALIZER(sname##_state.onoff_mutex), \
- .name = sname##_varname, \
+ .name = RCU_STATE_NAME(sname), \
.abbr = sabbr, \
}; \
DEFINE_PER_CPU(struct rcu_data, sname##_data)
@@ -188,22 +197,24 @@ static int rcu_gp_in_progress(struct rcu_state *rsp)
* one since the start of the grace period, this just sets a flag.
* The caller must have disabled preemption.
*/
-void rcu_sched_qs(int cpu)
+void rcu_sched_qs(void)
{
- struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu);
-
- if (rdp->passed_quiesce == 0)
- trace_rcu_grace_period(TPS("rcu_sched"), rdp->gpnum, TPS("cpuqs"));
- rdp->passed_quiesce = 1;
+ if (!__this_cpu_read(rcu_sched_data.passed_quiesce)) {
+ trace_rcu_grace_period(TPS("rcu_sched"),
+ __this_cpu_read(rcu_sched_data.gpnum),
+ TPS("cpuqs"));
+ __this_cpu_write(rcu_sched_data.passed_quiesce, 1);
+ }
}
-void rcu_bh_qs(int cpu)
+void rcu_bh_qs(void)
{
- struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
-
- if (rdp->passed_quiesce == 0)
- trace_rcu_grace_period(TPS("rcu_bh"), rdp->gpnum, TPS("cpuqs"));
- rdp->passed_quiesce = 1;
+ if (!__this_cpu_read(rcu_bh_data.passed_quiesce)) {
+ trace_rcu_grace_period(TPS("rcu_bh"),
+ __this_cpu_read(rcu_bh_data.gpnum),
+ TPS("cpuqs"));
+ __this_cpu_write(rcu_bh_data.passed_quiesce, 1);
+ }
}
static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
@@ -278,7 +289,7 @@ static void rcu_momentary_dyntick_idle(void)
void rcu_note_context_switch(int cpu)
{
trace_rcu_utilization(TPS("Start context switch"));
- rcu_sched_qs(cpu);
+ rcu_sched_qs();
rcu_preempt_note_context_switch(cpu);
if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
rcu_momentary_dyntick_idle();
@@ -526,6 +537,7 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
atomic_inc(&rdtp->dynticks);
smp_mb__after_atomic(); /* Force ordering with next sojourn. */
WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
+ rcu_dynticks_task_enter();
/*
* It is illegal to enter an extended quiescent state while
@@ -642,6 +654,7 @@ void rcu_irq_exit(void)
static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
int user)
{
+ rcu_dynticks_task_exit();
smp_mb__before_atomic(); /* Force ordering w/previous sojourn. */
atomic_inc(&rdtp->dynticks);
/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
@@ -819,7 +832,7 @@ bool notrace __rcu_is_watching(void)
*/
bool notrace rcu_is_watching(void)
{
- int ret;
+ bool ret;
preempt_disable();
ret = __rcu_is_watching();
@@ -1647,7 +1660,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
rnp->level, rnp->grplo,
rnp->grphi, rnp->qsmask);
raw_spin_unlock_irq(&rnp->lock);
- cond_resched();
+ cond_resched_rcu_qs();
}
mutex_unlock(&rsp->onoff_mutex);
@@ -1668,7 +1681,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
if (fqs_state == RCU_SAVE_DYNTICK) {
/* Collect dyntick-idle snapshots. */
if (is_sysidle_rcu_state(rsp)) {
- isidle = 1;
+ isidle = true;
maxj = jiffies - ULONG_MAX / 4;
}
force_qs_rnp(rsp, dyntick_save_progress_counter,
@@ -1677,14 +1690,15 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
fqs_state = RCU_FORCE_QS;
} else {
/* Handle dyntick-idle and offline CPUs. */
- isidle = 0;
+ isidle = false;
force_qs_rnp(rsp, rcu_implicit_dynticks_qs, &isidle, &maxj);
}
/* Clear flag to prevent immediate re-entry. */
if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
raw_spin_lock_irq(&rnp->lock);
smp_mb__after_unlock_lock();
- ACCESS_ONCE(rsp->gp_flags) &= ~RCU_GP_FLAG_FQS;
+ ACCESS_ONCE(rsp->gp_flags) =
+ ACCESS_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS;
raw_spin_unlock_irq(&rnp->lock);
}
return fqs_state;
@@ -1736,7 +1750,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
/* smp_mb() provided by prior unlock-lock pair. */
nocb += rcu_future_gp_cleanup(rsp, rnp);
raw_spin_unlock_irq(&rnp->lock);
- cond_resched();
+ cond_resched_rcu_qs();
}
rnp = rcu_get_root(rsp);
raw_spin_lock_irq(&rnp->lock);
@@ -1785,8 +1799,8 @@ static int __noreturn rcu_gp_kthread(void *arg)
/* Locking provides needed memory barrier. */
if (rcu_gp_init(rsp))
break;
- cond_resched();
- flush_signals(current);
+ cond_resched_rcu_qs();
+ WARN_ON(signal_pending(current));
trace_rcu_grace_period(rsp->name,
ACCESS_ONCE(rsp->gpnum),
TPS("reqwaitsig"));
@@ -1828,11 +1842,11 @@ static int __noreturn rcu_gp_kthread(void *arg)
trace_rcu_grace_period(rsp->name,
ACCESS_ONCE(rsp->gpnum),
TPS("fqsend"));
- cond_resched();
+ cond_resched_rcu_qs();
} else {
/* Deal with stray signal. */
- cond_resched();
- flush_signals(current);
+ cond_resched_rcu_qs();
+ WARN_ON(signal_pending(current));
trace_rcu_grace_period(rsp->name,
ACCESS_ONCE(rsp->gpnum),
TPS("fqswaitsig"));
@@ -1928,7 +1942,7 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
{
WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
- wake_up(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */
+ rcu_gp_kthread_wake(rsp);
}
/*
@@ -2210,8 +2224,6 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
/* Adjust any no-longer-needed kthreads. */
rcu_boost_kthread_setaffinity(rnp, -1);
- /* Remove the dead CPU from the bitmasks in the rcu_node hierarchy. */
-
/* Exclude any attempts to start a new grace period. */
mutex_lock(&rsp->onoff_mutex);
raw_spin_lock_irqsave(&rsp->orphan_lock, flags);
@@ -2393,8 +2405,8 @@ void rcu_check_callbacks(int cpu, int user)
* at least not while the corresponding CPU is online.
*/
- rcu_sched_qs(cpu);
- rcu_bh_qs(cpu);
+ rcu_sched_qs();
+ rcu_bh_qs();
} else if (!in_softirq()) {
@@ -2405,11 +2417,13 @@ void rcu_check_callbacks(int cpu, int user)
* critical section, so note it.
*/
- rcu_bh_qs(cpu);
+ rcu_bh_qs();
}
rcu_preempt_check_callbacks(cpu);
if (rcu_pending(cpu))
invoke_rcu_core();
+ if (user)
+ rcu_note_voluntary_context_switch(current);
trace_rcu_utilization(TPS("End scheduler-tick"));
}
@@ -2432,7 +2446,7 @@ static void force_qs_rnp(struct rcu_state *rsp,
struct rcu_node *rnp;
rcu_for_each_leaf_node(rsp, rnp) {
- cond_resched();
+ cond_resched_rcu_qs();
mask = 0;
raw_spin_lock_irqsave(&rnp->lock, flags);
smp_mb__after_unlock_lock();
@@ -2449,7 +2463,7 @@ static void force_qs_rnp(struct rcu_state *rsp,
for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
if ((rnp->qsmask & bit) != 0) {
if ((rnp->qsmaskinit & bit) != 0)
- *isidle = 0;
+ *isidle = false;
if (f(per_cpu_ptr(rsp->rda, cpu), isidle, maxj))
mask |= bit;
}
@@ -2505,9 +2519,10 @@ static void force_quiescent_state(struct rcu_state *rsp)
raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
return; /* Someone beat us to it. */
}
- ACCESS_ONCE(rsp->gp_flags) |= RCU_GP_FLAG_FQS;
+ ACCESS_ONCE(rsp->gp_flags) =
+ ACCESS_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS;
raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
- wake_up(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */
+ rcu_gp_kthread_wake(rsp);
}
/*
@@ -2925,11 +2940,6 @@ static int synchronize_sched_expedited_cpu_stop(void *data)
* restructure your code to batch your updates, and then use a single
* synchronize_sched() instead.
*
- * Note that it is illegal to call this function while holding any lock
- * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal
- * to call this function from a CPU-hotplug notifier. Failing to observe
- * these restriction will result in deadlock.
- *
* This implementation can be thought of as an application of ticket
* locking to RCU, with sync_sched_expedited_started and
* sync_sched_expedited_done taking on the roles of the halves
@@ -2979,7 +2989,12 @@ void synchronize_sched_expedited(void)
*/
snap = atomic_long_inc_return(&rsp->expedited_start);
firstsnap = snap;
- get_online_cpus();
+ if (!try_get_online_cpus()) {
+ /* CPU hotplug operation in flight, fall back to normal GP. */
+ wait_rcu_gp(call_rcu_sched);
+ atomic_long_inc(&rsp->expedited_normal);
+ return;
+ }
WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
/*
@@ -3026,7 +3041,12 @@ void synchronize_sched_expedited(void)
* and they started after our first try, so their grace
* period works for us.
*/
- get_online_cpus();
+ if (!try_get_online_cpus()) {
+ /* CPU hotplug operation in flight, use normal GP. */
+ wait_rcu_gp(call_rcu_sched);
+ atomic_long_inc(&rsp->expedited_normal);
+ return;
+ }
snap = atomic_long_read(&rsp->expedited_start);
smp_mb(); /* ensure read is before try_stop_cpus(). */
}
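/*
 * Editor's sketch (not part of the patch): the "ticket lock" analogy in the
 * comment above, in miniature.  The _done counter named here is an
 * assumption taken from elsewhere in this file, not from this hunk.  A
 * caller takes a ticket by incrementing the _started counter; once the
 * _done counter reaches or passes that ticket, some expedited grace period
 * has fully elapsed since the caller began, so it can return without doing
 * the work itself:
 *
 *	snap = atomic_long_inc_return(&rsp->expedited_start);
 *	... attempt the expedited grace period ...
 *	if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_done),
 *			 (ulong)snap))
 *		return;		(someone else's expedited GP covers us)
 */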
@@ -3442,6 +3462,7 @@ static int rcu_cpu_notify(struct notifier_block *self,
case CPU_UP_PREPARE_FROZEN:
rcu_prepare_cpu(cpu);
rcu_prepare_kthreads(cpu);
+ rcu_spawn_all_nocb_kthreads(cpu);
break;
case CPU_ONLINE:
case CPU_DOWN_FAILED:
@@ -3489,7 +3510,7 @@ static int rcu_pm_notify(struct notifier_block *self,
}
/*
- * Spawn the kthread that handles this RCU flavor's grace periods.
+ * Spawn the kthreads that handle each RCU flavor's grace periods.
*/
static int __init rcu_spawn_gp_kthread(void)
{
@@ -3498,6 +3519,7 @@ static int __init rcu_spawn_gp_kthread(void)
struct rcu_state *rsp;
struct task_struct *t;
+ rcu_scheduler_fully_active = 1;
for_each_rcu_flavor(rsp) {
t = kthread_run(rcu_gp_kthread, rsp, "%s", rsp->name);
BUG_ON(IS_ERR(t));
@@ -3505,8 +3527,9 @@ static int __init rcu_spawn_gp_kthread(void)
raw_spin_lock_irqsave(&rnp->lock, flags);
rsp->gp_kthread = t;
raw_spin_unlock_irqrestore(&rnp->lock, flags);
- rcu_spawn_nocb_kthreads(rsp);
}
+ rcu_spawn_nocb_kthreads();
+ rcu_spawn_boost_kthreads();
return 0;
}
early_initcall(rcu_spawn_gp_kthread);
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 6a86eb7bac45..d03764652d91 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -350,7 +350,7 @@ struct rcu_data {
int nocb_p_count_lazy; /* (approximate). */
wait_queue_head_t nocb_wq; /* For nocb kthreads to sleep on. */
struct task_struct *nocb_kthread;
- bool nocb_defer_wakeup; /* Defer wakeup of nocb_kthread. */
+ int nocb_defer_wakeup; /* Defer wakeup of nocb_kthread. */
/* The following fields are used by the leader, hence own cacheline. */
struct rcu_head *nocb_gp_head ____cacheline_internodealigned_in_smp;
@@ -383,6 +383,11 @@ struct rcu_data {
#define RCU_FORCE_QS 3 /* Need to force quiescent state. */
#define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK
+/* Values for nocb_defer_wakeup field in struct rcu_data. */
+#define RCU_NOGP_WAKE_NOT 0
+#define RCU_NOGP_WAKE 1
+#define RCU_NOGP_WAKE_FORCE 2
+
#define RCU_JIFFIES_TILL_FORCE_QS (1 + (HZ > 250) + (HZ > 500))
/* For jiffies_till_first_fqs and */
/* and jiffies_till_next_fqs. */
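/*
 * Editor's sketch (not part of the patch): how the three RCU_NOGP_WAKE_*
 * values above are meant to be consumed; compare with the reworked
 * do_nocb_deferred_wakeup() later in this patch.
 */
static void nocb_deferred_wake_sketch(struct rcu_data *rdp)
{
	int ndw = ACCESS_ONCE(rdp->nocb_defer_wakeup);

	if (ndw == RCU_NOGP_WAKE_NOT)
		return;					/* nothing was deferred */
	ACCESS_ONCE(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT;
	wake_nocb_leader(rdp, ndw == RCU_NOGP_WAKE_FORCE);	/* force only on overflow */
}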
@@ -572,6 +577,7 @@ static void rcu_preempt_do_callbacks(void);
static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
struct rcu_node *rnp);
#endif /* #ifdef CONFIG_RCU_BOOST */
+static void __init rcu_spawn_boost_kthreads(void);
static void rcu_prepare_kthreads(int cpu);
static void rcu_cleanup_after_idle(int cpu);
static void rcu_prepare_for_idle(int cpu);
@@ -589,10 +595,14 @@ static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
static bool rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
struct rcu_data *rdp,
unsigned long flags);
-static bool rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
+static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
-static void rcu_spawn_nocb_kthreads(struct rcu_state *rsp);
+static void rcu_spawn_all_nocb_kthreads(int cpu);
+static void __init rcu_spawn_nocb_kthreads(void);
+#ifdef CONFIG_RCU_NOCB_CPU
+static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp);
+#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
static void __maybe_unused rcu_kick_nohz_cpu(int cpu);
static bool init_nocb_callback_list(struct rcu_data *rdp);
static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq);
@@ -605,6 +615,8 @@ static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
static void rcu_bind_gp_kthread(void);
static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp);
static bool rcu_nohz_full_cpu(struct rcu_state *rsp);
+static void rcu_dynticks_task_enter(void);
+static void rcu_dynticks_task_exit(void);
#endif /* #ifndef RCU_TREE_NONCORE */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index a7997e272564..387dd4599344 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -85,33 +85,6 @@ static void __init rcu_bootup_announce_oddness(void)
pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
if (nr_cpu_ids != NR_CPUS)
pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
-#ifdef CONFIG_RCU_NOCB_CPU
-#ifndef CONFIG_RCU_NOCB_CPU_NONE
- if (!have_rcu_nocb_mask) {
- zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL);
- have_rcu_nocb_mask = true;
- }
-#ifdef CONFIG_RCU_NOCB_CPU_ZERO
- pr_info("\tOffload RCU callbacks from CPU 0\n");
- cpumask_set_cpu(0, rcu_nocb_mask);
-#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ZERO */
-#ifdef CONFIG_RCU_NOCB_CPU_ALL
- pr_info("\tOffload RCU callbacks from all CPUs\n");
- cpumask_copy(rcu_nocb_mask, cpu_possible_mask);
-#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ALL */
-#endif /* #ifndef CONFIG_RCU_NOCB_CPU_NONE */
- if (have_rcu_nocb_mask) {
- if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
- pr_info("\tNote: kernel parameter 'rcu_nocbs=' contains nonexistent CPUs.\n");
- cpumask_and(rcu_nocb_mask, cpu_possible_mask,
- rcu_nocb_mask);
- }
- cpulist_scnprintf(nocb_buf, sizeof(nocb_buf), rcu_nocb_mask);
- pr_info("\tOffload RCU callbacks from CPUs: %s.\n", nocb_buf);
- if (rcu_nocb_poll)
- pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
- }
-#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
}
#ifdef CONFIG_TREE_PREEMPT_RCU
@@ -134,7 +107,7 @@ static void __init rcu_bootup_announce(void)
* Return the number of RCU-preempt batches processed thus far
* for debug and statistics.
*/
-long rcu_batches_completed_preempt(void)
+static long rcu_batches_completed_preempt(void)
{
return rcu_preempt_state.completed;
}
@@ -155,18 +128,19 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed);
* not in a quiescent state. There might be any number of tasks blocked
* while in an RCU read-side critical section.
*
- * Unlike the other rcu_*_qs() functions, callers to this function
- * must disable irqs in order to protect the assignment to
- * ->rcu_read_unlock_special.
- */
-static void rcu_preempt_qs(int cpu)
-{
- struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
-
- if (rdp->passed_quiesce == 0)
- trace_rcu_grace_period(TPS("rcu_preempt"), rdp->gpnum, TPS("cpuqs"));
- rdp->passed_quiesce = 1;
- current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
+ * As with the other rcu_*_qs() functions, callers to this function
+ * must disable preemption.
+ */
+static void rcu_preempt_qs(void)
+{
+ if (!__this_cpu_read(rcu_preempt_data.passed_quiesce)) {
+ trace_rcu_grace_period(TPS("rcu_preempt"),
+ __this_cpu_read(rcu_preempt_data.gpnum),
+ TPS("cpuqs"));
+ __this_cpu_write(rcu_preempt_data.passed_quiesce, 1);
+ barrier(); /* Coordinate with rcu_preempt_check_callbacks(). */
+ current->rcu_read_unlock_special.b.need_qs = false;
+ }
}
/*
@@ -190,14 +164,14 @@ static void rcu_preempt_note_context_switch(int cpu)
struct rcu_node *rnp;
if (t->rcu_read_lock_nesting > 0 &&
- (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
+ !t->rcu_read_unlock_special.b.blocked) {
/* Possibly blocking in an RCU read-side critical section. */
rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
rnp = rdp->mynode;
raw_spin_lock_irqsave(&rnp->lock, flags);
smp_mb__after_unlock_lock();
- t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
+ t->rcu_read_unlock_special.b.blocked = true;
t->rcu_blocked_node = rnp;
/*
@@ -239,7 +213,7 @@ static void rcu_preempt_note_context_switch(int cpu)
: rnp->gpnum + 1);
raw_spin_unlock_irqrestore(&rnp->lock, flags);
} else if (t->rcu_read_lock_nesting < 0 &&
- t->rcu_read_unlock_special) {
+ t->rcu_read_unlock_special.s) {
/*
* Complete exit from RCU read-side critical section on
@@ -257,9 +231,7 @@ static void rcu_preempt_note_context_switch(int cpu)
* grace period, then the fact that the task has been enqueued
* means that we continue to block the current grace period.
*/
- local_irq_save(flags);
- rcu_preempt_qs(cpu);
- local_irq_restore(flags);
+ rcu_preempt_qs();
}
/*
@@ -340,7 +312,7 @@ void rcu_read_unlock_special(struct task_struct *t)
bool drop_boost_mutex = false;
#endif /* #ifdef CONFIG_RCU_BOOST */
struct rcu_node *rnp;
- int special;
+ union rcu_special special;
/* NMI handlers cannot block and cannot safely manipulate state. */
if (in_nmi())
@@ -350,12 +322,13 @@ void rcu_read_unlock_special(struct task_struct *t)
/*
* If RCU core is waiting for this CPU to exit critical section,
- * let it know that we have done so.
+ * let it know that we have done so. Because irqs are disabled,
+ * t->rcu_read_unlock_special cannot change.
*/
special = t->rcu_read_unlock_special;
- if (special & RCU_READ_UNLOCK_NEED_QS) {
- rcu_preempt_qs(smp_processor_id());
- if (!t->rcu_read_unlock_special) {
+ if (special.b.need_qs) {
+ rcu_preempt_qs();
+ if (!t->rcu_read_unlock_special.s) {
local_irq_restore(flags);
return;
}
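/*
 * Editor's note (hedged, not part of this hunk): the union consulted above
 * is declared in <linux/sched.h> by a companion patch in this series,
 * roughly as follows, so that testing ->rcu_read_unlock_special.s checks
 * both flags with a single load while the .b members remain individually
 * settable and clearable:
 *
 *	union rcu_special {
 *		struct {
 *			bool blocked;
 *			bool need_qs;
 *		} b;
 *		short s;
 *	};
 */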
@@ -368,8 +341,8 @@ void rcu_read_unlock_special(struct task_struct *t)
}
/* Clean up if blocked during RCU read-side critical section. */
- if (special & RCU_READ_UNLOCK_BLOCKED) {
- t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;
+ if (special.b.blocked) {
+ t->rcu_read_unlock_special.b.blocked = false;
/*
* Remove this task from the list it blocked on. The
@@ -653,12 +626,13 @@ static void rcu_preempt_check_callbacks(int cpu)
struct task_struct *t = current;
if (t->rcu_read_lock_nesting == 0) {
- rcu_preempt_qs(cpu);
+ rcu_preempt_qs();
return;
}
if (t->rcu_read_lock_nesting > 0 &&
- per_cpu(rcu_preempt_data, cpu).qs_pending)
- t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
+ per_cpu(rcu_preempt_data, cpu).qs_pending &&
+ !per_cpu(rcu_preempt_data, cpu).passed_quiesce)
+ t->rcu_read_unlock_special.b.need_qs = true;
}
#ifdef CONFIG_RCU_BOOST
@@ -819,11 +793,6 @@ sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
* In fact, if you are using synchronize_rcu_expedited() in a loop,
* please restructure your code to batch your updates, and then Use a
* single synchronize_rcu() instead.
- *
- * Note that it is illegal to call this function while holding any lock
- * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal
- * to call this function from a CPU-hotplug notifier. Failing to observe
- * these restriction will result in deadlock.
*/
void synchronize_rcu_expedited(void)
{
@@ -845,7 +814,11 @@ void synchronize_rcu_expedited(void)
* being boosted. This simplifies the process of moving tasks
* from leaf to root rcu_node structures.
*/
- get_online_cpus();
+ if (!try_get_online_cpus()) {
+ /* CPU-hotplug operation in flight, fall back to normal GP. */
+ wait_rcu_gp(call_rcu);
+ return;
+ }
/*
* Acquire lock, falling back to synchronize_rcu() if too many
@@ -897,7 +870,8 @@ void synchronize_rcu_expedited(void)
/* Clean up and exit. */
smp_mb(); /* ensure expedited GP seen before counter increment. */
- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
+ ACCESS_ONCE(sync_rcu_preempt_exp_count) =
+ sync_rcu_preempt_exp_count + 1;
unlock_mb_ret:
mutex_unlock(&sync_rcu_preempt_exp_mutex);
mb_ret:
@@ -941,7 +915,7 @@ void exit_rcu(void)
return;
t->rcu_read_lock_nesting = 1;
barrier();
- t->rcu_read_unlock_special = RCU_READ_UNLOCK_BLOCKED;
+ t->rcu_read_unlock_special.b.blocked = true;
__rcu_read_unlock();
}
@@ -1462,14 +1436,13 @@ static struct smp_hotplug_thread rcu_cpu_thread_spec = {
};
/*
- * Spawn all kthreads -- called as soon as the scheduler is running.
+ * Spawn boost kthreads -- called as soon as the scheduler is running.
*/
-static int __init rcu_spawn_kthreads(void)
+static void __init rcu_spawn_boost_kthreads(void)
{
struct rcu_node *rnp;
int cpu;
- rcu_scheduler_fully_active = 1;
for_each_possible_cpu(cpu)
per_cpu(rcu_cpu_has_work, cpu) = 0;
BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
@@ -1479,9 +1452,7 @@ static int __init rcu_spawn_kthreads(void)
rcu_for_each_leaf_node(rcu_state_p, rnp)
(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
}
- return 0;
}
-early_initcall(rcu_spawn_kthreads);
static void rcu_prepare_kthreads(int cpu)
{
@@ -1519,12 +1490,9 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
}
-static int __init rcu_scheduler_really_started(void)
+static void __init rcu_spawn_boost_kthreads(void)
{
- rcu_scheduler_fully_active = 1;
- return 0;
}
-early_initcall(rcu_scheduler_really_started);
static void rcu_prepare_kthreads(int cpu)
{
@@ -1625,7 +1593,7 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void)
/* Exit early if we advanced recently. */
if (jiffies == rdtp->last_advance_all)
- return 0;
+ return false;
rdtp->last_advance_all = jiffies;
for_each_rcu_flavor(rsp) {
@@ -1848,7 +1816,7 @@ static int rcu_oom_notify(struct notifier_block *self,
get_online_cpus();
for_each_online_cpu(cpu) {
smp_call_function_single(cpu, rcu_oom_notify_cpu, NULL, 1);
- cond_resched();
+ cond_resched_rcu_qs();
}
put_online_cpus();
@@ -2075,7 +2043,7 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
if (!ACCESS_ONCE(rdp_leader->nocb_kthread))
return;
if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) {
- /* Prior xchg orders against prior callback enqueue. */
+ /* Prior smp_mb__after_atomic() orders against prior enqueue. */
ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false;
wake_up(&rdp_leader->nocb_wq);
}
@@ -2104,6 +2072,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
ACCESS_ONCE(*old_rhpp) = rhp;
atomic_long_add(rhcount, &rdp->nocb_q_count);
atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
+ smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */
/* If we are not being polled and there is a kthread, awaken it ... */
t = ACCESS_ONCE(rdp->nocb_kthread);
@@ -2120,16 +2089,23 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
TPS("WakeEmpty"));
} else {
- rdp->nocb_defer_wakeup = true;
+ rdp->nocb_defer_wakeup = RCU_NOGP_WAKE;
trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
TPS("WakeEmptyIsDeferred"));
}
rdp->qlen_last_fqs_check = 0;
} else if (len > rdp->qlen_last_fqs_check + qhimark) {
/* ... or if many callbacks queued. */
- wake_nocb_leader(rdp, true);
+ if (!irqs_disabled_flags(flags)) {
+ wake_nocb_leader(rdp, true);
+ trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+ TPS("WakeOvf"));
+ } else {
+ rdp->nocb_defer_wakeup = RCU_NOGP_WAKE_FORCE;
+ trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+ TPS("WakeOvfIsDeferred"));
+ }
rdp->qlen_last_fqs_check = LONG_MAX / 2;
- trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeOvf"));
} else {
trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeNot"));
}
@@ -2150,7 +2126,7 @@ static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
{
if (!rcu_is_nocb_cpu(rdp->cpu))
- return 0;
+ return false;
__call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy, flags);
if (__is_kfree_rcu_offset((unsigned long)rhp->func))
trace_rcu_kfree_callback(rdp->rsp->name, rhp,
@@ -2161,7 +2137,18 @@ static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
trace_rcu_callback(rdp->rsp->name, rhp,
-atomic_long_read(&rdp->nocb_q_count_lazy),
-atomic_long_read(&rdp->nocb_q_count));
- return 1;
+
+ /*
+ * If called from an extended quiescent state with interrupts
+ * disabled, invoke the RCU core in order to allow the idle-entry
+ * deferred-wakeup check to function.
+ */
+ if (irqs_disabled_flags(flags) &&
+ !rcu_is_watching() &&
+ cpu_online(smp_processor_id()))
+ invoke_rcu_core();
+
+ return true;
}
/*
@@ -2177,7 +2164,7 @@ static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
/* If this is not a no-CBs CPU, tell the caller to do it the old way. */
if (!rcu_is_nocb_cpu(smp_processor_id()))
- return 0;
+ return false;
rsp->qlen = 0;
rsp->qlen_lazy = 0;
@@ -2196,7 +2183,7 @@ static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
rsp->orphan_nxtlist = NULL;
rsp->orphan_nxttail = &rsp->orphan_nxtlist;
}
- return 1;
+ return true;
}
/*
@@ -2229,7 +2216,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
(d = ULONG_CMP_GE(ACCESS_ONCE(rnp->completed), c)));
if (likely(d))
break;
- flush_signals(current);
+ WARN_ON(signal_pending(current));
trace_rcu_future_gp(rnp, rdp, c, TPS("ResumeWait"));
}
trace_rcu_future_gp(rnp, rdp, c, TPS("EndWait"));
@@ -2288,7 +2275,7 @@ wait_again:
if (!rcu_nocb_poll)
trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu,
"WokeEmpty");
- flush_signals(current);
+ WARN_ON(signal_pending(current));
schedule_timeout_interruptible(1);
/* Rescan in case we were a victim of memory ordering. */
@@ -2327,6 +2314,7 @@ wait_again:
atomic_long_add(rdp->nocb_gp_count, &rdp->nocb_follower_count);
atomic_long_add(rdp->nocb_gp_count_lazy,
&rdp->nocb_follower_count_lazy);
+ smp_mb__after_atomic(); /* Store *tail before wakeup. */
if (rdp != my_rdp && tail == &rdp->nocb_follower_head) {
/*
* List was empty, wake up the follower.
@@ -2367,7 +2355,7 @@ static void nocb_follower_wait(struct rcu_data *rdp)
if (!rcu_nocb_poll)
trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
"WokeEmpty");
- flush_signals(current);
+ WARN_ON(signal_pending(current));
schedule_timeout_interruptible(1);
}
}
@@ -2428,15 +2416,16 @@ static int rcu_nocb_kthread(void *arg)
list = next;
}
trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
- ACCESS_ONCE(rdp->nocb_p_count) -= c;
- ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
+ ACCESS_ONCE(rdp->nocb_p_count) = rdp->nocb_p_count - c;
+ ACCESS_ONCE(rdp->nocb_p_count_lazy) =
+ rdp->nocb_p_count_lazy - cl;
rdp->n_nocbs_invoked += c;
}
return 0;
}
/* Is a deferred wakeup of rcu_nocb_kthread() required? */
-static bool rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
+static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
{
return ACCESS_ONCE(rdp->nocb_defer_wakeup);
}
@@ -2444,11 +2433,79 @@ static bool rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
/* Do a deferred wakeup of rcu_nocb_kthread(). */
static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
{
+ int ndw;
+
if (!rcu_nocb_need_deferred_wakeup(rdp))
return;
- ACCESS_ONCE(rdp->nocb_defer_wakeup) = false;
- wake_nocb_leader(rdp, false);
- trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWakeEmpty"));
+ ndw = ACCESS_ONCE(rdp->nocb_defer_wakeup);
+ ACCESS_ONCE(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT;
+ wake_nocb_leader(rdp, ndw == RCU_NOGP_WAKE_FORCE);
+ trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake"));
+}
+
+void __init rcu_init_nohz(void)
+{
+ int cpu;
+ bool need_rcu_nocb_mask = true;
+ struct rcu_state *rsp;
+
+#ifdef CONFIG_RCU_NOCB_CPU_NONE
+ need_rcu_nocb_mask = false;
+#endif /* #ifdef CONFIG_RCU_NOCB_CPU_NONE */
+
+#if defined(CONFIG_NO_HZ_FULL)
+ if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask))
+ need_rcu_nocb_mask = true;
+#endif /* #if defined(CONFIG_NO_HZ_FULL) */
+
+ if (!have_rcu_nocb_mask && need_rcu_nocb_mask) {
+ if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
+ pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
+ return;
+ }
+ have_rcu_nocb_mask = true;
+ }
+ if (!have_rcu_nocb_mask)
+ return;
+
+#ifdef CONFIG_RCU_NOCB_CPU_ZERO
+ pr_info("\tOffload RCU callbacks from CPU 0\n");
+ cpumask_set_cpu(0, rcu_nocb_mask);
+#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ZERO */
+#ifdef CONFIG_RCU_NOCB_CPU_ALL
+ pr_info("\tOffload RCU callbacks from all CPUs\n");
+ cpumask_copy(rcu_nocb_mask, cpu_possible_mask);
+#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ALL */
+#if defined(CONFIG_NO_HZ_FULL)
+ if (tick_nohz_full_running)
+ cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask);
+#endif /* #if defined(CONFIG_NO_HZ_FULL) */
+
+ if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
+ pr_info("\tNote: kernel parameter 'rcu_nocbs=' contains nonexistent CPUs.\n");
+ cpumask_and(rcu_nocb_mask, cpu_possible_mask,
+ rcu_nocb_mask);
+ }
+ cpulist_scnprintf(nocb_buf, sizeof(nocb_buf), rcu_nocb_mask);
+ pr_info("\tOffload RCU callbacks from CPUs: %s.\n", nocb_buf);
+ if (rcu_nocb_poll)
+ pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
+
+ for_each_rcu_flavor(rsp) {
+ for_each_cpu(cpu, rcu_nocb_mask) {
+ struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+
+ /*
+ * If there are early callbacks, they will need
+ * to be moved to the nocb lists.
+ */
+ WARN_ON_ONCE(rdp->nxttail[RCU_NEXT_TAIL] !=
+ &rdp->nxtlist &&
+ rdp->nxttail[RCU_NEXT_TAIL] != NULL);
+ init_nocb_callback_list(rdp);
+ }
+ rcu_organize_nocb_kthreads(rsp);
+ }
}
/* Initialize per-rcu_data variables for no-CBs CPUs. */
@@ -2459,15 +2516,85 @@ static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
rdp->nocb_follower_tail = &rdp->nocb_follower_head;
}
+/*
+ * If the specified CPU is a no-CBs CPU that does not already have its
+ * rcuo kthread for the specified RCU flavor, spawn it. If the CPUs are
+ * brought online out of order, this can require re-organizing the
+ * leader-follower relationships.
+ */
+static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
+{
+ struct rcu_data *rdp;
+ struct rcu_data *rdp_last;
+ struct rcu_data *rdp_old_leader;
+ struct rcu_data *rdp_spawn = per_cpu_ptr(rsp->rda, cpu);
+ struct task_struct *t;
+
+ /*
+ * If this isn't a no-CBs CPU or if it already has an rcuo kthread,
+ * then nothing to do.
+ */
+ if (!rcu_is_nocb_cpu(cpu) || rdp_spawn->nocb_kthread)
+ return;
+
+ /* If we didn't spawn the leader first, reorganize! */
+ rdp_old_leader = rdp_spawn->nocb_leader;
+ if (rdp_old_leader != rdp_spawn && !rdp_old_leader->nocb_kthread) {
+ rdp_last = NULL;
+ rdp = rdp_old_leader;
+ do {
+ rdp->nocb_leader = rdp_spawn;
+ if (rdp_last && rdp != rdp_spawn)
+ rdp_last->nocb_next_follower = rdp;
+ rdp_last = rdp;
+ rdp = rdp->nocb_next_follower;
+ rdp_last->nocb_next_follower = NULL;
+ } while (rdp);
+ rdp_spawn->nocb_next_follower = rdp_old_leader;
+ }
+
+ /* Spawn the kthread for this CPU and RCU flavor. */
+ t = kthread_run(rcu_nocb_kthread, rdp_spawn,
+ "rcuo%c/%d", rsp->abbr, cpu);
+ BUG_ON(IS_ERR(t));
+ ACCESS_ONCE(rdp_spawn->nocb_kthread) = t;
+}
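/*
 * Editor's illustration (not part of the patch): if a group's designated
 * leader has not yet been brought online when one of its followers comes
 * up, the loop above promotes the newly onlined CPU to leader and demotes
 * the original, still kthread-less leader to a follower.  For a group
 * {1 (leader), 2, 3} where CPU 3 comes online first:
 *
 *	before:  1 (leader, no kthread) -> 2 -> 3
 *	after:   3 (leader, kthread)    -> 1 -> 2
 */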
+
+/*
+ * If the specified CPU is a no-CBs CPU that does not already have its
+ * rcuo kthreads, spawn them.
+ */
+static void rcu_spawn_all_nocb_kthreads(int cpu)
+{
+ struct rcu_state *rsp;
+
+ if (rcu_scheduler_fully_active)
+ for_each_rcu_flavor(rsp)
+ rcu_spawn_one_nocb_kthread(rsp, cpu);
+}
+
+/*
+ * Once the scheduler is running, spawn rcuo kthreads for all online
+ * no-CBs CPUs. This assumes that the early_initcall()s happen before
+ * non-boot CPUs come online -- if this changes, we will need to add
+ * some mutual exclusion.
+ */
+static void __init rcu_spawn_nocb_kthreads(void)
+{
+ int cpu;
+
+ for_each_online_cpu(cpu)
+ rcu_spawn_all_nocb_kthreads(cpu);
+}
+
/* How many follower CPU IDs per leader? Default of -1 for sqrt(nr_cpu_ids). */
static int rcu_nocb_leader_stride = -1;
module_param(rcu_nocb_leader_stride, int, 0444);
/*
- * Create a kthread for each RCU flavor for each no-CBs CPU.
- * Also initialize leader-follower relationships.
+ * Initialize leader-follower relationships for all no-CBs CPUs.
*/
-static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
+static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp)
{
int cpu;
int ls = rcu_nocb_leader_stride;
@@ -2475,14 +2602,9 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
struct rcu_data *rdp;
struct rcu_data *rdp_leader = NULL; /* Suppress misguided gcc warn. */
struct rcu_data *rdp_prev = NULL;
- struct task_struct *t;
- if (rcu_nocb_mask == NULL)
+ if (!have_rcu_nocb_mask)
return;
-#if defined(CONFIG_NO_HZ_FULL) && !defined(CONFIG_NO_HZ_FULL_ALL)
- if (tick_nohz_full_running)
- cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask);
-#endif /* #if defined(CONFIG_NO_HZ_FULL) && !defined(CONFIG_NO_HZ_FULL_ALL) */
if (ls == -1) {
ls = int_sqrt(nr_cpu_ids);
rcu_nocb_leader_stride = ls;
@@ -2505,21 +2627,15 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
rdp_prev->nocb_next_follower = rdp;
}
rdp_prev = rdp;
-
- /* Spawn the kthread for this CPU. */
- t = kthread_run(rcu_nocb_kthread, rdp,
- "rcuo%c/%d", rsp->abbr, cpu);
- BUG_ON(IS_ERR(t));
- ACCESS_ONCE(rdp->nocb_kthread) = t;
}
}
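/*
 * Editor's illustration (not part of the patch): with the default stride of
 * int_sqrt(nr_cpu_ids), an 8-CPU system with every CPU offloaded gets
 * ls = 2, so CPUs 0, 2, 4 and 6 become leaders and each is followed by the
 * next offloaded CPU:
 *
 *	CPU0 (leader) -> CPU1
 *	CPU2 (leader) -> CPU3
 *	CPU4 (leader) -> CPU5
 *	CPU6 (leader) -> CPU7
 *
 * keeping the wakeup fan-out per leader at roughly sqrt(nr_cpu_ids).
 */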
/* Prevent __call_rcu() from enqueuing callbacks on no-CBs CPUs */
static bool init_nocb_callback_list(struct rcu_data *rdp)
{
- if (rcu_nocb_mask == NULL ||
- !cpumask_test_cpu(rdp->cpu, rcu_nocb_mask))
+ if (!rcu_is_nocb_cpu(rdp->cpu))
return false;
+
rdp->nxttail[RCU_NEXT_TAIL] = NULL;
return true;
}
@@ -2541,21 +2657,21 @@ static void rcu_init_one_nocb(struct rcu_node *rnp)
static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
bool lazy, unsigned long flags)
{
- return 0;
+ return false;
}
static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
struct rcu_data *rdp,
unsigned long flags)
{
- return 0;
+ return false;
}
static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
{
}
-static bool rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
+static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
{
return false;
}
@@ -2564,7 +2680,11 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
{
}
-static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
+static void rcu_spawn_all_nocb_kthreads(int cpu)
+{
+}
+
+static void __init rcu_spawn_nocb_kthreads(void)
{
}
@@ -2595,16 +2715,6 @@ static void __maybe_unused rcu_kick_nohz_cpu(int cpu)
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
-/*
- * Define RCU flavor that holds sysidle state. This needs to be the
- * most active flavor of RCU.
- */
-#ifdef CONFIG_PREEMPT_RCU
-static struct rcu_state *rcu_sysidle_state = &rcu_preempt_state;
-#else /* #ifdef CONFIG_PREEMPT_RCU */
-static struct rcu_state *rcu_sysidle_state = &rcu_sched_state;
-#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
-
static int full_sysidle_state; /* Current system-idle state. */
#define RCU_SYSIDLE_NOT 0 /* Some CPU is not idle. */
#define RCU_SYSIDLE_SHORT 1 /* All CPUs idle for brief period. */
@@ -2622,6 +2732,10 @@ static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
{
unsigned long j;
+ /* If there are no nohz_full= CPUs, no need to track this. */
+ if (!tick_nohz_full_enabled())
+ return;
+
/* Adjust nesting, check for fully idle. */
if (irq) {
rdtp->dynticks_idle_nesting--;
@@ -2687,6 +2801,10 @@ void rcu_sysidle_force_exit(void)
*/
static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
{
+ /* If there are no nohz_full= CPUs, no need to track this. */
+ if (!tick_nohz_full_enabled())
+ return;
+
/* Adjust nesting, check for already non-idle. */
if (irq) {
rdtp->dynticks_idle_nesting++;
@@ -2741,12 +2859,16 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
unsigned long j;
struct rcu_dynticks *rdtp = rdp->dynticks;
+ /* If there are no nohz_full= CPUs, don't check system-wide idleness. */
+ if (!tick_nohz_full_enabled())
+ return;
+
/*
* If some other CPU has already reported non-idle, if this is
* not the flavor of RCU that tracks sysidle state, or if this
* is an offline or the timekeeping CPU, nothing to do.
*/
- if (!*isidle || rdp->rsp != rcu_sysidle_state ||
+ if (!*isidle || rdp->rsp != rcu_state_p ||
cpu_is_offline(rdp->cpu) || rdp->cpu == tick_do_timer_cpu)
return;
if (rcu_gp_in_progress(rdp->rsp))
@@ -2772,7 +2894,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
*/
static bool is_sysidle_rcu_state(struct rcu_state *rsp)
{
- return rsp == rcu_sysidle_state;
+ return rsp == rcu_state_p;
}
/*
@@ -2850,7 +2972,7 @@ static void rcu_sysidle_cancel(void)
static void rcu_sysidle_report(struct rcu_state *rsp, int isidle,
unsigned long maxj, bool gpkt)
{
- if (rsp != rcu_sysidle_state)
+ if (rsp != rcu_state_p)
return; /* Wrong flavor, ignore. */
if (gpkt && nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL)
return; /* Running state machine from timekeeping CPU. */
@@ -2867,6 +2989,10 @@ static void rcu_sysidle_report(struct rcu_state *rsp, int isidle,
static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
unsigned long maxj)
{
+ /* If there are no nohz_full= CPUs, no need to track this. */
+ if (!tick_nohz_full_enabled())
+ return;
+
rcu_sysidle_report(rsp, isidle, maxj, true);
}
@@ -2893,7 +3019,8 @@ static void rcu_sysidle_cb(struct rcu_head *rhp)
/*
* Check to see if the system is fully idle, other than the timekeeping CPU.
- * The caller must have disabled interrupts.
+ * The caller must have disabled interrupts. This is not intended to be
+ * called unless tick_nohz_full_enabled().
*/
bool rcu_sys_is_idle(void)
{
@@ -2919,13 +3046,12 @@ bool rcu_sys_is_idle(void)
/* Scan all the CPUs looking for nonidle CPUs. */
for_each_possible_cpu(cpu) {
- rdp = per_cpu_ptr(rcu_sysidle_state->rda, cpu);
+ rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
rcu_sysidle_check_cpu(rdp, &isidle, &maxj);
if (!isidle)
break;
}
- rcu_sysidle_report(rcu_sysidle_state,
- isidle, maxj, false);
+ rcu_sysidle_report(rcu_state_p, isidle, maxj, false);
oldrss = rss;
rss = ACCESS_ONCE(full_sysidle_state);
}
@@ -2952,7 +3078,7 @@ bool rcu_sys_is_idle(void)
* provided by the memory allocator.
*/
if (nr_cpu_ids > CONFIG_NO_HZ_FULL_SYSIDLE_SMALL &&
- !rcu_gp_in_progress(rcu_sysidle_state) &&
+ !rcu_gp_in_progress(rcu_state_p) &&
!rsh.inuse && xchg(&rsh.inuse, 1) == 0)
call_rcu(&rsh.rh, rcu_sysidle_cb);
return false;
@@ -3036,3 +3162,19 @@ static void rcu_bind_gp_kthread(void)
housekeeping_affine(current);
#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
}
+
+/* Record the current task on dyntick-idle entry. */
+static void rcu_dynticks_task_enter(void)
+{
+#if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
+ ACCESS_ONCE(current->rcu_tasks_idle_cpu) = smp_processor_id();
+#endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
+}
+
+/* Record no current task on dyntick-idle exit. */
+static void rcu_dynticks_task_exit(void)
+{
+#if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
+ ACCESS_ONCE(current->rcu_tasks_idle_cpu) = -1;
+#endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
+}
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index 4056d7992a6c..3ef8ba58694e 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -47,6 +47,8 @@
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/tick.h>
#define CREATE_TRACE_POINTS
@@ -91,7 +93,7 @@ void __rcu_read_unlock(void)
barrier(); /* critical section before exit code. */
t->rcu_read_lock_nesting = INT_MIN;
barrier(); /* assign before ->rcu_read_unlock_special load */
- if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
+ if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special.s)))
rcu_read_unlock_special(t);
barrier(); /* ->rcu_read_unlock_special load before assign */
t->rcu_read_lock_nesting = 0;
@@ -137,6 +139,38 @@ int notrace debug_lockdep_rcu_enabled(void)
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
/**
+ * rcu_read_lock_held() - might we be in RCU read-side critical section?
+ *
+ * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
+ * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC,
+ * this assumes we are in an RCU read-side critical section unless it can
+ * prove otherwise. This is useful for debug checks in functions that
+ * require that they be called within an RCU read-side critical section.
+ *
+ * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
+ * and while lockdep is disabled.
+ *
+ * Note that rcu_read_lock() and the matching rcu_read_unlock() must
+ * occur in the same context, for example, it is illegal to invoke
+ * rcu_read_unlock() in process context if the matching rcu_read_lock()
+ * was invoked from within an irq handler.
+ *
+ * Note that rcu_read_lock() is disallowed if the CPU is either idle or
+ * offline from an RCU perspective, so check for those as well.
+ */
+int rcu_read_lock_held(void)
+{
+ if (!debug_lockdep_rcu_enabled())
+ return 1;
+ if (!rcu_is_watching())
+ return 0;
+ if (!rcu_lockdep_current_cpu_online())
+ return 0;
+ return lock_is_held(&rcu_lock_map);
+}
+EXPORT_SYMBOL_GPL(rcu_read_lock_held);
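/*
 * Editor's sketch (not part of the patch): the intended debug-check use of
 * rcu_read_lock_held().  The lookup helper itself is hypothetical.
 */
static void *foo_find_rcu(struct idr *foo_idr, int id)
{
	WARN_ON_ONCE(!rcu_read_lock_held());	/* caller must hold rcu_read_lock() */
	return idr_find(foo_idr, id);
}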
+
+/**
* rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
*
* Check for bottom half being disabled, which covers both the
@@ -347,3 +381,312 @@ static int __init check_cpu_stall_init(void)
early_initcall(check_cpu_stall_init);
#endif /* #ifdef CONFIG_RCU_STALL_COMMON */
+
+#ifdef CONFIG_TASKS_RCU
+
+/*
+ * Simple variant of RCU whose quiescent states are voluntary context switch,
+ * user-space execution, and idle. As such, grace periods can take one good
+ * long time. There are no read-side primitives similar to rcu_read_lock()
+ * and rcu_read_unlock() because this implementation is intended to get
+ * the system into a safe state for some of the manipulations involved in
+ * tracing and the like. Finally, this implementation does not support
+ * high call_rcu_tasks() rates from multiple CPUs. If this is required,
+ * per-CPU callback lists will be needed.
+ */
+
+/* Global list of callbacks and associated lock. */
+static struct rcu_head *rcu_tasks_cbs_head;
+static struct rcu_head **rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
+static DECLARE_WAIT_QUEUE_HEAD(rcu_tasks_cbs_wq);
+static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock);
+
+/* Track exiting tasks in order to allow them to be waited for. */
+DEFINE_SRCU(tasks_rcu_exit_srcu);
+
+/* Control stall timeouts. Disable with <= 0, otherwise jiffies till stall. */
+static int rcu_task_stall_timeout __read_mostly = HZ * 60 * 10;
+module_param(rcu_task_stall_timeout, int, 0644);
+
+static void rcu_spawn_tasks_kthread(void);
+
+/*
+ * Post an RCU-tasks callback. First call must be from process context
+ * after the scheduler is fully operational.
+ */
+void call_rcu_tasks(struct rcu_head *rhp, void (*func)(struct rcu_head *rhp))
+{
+ unsigned long flags;
+ bool needwake;
+
+ rhp->next = NULL;
+ rhp->func = func;
+ raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
+ needwake = !rcu_tasks_cbs_head;
+ *rcu_tasks_cbs_tail = rhp;
+ rcu_tasks_cbs_tail = &rhp->next;
+ raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
+ if (needwake) {
+ rcu_spawn_tasks_kthread();
+ wake_up(&rcu_tasks_cbs_wq);
+ }
+}
+EXPORT_SYMBOL_GPL(call_rcu_tasks);
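/*
 * Editor's sketch (not part of the patch): typical use of the new
 * primitive.  The structure and callers are hypothetical; the pattern is
 * the usual rcu_head embedding, but the free is deferred until every task
 * has passed through a voluntary context switch, usermode, or idle.
 */
struct tramp_stub {
	struct rcu_head rh;
	/* ... trampoline payload a preempted task may still be executing ... */
};

static void tramp_stub_free(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct tramp_stub, rh));
}

static void tramp_stub_retire(struct tramp_stub *stub)
{
	call_rcu_tasks(&stub->rh, tramp_stub_free);
}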
+
+/**
+ * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
+ *
+ * Control will return to the caller some time after a full rcu-tasks
+ * grace period has elapsed, in other words after all currently
+ * executing rcu-tasks read-side critical sections have elapsed. These
+ * read-side critical sections are delimited by calls to schedule(),
+ * cond_resched_rcu_qs(), idle execution, userspace execution, calls
+ * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
+ *
+ * This is a very specialized primitive, intended only for a few uses in
+ * tracing and other situations requiring manipulation of function
+ * preambles and profiling hooks. The synchronize_rcu_tasks() function
+ * is not (yet) intended for heavy use from multiple CPUs.
+ *
+ * Note that this guarantee implies further memory-ordering guarantees.
+ * On systems with more than one CPU, when synchronize_rcu_tasks() returns,
+ * each CPU is guaranteed to have executed a full memory barrier since the
+ * end of its last RCU-tasks read-side critical section whose beginning
+ * preceded the call to synchronize_rcu_tasks(). In addition, each CPU
+ * having an RCU-tasks read-side critical section that extends beyond
+ * the return from synchronize_rcu_tasks() is guaranteed to have executed
+ * a full memory barrier after the beginning of synchronize_rcu_tasks()
+ * and before the beginning of that RCU-tasks read-side critical section.
+ * Note that these guarantees include CPUs that are offline, idle, or
+ * executing in user mode, as well as CPUs that are executing in the kernel.
+ *
+ * Furthermore, if CPU A invoked synchronize_rcu_tasks(), which returned
+ * to its caller on CPU B, then both CPU A and CPU B are guaranteed
+ * to have executed a full memory barrier during the execution of
+ * synchronize_rcu_tasks() -- even if CPU A and CPU B are the same CPU
+ * (but again only if the system has more than one CPU).
+ */
+void synchronize_rcu_tasks(void)
+{
+ /* Complain if the scheduler has not started. */
+ rcu_lockdep_assert(!rcu_scheduler_active,
+ "synchronize_rcu_tasks called too soon");
+
+ /* Wait for the grace period. */
+ wait_rcu_gp(call_rcu_tasks);
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
+
+/**
+ * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
+ *
+ * Although the current implementation is guaranteed to wait, it is not
+ * obligated to, for example, if there are no pending callbacks.
+ */
+void rcu_barrier_tasks(void)
+{
+ /* There is only one callback queue, so this is easy. ;-) */
+ synchronize_rcu_tasks();
+}
+EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
+
+/* See if tasks are still holding out, complain if so. */
+static void check_holdout_task(struct task_struct *t,
+ bool needreport, bool *firstreport)
+{
+ int cpu;
+
+ if (!ACCESS_ONCE(t->rcu_tasks_holdout) ||
+ t->rcu_tasks_nvcsw != ACCESS_ONCE(t->nvcsw) ||
+ !ACCESS_ONCE(t->on_rq) ||
+ (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
+ !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
+ ACCESS_ONCE(t->rcu_tasks_holdout) = false;
+ list_del_init(&t->rcu_tasks_holdout_list);
+ put_task_struct(t);
+ return;
+ }
+ if (!needreport)
+ return;
+ if (*firstreport) {
+ pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
+ *firstreport = false;
+ }
+ cpu = task_cpu(t);
+ pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
+ t, ".I"[is_idle_task(t)],
+ "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
+ t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
+ t->rcu_tasks_idle_cpu, cpu);
+ sched_show_task(t);
+}
+
+/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
+static int __noreturn rcu_tasks_kthread(void *arg)
+{
+ unsigned long flags;
+ struct task_struct *g, *t;
+ unsigned long lastreport;
+ struct rcu_head *list;
+ struct rcu_head *next;
+ LIST_HEAD(rcu_tasks_holdouts);
+
+ /* FIXME: Add housekeeping affinity. */
+
+ /*
+ * Each pass through the following loop makes one check for
+ * newly arrived callbacks, and, if there are some, waits for
+ * one RCU-tasks grace period and then invokes the callbacks.
+ * This loop is terminated by the system going down. ;-)
+ */
+ for (;;) {
+
+ /* Pick up any new callbacks. */
+ raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
+ list = rcu_tasks_cbs_head;
+ rcu_tasks_cbs_head = NULL;
+ rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
+ raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
+
+ /* If there were none, wait a bit and start over. */
+ if (!list) {
+ wait_event_interruptible(rcu_tasks_cbs_wq,
+ rcu_tasks_cbs_head);
+ if (!rcu_tasks_cbs_head) {
+ WARN_ON(signal_pending(current));
+ schedule_timeout_interruptible(HZ/10);
+ }
+ continue;
+ }
+
+ /*
+ * Wait for all pre-existing t->on_rq and t->nvcsw
+ * transitions to complete. Invoking synchronize_sched()
+ * suffices because all these transitions occur with
+ * interrupts disabled. Without this synchronize_sched(),
+ * a read-side critical section that started before the
+ * grace period might be incorrectly seen as having started
+ * after the grace period.
+ *
+ * This synchronize_sched() also dispenses with the
+ * need for a memory barrier on the first store to
+ * ->rcu_tasks_holdout, as it forces the store to happen
+ * after the beginning of the grace period.
+ */
+ synchronize_sched();
+
+ /*
+ * There were callbacks, so we need to wait for an
+ * RCU-tasks grace period. Start off by scanning
+ * the task list for tasks that are not already
+ * voluntarily blocked. Mark these tasks and make
+ * a list of them in rcu_tasks_holdouts.
+ */
+ rcu_read_lock();
+ for_each_process_thread(g, t) {
+ if (t != current && ACCESS_ONCE(t->on_rq) &&
+ !is_idle_task(t)) {
+ get_task_struct(t);
+ t->rcu_tasks_nvcsw = ACCESS_ONCE(t->nvcsw);
+ ACCESS_ONCE(t->rcu_tasks_holdout) = true;
+ list_add(&t->rcu_tasks_holdout_list,
+ &rcu_tasks_holdouts);
+ }
+ }
+ rcu_read_unlock();
+
+ /*
+ * Wait for tasks that are in the process of exiting.
+ * This does only part of the job, ensuring that all
+ * tasks that were previously exiting reach the point
+ * where they have disabled preemption, allowing the
+ * later synchronize_sched() to finish the job.
+ */
+ synchronize_srcu(&tasks_rcu_exit_srcu);
+
+ /*
+ * Each pass through the following loop scans the list
+ * of holdout tasks, removing any that are no longer
+ * holdouts. When the list is empty, we are done.
+ */
+ lastreport = jiffies;
+ while (!list_empty(&rcu_tasks_holdouts)) {
+ bool firstreport;
+ bool needreport;
+ int rtst;
+ struct task_struct *t1;
+
+ schedule_timeout_interruptible(HZ);
+ rtst = ACCESS_ONCE(rcu_task_stall_timeout);
+ needreport = rtst > 0 &&
+ time_after(jiffies, lastreport + rtst);
+ if (needreport)
+ lastreport = jiffies;
+ firstreport = true;
+ WARN_ON(signal_pending(current));
+ list_for_each_entry_safe(t, t1, &rcu_tasks_holdouts,
+ rcu_tasks_holdout_list) {
+ check_holdout_task(t, needreport, &firstreport);
+ cond_resched();
+ }
+ }
+
+ /*
+ * Because ->on_rq and ->nvcsw are not guaranteed
+ * to have full memory barriers prior to them in the
+ * schedule() path, memory reordering on other CPUs could
+ * cause their RCU-tasks read-side critical sections to
+ * extend past the end of the grace period. However,
+ * because these ->nvcsw updates are carried out with
+ * interrupts disabled, we can use synchronize_sched()
+ * to force the needed ordering on all such CPUs.
+ *
+ * This synchronize_sched() also confines all
+ * ->rcu_tasks_holdout accesses to be within the grace
+ * period, avoiding the need for memory barriers for
+ * ->rcu_tasks_holdout accesses.
+ *
+ * In addition, this synchronize_sched() waits for exiting
+ * tasks to complete their final preempt_disable() region
+ * of execution, cleaning up after the synchronize_srcu()
+ * above.
+ */
+ synchronize_sched();
+
+ /* Invoke the callbacks. */
+ while (list) {
+ next = list->next;
+ local_bh_disable();
+ list->func(list);
+ local_bh_enable();
+ list = next;
+ cond_resched();
+ }
+ schedule_timeout_uninterruptible(HZ/10);
+ }
+}
+
+/* Spawn rcu_tasks_kthread() at first call to call_rcu_tasks(). */
+static void rcu_spawn_tasks_kthread(void)
+{
+ static DEFINE_MUTEX(rcu_tasks_kthread_mutex);
+ static struct task_struct *rcu_tasks_kthread_ptr;
+ struct task_struct *t;
+
+ if (ACCESS_ONCE(rcu_tasks_kthread_ptr)) {
+ smp_mb(); /* Ensure caller sees full kthread. */
+ return;
+ }
+ mutex_lock(&rcu_tasks_kthread_mutex);
+ if (rcu_tasks_kthread_ptr) {
+ mutex_unlock(&rcu_tasks_kthread_mutex);
+ return;
+ }
+ t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
+ BUG_ON(IS_ERR(t));
+ smp_mb(); /* Ensure others see full kthread. */
+ ACCESS_ONCE(rcu_tasks_kthread_ptr) = t;
+ mutex_unlock(&rcu_tasks_kthread_mutex);
+}
+
+#endif /* #ifdef CONFIG_TASKS_RCU */
diff --git a/kernel/reboot.c b/kernel/reboot.c
index a3a9e240fcdb..5925f5ae8dff 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -104,6 +104,87 @@ int unregister_reboot_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL(unregister_reboot_notifier);
+/*
+ * Notifier list for kernel code which wants to be called
+ * to restart the system.
+ */
+static ATOMIC_NOTIFIER_HEAD(restart_handler_list);
+
+/**
+ * register_restart_handler - Register function to be called to reset
+ * the system
+ * @nb: Info about handler function to be called
+ * @nb->priority: Handler priority. Handlers should follow the
+ * following guidelines for setting priorities.
+ * 0: Restart handler of last resort,
+ * with limited restart capabilities
+ * 128: Default restart handler; use if no other
+ * restart handler is expected to be available,
+ * and/or if restart functionality is
+ * sufficient to restart the entire system
+ * 255: Highest priority restart handler, will
+ * preempt all other restart handlers
+ *
+ * Registers a function with code to be called to restart the
+ * system.
+ *
+ * Registered functions will be called from machine_restart as the last
+ * step of the restart sequence (if the architecture specific
+ * machine_restart function calls do_kernel_restart - see below
+ * for details).
+ * Registered functions are expected to restart the system immediately.
+ * If more than one function is registered, the restart handler priority
+ * selects which function will be called first.
+ *
+ * Restart handlers are expected to be registered from non-architecture
+ * code, typically from drivers. A typical use case would be a system
+ * where restart functionality is provided through a watchdog. Multiple
+ * restart handlers may exist; for example, one restart handler might
+ * restart the entire system, while another only restarts the CPU.
+ * In such cases, the restart handler which only restarts part of the
+ * hardware is expected to register with low priority to ensure that
+ * it only runs if no other means to restart the system is available.
+ *
+ * Currently always returns zero, as atomic_notifier_chain_register()
+ * always returns zero.
+ */
+int register_restart_handler(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&restart_handler_list, nb);
+}
+EXPORT_SYMBOL(register_restart_handler);
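/*
 * Editor's sketch (not part of the patch): a minimal driver-side user of
 * the interface documented above.  The watchdog handler body is
 * hypothetical; priority 128 follows the "default restart handler"
 * guideline in the comment.
 */
static int my_wdt_restart(struct notifier_block *nb, unsigned long mode,
			  void *cmd)
{
	/* Hypothetical: kick the watchdog hardware into an immediate reset. */
	return NOTIFY_DONE;
}

static struct notifier_block my_wdt_restart_nb = {
	.notifier_call	= my_wdt_restart,
	.priority	= 128,
};

/* probe:  register_restart_handler(&my_wdt_restart_nb);	*/
/* remove: unregister_restart_handler(&my_wdt_restart_nb);	*/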
+
+/**
+ * unregister_restart_handler - Unregister previously registered
+ * restart handler
+ * @nb: Hook to be unregistered
+ *
+ * Unregisters a previously registered restart handler function.
+ *
+ * Returns zero on success, or %-ENOENT on failure.
+ */
+int unregister_restart_handler(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&restart_handler_list, nb);
+}
+EXPORT_SYMBOL(unregister_restart_handler);
+
+/**
+ * do_kernel_restart - Execute kernel restart handler call chain
+ *
+ * Calls functions registered with register_restart_handler.
+ *
+ * Expected to be called from machine_restart as the last step of the restart
+ * sequence.
+ *
+ * Restarts the system immediately if a restart handler function has been
+ * registered. Otherwise does nothing.
+ */
+void do_kernel_restart(char *cmd)
+{
+ atomic_notifier_call_chain(&restart_handler_list, reboot_mode, cmd);
+}
+
void migrate_to_reboot_cpu(void)
{
/* The boot cpu is always logical cpu 0 */
diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
index e73efba98301..8a2e230fb86a 100644
--- a/kernel/sched/auto_group.c
+++ b/kernel/sched/auto_group.c
@@ -148,11 +148,8 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
if (!ACCESS_ONCE(sysctl_sched_autogroup_enabled))
goto out;
- t = p;
- do {
+ for_each_thread(p, t)
sched_move_task(t);
- } while_each_thread(p, t);
-
out:
unlock_task_sighand(p, &flags);
autogroup_kref_put(prev);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 59965ec0b7de..44999505e1bf 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -90,22 +90,6 @@
#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>
-#ifdef smp_mb__before_atomic
-void __smp_mb__before_atomic(void)
-{
- smp_mb__before_atomic();
-}
-EXPORT_SYMBOL(__smp_mb__before_atomic);
-#endif
-
-#ifdef smp_mb__after_atomic
-void __smp_mb__after_atomic(void)
-{
- smp_mb__after_atomic();
-}
-EXPORT_SYMBOL(__smp_mb__after_atomic);
-#endif
-
void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
{
unsigned long delta;
@@ -333,9 +317,12 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
for (;;) {
rq = task_rq(p);
raw_spin_lock(&rq->lock);
- if (likely(rq == task_rq(p)))
+ if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
return rq;
raw_spin_unlock(&rq->lock);
+
+ while (unlikely(task_on_rq_migrating(p)))
+ cpu_relax();
}
}
@@ -352,10 +339,13 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
raw_spin_lock_irqsave(&p->pi_lock, *flags);
rq = task_rq(p);
raw_spin_lock(&rq->lock);
- if (likely(rq == task_rq(p)))
+ if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
return rq;
raw_spin_unlock(&rq->lock);
raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
+
+ while (unlikely(task_on_rq_migrating(p)))
+ cpu_relax();
}
}
@@ -449,7 +439,15 @@ static void __hrtick_start(void *arg)
void hrtick_start(struct rq *rq, u64 delay)
{
struct hrtimer *timer = &rq->hrtick_timer;
- ktime_t time = ktime_add_ns(timer->base->get_time(), delay);
+ ktime_t time;
+ s64 delta;
+
+ /*
+ * Don't schedule slices shorter than 10000ns; that just
+ * doesn't make sense and can cause timer DoS.
+ */
+ delta = max_t(s64, delay, 10000LL);
+ time = ktime_add_ns(timer->base->get_time(), delta);
hrtimer_set_expires(timer, time);
@@ -1043,7 +1041,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
* A queue event has occurred, and we're going to schedule. In
* this case, we can save a useless back to back clock update.
*/
- if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
+ if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
rq->skip_clock_update = 1;
}
@@ -1088,7 +1086,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
static void __migrate_swap_task(struct task_struct *p, int cpu)
{
- if (p->on_rq) {
+ if (task_on_rq_queued(p)) {
struct rq *src_rq, *dst_rq;
src_rq = task_rq(p);
@@ -1214,7 +1212,7 @@ static int migration_cpu_stop(void *data);
unsigned long wait_task_inactive(struct task_struct *p, long match_state)
{
unsigned long flags;
- int running, on_rq;
+ int running, queued;
unsigned long ncsw;
struct rq *rq;
@@ -1252,7 +1250,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
rq = task_rq_lock(p, &flags);
trace_sched_wait_task(p);
running = task_running(rq, p);
- on_rq = p->on_rq;
+ queued = task_on_rq_queued(p);
ncsw = 0;
if (!match_state || p->state == match_state)
ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
@@ -1284,7 +1282,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
* running right now), it's preempted, and we should
* yield - it could be a while.
*/
- if (unlikely(on_rq)) {
+ if (unlikely(queued)) {
ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
set_current_state(TASK_UNINTERRUPTIBLE);
@@ -1478,7 +1476,7 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
{
activate_task(rq, p, en_flags);
- p->on_rq = 1;
+ p->on_rq = TASK_ON_RQ_QUEUED;
/* if a worker is waking up, notify workqueue */
if (p->flags & PF_WQ_WORKER)
@@ -1537,7 +1535,7 @@ static int ttwu_remote(struct task_struct *p, int wake_flags)
int ret = 0;
rq = __task_rq_lock(p);
- if (p->on_rq) {
+ if (task_on_rq_queued(p)) {
/* check_preempt_curr() may use rq clock */
update_rq_clock(rq);
ttwu_do_wakeup(rq, p, wake_flags);
@@ -1620,6 +1618,25 @@ static void ttwu_queue_remote(struct task_struct *p, int cpu)
}
}
+void wake_up_if_idle(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long flags;
+
+ if (!is_idle_task(rq->curr))
+ return;
+
+ if (set_nr_if_polling(rq->idle)) {
+ trace_sched_wake_idle_without_ipi(cpu);
+ } else {
+ raw_spin_lock_irqsave(&rq->lock, flags);
+ if (is_idle_task(rq->curr))
+ smp_send_reschedule(cpu);
+ /* Else cpu is not in idle, do nothing here */
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+ }
+}
+
bool cpus_share_cache(int this_cpu, int that_cpu)
{
return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
@@ -1742,7 +1759,7 @@ static void try_to_wake_up_local(struct task_struct *p)
if (!(p->state & TASK_NORMAL))
goto out;
- if (!p->on_rq)
+ if (!task_on_rq_queued(p))
ttwu_activate(rq, p, ENQUEUE_WAKEUP);
ttwu_do_wakeup(rq, p, 0);
@@ -1776,6 +1793,20 @@ int wake_up_state(struct task_struct *p, unsigned int state)
}
/*
+ * This function clears the sched_dl_entity static params.
+ */
+void __dl_clear_params(struct task_struct *p)
+{
+ struct sched_dl_entity *dl_se = &p->dl;
+
+ dl_se->dl_runtime = 0;
+ dl_se->dl_deadline = 0;
+ dl_se->dl_period = 0;
+ dl_se->flags = 0;
+ dl_se->dl_bw = 0;
+}
+
+/*
* Perform scheduler related setup for a newly forked process p.
* p is forked by current.
*
@@ -1799,10 +1830,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
RB_CLEAR_NODE(&p->dl.rb_node);
hrtimer_init(&p->dl.dl_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- p->dl.dl_runtime = p->dl.runtime = 0;
- p->dl.dl_deadline = p->dl.deadline = 0;
- p->dl.dl_period = 0;
- p->dl.flags = 0;
+ __dl_clear_params(p);
INIT_LIST_HEAD(&p->rt.run_list);
@@ -1977,6 +2005,8 @@ unsigned long to_ratio(u64 period, u64 runtime)
#ifdef CONFIG_SMP
inline struct dl_bw *dl_bw_of(int i)
{
+ rcu_lockdep_assert(rcu_read_lock_sched_held(),
+ "sched RCU must be held");
return &cpu_rq(i)->rd->dl_bw;
}
@@ -1985,6 +2015,8 @@ static inline int dl_bw_cpus(int i)
struct root_domain *rd = cpu_rq(i)->rd;
int cpus = 0;
+ rcu_lockdep_assert(rcu_read_lock_sched_held(),
+ "sched RCU must be held");
for_each_cpu_and(i, rd->span, cpu_active_mask)
cpus++;
@@ -2095,7 +2127,7 @@ void wake_up_new_task(struct task_struct *p)
init_task_runnable_average(p);
rq = __task_rq_lock(p);
activate_task(rq, p, 0);
- p->on_rq = 1;
+ p->on_rq = TASK_ON_RQ_QUEUED;
trace_sched_wakeup_new(p, true);
check_preempt_curr(rq, p, WF_FORK);
#ifdef CONFIG_SMP
@@ -2287,10 +2319,6 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev)
*/
post_schedule(rq);
-#ifdef __ARCH_WANT_UNLOCKED_CTXSW
- /* In this case, finish_task_switch does not reenable preemption */
- preempt_enable();
-#endif
if (current->set_child_tid)
put_user(task_pid_vnr(current), current->set_child_tid);
}
@@ -2333,9 +2361,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
* of the scheduler it's an obvious special-case), so we
* do an early lockdep release here:
*/
-#ifndef __ARCH_WANT_UNLOCKED_CTXSW
spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
-#endif
context_tracking_task_switch(prev, next);
/* Here we just switch the register state and the stack. */
@@ -2463,7 +2489,7 @@ static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
* project cycles that may never be accounted to this
* thread, breaking clock_gettime().
*/
- if (task_current(rq, p) && p->on_rq) {
+ if (task_current(rq, p) && task_on_rq_queued(p)) {
update_rq_clock(rq);
ns = rq_clock_task(rq) - p->se.exec_start;
if ((s64)ns < 0)
@@ -2509,7 +2535,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
* If we see ->on_cpu without ->on_rq, the task is leaving, and has
* been accounted, so we're correct here as well.
*/
- if (!p->on_cpu || !p->on_rq)
+ if (!p->on_cpu || !task_on_rq_queued(p))
return p->se.sum_exec_runtime;
#endif
@@ -2672,6 +2698,9 @@ static noinline void __schedule_bug(struct task_struct *prev)
*/
static inline void schedule_debug(struct task_struct *prev)
{
+#ifdef CONFIG_SCHED_STACK_END_CHECK
+ BUG_ON(unlikely(task_stack_end_corrupted(prev)));
+#endif
/*
* Test if we are atomic. Since do_exit() needs to call into
* schedule() atomically, we ignore that path. Otherwise whine
@@ -2813,7 +2842,7 @@ need_resched:
switch_count = &prev->nvcsw;
}
- if (prev->on_rq || rq->skip_clock_update < 0)
+ if (task_on_rq_queued(prev) || rq->skip_clock_update < 0)
update_rq_clock(rq);
next = pick_next_task(rq, prev);
@@ -2978,7 +3007,7 @@ EXPORT_SYMBOL(default_wake_function);
*/
void rt_mutex_setprio(struct task_struct *p, int prio)
{
- int oldprio, on_rq, running, enqueue_flag = 0;
+ int oldprio, queued, running, enqueue_flag = 0;
struct rq *rq;
const struct sched_class *prev_class;
@@ -3007,12 +3036,12 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
trace_sched_pi_setprio(p, prio);
oldprio = p->prio;
prev_class = p->sched_class;
- on_rq = p->on_rq;
+ queued = task_on_rq_queued(p);
running = task_current(rq, p);
- if (on_rq)
+ if (queued)
dequeue_task(rq, p, 0);
if (running)
- p->sched_class->put_prev_task(rq, p);
+ put_prev_task(rq, p);
/*
* Boosting condition are:
@@ -3049,7 +3078,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
if (running)
p->sched_class->set_curr_task(rq);
- if (on_rq)
+ if (queued)
enqueue_task(rq, p, enqueue_flag);
check_class_changed(rq, p, prev_class, oldprio);
@@ -3060,7 +3089,7 @@ out_unlock:
void set_user_nice(struct task_struct *p, long nice)
{
- int old_prio, delta, on_rq;
+ int old_prio, delta, queued;
unsigned long flags;
struct rq *rq;
@@ -3081,8 +3110,8 @@ void set_user_nice(struct task_struct *p, long nice)
p->static_prio = NICE_TO_PRIO(nice);
goto out_unlock;
}
- on_rq = p->on_rq;
- if (on_rq)
+ queued = task_on_rq_queued(p);
+ if (queued)
dequeue_task(rq, p, 0);
p->static_prio = NICE_TO_PRIO(nice);
@@ -3091,7 +3120,7 @@ void set_user_nice(struct task_struct *p, long nice)
p->prio = effective_prio(p);
delta = p->prio - old_prio;
- if (on_rq) {
+ if (queued) {
enqueue_task(rq, p, 0);
/*
* If the task increased its priority or is running and
@@ -3363,7 +3392,7 @@ static int __sched_setscheduler(struct task_struct *p,
{
int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
MAX_RT_PRIO - 1 - attr->sched_priority;
- int retval, oldprio, oldpolicy = -1, on_rq, running;
+ int retval, oldprio, oldpolicy = -1, queued, running;
int policy = attr->sched_policy;
unsigned long flags;
const struct sched_class *prev_class;
@@ -3560,19 +3589,19 @@ change:
return 0;
}
- on_rq = p->on_rq;
+ queued = task_on_rq_queued(p);
running = task_current(rq, p);
- if (on_rq)
+ if (queued)
dequeue_task(rq, p, 0);
if (running)
- p->sched_class->put_prev_task(rq, p);
+ put_prev_task(rq, p);
prev_class = p->sched_class;
__setscheduler(rq, p, attr);
if (running)
p->sched_class->set_curr_task(rq);
- if (on_rq) {
+ if (queued) {
/*
* We enqueue to tail when the priority of a task is
* increased (user space view).
@@ -3996,14 +4025,14 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
rcu_read_lock();
if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
rcu_read_unlock();
- goto out_unlock;
+ goto out_free_new_mask;
}
rcu_read_unlock();
}
retval = security_task_setscheduler(p);
if (retval)
- goto out_unlock;
+ goto out_free_new_mask;
cpuset_cpus_allowed(p, cpus_allowed);
@@ -4016,13 +4045,14 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
* root_domain.
*/
#ifdef CONFIG_SMP
- if (task_has_dl_policy(p)) {
- const struct cpumask *span = task_rq(p)->rd->span;
-
- if (dl_bandwidth_enabled() && !cpumask_subset(span, new_mask)) {
+ if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
+ rcu_read_lock();
+ if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) {
retval = -EBUSY;
- goto out_unlock;
+ rcu_read_unlock();
+ goto out_free_new_mask;
}
+ rcu_read_unlock();
}
#endif
again:
@@ -4040,7 +4070,7 @@ again:
goto again;
}
}
-out_unlock:
+out_free_new_mask:
free_cpumask_var(new_mask);
out_free_cpus_allowed:
free_cpumask_var(cpus_allowed);
@@ -4524,7 +4554,7 @@ void show_state_filter(unsigned long state_filter)
" task PC stack pid father\n");
#endif
rcu_read_lock();
- do_each_thread(g, p) {
+ for_each_process_thread(g, p) {
/*
* reset the NMI-timeout, listing all files on a slow
* console might take a lot of time:
@@ -4532,7 +4562,7 @@ void show_state_filter(unsigned long state_filter)
touch_nmi_watchdog();
if (!state_filter || (p->state & state_filter))
sched_show_task(p);
- } while_each_thread(g, p);
+ }
touch_all_softlockup_watchdogs();
@@ -4587,7 +4617,7 @@ void init_idle(struct task_struct *idle, int cpu)
rcu_read_unlock();
rq->curr = rq->idle = idle;
- idle->on_rq = 1;
+ idle->on_rq = TASK_ON_RQ_QUEUED;
#if defined(CONFIG_SMP)
idle->on_cpu = 1;
#endif
@@ -4608,6 +4638,33 @@ void init_idle(struct task_struct *idle, int cpu)
}
#ifdef CONFIG_SMP
+/*
+ * move_queued_task - move a queued task to a new rq.
+ *
+ * Returns the (locked) new rq. The old rq's lock is released.
+ */
+static struct rq *move_queued_task(struct task_struct *p, int new_cpu)
+{
+ struct rq *rq = task_rq(p);
+
+ lockdep_assert_held(&rq->lock);
+
+ dequeue_task(rq, p, 0);
+ p->on_rq = TASK_ON_RQ_MIGRATING;
+ set_task_cpu(p, new_cpu);
+ raw_spin_unlock(&rq->lock);
+
+ rq = cpu_rq(new_cpu);
+
+ raw_spin_lock(&rq->lock);
+ BUG_ON(task_cpu(p) != new_cpu);
+ p->on_rq = TASK_ON_RQ_QUEUED;
+ enqueue_task(rq, p, 0);
+ check_preempt_curr(rq, p, 0);
+
+ return rq;
+}
+
void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
if (p->sched_class && p->sched_class->set_cpus_allowed)
@@ -4664,14 +4721,15 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
goto out;
dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
- if (p->on_rq) {
+ if (task_running(rq, p) || p->state == TASK_WAKING) {
struct migration_arg arg = { p, dest_cpu };
/* Need help from migration thread: drop lock and wait. */
task_rq_unlock(rq, p, &flags);
stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
tlb_migrate_finish(p->mm);
return 0;
- }
+ } else if (task_on_rq_queued(p))
+ rq = move_queued_task(p, dest_cpu);
out:
task_rq_unlock(rq, p, &flags);
@@ -4692,20 +4750,20 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
*/
static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
{
- struct rq *rq_dest, *rq_src;
+ struct rq *rq;
int ret = 0;
if (unlikely(!cpu_active(dest_cpu)))
return ret;
- rq_src = cpu_rq(src_cpu);
- rq_dest = cpu_rq(dest_cpu);
+ rq = cpu_rq(src_cpu);
raw_spin_lock(&p->pi_lock);
- double_rq_lock(rq_src, rq_dest);
+ raw_spin_lock(&rq->lock);
/* Already moved. */
if (task_cpu(p) != src_cpu)
goto done;
+
/* Affinity changed (again). */
if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
goto fail;
@@ -4714,16 +4772,12 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
* If we're not on a rq, the next wake-up will ensure we're
* placed properly.
*/
- if (p->on_rq) {
- dequeue_task(rq_src, p, 0);
- set_task_cpu(p, dest_cpu);
- enqueue_task(rq_dest, p, 0);
- check_preempt_curr(rq_dest, p, 0);
- }
+ if (task_on_rq_queued(p))
+ rq = move_queued_task(p, dest_cpu);
done:
ret = 1;
fail:
- double_rq_unlock(rq_src, rq_dest);
+ raw_spin_unlock(&rq->lock);
raw_spin_unlock(&p->pi_lock);
return ret;
}
@@ -4755,22 +4809,22 @@ void sched_setnuma(struct task_struct *p, int nid)
{
struct rq *rq;
unsigned long flags;
- bool on_rq, running;
+ bool queued, running;
rq = task_rq_lock(p, &flags);
- on_rq = p->on_rq;
+ queued = task_on_rq_queued(p);
running = task_current(rq, p);
- if (on_rq)
+ if (queued)
dequeue_task(rq, p, 0);
if (running)
- p->sched_class->put_prev_task(rq, p);
+ put_prev_task(rq, p);
p->numa_preferred_nid = nid;
if (running)
p->sched_class->set_curr_task(rq);
- if (on_rq)
+ if (queued)
enqueue_task(rq, p, 0);
task_rq_unlock(rq, p, &flags);
}
@@ -4790,6 +4844,12 @@ static int migration_cpu_stop(void *data)
* be on another cpu but it doesn't matter.
*/
local_irq_disable();
+ /*
+ * We need to explicitly wake pending tasks before running
+ * __migrate_task() such that we will not miss enforcing cpus_allowed
+ * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
+ */
+ sched_ttwu_pending();
__migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
local_irq_enable();
return 0;
@@ -5200,6 +5260,7 @@ static int sched_cpu_inactive(struct notifier_block *nfb,
{
unsigned long flags;
long cpu = (long)hcpu;
+ struct dl_bw *dl_b;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_DOWN_PREPARE:
@@ -5207,15 +5268,19 @@ static int sched_cpu_inactive(struct notifier_block *nfb,
/* explicitly allow suspend */
if (!(action & CPU_TASKS_FROZEN)) {
- struct dl_bw *dl_b = dl_bw_of(cpu);
bool overflow;
int cpus;
+ rcu_read_lock_sched();
+ dl_b = dl_bw_of(cpu);
+
raw_spin_lock_irqsave(&dl_b->lock, flags);
cpus = dl_bw_cpus(cpu);
overflow = __dl_overflow(dl_b, cpus, 0, 0);
raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+ rcu_read_unlock_sched();
+
if (overflow)
return notifier_from_errno(-EBUSY);
}
@@ -5758,7 +5823,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
const struct cpumask *span = sched_domain_span(sd);
struct cpumask *covered = sched_domains_tmpmask;
struct sd_data *sdd = sd->private;
- struct sched_domain *child;
+ struct sched_domain *sibling;
int i;
cpumask_clear(covered);
@@ -5769,10 +5834,10 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
if (cpumask_test_cpu(i, covered))
continue;
- child = *per_cpu_ptr(sdd->sd, i);
+ sibling = *per_cpu_ptr(sdd->sd, i);
/* See the comment near build_group_mask(). */
- if (!cpumask_test_cpu(i, sched_domain_span(child)))
+ if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
continue;
sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
@@ -5782,10 +5847,9 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
goto fail;
sg_span = sched_group_cpus(sg);
- if (child->child) {
- child = child->child;
- cpumask_copy(sg_span, sched_domain_span(child));
- } else
+ if (sibling->child)
+ cpumask_copy(sg_span, sched_domain_span(sibling->child));
+ else
cpumask_set_cpu(i, sg_span);
cpumask_or(covered, covered, sg_span);
@@ -7136,13 +7200,13 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
.sched_policy = SCHED_NORMAL,
};
int old_prio = p->prio;
- int on_rq;
+ int queued;
- on_rq = p->on_rq;
- if (on_rq)
+ queued = task_on_rq_queued(p);
+ if (queued)
dequeue_task(rq, p, 0);
__setscheduler(rq, p, &attr);
- if (on_rq) {
+ if (queued) {
enqueue_task(rq, p, 0);
resched_curr(rq);
}
@@ -7156,12 +7220,12 @@ void normalize_rt_tasks(void)
unsigned long flags;
struct rq *rq;
- read_lock_irqsave(&tasklist_lock, flags);
- do_each_thread(g, p) {
+ read_lock(&tasklist_lock);
+ for_each_process_thread(g, p) {
/*
* Only normalize user tasks:
*/
- if (!p->mm)
+ if (p->flags & PF_KTHREAD)
continue;
p->se.exec_start = 0;
@@ -7176,21 +7240,16 @@ void normalize_rt_tasks(void)
* Renice negative nice level userspace
* tasks back to 0:
*/
- if (task_nice(p) < 0 && p->mm)
+ if (task_nice(p) < 0)
set_user_nice(p, 0);
continue;
}
- raw_spin_lock(&p->pi_lock);
- rq = __task_rq_lock(p);
-
+ rq = task_rq_lock(p, &flags);
normalize_task(rq, p);
-
- __task_rq_unlock(rq);
- raw_spin_unlock(&p->pi_lock);
- } while_each_thread(g, p);
-
- read_unlock_irqrestore(&tasklist_lock, flags);
+ task_rq_unlock(rq, p, &flags);
+ }
+ read_unlock(&tasklist_lock);
}
#endif /* CONFIG_MAGIC_SYSRQ */
@@ -7330,19 +7389,19 @@ void sched_offline_group(struct task_group *tg)
void sched_move_task(struct task_struct *tsk)
{
struct task_group *tg;
- int on_rq, running;
+ int queued, running;
unsigned long flags;
struct rq *rq;
rq = task_rq_lock(tsk, &flags);
running = task_current(rq, tsk);
- on_rq = tsk->on_rq;
+ queued = task_on_rq_queued(tsk);
- if (on_rq)
+ if (queued)
dequeue_task(rq, tsk, 0);
if (unlikely(running))
- tsk->sched_class->put_prev_task(rq, tsk);
+ put_prev_task(rq, tsk);
tg = container_of(task_css_check(tsk, cpu_cgrp_id,
lockdep_is_held(&tsk->sighand->siglock)),
@@ -7352,14 +7411,14 @@ void sched_move_task(struct task_struct *tsk)
#ifdef CONFIG_FAIR_GROUP_SCHED
if (tsk->sched_class->task_move_group)
- tsk->sched_class->task_move_group(tsk, on_rq);
+ tsk->sched_class->task_move_group(tsk, queued);
else
#endif
set_task_rq(tsk, task_cpu(tsk));
if (unlikely(running))
tsk->sched_class->set_curr_task(rq);
- if (on_rq)
+ if (queued)
enqueue_task(rq, tsk, 0);
task_rq_unlock(rq, tsk, &flags);
@@ -7377,10 +7436,10 @@ static inline int tg_has_rt_tasks(struct task_group *tg)
{
struct task_struct *g, *p;
- do_each_thread(g, p) {
- if (rt_task(p) && task_rq(p)->rt.tg == tg)
+ for_each_process_thread(g, p) {
+ if (rt_task(p) && task_group(p) == tg)
return 1;
- } while_each_thread(g, p);
+ }
return 0;
}
@@ -7589,6 +7648,7 @@ static int sched_dl_global_constraints(void)
u64 runtime = global_rt_runtime();
u64 period = global_rt_period();
u64 new_bw = to_ratio(period, runtime);
+ struct dl_bw *dl_b;
int cpu, ret = 0;
unsigned long flags;
@@ -7602,13 +7662,16 @@ static int sched_dl_global_constraints(void)
* solutions is welcome!
*/
for_each_possible_cpu(cpu) {
- struct dl_bw *dl_b = dl_bw_of(cpu);
+ rcu_read_lock_sched();
+ dl_b = dl_bw_of(cpu);
raw_spin_lock_irqsave(&dl_b->lock, flags);
if (new_bw < dl_b->total_bw)
ret = -EBUSY;
raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+ rcu_read_unlock_sched();
+
if (ret)
break;
}
@@ -7619,6 +7682,7 @@ static int sched_dl_global_constraints(void)
static void sched_dl_do_global(void)
{
u64 new_bw = -1;
+ struct dl_bw *dl_b;
int cpu;
unsigned long flags;
@@ -7632,11 +7696,14 @@ static void sched_dl_do_global(void)
* FIXME: As above...
*/
for_each_possible_cpu(cpu) {
- struct dl_bw *dl_b = dl_bw_of(cpu);
+ rcu_read_lock_sched();
+ dl_b = dl_bw_of(cpu);
raw_spin_lock_irqsave(&dl_b->lock, flags);
dl_b->bw = new_bw;
raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+
+ rcu_read_unlock_sched();
}
}
@@ -8017,7 +8084,7 @@ static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
quota = normalize_cfs_quota(tg, d);
- parent_quota = parent_b->hierarchal_quota;
+ parent_quota = parent_b->hierarchical_quota;
/*
* ensure max(child_quota) <= parent_quota, inherit when no
@@ -8028,7 +8095,7 @@ static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
else if (parent_quota != RUNTIME_INF && quota > parent_quota)
return -EINVAL;
}
- cfs_b->hierarchal_quota = quota;
+ cfs_b->hierarchical_quota = quota;
return 0;
}
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index bd95963dae80..539ca3ce071b 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -107,9 +107,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
int best_cpu = -1;
const struct sched_dl_entity *dl_se = &p->dl;
- if (later_mask && cpumask_and(later_mask, cp->free_cpus,
- &p->cpus_allowed) && cpumask_and(later_mask,
- later_mask, cpu_active_mask)) {
+ if (later_mask && cpumask_and(later_mask, later_mask, cp->free_cpus)) {
best_cpu = cpumask_any(later_mask);
goto out;
} else if (cpumask_test_cpu(cpudl_maximum(cp), &p->cpus_allowed) &&
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 72fdf06ef865..8394b1ee600c 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -288,24 +288,29 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
struct signal_struct *sig = tsk->signal;
cputime_t utime, stime;
struct task_struct *t;
-
- times->utime = sig->utime;
- times->stime = sig->stime;
- times->sum_exec_runtime = sig->sum_sched_runtime;
+ unsigned int seq, nextseq;
+ unsigned long flags;
rcu_read_lock();
- /* make sure we can trust tsk->thread_group list */
- if (!likely(pid_alive(tsk)))
- goto out;
-
- t = tsk;
+ /* Attempt a lockless read on the first round. */
+ nextseq = 0;
do {
- task_cputime(t, &utime, &stime);
- times->utime += utime;
- times->stime += stime;
- times->sum_exec_runtime += task_sched_runtime(t);
- } while_each_thread(tsk, t);
-out:
+ seq = nextseq;
+ flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
+ times->utime = sig->utime;
+ times->stime = sig->stime;
+ times->sum_exec_runtime = sig->sum_sched_runtime;
+
+ for_each_thread(tsk, t) {
+ task_cputime(t, &utime, &stime);
+ times->utime += utime;
+ times->stime += stime;
+ times->sum_exec_runtime += task_sched_runtime(t);
+ }
+ /* If lockless access failed, take the lock. */
+ nextseq = 1;
+ } while (need_seqretry(&sig->stats_lock, seq));
+ done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
rcu_read_unlock();
}
@@ -550,6 +555,23 @@ drop_precision:
}
/*
+ * Atomically advance counter to the new value. Interrupts, vcpu
+ * scheduling, and scaling inaccuracies can cause cputime_advance
+ * to be occasionally called with a new value smaller than counter.
+ * Let's enforce atomicity.
+ *
+ * Normally a caller will only go through this loop once, or not
+ * at all in case a previous caller updated counter the same jiffy.
+ */
+static void cputime_advance(cputime_t *counter, cputime_t new)
+{
+ cputime_t old;
+
+ while (new > (old = ACCESS_ONCE(*counter)))
+ cmpxchg_cputime(counter, old, new);
+}
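A user-space analogue of the cputime_advance() pattern above, sketched with C11 atomics instead of the kernel's cmpxchg_cputime(); the counter_advance() name and the standalone main() are illustrative assumptions, not part of the patch:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Move the counter forward only; concurrent updaters retry the CAS. */
void counter_advance(_Atomic uint64_t *counter, uint64_t new)
{
	uint64_t old = atomic_load(counter);

	while (new > old &&
	       !atomic_compare_exchange_weak(counter, &old, new))
		;	/* a failed CAS reloads 'old' with the current value */
}

int main(void)
{
	_Atomic uint64_t c = 100;

	counter_advance(&c, 150);	/* advances to 150 */
	counter_advance(&c, 120);	/* no-op: would move the counter backwards */
	printf("%llu\n", (unsigned long long)atomic_load(&c));
	return 0;
}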
+
+/*
* Adjust tick based cputime random precision against scheduler
* runtime accounting.
*/
@@ -594,13 +616,8 @@ static void cputime_adjust(struct task_cputime *curr,
utime = rtime - stime;
}
- /*
- * If the tick based count grows faster than the scheduler one,
- * the result of the scaling may go backward.
- * Let's enforce monotonicity.
- */
- prev->stime = max(prev->stime, stime);
- prev->utime = max(prev->utime, utime);
+ cputime_advance(&prev->stime, stime);
+ cputime_advance(&prev->utime, utime);
out:
*ut = prev->utime;
@@ -617,9 +634,6 @@ void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
cputime_adjust(&cputime, &p->prev_cputime, ut, st);
}
-/*
- * Must be called with siglock held.
- */
void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
struct task_cputime cputime;
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 255ce138b652..abfaf3d9a29f 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -530,7 +530,7 @@ again:
update_rq_clock(rq);
dl_se->dl_throttled = 0;
dl_se->dl_yielded = 0;
- if (p->on_rq) {
+ if (task_on_rq_queued(p)) {
enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
if (task_has_dl_policy(rq->curr))
check_preempt_curr_dl(rq, p, 0);
@@ -997,10 +997,7 @@ static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
#ifdef CONFIG_SCHED_HRTICK
static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
{
- s64 delta = p->dl.dl_runtime - p->dl.runtime;
-
- if (delta > 10000)
- hrtick_start(rq, p->dl.runtime);
+ hrtick_start(rq, p->dl.runtime);
}
#endif
@@ -1030,7 +1027,7 @@ struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
* means a stop task can slip in, in which case we need to
* re-start task selection.
*/
- if (rq->stop && rq->stop->on_rq)
+ if (rq->stop && task_on_rq_queued(rq->stop))
return RETRY_TASK;
}
@@ -1124,10 +1121,8 @@ static void set_curr_task_dl(struct rq *rq)
static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
{
if (!task_running(rq, p) &&
- (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
- (p->nr_cpus_allowed > 1))
+ cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
return 1;
-
return 0;
}
@@ -1169,6 +1164,13 @@ static int find_later_rq(struct task_struct *task)
if (task->nr_cpus_allowed == 1)
return -1;
+ /*
+ * We have to consider system topology and task affinity
+ * first, then we can look for a suitable cpu.
+ */
+ cpumask_copy(later_mask, task_rq(task)->rd->span);
+ cpumask_and(later_mask, later_mask, cpu_active_mask);
+ cpumask_and(later_mask, later_mask, &task->cpus_allowed);
best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
task, later_mask);
if (best_cpu == -1)
@@ -1257,7 +1259,8 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
if (unlikely(task_rq(task) != rq ||
!cpumask_test_cpu(later_rq->cpu,
&task->cpus_allowed) ||
- task_running(rq, task) || !task->on_rq)) {
+ task_running(rq, task) ||
+ !task_on_rq_queued(task))) {
double_unlock_balance(rq, later_rq);
later_rq = NULL;
break;
@@ -1296,7 +1299,7 @@ static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
BUG_ON(task_current(rq, p));
BUG_ON(p->nr_cpus_allowed <= 1);
- BUG_ON(!p->on_rq);
+ BUG_ON(!task_on_rq_queued(p));
BUG_ON(!dl_task(p));
return p;
@@ -1443,7 +1446,7 @@ static int pull_dl_task(struct rq *this_rq)
dl_time_before(p->dl.deadline,
this_rq->dl.earliest_dl.curr))) {
WARN_ON(p == src_rq->curr);
- WARN_ON(!p->on_rq);
+ WARN_ON(!task_on_rq_queued(p));
/*
* Then we pull iff p has actually an earlier
@@ -1569,6 +1572,8 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
if (hrtimer_active(&p->dl.dl_timer) && !dl_policy(p->policy))
hrtimer_try_to_cancel(&p->dl.dl_timer);
+ __dl_clear_params(p);
+
#ifdef CONFIG_SMP
/*
* Since this might be the only -deadline task on the rq,
@@ -1596,7 +1601,7 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
if (unlikely(p->dl.dl_throttled))
return;
- if (p->on_rq && rq->curr != p) {
+ if (task_on_rq_queued(p) && rq->curr != p) {
#ifdef CONFIG_SMP
if (rq->dl.overloaded && push_dl_task(rq) && rq != task_rq(p))
/* Only reschedule if pushing failed */
@@ -1614,7 +1619,7 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
static void prio_changed_dl(struct rq *rq, struct task_struct *p,
int oldprio)
{
- if (p->on_rq || rq->curr == p) {
+ if (task_on_rq_queued(p) || rq->curr == p) {
#ifdef CONFIG_SMP
/*
* This might be too much, but unfortunately
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 627b3c34b821..ce33780d8f20 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -150,7 +150,6 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
struct task_struct *g, *p;
- unsigned long flags;
SEQ_printf(m,
"\nrunnable tasks:\n"
@@ -159,16 +158,14 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
"------------------------------------------------------"
"----------------------------------------------------\n");
- read_lock_irqsave(&tasklist_lock, flags);
-
- do_each_thread(g, p) {
+ rcu_read_lock();
+ for_each_process_thread(g, p) {
if (task_cpu(p) != rq_cpu)
continue;
print_task(m, rq, p);
- } while_each_thread(g, p);
-
- read_unlock_irqrestore(&tasklist_lock, flags);
+ }
+ rcu_read_unlock();
}
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
@@ -333,9 +330,7 @@ do { \
print_cfs_stats(m, cpu);
print_rt_stats(m, cpu);
- rcu_read_lock();
print_rq(m, rq, cpu);
- rcu_read_unlock();
spin_unlock_irqrestore(&sched_debug_lock, flags);
SEQ_printf(m, "\n");
}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 82088b29704e..b78280c59b46 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -23,6 +23,7 @@
#include <linux/latencytop.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
+#include <linux/cpuidle.h>
#include <linux/slab.h>
#include <linux/profile.h>
#include <linux/interrupt.h>
@@ -665,6 +666,7 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
}
#ifdef CONFIG_SMP
+static int select_idle_sibling(struct task_struct *p, int cpu);
static unsigned long task_h_load(struct task_struct *p);
static inline void __update_task_entity_contrib(struct sched_entity *se);
@@ -1038,7 +1040,8 @@ struct numa_stats {
*/
static void update_numa_stats(struct numa_stats *ns, int nid)
{
- int cpu, cpus = 0;
+ int smt, cpu, cpus = 0;
+ unsigned long capacity;
memset(ns, 0, sizeof(*ns));
for_each_cpu(cpu, cpumask_of_node(nid)) {
@@ -1062,8 +1065,12 @@ static void update_numa_stats(struct numa_stats *ns, int nid)
if (!cpus)
return;
- ns->task_capacity =
- DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_CAPACITY_SCALE);
+ /* smt := ceil(cpus / capacity), assumes: 1 < smt_power < 2 */
+ smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, ns->compute_capacity);
+ capacity = cpus / smt; /* cores */
+
+ ns->task_capacity = min_t(unsigned, capacity,
+ DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_CAPACITY_SCALE));
ns->has_free_capacity = (ns->nr_running < ns->task_capacity);
}
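Illustrative arithmetic for the smt/capacity estimate above (the numbers are assumed, not taken from the patch): with SCHED_CAPACITY_SCALE = 1024 and 8 SMT siblings reporting compute_capacity = 4700, smt = DIV_ROUND_UP(1024 * 8, 4700) = 2, capacity = 8 / 2 = 4 cores, and task_capacity = min(4, DIV_ROUND_CLOSEST(4700, 1024)) = min(4, 5) = 4.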
@@ -1206,7 +1213,7 @@ static void task_numa_compare(struct task_numa_env *env,
if (!cur) {
/* Is there capacity at our destination? */
- if (env->src_stats.has_free_capacity &&
+ if (env->src_stats.nr_running <= env->src_stats.task_capacity &&
!env->dst_stats.has_free_capacity)
goto unlock;
@@ -1252,6 +1259,13 @@ balance:
if (load_too_imbalanced(src_load, dst_load, env))
goto unlock;
+ /*
+ * One idle CPU per node is evaluated for a task numa move.
+ * Call select_idle_sibling to maybe find a better one.
+ */
+ if (!cur)
+ env->dst_cpu = select_idle_sibling(env->p, env->dst_cpu);
+
assign:
task_numa_assign(env, cur, imp);
unlock:
@@ -1775,7 +1789,7 @@ void task_numa_free(struct task_struct *p)
list_del(&p->numa_entry);
grp->nr_tasks--;
spin_unlock_irqrestore(&grp->lock, flags);
- rcu_assign_pointer(p->numa_group, NULL);
+ RCU_INIT_POINTER(p->numa_group, NULL);
put_numa_group(grp);
}
@@ -1804,10 +1818,6 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
if (!p->mm)
return;
- /* Do not worry about placement if exiting */
- if (p->state == TASK_DEAD)
- return;
-
/* Allocate buffer to track faults on a per-node basis */
if (unlikely(!p->numa_faults_memory)) {
int size = sizeof(*p->numa_faults_memory) *
@@ -2211,8 +2221,8 @@ static __always_inline u64 decay_load(u64 val, u64 n)
/*
* As y^PERIOD = 1/2, we can combine
- * y^n = 1/2^(n/PERIOD) * k^(n%PERIOD)
- * With a look-up table which covers k^n (n<PERIOD)
+ * y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
+ * With a look-up table which covers y^n (n<PERIOD)
*
* To achieve constant time decay_load.
*/
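The corrected identity is easy to check numerically; a minimal user-space sketch, assuming the load-tracking PERIOD of 32 (the exponent 100 is just an example value):

#include <math.h>
#include <stdio.h>

int main(void)
{
	const int period = 32;
	const double y = pow(0.5, 1.0 / period);	/* y^PERIOD == 1/2 */
	const int n = 100;

	double direct = pow(y, n);
	/* split the exponent: y^n = (1/2)^(n/PERIOD) * y^(n%PERIOD) */
	double split = pow(0.5, n / period) * pow(y, n % period);

	printf("%.12f %.12f\n", direct, split);	/* both print the same value */
	return 0;
}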
@@ -2377,6 +2387,9 @@ static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
tg_contrib = cfs_rq->runnable_load_avg + cfs_rq->blocked_load_avg;
tg_contrib -= cfs_rq->tg_load_contrib;
+ if (!tg_contrib)
+ return;
+
if (force_update || abs(tg_contrib) > cfs_rq->tg_load_contrib / 8) {
atomic_long_add(tg_contrib, &tg->load_avg);
cfs_rq->tg_load_contrib += tg_contrib;
@@ -3892,14 +3905,6 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
resched_curr(rq);
return;
}
-
- /*
- * Don't schedule slices shorter than 10000ns, that just
- * doesn't make sense. Rely on vruntime for fairness.
- */
- if (rq->curr != p)
- delta = max_t(s64, 10000LL, delta);
-
hrtick_start(rq, delta);
}
}
@@ -4087,7 +4092,7 @@ static unsigned long capacity_of(int cpu)
static unsigned long cpu_avg_load_per_task(int cpu)
{
struct rq *rq = cpu_rq(cpu);
- unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
+ unsigned long nr_running = ACCESS_ONCE(rq->cfs.h_nr_running);
unsigned long load_avg = rq->cfs.runnable_load_avg;
if (nr_running)
@@ -4276,8 +4281,8 @@ static int wake_wide(struct task_struct *p)
static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
{
s64 this_load, load;
+ s64 this_eff_load, prev_eff_load;
int idx, this_cpu, prev_cpu;
- unsigned long tl_per_task;
struct task_group *tg;
unsigned long weight;
int balanced;
@@ -4320,47 +4325,30 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
* Otherwise check if either cpus are near enough in load to allow this
* task to be woken on this_cpu.
*/
- if (this_load > 0) {
- s64 this_eff_load, prev_eff_load;
+ this_eff_load = 100;
+ this_eff_load *= capacity_of(prev_cpu);
- this_eff_load = 100;
- this_eff_load *= capacity_of(prev_cpu);
+ prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
+ prev_eff_load *= capacity_of(this_cpu);
+
+ if (this_load > 0) {
this_eff_load *= this_load +
effective_load(tg, this_cpu, weight, weight);
- prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
- prev_eff_load *= capacity_of(this_cpu);
prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
+ }
- balanced = this_eff_load <= prev_eff_load;
- } else
- balanced = true;
-
- /*
- * If the currently running task will sleep within
- * a reasonable amount of time then attract this newly
- * woken task:
- */
- if (sync && balanced)
- return 1;
+ balanced = this_eff_load <= prev_eff_load;
schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
- tl_per_task = cpu_avg_load_per_task(this_cpu);
- if (balanced ||
- (this_load <= load &&
- this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
- /*
- * This domain has SD_WAKE_AFFINE and
- * p is cache cold in this domain, and
- * there is no bad imbalance.
- */
- schedstat_inc(sd, ttwu_move_affine);
- schedstat_inc(p, se.statistics.nr_wakeups_affine);
+ if (!balanced)
+ return 0;
- return 1;
- }
- return 0;
+ schedstat_inc(sd, ttwu_move_affine);
+ schedstat_inc(p, se.statistics.nr_wakeups_affine);
+
+ return 1;
}
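Illustrative arithmetic for the rewritten comparison (assuming imbalance_pct = 125 and equal CPU capacities, values not taken from the patch): prev_eff_load scales by 100 + (125 - 100) / 2 = 112 while this_eff_load scales by 100, so the wakeup is still treated as balanced as long as the effective load on the waking CPU's side is no more than about 12% higher than on prev_cpu's side.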
/*
@@ -4428,20 +4416,46 @@ static int
find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
{
unsigned long load, min_load = ULONG_MAX;
- int idlest = -1;
+ unsigned int min_exit_latency = UINT_MAX;
+ u64 latest_idle_timestamp = 0;
+ int least_loaded_cpu = this_cpu;
+ int shallowest_idle_cpu = -1;
int i;
/* Traverse only the allowed CPUs */
for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
- load = weighted_cpuload(i);
-
- if (load < min_load || (load == min_load && i == this_cpu)) {
- min_load = load;
- idlest = i;
+ if (idle_cpu(i)) {
+ struct rq *rq = cpu_rq(i);
+ struct cpuidle_state *idle = idle_get_state(rq);
+ if (idle && idle->exit_latency < min_exit_latency) {
+ /*
+ * We give priority to a CPU whose idle state
+ * has the smallest exit latency irrespective
+ * of any idle timestamp.
+ */
+ min_exit_latency = idle->exit_latency;
+ latest_idle_timestamp = rq->idle_stamp;
+ shallowest_idle_cpu = i;
+ } else if ((!idle || idle->exit_latency == min_exit_latency) &&
+ rq->idle_stamp > latest_idle_timestamp) {
+ /*
+ * If equal or no active idle state, then
+ * the most recently idled CPU might have
+ * a warmer cache.
+ */
+ latest_idle_timestamp = rq->idle_stamp;
+ shallowest_idle_cpu = i;
+ }
+ } else {
+ load = weighted_cpuload(i);
+ if (load < min_load || (load == min_load && i == this_cpu)) {
+ min_load = load;
+ least_loaded_cpu = i;
+ }
}
}
- return idlest;
+ return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
}
/*
@@ -4513,11 +4527,8 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
if (p->nr_cpus_allowed == 1)
return prev_cpu;
- if (sd_flag & SD_BALANCE_WAKE) {
- if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
- want_affine = 1;
- new_cpu = prev_cpu;
- }
+ if (sd_flag & SD_BALANCE_WAKE)
+ want_affine = cpumask_test_cpu(cpu, tsk_cpus_allowed(p));
rcu_read_lock();
for_each_domain(cpu, tmp) {
@@ -4704,7 +4715,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
return;
/*
- * This is possible from callers such as move_task(), in which we
+ * This is possible from callers such as attach_tasks(), in which we
* unconditionally check_prempt_curr() after an enqueue (which may have
* lead to a throttle). This both saves work and prevents false
* next-buddy nomination below.
@@ -5112,27 +5123,18 @@ struct lb_env {
unsigned int loop_max;
enum fbq_type fbq_type;
+ struct list_head tasks;
};
/*
- * move_task - move a task from one runqueue to another runqueue.
- * Both runqueues must be locked.
- */
-static void move_task(struct task_struct *p, struct lb_env *env)
-{
- deactivate_task(env->src_rq, p, 0);
- set_task_cpu(p, env->dst_cpu);
- activate_task(env->dst_rq, p, 0);
- check_preempt_curr(env->dst_rq, p, 0);
-}
-
-/*
* Is this task likely cache-hot:
*/
static int task_hot(struct task_struct *p, struct lb_env *env)
{
s64 delta;
+ lockdep_assert_held(&env->src_rq->lock);
+
if (p->sched_class != &fair_sched_class)
return 0;
@@ -5252,6 +5254,9 @@ static
int can_migrate_task(struct task_struct *p, struct lb_env *env)
{
int tsk_cache_hot = 0;
+
+ lockdep_assert_held(&env->src_rq->lock);
+
/*
* We do not migrate tasks that are:
* 1) throttled_lb_pair, or
@@ -5310,24 +5315,12 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
if (!tsk_cache_hot)
tsk_cache_hot = migrate_degrades_locality(p, env);
- if (migrate_improves_locality(p, env)) {
-#ifdef CONFIG_SCHEDSTATS
+ if (migrate_improves_locality(p, env) || !tsk_cache_hot ||
+ env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
if (tsk_cache_hot) {
schedstat_inc(env->sd, lb_hot_gained[env->idle]);
schedstat_inc(p, se.statistics.nr_forced_migrations);
}
-#endif
- return 1;
- }
-
- if (!tsk_cache_hot ||
- env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
-
- if (tsk_cache_hot) {
- schedstat_inc(env->sd, lb_hot_gained[env->idle]);
- schedstat_inc(p, se.statistics.nr_forced_migrations);
- }
-
return 1;
}
@@ -5336,47 +5329,63 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
}
/*
- * move_one_task tries to move exactly one task from busiest to this_rq, as
+ * detach_task() -- detach the task for the migration specified in env
+ */
+static void detach_task(struct task_struct *p, struct lb_env *env)
+{
+ lockdep_assert_held(&env->src_rq->lock);
+
+ deactivate_task(env->src_rq, p, 0);
+ p->on_rq = TASK_ON_RQ_MIGRATING;
+ set_task_cpu(p, env->dst_cpu);
+}
+
+/*
+ * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as
* part of active balancing operations within "domain".
- * Returns 1 if successful and 0 otherwise.
*
- * Called with both runqueues locked.
+ * Returns a task if successful and NULL otherwise.
*/
-static int move_one_task(struct lb_env *env)
+static struct task_struct *detach_one_task(struct lb_env *env)
{
struct task_struct *p, *n;
+ lockdep_assert_held(&env->src_rq->lock);
+
list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
if (!can_migrate_task(p, env))
continue;
- move_task(p, env);
+ detach_task(p, env);
+
/*
- * Right now, this is only the second place move_task()
- * is called, so we can safely collect move_task()
- * stats here rather than inside move_task().
+ * Right now, this is only the second place where
+ * lb_gained[env->idle] is updated (the other is detach_tasks()),
+ * so we can safely collect stats here rather than
+ * inside detach_tasks().
*/
schedstat_inc(env->sd, lb_gained[env->idle]);
- return 1;
+ return p;
}
- return 0;
+ return NULL;
}
static const unsigned int sched_nr_migrate_break = 32;
/*
- * move_tasks tries to move up to imbalance weighted load from busiest to
- * this_rq, as part of a balancing operation within domain "sd".
- * Returns 1 if successful and 0 otherwise.
+ * detach_tasks() -- tries to detach up to imbalance weighted load from
+ * busiest_rq, as part of a balancing operation within domain "sd".
*
- * Called with both runqueues locked.
+ * Returns number of detached tasks if successful and 0 otherwise.
*/
-static int move_tasks(struct lb_env *env)
+static int detach_tasks(struct lb_env *env)
{
struct list_head *tasks = &env->src_rq->cfs_tasks;
struct task_struct *p;
unsigned long load;
- int pulled = 0;
+ int detached = 0;
+
+ lockdep_assert_held(&env->src_rq->lock);
if (env->imbalance <= 0)
return 0;
@@ -5407,14 +5416,16 @@ static int move_tasks(struct lb_env *env)
if ((load / 2) > env->imbalance)
goto next;
- move_task(p, env);
- pulled++;
+ detach_task(p, env);
+ list_add(&p->se.group_node, &env->tasks);
+
+ detached++;
env->imbalance -= load;
#ifdef CONFIG_PREEMPT
/*
* NEWIDLE balancing is a source of latency, so preemptible
- * kernels will stop after the first task is pulled to minimize
+ * kernels will stop after the first task is detached to minimize
* the critical section.
*/
if (env->idle == CPU_NEWLY_IDLE)
@@ -5434,13 +5445,58 @@ next:
}
/*
- * Right now, this is one of only two places move_task() is called,
- * so we can safely collect move_task() stats here rather than
- * inside move_task().
+ * Right now, this is one of only two places we collect this stat
+ * so we can safely collect detach_one_task() stats here rather
+ * than inside detach_one_task().
*/
- schedstat_add(env->sd, lb_gained[env->idle], pulled);
+ schedstat_add(env->sd, lb_gained[env->idle], detached);
+
+ return detached;
+}
+
+/*
+ * attach_task() -- attach the task detached by detach_task() to its new rq.
+ */
+static void attach_task(struct rq *rq, struct task_struct *p)
+{
+ lockdep_assert_held(&rq->lock);
+
+ BUG_ON(task_rq(p) != rq);
+ p->on_rq = TASK_ON_RQ_QUEUED;
+ activate_task(rq, p, 0);
+ check_preempt_curr(rq, p, 0);
+}
+
+/*
+ * attach_one_task() -- attaches the task returned from detach_one_task() to
+ * its new rq.
+ */
+static void attach_one_task(struct rq *rq, struct task_struct *p)
+{
+ raw_spin_lock(&rq->lock);
+ attach_task(rq, p);
+ raw_spin_unlock(&rq->lock);
+}
+
+/*
+ * attach_tasks() -- attaches all tasks detached by detach_tasks() to their
+ * new rq.
+ */
+static void attach_tasks(struct lb_env *env)
+{
+ struct list_head *tasks = &env->tasks;
+ struct task_struct *p;
+
+ raw_spin_lock(&env->dst_rq->lock);
+
+ while (!list_empty(tasks)) {
+ p = list_first_entry(tasks, struct task_struct, se.group_node);
+ list_del_init(&p->se.group_node);
- return pulled;
+ attach_task(env->dst_rq, p);
+ }
+
+ raw_spin_unlock(&env->dst_rq->lock);
}
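A toy user-space sketch of the detach/attach split introduced above: items are pulled off the source list under its lock into a private list, the source lock is dropped, and the items are then attached under the destination lock, so the two locks are never held at the same time. The pthread mutexes and node type are illustrative assumptions; the kernel additionally marks each detached task TASK_ON_RQ_MIGRATING, as the comment in load_balance() notes.

#include <pthread.h>
#include <stdio.h>

struct node {
	struct node *next;
	int id;
};

struct queue {
	pthread_mutex_t lock;
	struct node *head;
};

/* Pull every node off @src under its lock; the returned list is private. */
static struct node *detach_all(struct queue *src)
{
	struct node *detached;

	pthread_mutex_lock(&src->lock);
	detached = src->head;
	src->head = NULL;
	pthread_mutex_unlock(&src->lock);

	return detached;
}

/* Push the privately held nodes onto @dst under its own lock. */
static void attach_all(struct queue *dst, struct node *detached)
{
	pthread_mutex_lock(&dst->lock);
	while (detached) {
		struct node *n = detached;

		detached = n->next;
		n->next = dst->head;
		dst->head = n;
	}
	pthread_mutex_unlock(&dst->lock);
}

int main(void)
{
	struct node a = { NULL, 1 }, b = { &a, 2 };
	struct queue src = { PTHREAD_MUTEX_INITIALIZER, &b };
	struct queue dst = { PTHREAD_MUTEX_INITIALIZER, NULL };

	attach_all(&dst, detach_all(&src));

	for (struct node *n = dst.head; n; n = n->next)
		printf("moved node %d\n", n->id);
	return 0;
}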
#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -5559,6 +5615,13 @@ static unsigned long task_h_load(struct task_struct *p)
#endif
/********** Helpers for find_busiest_group ************************/
+
+enum group_type {
+ group_other = 0,
+ group_imbalanced,
+ group_overloaded,
+};
+
/*
* sg_lb_stats - stats of a sched_group required for load_balancing
*/
@@ -5572,7 +5635,7 @@ struct sg_lb_stats {
unsigned int group_capacity_factor;
unsigned int idle_cpus;
unsigned int group_weight;
- int group_imb; /* Is there an imbalance in the group ? */
+ enum group_type group_type;
int group_has_free_capacity;
#ifdef CONFIG_NUMA_BALANCING
unsigned int nr_numa_running;
@@ -5610,6 +5673,8 @@ static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
.total_capacity = 0UL,
.busiest_stat = {
.avg_load = 0UL,
+ .sum_nr_running = 0,
+ .group_type = group_other,
},
};
}
@@ -5652,19 +5717,17 @@ unsigned long __weak arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
return default_scale_capacity(sd, cpu);
}
-static unsigned long default_scale_smt_capacity(struct sched_domain *sd, int cpu)
+static unsigned long default_scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
- unsigned long weight = sd->span_weight;
- unsigned long smt_gain = sd->smt_gain;
+ if ((sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
+ return sd->smt_gain / sd->span_weight;
- smt_gain /= weight;
-
- return smt_gain;
+ return SCHED_CAPACITY_SCALE;
}
-unsigned long __weak arch_scale_smt_capacity(struct sched_domain *sd, int cpu)
+unsigned long __weak arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
- return default_scale_smt_capacity(sd, cpu);
+ return default_scale_cpu_capacity(sd, cpu);
}
static unsigned long scale_rt_capacity(int cpu)
@@ -5703,18 +5766,15 @@ static unsigned long scale_rt_capacity(int cpu)
static void update_cpu_capacity(struct sched_domain *sd, int cpu)
{
- unsigned long weight = sd->span_weight;
unsigned long capacity = SCHED_CAPACITY_SCALE;
struct sched_group *sdg = sd->groups;
- if ((sd->flags & SD_SHARE_CPUCAPACITY) && weight > 1) {
- if (sched_feat(ARCH_CAPACITY))
- capacity *= arch_scale_smt_capacity(sd, cpu);
- else
- capacity *= default_scale_smt_capacity(sd, cpu);
+ if (sched_feat(ARCH_CAPACITY))
+ capacity *= arch_scale_cpu_capacity(sd, cpu);
+ else
+ capacity *= default_scale_cpu_capacity(sd, cpu);
- capacity >>= SCHED_CAPACITY_SHIFT;
- }
+ capacity >>= SCHED_CAPACITY_SHIFT;
sdg->sgc->capacity_orig = capacity;
@@ -5891,6 +5951,18 @@ static inline int sg_capacity_factor(struct lb_env *env, struct sched_group *gro
return capacity_factor;
}
+static enum group_type
+group_classify(struct sched_group *group, struct sg_lb_stats *sgs)
+{
+ if (sgs->sum_nr_running > sgs->group_capacity_factor)
+ return group_overloaded;
+
+ if (sg_imbalanced(group))
+ return group_imbalanced;
+
+ return group_other;
+}
+
/**
* update_sg_lb_stats - Update sched_group's statistics for load balancing.
* @env: The load balancing environment.
@@ -5920,7 +5992,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
load = source_load(i, load_idx);
sgs->group_load += load;
- sgs->sum_nr_running += rq->nr_running;
+ sgs->sum_nr_running += rq->cfs.h_nr_running;
if (rq->nr_running > 1)
*overload = true;
@@ -5942,9 +6014,8 @@ static inline void update_sg_lb_stats(struct lb_env *env,
sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
sgs->group_weight = group->group_weight;
-
- sgs->group_imb = sg_imbalanced(group);
sgs->group_capacity_factor = sg_capacity_factor(env, group);
+ sgs->group_type = group_classify(group, sgs);
if (sgs->group_capacity_factor > sgs->sum_nr_running)
sgs->group_has_free_capacity = 1;
@@ -5968,13 +6039,19 @@ static bool update_sd_pick_busiest(struct lb_env *env,
struct sched_group *sg,
struct sg_lb_stats *sgs)
{
- if (sgs->avg_load <= sds->busiest_stat.avg_load)
- return false;
+ struct sg_lb_stats *busiest = &sds->busiest_stat;
- if (sgs->sum_nr_running > sgs->group_capacity_factor)
+ if (sgs->group_type > busiest->group_type)
return true;
- if (sgs->group_imb)
+ if (sgs->group_type < busiest->group_type)
+ return false;
+
+ if (sgs->avg_load <= busiest->avg_load)
+ return false;
+
+ /* This is the busiest node in its class. */
+ if (!(env->sd->flags & SD_ASYM_PACKING))
return true;
/*
@@ -5982,8 +6059,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
* numbered CPUs in the group, therefore mark all groups
* higher than ourself as busy.
*/
- if ((env->sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
- env->dst_cpu < group_first_cpu(sg)) {
+ if (sgs->sum_nr_running && env->dst_cpu < group_first_cpu(sg)) {
if (!sds->busiest)
return true;
@@ -6228,7 +6304,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
local = &sds->local_stat;
busiest = &sds->busiest_stat;
- if (busiest->group_imb) {
+ if (busiest->group_type == group_imbalanced) {
/*
* In the group_imb case we cannot rely on group-wide averages
* to ensure cpu-load equilibrium, look at wider averages. XXX
@@ -6248,12 +6324,11 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
return fix_small_imbalance(env, sds);
}
- if (!busiest->group_imb) {
- /*
- * Don't want to pull so many tasks that a group would go idle.
- * Except of course for the group_imb case, since then we might
- * have to drop below capacity to reach cpu-load equilibrium.
- */
+ /*
+ * If there aren't any idle cpus, avoid creating some.
+ */
+ if (busiest->group_type == group_overloaded &&
+ local->group_type == group_overloaded) {
load_above_capacity =
(busiest->sum_nr_running - busiest->group_capacity_factor);
@@ -6337,7 +6412,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
* work because they assume all things are equal, which typically
* isn't true due to cpus_allowed constraints and the like.
*/
- if (busiest->group_imb)
+ if (busiest->group_type == group_imbalanced)
goto force_balance;
/* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
@@ -6346,7 +6421,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
goto force_balance;
/*
- * If the local group is more busy than the selected busiest group
+ * If the local group is busier than the selected busiest group
* don't try and pull any tasks.
*/
if (local->avg_load >= busiest->avg_load)
@@ -6361,13 +6436,14 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
if (env->idle == CPU_IDLE) {
/*
- * This cpu is idle. If the busiest group load doesn't
- * have more tasks than the number of available cpu's and
- * there is no imbalance between this and busiest group
- * wrt to idle cpu's, it is balanced.
+ * This cpu is idle. If the busiest group is not overloaded
+ * and there is no imbalance between this and busiest group
+ * wrt idle cpus, it is balanced. The imbalance becomes
+ * significant only if the difference is greater than 1; otherwise
+ * we might end up just moving the imbalance to another group.
*/
- if ((local->idle_cpus < busiest->idle_cpus) &&
- busiest->sum_nr_running <= busiest->group_weight)
+ if ((busiest->group_type != group_overloaded) &&
+ (local->idle_cpus <= (busiest->idle_cpus + 1)))
goto out_balanced;
} else {
/*
@@ -6550,6 +6626,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
.loop_break = sched_nr_migrate_break,
.cpus = cpus,
.fbq_type = all,
+ .tasks = LIST_HEAD_INIT(env.tasks),
};
/*
@@ -6599,23 +6676,30 @@ redo:
env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
more_balance:
- local_irq_save(flags);
- double_rq_lock(env.dst_rq, busiest);
+ raw_spin_lock_irqsave(&busiest->lock, flags);
/*
* cur_ld_moved - load moved in current iteration
* ld_moved - cumulative load moved across iterations
*/
- cur_ld_moved = move_tasks(&env);
- ld_moved += cur_ld_moved;
- double_rq_unlock(env.dst_rq, busiest);
- local_irq_restore(flags);
+ cur_ld_moved = detach_tasks(&env);
/*
- * some other cpu did the load balance for us.
+ * We've detached some tasks from busiest_rq. Every
+ * task is marked TASK_ON_RQ_MIGRATING, so we can safely
+ * drop busiest->lock and be sure that nobody can
+ * manipulate the tasks in parallel.
+ * See the task_rq_lock() family for details.
*/
- if (cur_ld_moved && env.dst_cpu != smp_processor_id())
- resched_cpu(env.dst_cpu);
+
+ raw_spin_unlock(&busiest->lock);
+
+ if (cur_ld_moved) {
+ attach_tasks(&env);
+ ld_moved += cur_ld_moved;
+ }
+
+ local_irq_restore(flags);
if (env.flags & LBF_NEED_BREAK) {
env.flags &= ~LBF_NEED_BREAK;
@@ -6665,10 +6749,8 @@ more_balance:
if (sd_parent) {
int *group_imbalance = &sd_parent->groups->sgc->imbalance;
- if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) {
+ if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0)
*group_imbalance = 1;
- } else if (*group_imbalance)
- *group_imbalance = 0;
}
/* All tasks on this runqueue were pinned by CPU affinity */
@@ -6679,7 +6761,7 @@ more_balance:
env.loop_break = sched_nr_migrate_break;
goto redo;
}
- goto out_balanced;
+ goto out_all_pinned;
}
}
@@ -6744,7 +6826,7 @@ more_balance:
* If we've begun active balancing, start to back off. This
* case may not be covered by the all_pinned logic if there
* is only 1 task on the busy runqueue (because we don't call
- * move_tasks).
+ * detach_tasks).
*/
if (sd->balance_interval < sd->max_interval)
sd->balance_interval *= 2;
@@ -6753,6 +6835,23 @@ more_balance:
goto out;
out_balanced:
+ /*
+ * We reached balance even though we may have faced some affinity
+ * constraints. Clear the imbalance flag if it was set.
+ */
+ if (sd_parent) {
+ int *group_imbalance = &sd_parent->groups->sgc->imbalance;
+
+ if (*group_imbalance)
+ *group_imbalance = 0;
+ }
+
+out_all_pinned:
+ /*
+ * We reached balance because all tasks are pinned at this level, so
+ * we can't migrate them. Leave the imbalance flag set so the parent
+ * level can try to migrate them.
+ */
schedstat_inc(sd, lb_balanced[idle]);
sd->nr_balance_failed = 0;
@@ -6914,6 +7013,7 @@ static int active_load_balance_cpu_stop(void *data)
int target_cpu = busiest_rq->push_cpu;
struct rq *target_rq = cpu_rq(target_cpu);
struct sched_domain *sd;
+ struct task_struct *p = NULL;
raw_spin_lock_irq(&busiest_rq->lock);
@@ -6933,9 +7033,6 @@ static int active_load_balance_cpu_stop(void *data)
*/
BUG_ON(busiest_rq == target_rq);
- /* move a task from busiest_rq to target_rq */
- double_lock_balance(busiest_rq, target_rq);
-
/* Search for an sd spanning us and the target CPU. */
rcu_read_lock();
for_each_domain(target_cpu, sd) {
@@ -6956,16 +7053,22 @@ static int active_load_balance_cpu_stop(void *data)
schedstat_inc(sd, alb_count);
- if (move_one_task(&env))
+ p = detach_one_task(&env);
+ if (p)
schedstat_inc(sd, alb_pushed);
else
schedstat_inc(sd, alb_failed);
}
rcu_read_unlock();
- double_unlock_balance(busiest_rq, target_rq);
out_unlock:
busiest_rq->active_balance = 0;
- raw_spin_unlock_irq(&busiest_rq->lock);
+ raw_spin_unlock(&busiest_rq->lock);
+
+ if (p)
+ attach_one_task(target_rq, p);
+
+ local_irq_enable();
+
return 0;
}
@@ -7465,7 +7568,7 @@ static void task_fork_fair(struct task_struct *p)
static void
prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
{
- if (!p->se.on_rq)
+ if (!task_on_rq_queued(p))
return;
/*
@@ -7490,11 +7593,11 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
* switched back to the fair class the enqueue_entity(.flags=0) will
* do the right thing.
*
- * If it's on_rq, then the dequeue_entity(.flags=0) will already
- * have normalized the vruntime, if it's !on_rq, then only when
+ * If it's queued, then the dequeue_entity(.flags=0) will already
+ * have normalized the vruntime; if it's !queued, then only when
* the task is sleeping will it still have non-normalized vruntime.
*/
- if (!p->on_rq && p->state != TASK_RUNNING) {
+ if (!task_on_rq_queued(p) && p->state != TASK_RUNNING) {
/*
* Fix up our vruntime so that the current sleep doesn't
* cause 'unlimited' sleep bonus.
@@ -7521,15 +7624,15 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
*/
static void switched_to_fair(struct rq *rq, struct task_struct *p)
{
- struct sched_entity *se = &p->se;
#ifdef CONFIG_FAIR_GROUP_SCHED
+ struct sched_entity *se = &p->se;
/*
* Since the real-depth could have been changed (only FAIR
* class maintain depth value), reset depth properly.
*/
se->depth = se->parent ? se->parent->depth + 1 : 0;
#endif
- if (!se->on_rq)
+ if (!task_on_rq_queued(p))
return;
/*
@@ -7575,7 +7678,7 @@ void init_cfs_rq(struct cfs_rq *cfs_rq)
}
#ifdef CONFIG_FAIR_GROUP_SCHED
-static void task_move_group_fair(struct task_struct *p, int on_rq)
+static void task_move_group_fair(struct task_struct *p, int queued)
{
struct sched_entity *se = &p->se;
struct cfs_rq *cfs_rq;
@@ -7594,7 +7697,7 @@ static void task_move_group_fair(struct task_struct *p, int on_rq)
* fair sleeper stuff for the first placement, but who cares.
*/
/*
- * When !on_rq, vruntime of the task has usually NOT been normalized.
+ * When !queued, vruntime of the task has usually NOT been normalized.
* But there are some cases where it has already been normalized:
*
* - Moving a forked child which is waiting for being woken up by
@@ -7605,14 +7708,14 @@ static void task_move_group_fair(struct task_struct *p, int on_rq)
* To prevent boost or penalty in the new cfs_rq caused by delta
* min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
*/
- if (!on_rq && (!se->sum_exec_runtime || p->state == TASK_WAKING))
- on_rq = 1;
+ if (!queued && (!se->sum_exec_runtime || p->state == TASK_WAKING))
+ queued = 1;
- if (!on_rq)
+ if (!queued)
se->vruntime -= cfs_rq_of(se)->min_vruntime;
set_task_rq(p, task_cpu(p));
se->depth = se->parent ? se->parent->depth + 1 : 0;
- if (!on_rq) {
+ if (!queued) {
cfs_rq = cfs_rq_of(se);
se->vruntime += cfs_rq->min_vruntime;
#ifdef CONFIG_SMP
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 11e7bc434f43..c47fce75e666 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -147,6 +147,9 @@ use_default:
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu))
goto use_default;
+ /* Take note of the planned idle state. */
+ idle_set_state(this_rq(), &drv->states[next_state]);
+
/*
* Enter the idle state previously returned by the governor decision.
* This function will block until an interrupt occurs and will take
@@ -154,6 +157,9 @@ use_default:
*/
entered_state = cpuidle_enter(drv, dev, next_state);
+ /* The cpu is no longer idle or about to enter idle. */
+ idle_set_state(this_rq(), NULL);
+
if (broadcast)
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 5f6edca4fafd..87ea5bf1b87f 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1448,7 +1448,7 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev)
* means a dl or stop task can slip in, in which case we need
* to re-start task selection.
*/
- if (unlikely((rq->stop && rq->stop->on_rq) ||
+ if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) ||
rq->dl.dl_nr_running))
return RETRY_TASK;
}
@@ -1468,8 +1468,7 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev)
p = _pick_next_task_rt(rq);
/* The running task is never eligible for pushing */
- if (p)
- dequeue_pushable_task(rq, p);
+ dequeue_pushable_task(rq, p);
set_post_schedule(rq);
@@ -1624,7 +1623,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
!cpumask_test_cpu(lowest_rq->cpu,
tsk_cpus_allowed(task)) ||
task_running(rq, task) ||
- !task->on_rq)) {
+ !task_on_rq_queued(task))) {
double_unlock_balance(rq, lowest_rq);
lowest_rq = NULL;
@@ -1658,7 +1657,7 @@ static struct task_struct *pick_next_pushable_task(struct rq *rq)
BUG_ON(task_current(rq, p));
BUG_ON(p->nr_cpus_allowed <= 1);
- BUG_ON(!p->on_rq);
+ BUG_ON(!task_on_rq_queued(p));
BUG_ON(!rt_task(p));
return p;
@@ -1809,7 +1808,7 @@ static int pull_rt_task(struct rq *this_rq)
*/
if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
WARN_ON(p == src_rq->curr);
- WARN_ON(!p->on_rq);
+ WARN_ON(!task_on_rq_queued(p));
/*
* There's a chance that p is higher in priority
@@ -1870,7 +1869,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
BUG_ON(!rt_task(p));
- if (!p->on_rq)
+ if (!task_on_rq_queued(p))
return;
weight = cpumask_weight(new_mask);
@@ -1936,7 +1935,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
* we may need to handle the pulling of RT tasks
* now.
*/
- if (!p->on_rq || rq->rt.rt_nr_running)
+ if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
return;
if (pull_rt_task(rq))
@@ -1970,7 +1969,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
* If that current running task is also an RT task
* then see if we can move to another run queue.
*/
- if (p->on_rq && rq->curr != p) {
+ if (task_on_rq_queued(p) && rq->curr != p) {
#ifdef CONFIG_SMP
if (p->nr_cpus_allowed > 1 && rq->rt.overloaded &&
/* Don't resched if we changed runqueues */
@@ -1989,7 +1988,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
static void
prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
{
- if (!p->on_rq)
+ if (!task_on_rq_queued(p))
return;
if (rq->curr == p) {
@@ -2073,7 +2072,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
for_each_sched_rt_entity(rt_se) {
if (rt_se->run_list.prev != rt_se->run_list.next) {
requeue_task_rt(rq, p, 0);
- set_tsk_need_resched(p);
+ resched_curr(rq);
return;
}
}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 579712f4e9d5..6130251de280 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -14,6 +14,11 @@
#include "cpuacct.h"
struct rq;
+struct cpuidle_state;
+
+/* task_struct::on_rq states: */
+#define TASK_ON_RQ_QUEUED 1
+#define TASK_ON_RQ_MIGRATING 2
extern __read_mostly int scheduler_running;
@@ -126,6 +131,9 @@ struct rt_bandwidth {
u64 rt_runtime;
struct hrtimer rt_period_timer;
};
+
+void __dl_clear_params(struct task_struct *p);
+
/*
* To keep the bandwidth of -deadline tasks and groups under control
* we need some place where:
@@ -184,7 +192,7 @@ struct cfs_bandwidth {
raw_spinlock_t lock;
ktime_t period;
u64 quota, runtime;
- s64 hierarchal_quota;
+ s64 hierarchical_quota;
u64 runtime_expires;
int idle, timer_active;
@@ -636,6 +644,11 @@ struct rq {
#ifdef CONFIG_SMP
struct llist_head wake_list;
#endif
+
+#ifdef CONFIG_CPU_IDLE
+ /* Must be inspected within an RCU lock section */
+ struct cpuidle_state *idle_state;
+#endif
};
static inline int cpu_of(struct rq *rq)
@@ -647,7 +660,7 @@ static inline int cpu_of(struct rq *rq)
#endif
}
-DECLARE_PER_CPU(struct rq, runqueues);
+DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
#define this_rq() (&__get_cpu_var(runqueues))
@@ -942,6 +955,15 @@ static inline int task_running(struct rq *rq, struct task_struct *p)
#endif
}
+static inline int task_on_rq_queued(struct task_struct *p)
+{
+ return p->on_rq == TASK_ON_RQ_QUEUED;
+}
+
+static inline int task_on_rq_migrating(struct task_struct *p)
+{
+ return p->on_rq == TASK_ON_RQ_MIGRATING;
+}
#ifndef prepare_arch_switch
# define prepare_arch_switch(next) do { } while (0)
@@ -953,7 +975,6 @@ static inline int task_running(struct rq *rq, struct task_struct *p)
# define finish_arch_post_lock_switch() do { } while (0)
#endif
-#ifndef __ARCH_WANT_UNLOCKED_CTXSW
static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
@@ -991,35 +1012,6 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
raw_spin_unlock_irq(&rq->lock);
}
-#else /* __ARCH_WANT_UNLOCKED_CTXSW */
-static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
-{
-#ifdef CONFIG_SMP
- /*
- * We can optimise this out completely for !SMP, because the
- * SMP rebalancing from interrupt is the only thing that cares
- * here.
- */
- next->on_cpu = 1;
-#endif
- raw_spin_unlock(&rq->lock);
-}
-
-static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
-{
-#ifdef CONFIG_SMP
- /*
- * After ->on_cpu is cleared, the task can be moved to a different CPU.
- * We must ensure this doesn't happen until the switch is completely
- * finished.
- */
- smp_wmb();
- prev->on_cpu = 0;
-#endif
- local_irq_enable();
-}
-#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
-
/*
* wake flags
*/
@@ -1180,6 +1172,30 @@ static inline void idle_exit_fair(struct rq *rq) { }
#endif
+#ifdef CONFIG_CPU_IDLE
+static inline void idle_set_state(struct rq *rq,
+ struct cpuidle_state *idle_state)
+{
+ rq->idle_state = idle_state;
+}
+
+static inline struct cpuidle_state *idle_get_state(struct rq *rq)
+{
+ WARN_ON(!rcu_read_lock_held());
+ return rq->idle_state;
+}
+#else
+static inline void idle_set_state(struct rq *rq,
+ struct cpuidle_state *idle_state)
+{
+}
+
+static inline struct cpuidle_state *idle_get_state(struct rq *rq)
+{
+ return NULL;
+}
+#endif
+
extern void sysrq_sched_debug_show(void);
extern void sched_init_granularity(void);
extern void update_max_interval(void);
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index bfe0edadbfbb..67426e529f59 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -28,7 +28,7 @@ pick_next_task_stop(struct rq *rq, struct task_struct *prev)
{
struct task_struct *stop = rq->stop;
- if (!stop || !stop->on_rq)
+ if (!stop || !task_on_rq_queued(stop))
return NULL;
put_prev_task(rq, prev);
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 84922befea84..4ef9687ac115 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -21,10 +21,11 @@
#include <linux/slab.h>
#include <linux/syscalls.h>
-/* #define SECCOMP_DEBUG 1 */
+#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
+#include <asm/syscall.h>
+#endif
#ifdef CONFIG_SECCOMP_FILTER
-#include <asm/syscall.h>
#include <linux/filter.h>
#include <linux/pid.h>
#include <linux/ptrace.h>
@@ -172,10 +173,10 @@ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
*
* Returns valid seccomp BPF response codes.
*/
-static u32 seccomp_run_filters(int syscall)
+static u32 seccomp_run_filters(struct seccomp_data *sd)
{
struct seccomp_filter *f = ACCESS_ONCE(current->seccomp.filter);
- struct seccomp_data sd;
+ struct seccomp_data sd_local;
u32 ret = SECCOMP_RET_ALLOW;
/* Ensure unexpected behavior doesn't result in failing open. */
@@ -185,14 +186,17 @@ static u32 seccomp_run_filters(int syscall)
/* Make sure cross-thread synced filter points somewhere sane. */
smp_read_barrier_depends();
- populate_seccomp_data(&sd);
+ if (!sd) {
+ populate_seccomp_data(&sd_local);
+ sd = &sd_local;
+ }
/*
* All filters in the list are evaluated and the lowest BPF return
* value always takes priority (ignoring the DATA).
*/
for (; f; f = f->prev) {
- u32 cur_ret = BPF_PROG_RUN(f->prog, (void *)&sd);
+ u32 cur_ret = BPF_PROG_RUN(f->prog, (void *)sd);
if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
ret = cur_ret;
@@ -563,11 +567,55 @@ static int mode1_syscalls_32[] = {
};
#endif
-int __secure_computing(int this_syscall)
+static void __secure_computing_strict(int this_syscall)
+{
+ int *syscall_whitelist = mode1_syscalls;
+#ifdef CONFIG_COMPAT
+ if (is_compat_task())
+ syscall_whitelist = mode1_syscalls_32;
+#endif
+ do {
+ if (*syscall_whitelist == this_syscall)
+ return;
+ } while (*++syscall_whitelist);
+
+#ifdef SECCOMP_DEBUG
+ dump_stack();
+#endif
+ audit_seccomp(this_syscall, SIGKILL, SECCOMP_RET_KILL);
+ do_exit(SIGKILL);
+}
+
+#ifndef CONFIG_HAVE_ARCH_SECCOMP_FILTER
+void secure_computing_strict(int this_syscall)
+{
+ int mode = current->seccomp.mode;
+
+ if (mode == 0)
+ return;
+ else if (mode == SECCOMP_MODE_STRICT)
+ __secure_computing_strict(this_syscall);
+ else
+ BUG();
+}
+#else
+int __secure_computing(void)
{
- int exit_sig = 0;
- int *syscall;
- u32 ret;
+ u32 phase1_result = seccomp_phase1(NULL);
+
+ if (likely(phase1_result == SECCOMP_PHASE1_OK))
+ return 0;
+ else if (likely(phase1_result == SECCOMP_PHASE1_SKIP))
+ return -1;
+ else
+ return seccomp_phase2(phase1_result);
+}
+
+#ifdef CONFIG_SECCOMP_FILTER
+static u32 __seccomp_phase1_filter(int this_syscall, struct seccomp_data *sd)
+{
+ u32 filter_ret, action;
+ int data;
/*
* Make sure that any changes to mode from another thread have
@@ -575,85 +623,127 @@ int __secure_computing(int this_syscall)
*/
rmb();
- switch (current->seccomp.mode) {
- case SECCOMP_MODE_STRICT:
- syscall = mode1_syscalls;
-#ifdef CONFIG_COMPAT
- if (is_compat_task())
- syscall = mode1_syscalls_32;
+ filter_ret = seccomp_run_filters(sd);
+ data = filter_ret & SECCOMP_RET_DATA;
+ action = filter_ret & SECCOMP_RET_ACTION;
+
+ switch (action) {
+ case SECCOMP_RET_ERRNO:
+ /* Set the low-order 16 bits as an errno. */
+ syscall_set_return_value(current, task_pt_regs(current),
+ -data, 0);
+ goto skip;
+
+ case SECCOMP_RET_TRAP:
+ /* Show the handler the original registers. */
+ syscall_rollback(current, task_pt_regs(current));
+ /* Let the filter pass back 16 bits of data. */
+ seccomp_send_sigsys(this_syscall, data);
+ goto skip;
+
+ case SECCOMP_RET_TRACE:
+ return filter_ret; /* Save the rest for phase 2. */
+
+ case SECCOMP_RET_ALLOW:
+ return SECCOMP_PHASE1_OK;
+
+ case SECCOMP_RET_KILL:
+ default:
+ audit_seccomp(this_syscall, SIGSYS, action);
+ do_exit(SIGSYS);
+ }
+
+ unreachable();
+
+skip:
+ audit_seccomp(this_syscall, 0, action);
+ return SECCOMP_PHASE1_SKIP;
+}
#endif
- do {
- if (*syscall == this_syscall)
- return 0;
- } while (*++syscall);
- exit_sig = SIGKILL;
- ret = SECCOMP_RET_KILL;
- break;
+
+/**
+ * seccomp_phase1() - run fast path seccomp checks on the current syscall
+ * @sd: The seccomp_data or NULL
+ *
+ * This only reads pt_regs via the syscall_xyz helpers. The only change
+ * it will make to pt_regs is via syscall_set_return_value, and it will
+ * only do that if it returns SECCOMP_PHASE1_SKIP.
+ *
+ * If sd is provided, it will not read pt_regs at all.
+ *
+ * It may also call do_exit or force a signal; these actions must be
+ * safe.
+ *
+ * If it returns SECCOMP_PHASE1_OK, the syscall passes checks and should
+ * be processed normally.
+ *
+ * If it returns SECCOMP_PHASE1_SKIP, then the syscall should not be
+ * invoked. In this case, seccomp_phase1 will have set the return value
+ * using syscall_set_return_value.
+ *
+ * If it returns anything else, then the return value should be passed
+ * to seccomp_phase2 from a context in which ptrace hooks are safe.
+ */
+u32 seccomp_phase1(struct seccomp_data *sd)
+{
+ int mode = current->seccomp.mode;
+ int this_syscall = sd ? sd->nr :
+ syscall_get_nr(current, task_pt_regs(current));
+
+ switch (mode) {
+ case SECCOMP_MODE_STRICT:
+ __secure_computing_strict(this_syscall); /* may call do_exit */
+ return SECCOMP_PHASE1_OK;
#ifdef CONFIG_SECCOMP_FILTER
- case SECCOMP_MODE_FILTER: {
- int data;
- struct pt_regs *regs = task_pt_regs(current);
- ret = seccomp_run_filters(this_syscall);
- data = ret & SECCOMP_RET_DATA;
- ret &= SECCOMP_RET_ACTION;
- switch (ret) {
- case SECCOMP_RET_ERRNO:
- /* Set the low-order 16-bits as a errno. */
- syscall_set_return_value(current, regs,
- -data, 0);
- goto skip;
- case SECCOMP_RET_TRAP:
- /* Show the handler the original registers. */
- syscall_rollback(current, regs);
- /* Let the filter pass back 16 bits of data. */
- seccomp_send_sigsys(this_syscall, data);
- goto skip;
- case SECCOMP_RET_TRACE:
- /* Skip these calls if there is no tracer. */
- if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP)) {
- syscall_set_return_value(current, regs,
- -ENOSYS, 0);
- goto skip;
- }
- /* Allow the BPF to provide the event message */
- ptrace_event(PTRACE_EVENT_SECCOMP, data);
- /*
- * The delivery of a fatal signal during event
- * notification may silently skip tracer notification.
- * Terminating the task now avoids executing a system
- * call that may not be intended.
- */
- if (fatal_signal_pending(current))
- break;
- if (syscall_get_nr(current, regs) < 0)
- goto skip; /* Explicit request to skip. */
-
- return 0;
- case SECCOMP_RET_ALLOW:
- return 0;
- case SECCOMP_RET_KILL:
- default:
- break;
- }
- exit_sig = SIGSYS;
- break;
- }
+ case SECCOMP_MODE_FILTER:
+ return __seccomp_phase1_filter(this_syscall, sd);
#endif
default:
BUG();
}
+}
-#ifdef SECCOMP_DEBUG
- dump_stack();
-#endif
- audit_seccomp(this_syscall, exit_sig, ret);
- do_exit(exit_sig);
-#ifdef CONFIG_SECCOMP_FILTER
-skip:
- audit_seccomp(this_syscall, exit_sig, ret);
-#endif
- return -1;
+/**
+ * seccomp_phase2() - finish slow path seccomp work for the current syscall
+ * @phase1_result: The return value from seccomp_phase1()
+ *
+ * This must be called from a context in which ptrace hooks can be used.
+ *
+ * Returns 0 if the syscall should be processed or -1 to skip the syscall.
+ */
+int seccomp_phase2(u32 phase1_result)
+{
+ struct pt_regs *regs = task_pt_regs(current);
+ u32 action = phase1_result & SECCOMP_RET_ACTION;
+ int data = phase1_result & SECCOMP_RET_DATA;
+
+ BUG_ON(action != SECCOMP_RET_TRACE);
+
+ audit_seccomp(syscall_get_nr(current, regs), 0, action);
+
+ /* Skip these calls if there is no tracer. */
+ if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP)) {
+ syscall_set_return_value(current, regs,
+ -ENOSYS, 0);
+ return -1;
+ }
+
+ /* Allow the BPF to provide the event message */
+ ptrace_event(PTRACE_EVENT_SECCOMP, data);
+ /*
+ * The delivery of a fatal signal during event
+ * notification may silently skip tracer notification.
+ * Terminating the task now avoids executing a system
+ * call that may not be intended.
+ */
+ if (fatal_signal_pending(current))
+ do_exit(SIGSYS);
+ if (syscall_get_nr(current, regs) < 0)
+ return -1; /* Explicit request to skip. */
+
+ return 0;
}
+#endif /* CONFIG_HAVE_ARCH_SECCOMP_FILTER */
long prctl_get_seccomp(void)
{
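The two-phase split above only changes how the kernel dispatches on a filter's return value; the userspace ABI is unchanged. A minimal userspace sketch (not part of the patch) that exercises the SECCOMP_RET_ERRNO path handled in __seccomp_phase1_filter(), denying getpid() with EPERM:

#include <stdio.h>
#include <stddef.h>
#include <errno.h>
#include <unistd.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

int main(void)
{
	struct sock_filter filter[] = {
		/* Load the syscall number from struct seccomp_data. */
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		/* Fail getpid() with EPERM, allow everything else. */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getpid, 0, 1),
		BPF_STMT(BPF_RET | BPF_K,
			 SECCOMP_RET_ERRNO | (EPERM & SECCOMP_RET_DATA)),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = sizeof(filter) / sizeof(filter[0]),
		.filter = filter,
	};

	/* Required to install a filter without CAP_SYS_ADMIN. */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		return 1;
	if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog))
		return 1;

	errno = 0;
	printf("getpid() = %ld, errno = %d\n",
	       (long)syscall(__NR_getpid), errno);	/* -1, EPERM */
	return 0;
}

A production filter would also validate seccomp_data->arch before trusting the syscall number; that check is omitted here for brevity.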
diff --git a/kernel/smp.c b/kernel/smp.c
index aff8aa14f547..9e0d0b289118 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -13,6 +13,7 @@
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
+#include <linux/sched.h>
#include "smpboot.h"
@@ -699,3 +700,24 @@ void kick_all_cpus_sync(void)
smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
+
+/**
+ * wake_up_all_idle_cpus - break all cpus out of idle
+ * wake_up_all_idle_cpus tries to break all CPUs out of the idle state,
+ * including CPUs that are idle polling; CPUs that are not idle are
+ * left untouched.
+ */
+void wake_up_all_idle_cpus(void)
+{
+ int cpu;
+
+ preempt_disable();
+ for_each_online_cpu(cpu) {
+ if (cpu == smp_processor_id())
+ continue;
+
+ wake_up_if_idle(cpu);
+ }
+ preempt_enable();
+}
+EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);
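A hypothetical caller of the new helper (not part of the patch; resume_latency_limit_us and set_resume_latency_limit() are invented names): after tightening a wakeup-latency constraint, every idle CPU is kicked so its next idle-state selection sees the new limit.

static unsigned int resume_latency_limit_us = 100;

static void set_resume_latency_limit(unsigned int limit_us)
{
	ACCESS_ONCE(resume_latency_limit_us) = limit_us;

	/* Idle CPUs re-enter the governor once they have been woken. */
	wake_up_all_idle_cpus();
}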
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 5918d227730f..348ec763b104 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -278,7 +278,7 @@ restart:
pending >>= softirq_bit;
}
- rcu_bh_qs(smp_processor_id());
+ rcu_bh_qs();
local_irq_disable();
pending = local_softirq_pending();
diff --git a/kernel/sys.c b/kernel/sys.c
index dfce4debd138..1eaa2f0b0246 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -869,11 +869,9 @@ void do_sys_times(struct tms *tms)
{
cputime_t tgutime, tgstime, cutime, cstime;
- spin_lock_irq(&current->sighand->siglock);
thread_group_cputime_adjusted(current, &tgutime, &tgstime);
cutime = current->signal->cutime;
cstime = current->signal->cstime;
- spin_unlock_irq(&current->sighand->siglock);
tms->tms_utime = cputime_to_clock_t(tgutime);
tms->tms_stime = cputime_to_clock_t(tgstime);
tms->tms_cutime = cputime_to_clock_t(cutime);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 91180987e40e..4aada6d9fe74 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1055,15 +1055,6 @@ static struct ctl_table kern_table[] = {
.child = key_sysctls,
},
#endif
-#ifdef CONFIG_RCU_TORTURE_TEST
- {
- .procname = "rcutorture_runnable",
- .data = &rcutorture_runnable,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
-#endif
#ifdef CONFIG_PERF_EVENTS
/*
* User-space scripts rely on the existence of this file
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 1c2fe7de2842..ab370ffffd53 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1776,7 +1776,6 @@ schedule_hrtimeout_range_clock(ktime_t *expires, unsigned long delta,
*/
if (!expires) {
schedule();
- __set_current_state(TASK_RUNNING);
return -EINTR;
}
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 3b8946416a5f..492b986195d5 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -272,22 +272,8 @@ static int posix_cpu_clock_get_task(struct task_struct *tsk,
if (same_thread_group(tsk, current))
err = cpu_clock_sample(which_clock, tsk, &rtn);
} else {
- unsigned long flags;
- struct sighand_struct *sighand;
-
- /*
- * while_each_thread() is not yet entirely RCU safe,
- * keep locking the group while sampling process
- * clock for now.
- */
- sighand = lock_task_sighand(tsk, &flags);
- if (!sighand)
- return err;
-
if (tsk == current || thread_group_leader(tsk))
err = cpu_clock_sample_group(which_clock, tsk, &rtn);
-
- unlock_task_sighand(tsk, &flags);
}
if (!err)
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 7c1412ea2d29..a73efdf6f696 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -586,7 +586,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
} while (read_seqretry(&jiffies_lock, seq));
if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) ||
- arch_needs_cpu(cpu) || irq_work_needs_cpu()) {
+ arch_needs_cpu() || irq_work_needs_cpu()) {
next_jiffies = last_jiffies + 1;
delta_jiffies = 1;
} else {
diff --git a/kernel/torture.c b/kernel/torture.c
index d600af21f022..dd70993c266c 100644
--- a/kernel/torture.c
+++ b/kernel/torture.c
@@ -211,18 +211,16 @@ EXPORT_SYMBOL_GPL(torture_onoff_cleanup);
/*
* Print online/offline testing statistics.
*/
-char *torture_onoff_stats(char *page)
+void torture_onoff_stats(void)
{
#ifdef CONFIG_HOTPLUG_CPU
- page += sprintf(page,
- "onoff: %ld/%ld:%ld/%ld %d,%d:%d,%d %lu:%lu (HZ=%d) ",
- n_online_successes, n_online_attempts,
- n_offline_successes, n_offline_attempts,
- min_online, max_online,
- min_offline, max_offline,
- sum_online, sum_offline, HZ);
+ pr_cont("onoff: %ld/%ld:%ld/%ld %d,%d:%d,%d %lu:%lu (HZ=%d) ",
+ n_online_successes, n_online_attempts,
+ n_offline_successes, n_offline_attempts,
+ min_online, max_online,
+ min_offline, max_offline,
+ sum_online, sum_offline, HZ);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
- return page;
}
EXPORT_SYMBOL_GPL(torture_onoff_stats);
@@ -635,8 +633,13 @@ EXPORT_SYMBOL_GPL(torture_init_end);
*
* This must be called before the caller starts shutting down its own
* kthreads.
+ *
+ * Both torture_cleanup_begin() and torture_cleanup_end() must be called
+ * as a pair for the cleanup to be performed correctly. They are separate
+ * because kthreads may still need to reference torture_type while they
+ * are being stopped, so it is only set to NULL after all other cleanup
+ * calls have completed.
*/
-bool torture_cleanup(void)
+bool torture_cleanup_begin(void)
{
mutex_lock(&fullstop_mutex);
if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
@@ -651,12 +654,17 @@ bool torture_cleanup(void)
torture_shuffle_cleanup();
torture_stutter_cleanup();
torture_onoff_cleanup();
+ return false;
+}
+EXPORT_SYMBOL_GPL(torture_cleanup_begin);
+
+void torture_cleanup_end(void)
+{
mutex_lock(&fullstop_mutex);
torture_type = NULL;
mutex_unlock(&fullstop_mutex);
- return false;
}
-EXPORT_SYMBOL_GPL(torture_cleanup);
+EXPORT_SYMBOL_GPL(torture_cleanup_end);
/*
* Is it time for the current torture test to stop?
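A sketch of the expected calling pattern for the split cleanup API (foo_reader, foo_stats and their task pointers are invented names): the caller stops its own kthreads between the two calls, while torture_type is still valid.

static void foo_torture_cleanup(void)
{
	if (torture_cleanup_begin())
		return;		/* shutdown already in progress */

	torture_stop_kthread(foo_reader, foo_reader_task);
	torture_stop_kthread(foo_stats, foo_stats_task);

	torture_cleanup_end();	/* only now is torture_type set to NULL */
}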
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 5916a8e59e87..fb186b9ddf51 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -113,6 +113,9 @@ ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;
static struct ftrace_ops control_ops;
+static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct pt_regs *regs);
+
#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *regs);
@@ -251,18 +254,24 @@ static void update_ftrace_function(void)
ftrace_func_t func;
/*
+ * Prepare the ftrace_ops that the arch callback will use.
+ * If there's only one ftrace_ops registered, the ftrace_ops_list
+ * will point to the ops we want.
+ */
+ set_function_trace_op = ftrace_ops_list;
+
+ /* If there's no ftrace_ops registered, just call the stub function */
+ if (ftrace_ops_list == &ftrace_list_end) {
+ func = ftrace_stub;
+
+ /*
* If we are at the end of the list and this ops is
* recursion safe and not dynamic and the arch supports passing ops,
* then have the mcount trampoline call the function directly.
*/
- if (ftrace_ops_list == &ftrace_list_end ||
- (ftrace_ops_list->next == &ftrace_list_end &&
- !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
- (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
- !FTRACE_FORCE_LIST_FUNC)) {
- /* Set the ftrace_ops that the arch callback uses */
- set_function_trace_op = ftrace_ops_list;
- func = ftrace_ops_list->func;
+ } else if (ftrace_ops_list->next == &ftrace_list_end) {
+ func = ftrace_ops_get_func(ftrace_ops_list);
+
} else {
/* Just use the default ftrace_ops */
set_function_trace_op = &ftrace_list_end;
@@ -1048,6 +1057,12 @@ static struct pid * const ftrace_swapper_pid = &init_struct_pid;
static struct ftrace_ops *removed_ops;
+/*
+ * Set when doing a global update, like enabling all recs or disabling them.
+ * It is not set when just updating a single ftrace_ops.
+ */
+static bool update_all_ops;
+
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif
@@ -1307,7 +1322,6 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
struct ftrace_func_entry *entry;
struct hlist_node *tn;
struct hlist_head *hhd;
- struct ftrace_hash *old_hash;
struct ftrace_hash *new_hash;
int size = src->count;
int bits = 0;
@@ -1352,15 +1366,28 @@ update:
*/
ftrace_hash_rec_disable_modify(ops, enable);
- old_hash = *dst;
rcu_assign_pointer(*dst, new_hash);
- free_ftrace_hash_rcu(old_hash);
ftrace_hash_rec_enable_modify(ops, enable);
return 0;
}
+static bool hash_contains_ip(unsigned long ip,
+ struct ftrace_ops_hash *hash)
+{
+ /*
+ * The function record is a match if it exists in the filter
+ * hash and not in the notrace hash. Note, an empty hash is
+ * considered a match for the filter hash, but an empty
+ * notrace hash is considered not in the notrace hash.
+ */
+ return (ftrace_hash_empty(hash->filter_hash) ||
+ ftrace_lookup_ip(hash->filter_hash, ip)) &&
+ (ftrace_hash_empty(hash->notrace_hash) ||
+ !ftrace_lookup_ip(hash->notrace_hash, ip));
+}
+
/*
* Test the hashes for this ops to see if we want to call
* the ops->func or not.
@@ -1376,8 +1403,7 @@ update:
static int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
- struct ftrace_hash *filter_hash;
- struct ftrace_hash *notrace_hash;
+ struct ftrace_ops_hash hash;
int ret;
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
@@ -1390,13 +1416,10 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
return 0;
#endif
- filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
- notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);
+ hash.filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
+ hash.notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);
- if ((ftrace_hash_empty(filter_hash) ||
- ftrace_lookup_ip(filter_hash, ip)) &&
- (ftrace_hash_empty(notrace_hash) ||
- !ftrace_lookup_ip(notrace_hash, ip)))
+ if (hash_contains_ip(ip, &hash))
ret = 1;
else
ret = 0;
@@ -1508,46 +1531,6 @@ static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
return keep_regs;
}
-static void ftrace_remove_tramp(struct ftrace_ops *ops,
- struct dyn_ftrace *rec)
-{
- /* If TRAMP is not set, no ops should have a trampoline for this */
- if (!(rec->flags & FTRACE_FL_TRAMP))
- return;
-
- rec->flags &= ~FTRACE_FL_TRAMP;
-
- if ((!ftrace_hash_empty(ops->func_hash->filter_hash) &&
- !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip)) ||
- ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
- return;
- /*
- * The tramp_hash entry will be removed at time
- * of update.
- */
- ops->nr_trampolines--;
-}
-
-static void ftrace_clear_tramps(struct dyn_ftrace *rec, struct ftrace_ops *ops)
-{
- struct ftrace_ops *op;
-
- /* If TRAMP is not set, no ops should have a trampoline for this */
- if (!(rec->flags & FTRACE_FL_TRAMP))
- return;
-
- do_for_each_ftrace_op(op, ftrace_ops_list) {
- /*
- * This function is called to clear other tramps
- * not the one that is being updated.
- */
- if (op == ops)
- continue;
- if (op->nr_trampolines)
- ftrace_remove_tramp(op, rec);
- } while_for_each_ftrace_op(op);
-}
-
static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
int filter_hash,
bool inc)
@@ -1636,18 +1619,16 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
* function, and the ops has a trampoline registered
* for it, then we can call it directly.
*/
- if (ftrace_rec_count(rec) == 1 && ops->trampoline) {
+ if (ftrace_rec_count(rec) == 1 && ops->trampoline)
rec->flags |= FTRACE_FL_TRAMP;
- ops->nr_trampolines++;
- } else {
+ else
/*
* If we are adding another function callback
* to this function, and the previous had a
* custom trampoline in use, then we need to go
* back to the default trampoline.
*/
- ftrace_clear_tramps(rec, ops);
- }
+ rec->flags &= ~FTRACE_FL_TRAMP;
/*
* If any ops wants regs saved for this function
@@ -1660,9 +1641,6 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
return;
rec->flags--;
- if (ops->trampoline && !ftrace_rec_count(rec))
- ftrace_remove_tramp(ops, rec);
-
/*
* If the rec had REGS enabled and the ops that is
* being removed had REGS set, then see if there is
@@ -1677,6 +1655,17 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
}
/*
+ * If the rec had TRAMP enabled, then it needs to be cleared,
+ * as TRAMP can only be enabled when there is a single ops
+ * attached to the rec.
+ * In other words, always disable it on decrementing.
+ * In the future, we may set it again if the rec count is
+ * decremented to one, and the ops that is left
+ * has a trampoline.
+ */
+ rec->flags &= ~FTRACE_FL_TRAMP;
+
+ /*
* flags will be cleared in ftrace_check_record()
* if rec count is zero.
*/
@@ -1895,21 +1884,72 @@ int ftrace_test_record(struct dyn_ftrace *rec, int enable)
}
static struct ftrace_ops *
+ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
+{
+ struct ftrace_ops *op;
+ unsigned long ip = rec->ip;
+
+ do_for_each_ftrace_op(op, ftrace_ops_list) {
+
+ if (!op->trampoline)
+ continue;
+
+ if (hash_contains_ip(ip, op->func_hash))
+ return op;
+ } while_for_each_ftrace_op(op);
+
+ return NULL;
+}
+
+static struct ftrace_ops *
ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
{
struct ftrace_ops *op;
+ unsigned long ip = rec->ip;
- /* Removed ops need to be tested first */
- if (removed_ops && removed_ops->tramp_hash) {
- if (ftrace_lookup_ip(removed_ops->tramp_hash, rec->ip))
+ /*
+ * Need to check removed ops first.
+ * If they are being removed, and this rec has a tramp,
+ * and this rec is in the ops list, then it would be the
+ * one with the tramp.
+ */
+ if (removed_ops) {
+ if (hash_contains_ip(ip, &removed_ops->old_hash))
return removed_ops;
}
+ /*
+ * Need to find the current trampoline for a rec.
+ * Now, a trampoline is only attached to a rec if there
+ * was a single 'ops' attached to it. But this can be called
+ * when we are adding another op to the rec or removing the
+ * current one. Thus, if the op is being added, we can
+ * ignore it because it hasn't attached itself to the rec
+ * yet. That means we just need to find the op that has a
+ * trampoline and is not being added.
+ */
do_for_each_ftrace_op(op, ftrace_ops_list) {
- if (!op->tramp_hash)
+
+ if (!op->trampoline)
continue;
- if (ftrace_lookup_ip(op->tramp_hash, rec->ip))
+ /*
+ * If the ops is being added, it hasn't gotten to
+ * the point to be removed from this tree yet.
+ */
+ if (op->flags & FTRACE_OPS_FL_ADDING)
+ continue;
+
+ /*
+ * If the ops is not being added and has a trampoline,
+ * then it must be the one that we want!
+ */
+ if (hash_contains_ip(ip, op->func_hash))
+ return op;
+
+ /* If the ops is being modified, it may be in the old hash. */
+ if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
+ hash_contains_ip(ip, &op->old_hash))
return op;
} while_for_each_ftrace_op(op);
@@ -1921,10 +1961,11 @@ static struct ftrace_ops *
ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
{
struct ftrace_ops *op;
+ unsigned long ip = rec->ip;
do_for_each_ftrace_op(op, ftrace_ops_list) {
/* pass rec in as regs to have non-NULL val */
- if (ftrace_ops_test(op, rec->ip, rec))
+ if (hash_contains_ip(ip, op->func_hash))
return op;
} while_for_each_ftrace_op(op);
@@ -2231,92 +2272,6 @@ void __weak arch_ftrace_update_code(int command)
ftrace_run_stop_machine(command);
}
-static int ftrace_save_ops_tramp_hash(struct ftrace_ops *ops)
-{
- struct ftrace_page *pg;
- struct dyn_ftrace *rec;
- int size, bits;
- int ret;
-
- size = ops->nr_trampolines;
- bits = 0;
- /*
- * Make the hash size about 1/2 the # found
- */
- for (size /= 2; size; size >>= 1)
- bits++;
-
- ops->tramp_hash = alloc_ftrace_hash(bits);
- /*
- * TODO: a failed allocation is going to screw up
- * the accounting of what needs to be modified
- * and not. For now, we kill ftrace if we fail
- * to allocate here. But there are ways around this,
- * but that will take a little more work.
- */
- if (!ops->tramp_hash)
- return -ENOMEM;
-
- do_for_each_ftrace_rec(pg, rec) {
- if (ftrace_rec_count(rec) == 1 &&
- ftrace_ops_test(ops, rec->ip, rec)) {
-
- /*
- * If another ops adds to a rec, the rec will
- * lose its trampoline and never get it back
- * until all ops are off of it.
- */
- if (!(rec->flags & FTRACE_FL_TRAMP))
- continue;
-
- /* This record had better have a trampoline */
- if (FTRACE_WARN_ON(!(rec->flags & FTRACE_FL_TRAMP_EN)))
- return -1;
-
- ret = add_hash_entry(ops->tramp_hash, rec->ip);
- if (ret < 0)
- return ret;
- }
- } while_for_each_ftrace_rec();
-
- /* The number of recs in the hash must match nr_trampolines */
- if (FTRACE_WARN_ON(ops->tramp_hash->count != ops->nr_trampolines))
- pr_warn("count=%ld trampolines=%d\n",
- ops->tramp_hash->count,
- ops->nr_trampolines);
-
- return 0;
-}
-
-static int ftrace_save_tramp_hashes(void)
-{
- struct ftrace_ops *op;
- int ret;
-
- /*
- * Now that any trampoline is being used, we need to save the
- * hashes for the ops that have them. This allows the mapping
- * back from the record to the ops that has the trampoline to
- * know what code is being replaced. Modifying code must always
- * verify what it is changing.
- */
- do_for_each_ftrace_op(op, ftrace_ops_list) {
-
- /* The tramp_hash is recreated each time. */
- free_ftrace_hash(op->tramp_hash);
- op->tramp_hash = NULL;
-
- if (op->nr_trampolines) {
- ret = ftrace_save_ops_tramp_hash(op);
- if (ret)
- return ret;
- }
-
- } while_for_each_ftrace_op(op);
-
- return 0;
-}
-
static void ftrace_run_update_code(int command)
{
int ret;
@@ -2336,9 +2291,13 @@ static void ftrace_run_update_code(int command)
ret = ftrace_arch_code_modify_post_process();
FTRACE_WARN_ON(ret);
+}
- ret = ftrace_save_tramp_hashes();
- FTRACE_WARN_ON(ret);
+static void ftrace_run_modify_code(struct ftrace_ops *ops, int command)
+{
+ ops->flags |= FTRACE_OPS_FL_MODIFYING;
+ ftrace_run_update_code(command);
+ ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
}
static ftrace_func_t saved_ftrace_func;
@@ -2362,6 +2321,13 @@ static void ftrace_startup_enable(int command)
ftrace_run_update_code(command);
}
+static void ftrace_startup_all(int command)
+{
+ update_all_ops = true;
+ ftrace_startup_enable(command);
+ update_all_ops = false;
+}
+
static int ftrace_startup(struct ftrace_ops *ops, int command)
{
int ret;
@@ -2376,12 +2342,22 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
ftrace_start_up++;
command |= FTRACE_UPDATE_CALLS;
- ops->flags |= FTRACE_OPS_FL_ENABLED;
+ /*
+ * Note that ftrace probes use this to start up
+ * and modify functions it will probe. But we still
+ * set the ADDING flag for modification, as probes
+ * do not have trampolines. If they add them in the
+ * future, then the probes will need to distinguish
+ * between adding and updating probes.
+ */
+ ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;
ftrace_hash_rec_enable(ops, 1);
ftrace_startup_enable(command);
+ ops->flags &= ~FTRACE_OPS_FL_ADDING;
+
return 0;
}
@@ -2431,11 +2407,35 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
* If the ops uses a trampoline, then it needs to be
* tested first on update.
*/
+ ops->flags |= FTRACE_OPS_FL_REMOVING;
removed_ops = ops;
+ /* The trampoline logic checks the old hashes */
+ ops->old_hash.filter_hash = ops->func_hash->filter_hash;
+ ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;
+
ftrace_run_update_code(command);
+ /*
+ * If there are no more ops registered with ftrace, run a
+ * sanity check to make sure all rec flags are cleared.
+ */
+ if (ftrace_ops_list == &ftrace_list_end) {
+ struct ftrace_page *pg;
+ struct dyn_ftrace *rec;
+
+ do_for_each_ftrace_rec(pg, rec) {
+ if (FTRACE_WARN_ON_ONCE(rec->flags))
+ pr_warn(" %pS flags:%lx\n",
+ (void *)rec->ip, rec->flags);
+ } while_for_each_ftrace_rec();
+ }
+
+ ops->old_hash.filter_hash = NULL;
+ ops->old_hash.notrace_hash = NULL;
+
removed_ops = NULL;
+ ops->flags &= ~FTRACE_OPS_FL_REMOVING;
/*
* Dynamic ops may be freed, we must make sure that all
@@ -2960,8 +2960,8 @@ static int t_show(struct seq_file *m, void *v)
if (rec->flags & FTRACE_FL_TRAMP_EN) {
struct ftrace_ops *ops;
- ops = ftrace_find_tramp_ops_curr(rec);
- if (ops && ops->trampoline)
+ ops = ftrace_find_tramp_ops_any(rec);
+ if (ops)
seq_printf(m, "\ttramp: %pS",
(void *)ops->trampoline);
else
@@ -3348,7 +3348,7 @@ static void __enable_ftrace_function_probe(void)
if (ftrace_probe_registered) {
/* still need to update the function call sites */
if (ftrace_enabled)
- ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+ ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS);
return;
}
@@ -3399,6 +3399,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
{
struct ftrace_func_probe *entry;
struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
+ struct ftrace_hash *old_hash = *orig_hash;
struct ftrace_hash *hash;
struct ftrace_page *pg;
struct dyn_ftrace *rec;
@@ -3417,7 +3418,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
mutex_lock(&trace_probe_ops.func_hash->regex_lock);
- hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
+ hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
if (!hash) {
count = -ENOMEM;
goto out;
@@ -3476,7 +3477,9 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
} while_for_each_ftrace_rec();
ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
- if (ret < 0)
+ if (!ret)
+ free_ftrace_hash_rcu(old_hash);
+ else
count = ret;
__enable_ftrace_function_probe();
@@ -3503,6 +3506,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
struct ftrace_func_probe *entry;
struct ftrace_func_probe *p;
struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
+ struct ftrace_hash *old_hash = *orig_hash;
struct list_head free_list;
struct ftrace_hash *hash;
struct hlist_node *tmp;
@@ -3510,6 +3514,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
int type = MATCH_FULL;
int i, len = 0;
char *search;
+ int ret;
if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
glob = NULL;
@@ -3568,8 +3573,11 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
* Remove after the disable is called. Otherwise, if the last
* probe is removed, a null hash means *all enabled*.
*/
- ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
+ ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
synchronize_sched();
+ if (!ret)
+ free_ftrace_hash_rcu(old_hash);
+
list_for_each_entry_safe(entry, p, &free_list, free_list) {
list_del(&entry->free_list);
ftrace_free_entry(entry);
@@ -3759,7 +3767,7 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
static void ftrace_ops_update_code(struct ftrace_ops *ops)
{
if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled)
- ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+ ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS);
}
static int
@@ -3767,6 +3775,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
unsigned long ip, int remove, int reset, int enable)
{
struct ftrace_hash **orig_hash;
+ struct ftrace_hash *old_hash;
struct ftrace_hash *hash;
int ret;
@@ -3801,10 +3810,12 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
}
mutex_lock(&ftrace_lock);
+ old_hash = *orig_hash;
ret = ftrace_hash_move(ops, enable, orig_hash, hash);
- if (!ret)
+ if (!ret) {
ftrace_ops_update_code(ops);
-
+ free_ftrace_hash_rcu(old_hash);
+ }
mutex_unlock(&ftrace_lock);
out_regex_unlock:
@@ -4013,6 +4024,7 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
struct seq_file *m = (struct seq_file *)file->private_data;
struct ftrace_iterator *iter;
struct ftrace_hash **orig_hash;
+ struct ftrace_hash *old_hash;
struct trace_parser *parser;
int filter_hash;
int ret;
@@ -4042,11 +4054,13 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
orig_hash = &iter->ops->func_hash->notrace_hash;
mutex_lock(&ftrace_lock);
+ old_hash = *orig_hash;
ret = ftrace_hash_move(iter->ops, filter_hash,
orig_hash, iter->hash);
- if (!ret)
+ if (!ret) {
ftrace_ops_update_code(iter->ops);
-
+ free_ftrace_hash_rcu(old_hash);
+ }
mutex_unlock(&ftrace_lock);
}
@@ -4678,6 +4692,7 @@ core_initcall(ftrace_nodyn_init);
static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
+static inline void ftrace_startup_all(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(ops, command) \
({ \
@@ -4827,6 +4842,56 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
}
#endif
+/*
+ * If there's only one function registered but it does not support
+ * recursion, this function will be called by the mcount trampoline.
+ * This function will handle recursion protection.
+ */
+static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct pt_regs *regs)
+{
+ int bit;
+
+ bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
+ if (bit < 0)
+ return;
+
+ op->func(ip, parent_ip, op, regs);
+
+ trace_clear_recursion(bit);
+}
+
+/**
+ * ftrace_ops_get_func - get the function a trampoline should call
+ * @ops: the ops to get the function for
+ *
+ * Normally the mcount trampoline will call the ops->func, but there
+ * are times that it should not. For example, if the ops does not
+ * have its own recursion protection, then it should call the
+ * ftrace_ops_recurs_func() instead.
+ *
+ * Returns the function that the trampoline should call for @ops.
+ */
+ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
+{
+ /*
+ * If this is a dynamic ops or we force list func,
+ * then it needs to call the list anyway.
+ */
+ if (ops->flags & FTRACE_OPS_FL_DYNAMIC || FTRACE_FORCE_LIST_FUNC)
+ return ftrace_ops_list_func;
+
+ /*
+ * If the func handles its own recursion, call it directly.
+ * Otherwise call the recursion protected function that
+ * will call the ftrace ops function.
+ */
+ if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE))
+ return ftrace_ops_recurs_func;
+
+ return ops->func;
+}
+
static void clear_ftrace_swapper(void)
{
struct task_struct *p;
@@ -4927,7 +4992,8 @@ static int ftrace_pid_add(int p)
set_ftrace_pid_task(pid);
ftrace_update_pid_func();
- ftrace_startup_enable(0);
+
+ ftrace_startup_all(0);
mutex_unlock(&ftrace_lock);
return 0;
@@ -4956,7 +5022,7 @@ static void ftrace_pid_reset(void)
}
ftrace_update_pid_func();
- ftrace_startup_enable(0);
+ ftrace_startup_all(0);
mutex_unlock(&ftrace_lock);
}
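A sketch of an ftrace_ops that benefits from the new ftrace_ops_get_func() selection (my_trace_func and my_ops are invented, and this is kernel-module context rather than standalone code): because the ops does not set FTRACE_OPS_FL_RECURSION_SAFE, the trampoline now calls it through ftrace_ops_recurs_func() instead of falling back to the list function.

static void my_trace_func(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *regs)
{
	/* May itself hit traced functions; the wrapper stops recursion. */
	trace_printk("hit %pS from %pS\n", (void *)ip, (void *)parent_ip);
}

static struct ftrace_ops my_ops = {
	.func	= my_trace_func,
	/* FTRACE_OPS_FL_RECURSION_SAFE deliberately not set. */
};

static int __init my_tracer_init(void)
{
	return register_ftrace_function(&my_ops);
}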
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
index 0434ff1b808e..3f9e328c30b5 100644
--- a/kernel/trace/ring_buffer_benchmark.c
+++ b/kernel/trace/ring_buffer_benchmark.c
@@ -205,7 +205,6 @@ static void ring_buffer_consumer(void)
break;
schedule();
- __set_current_state(TASK_RUNNING);
}
reader_finish = 0;
complete(&read_done);
@@ -379,7 +378,6 @@ static int ring_buffer_consumer_thread(void *arg)
break;
schedule();
- __set_current_state(TASK_RUNNING);
}
__set_current_state(TASK_RUNNING);
@@ -407,7 +405,6 @@ static int ring_buffer_producer_thread(void *arg)
trace_printk("Sleeping for 10 secs\n");
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(HZ * SLEEP_TIME);
- __set_current_state(TASK_RUNNING);
}
if (kill_test)
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index ef06ce7e9cf8..0cc51edde3a8 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -2513,8 +2513,11 @@ static __init int event_test_thread(void *unused)
kfree(test_malloc);
set_current_state(TASK_INTERRUPTIBLE);
- while (!kthread_should_stop())
+ while (!kthread_should_stop()) {
schedule();
+ set_current_state(TASK_INTERRUPTIBLE);
+ }
+ __set_current_state(TASK_RUNNING);
return 0;
}
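The hunk above is the canonical kthread sleep loop; in generic form (a sketch, not part of the patch) the task state is set to TASK_INTERRUPTIBLE before the stop condition is re-checked, so a wakeup that races with the check turns the following schedule() into a no-op instead of being lost.

static int example_thread(void *unused)
{
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}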
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 5ef60499dc8e..b0f86ea77881 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -382,6 +382,8 @@ static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
/* check the trace buffer */
ret = trace_test_buffer(&tr->trace_buffer, &count);
+
+ ftrace_enabled = 1;
tracing_start();
/* we should only have one item */
@@ -679,6 +681,8 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
/* check the trace buffer */
ret = trace_test_buffer(&tr->trace_buffer, &count);
+
+ ftrace_enabled = 1;
trace->reset(tr);
tracing_start();
@@ -1025,6 +1029,12 @@ trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
#endif
#ifdef CONFIG_SCHED_TRACER
+
+struct wakeup_test_data {
+ struct completion is_ready;
+ int go;
+};
+
static int trace_wakeup_test_thread(void *data)
{
/* Make this a -deadline thread */
@@ -1034,51 +1044,56 @@ static int trace_wakeup_test_thread(void *data)
.sched_deadline = 10000000ULL,
.sched_period = 10000000ULL
};
- struct completion *x = data;
+ struct wakeup_test_data *x = data;
sched_setattr(current, &attr);
/* Make it know we have a new prio */
- complete(x);
+ complete(&x->is_ready);
/* now go to sleep and let the test wake us up */
set_current_state(TASK_INTERRUPTIBLE);
- schedule();
+ while (!x->go) {
+ schedule();
+ set_current_state(TASK_INTERRUPTIBLE);
+ }
- complete(x);
+ complete(&x->is_ready);
+
+ set_current_state(TASK_INTERRUPTIBLE);
/* we are awake, now wait to disappear */
while (!kthread_should_stop()) {
- /*
- * This will likely be the system top priority
- * task, do short sleeps to let others run.
- */
- msleep(100);
+ schedule();
+ set_current_state(TASK_INTERRUPTIBLE);
}
+ __set_current_state(TASK_RUNNING);
+
return 0;
}
-
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
unsigned long save_max = tr->max_latency;
struct task_struct *p;
- struct completion is_ready;
+ struct wakeup_test_data data;
unsigned long count;
int ret;
- init_completion(&is_ready);
+ memset(&data, 0, sizeof(data));
+
+ init_completion(&data.is_ready);
/* create a -deadline thread */
- p = kthread_run(trace_wakeup_test_thread, &is_ready, "ftrace-test");
+ p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test");
if (IS_ERR(p)) {
printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
return -1;
}
/* make sure the thread is running at -deadline policy */
- wait_for_completion(&is_ready);
+ wait_for_completion(&data.is_ready);
/* start the tracing */
ret = tracer_init(trace, tr);
@@ -1099,18 +1114,20 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
msleep(100);
}
- init_completion(&is_ready);
+ init_completion(&data.is_ready);
+
+ data.go = 1;
+ /* memory barrier is in the wake_up_process() */
wake_up_process(p);
/* Wait for the task to wake up */
- wait_for_completion(&is_ready);
+ wait_for_completion(&data.is_ready);
/* stop the tracing. */
tracing_stop();
/* check both trace buffers */
ret = trace_test_buffer(&tr->trace_buffer, NULL);
- printk("ret = %d\n", ret);
if (!ret)
ret = trace_test_buffer(&tr->max_buffer, &count);
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 8a4e5cb66a4c..16eddb308c33 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -13,7 +13,6 @@
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>
-#include <linux/magic.h>
#include <asm/setup.h>
@@ -171,8 +170,7 @@ check_stack(unsigned long ip, unsigned long *stack)
i++;
}
- if ((current != &init_task &&
- *(end_of_stack(current)) != STACK_END_MAGIC)) {
+ if (task_stack_end_corrupted(current)) {
print_max_stack();
BUG();
}
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 759d5e004517..4dc8b79c5f75 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -425,7 +425,7 @@ static void unreg_event_syscall_enter(struct ftrace_event_file *file,
return;
mutex_lock(&syscall_trace_lock);
tr->sys_refcount_enter--;
- rcu_assign_pointer(tr->enter_syscall_files[num], NULL);
+ RCU_INIT_POINTER(tr->enter_syscall_files[num], NULL);
if (!tr->sys_refcount_enter)
unregister_trace_sys_enter(ftrace_syscall_enter, tr);
mutex_unlock(&syscall_trace_lock);
@@ -463,7 +463,7 @@ static void unreg_event_syscall_exit(struct ftrace_event_file *file,
return;
mutex_lock(&syscall_trace_lock);
tr->sys_refcount_exit--;
- rcu_assign_pointer(tr->exit_syscall_files[num], NULL);
+ RCU_INIT_POINTER(tr->exit_syscall_files[num], NULL);
if (!tr->sys_refcount_exit)
unregister_trace_sys_exit(ftrace_syscall_exit, tr);
mutex_unlock(&syscall_trace_lock);
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 7b223b212683..ff7fd80bef99 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -15,11 +15,6 @@
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/freezer.h>
-#include <linux/kthread.h>
-#include <linux/lockdep.h>
-#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
@@ -530,7 +525,10 @@ static void watchdog_nmi_disable(unsigned int cpu)
/* should be in cleanup, but blocks oprofile */
perf_event_release_kernel(event);
}
- return;
+ if (cpu == 0) {
+ /* watchdog_nmi_enable() expects this to be zero initially. */
+ cpu0_err = 0;
+ }
}
#else
static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 5dbe22aa3efd..09b685daee3d 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2043,9 +2043,10 @@ __acquires(&pool->lock)
* kernels, where a requeueing work item waiting for something to
* happen could deadlock with stop_machine as such work item could
* indefinitely requeue itself while all other CPUs are trapped in
- * stop_machine.
+ * stop_machine. At the same time, report a quiescent RCU state so
+ * the same condition doesn't freeze RCU.
*/
- cond_resched();
+ cond_resched_rcu_qs();
spin_lock_irq(&pool->lock);
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 3ac43f34437b..e7ad58c5fbeb 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -824,6 +824,18 @@ config SCHEDSTATS
application, you can say N to avoid the very slight overhead
this adds.
+config SCHED_STACK_END_CHECK
+ bool "Detect stack corruption on calls to schedule()"
+ depends on DEBUG_KERNEL
+ default n
+ help
+ This option checks for a stack overrun on calls to schedule().
+ If the stack end location is found to be overwritten, the kernel
+ always panics, as the content of the corrupted region can no longer
+ be trusted. This prevents erroneous behaviour that could otherwise
+ result in data corruption or a sporadic crash at a later stage, once
+ the corrupted region is used. The runtime overhead introduced is minimal.
+
config TIMER_STATS
bool "Collect kernel timers statistics"
depends on DEBUG_KERNEL && PROC_FS
@@ -952,7 +964,7 @@ config PROVE_LOCKING
the proof of observed correctness is also maintained for an
arbitrary combination of these separate locking variants.
- For more details, see Documentation/lockdep-design.txt.
+ For more details, see Documentation/locking/lockdep-design.txt.
config LOCKDEP
bool
@@ -973,7 +985,7 @@ config LOCK_STAT
help
This feature enables tracking lock contention points
- For more details, see Documentation/lockstat.txt
+ For more details, see Documentation/locking/lockstat.txt
This also enables lock events required by "perf lock",
subcommand of perf.
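A userspace analogy of the check SCHED_STACK_END_CHECK enables (a sketch; the real check compares the word at end_of_stack(current) against STACK_END_MAGIC on every call to schedule()): a magic value placed at the end of a region is verified after use, and any mismatch aborts immediately.

#include <assert.h>
#include <string.h>

#define STACK_END_MAGIC	0x57AC6E9DUL

int main(void)
{
	unsigned long stack[64];

	stack[0] = STACK_END_MAGIC;	/* canary at the stack end */
	memset(&stack[1], 0, sizeof(stack) - sizeof(stack[0]));

	/* ... work that must never reach stack[0] ... */

	assert(stack[0] == STACK_END_MAGIC);	/* corruption => abort, like the panic */
	return 0;
}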
diff --git a/lib/atomic64.c b/lib/atomic64.c
index 08a4f068e61e..1298c05ef528 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -70,53 +70,42 @@ void atomic64_set(atomic64_t *v, long long i)
}
EXPORT_SYMBOL(atomic64_set);
-void atomic64_add(long long a, atomic64_t *v)
-{
- unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
-
- raw_spin_lock_irqsave(lock, flags);
- v->counter += a;
- raw_spin_unlock_irqrestore(lock, flags);
-}
-EXPORT_SYMBOL(atomic64_add);
-
-long long atomic64_add_return(long long a, atomic64_t *v)
-{
- unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
- long long val;
-
- raw_spin_lock_irqsave(lock, flags);
- val = v->counter += a;
- raw_spin_unlock_irqrestore(lock, flags);
- return val;
-}
-EXPORT_SYMBOL(atomic64_add_return);
-
-void atomic64_sub(long long a, atomic64_t *v)
-{
- unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
-
- raw_spin_lock_irqsave(lock, flags);
- v->counter -= a;
- raw_spin_unlock_irqrestore(lock, flags);
-}
-EXPORT_SYMBOL(atomic64_sub);
-
-long long atomic64_sub_return(long long a, atomic64_t *v)
-{
- unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
- long long val;
-
- raw_spin_lock_irqsave(lock, flags);
- val = v->counter -= a;
- raw_spin_unlock_irqrestore(lock, flags);
- return val;
-}
-EXPORT_SYMBOL(atomic64_sub_return);
+#define ATOMIC64_OP(op, c_op) \
+void atomic64_##op(long long a, atomic64_t *v) \
+{ \
+ unsigned long flags; \
+ raw_spinlock_t *lock = lock_addr(v); \
+ \
+ raw_spin_lock_irqsave(lock, flags); \
+ v->counter c_op a; \
+ raw_spin_unlock_irqrestore(lock, flags); \
+} \
+EXPORT_SYMBOL(atomic64_##op);
+
+#define ATOMIC64_OP_RETURN(op, c_op) \
+long long atomic64_##op##_return(long long a, atomic64_t *v) \
+{ \
+ unsigned long flags; \
+ raw_spinlock_t *lock = lock_addr(v); \
+ long long val; \
+ \
+ raw_spin_lock_irqsave(lock, flags); \
+ val = (v->counter c_op a); \
+ raw_spin_unlock_irqrestore(lock, flags); \
+ return val; \
+} \
+EXPORT_SYMBOL(atomic64_##op##_return);
+
+#define ATOMIC64_OPS(op, c_op) \
+ ATOMIC64_OP(op, c_op) \
+ ATOMIC64_OP_RETURN(op, c_op)
+
+ATOMIC64_OPS(add, +=)
+ATOMIC64_OPS(sub, -=)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
long long atomic64_dec_if_positive(atomic64_t *v)
{
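The same macro-generation pattern, reduced to a standalone userspace sketch with a plain mutex in place of the kernel's hashed raw spinlocks: each ATOMIC64_OPS() expansion emits both the void op and its _return variant.

#include <stdio.h>
#include <pthread.h>

typedef struct {
	long long counter;
	pthread_mutex_t lock;
} atomic64_t;

#define ATOMIC64_OP(op, c_op)						\
static void atomic64_##op(long long a, atomic64_t *v)			\
{									\
	pthread_mutex_lock(&v->lock);					\
	v->counter c_op a;						\
	pthread_mutex_unlock(&v->lock);					\
}

#define ATOMIC64_OP_RETURN(op, c_op)					\
static long long atomic64_##op##_return(long long a, atomic64_t *v)	\
{									\
	long long val;							\
									\
	pthread_mutex_lock(&v->lock);					\
	val = (v->counter c_op a);					\
	pthread_mutex_unlock(&v->lock);					\
	return val;							\
}

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

int main(void)
{
	atomic64_t v = { 0, PTHREAD_MUTEX_INITIALIZER };

	atomic64_add(5, &v);
	atomic64_sub(1, &v);
	printf("%lld %lld\n", atomic64_add_return(0, &v),
	       atomic64_sub_return(2, &v));	/* prints "4 2" */
	return 0;
}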
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 8499c810909a..270773b91923 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -59,6 +59,22 @@ int hex2bin(u8 *dst, const char *src, size_t count)
EXPORT_SYMBOL(hex2bin);
/**
+ * bin2hex - convert binary data to an ascii hexadecimal string
+ * @dst: ascii hexadecimal result
+ * @src: binary data
+ * @count: binary data length
+ */
+char *bin2hex(char *dst, const void *src, size_t count)
+{
+ const unsigned char *_src = src;
+
+ while (count--)
+ dst = hex_byte_pack(dst, *_src++);
+ return dst;
+}
+EXPORT_SYMBOL(bin2hex);
+
+/**
* hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory
* @buf: data blob to dump
* @len: number of bytes in the @buf
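A standalone sketch of the new helper's behaviour (hex_byte_pack() is re-implemented here so the example compiles in userspace): each byte becomes two lowercase hex digits and the advanced destination pointer is returned; the output is not NUL-terminated, exactly like the kernel helper.

#include <stdio.h>
#include <stddef.h>

static const char hex_asc[] = "0123456789abcdef";

static char *hex_byte_pack(char *buf, unsigned char byte)
{
	*buf++ = hex_asc[byte >> 4];
	*buf++ = hex_asc[byte & 0x0f];
	return buf;
}

static char *bin2hex(char *dst, const void *src, size_t count)
{
	const unsigned char *_src = src;

	while (count--)
		dst = hex_byte_pack(dst, *_src++);
	return dst;
}

int main(void)
{
	unsigned char key[] = { 0xde, 0xad, 0xbe, 0xef };
	char out[2 * sizeof(key)];
	char *end = bin2hex(out, key, sizeof(key));

	printf("%.*s\n", (int)(end - out), out);	/* prints "deadbeef" */
	return 0;
}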
diff --git a/mm/Makefile b/mm/Makefile
index 1f534a7f0a71..8405eb0023a9 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -28,8 +28,9 @@ else
obj-y += bootmem.o
endif
+obj-$(CONFIG_ADVISE_SYSCALLS) += fadvise.o
ifdef CONFIG_MMU
- obj-$(CONFIG_ADVISE_SYSCALLS) += fadvise.o madvise.o
+ obj-$(CONFIG_ADVISE_SYSCALLS) += madvise.o
endif
obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o
diff --git a/mm/iov_iter.c b/mm/iov_iter.c
index 9a09f2034fcc..eafcf60f6b83 100644
--- a/mm/iov_iter.c
+++ b/mm/iov_iter.c
@@ -4,6 +4,96 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
+static size_t copy_to_iter_iovec(void *from, size_t bytes, struct iov_iter *i)
+{
+ size_t skip, copy, left, wanted;
+ const struct iovec *iov;
+ char __user *buf;
+
+ if (unlikely(bytes > i->count))
+ bytes = i->count;
+
+ if (unlikely(!bytes))
+ return 0;
+
+ wanted = bytes;
+ iov = i->iov;
+ skip = i->iov_offset;
+ buf = iov->iov_base + skip;
+ copy = min(bytes, iov->iov_len - skip);
+
+ left = __copy_to_user(buf, from, copy);
+ copy -= left;
+ skip += copy;
+ from += copy;
+ bytes -= copy;
+ while (unlikely(!left && bytes)) {
+ iov++;
+ buf = iov->iov_base;
+ copy = min(bytes, iov->iov_len);
+ left = __copy_to_user(buf, from, copy);
+ copy -= left;
+ skip = copy;
+ from += copy;
+ bytes -= copy;
+ }
+
+ if (skip == iov->iov_len) {
+ iov++;
+ skip = 0;
+ }
+ i->count -= wanted - bytes;
+ i->nr_segs -= iov - i->iov;
+ i->iov = iov;
+ i->iov_offset = skip;
+ return wanted - bytes;
+}
+
+static size_t copy_from_iter_iovec(void *to, size_t bytes, struct iov_iter *i)
+{
+ size_t skip, copy, left, wanted;
+ const struct iovec *iov;
+ char __user *buf;
+
+ if (unlikely(bytes > i->count))
+ bytes = i->count;
+
+ if (unlikely(!bytes))
+ return 0;
+
+ wanted = bytes;
+ iov = i->iov;
+ skip = i->iov_offset;
+ buf = iov->iov_base + skip;
+ copy = min(bytes, iov->iov_len - skip);
+
+ left = __copy_from_user(to, buf, copy);
+ copy -= left;
+ skip += copy;
+ to += copy;
+ bytes -= copy;
+ while (unlikely(!left && bytes)) {
+ iov++;
+ buf = iov->iov_base;
+ copy = min(bytes, iov->iov_len);
+ left = __copy_from_user(to, buf, copy);
+ copy -= left;
+ skip = copy;
+ to += copy;
+ bytes -= copy;
+ }
+
+ if (skip == iov->iov_len) {
+ iov++;
+ skip = 0;
+ }
+ i->count -= wanted - bytes;
+ i->nr_segs -= iov - i->iov;
+ i->iov = iov;
+ i->iov_offset = skip;
+ return wanted - bytes;
+}
+
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i)
{
@@ -166,6 +256,50 @@ done:
return wanted - bytes;
}
+static size_t zero_iovec(size_t bytes, struct iov_iter *i)
+{
+ size_t skip, copy, left, wanted;
+ const struct iovec *iov;
+ char __user *buf;
+
+ if (unlikely(bytes > i->count))
+ bytes = i->count;
+
+ if (unlikely(!bytes))
+ return 0;
+
+ wanted = bytes;
+ iov = i->iov;
+ skip = i->iov_offset;
+ buf = iov->iov_base + skip;
+ copy = min(bytes, iov->iov_len - skip);
+
+ left = __clear_user(buf, copy);
+ copy -= left;
+ skip += copy;
+ bytes -= copy;
+
+ while (unlikely(!left && bytes)) {
+ iov++;
+ buf = iov->iov_base;
+ copy = min(bytes, iov->iov_len);
+ left = __clear_user(buf, copy);
+ copy -= left;
+ skip = copy;
+ bytes -= copy;
+ }
+
+ if (skip == iov->iov_len) {
+ iov++;
+ skip = 0;
+ }
+ i->count -= wanted - bytes;
+ i->nr_segs -= iov - i->iov;
+ i->iov = iov;
+ i->iov_offset = skip;
+ return wanted - bytes;
+}
+
static size_t __iovec_copy_from_user_inatomic(char *vaddr,
const struct iovec *iov, size_t base, size_t bytes)
{
@@ -414,12 +548,17 @@ static void memcpy_to_page(struct page *page, size_t offset, char *from, size_t
kunmap_atomic(to);
}
-static size_t copy_page_to_iter_bvec(struct page *page, size_t offset, size_t bytes,
- struct iov_iter *i)
+static void memzero_page(struct page *page, size_t offset, size_t len)
+{
+ char *addr = kmap_atomic(page);
+ memset(addr + offset, 0, len);
+ kunmap_atomic(addr);
+}
+
+static size_t copy_to_iter_bvec(void *from, size_t bytes, struct iov_iter *i)
{
size_t skip, copy, wanted;
const struct bio_vec *bvec;
- void *kaddr, *from;
if (unlikely(bytes > i->count))
bytes = i->count;
@@ -432,8 +571,6 @@ static size_t copy_page_to_iter_bvec(struct page *page, size_t offset, size_t by
skip = i->iov_offset;
copy = min_t(size_t, bytes, bvec->bv_len - skip);
- kaddr = kmap_atomic(page);
- from = kaddr + offset;
memcpy_to_page(bvec->bv_page, skip + bvec->bv_offset, from, copy);
skip += copy;
from += copy;
@@ -446,7 +583,6 @@ static size_t copy_page_to_iter_bvec(struct page *page, size_t offset, size_t by
from += copy;
bytes -= copy;
}
- kunmap_atomic(kaddr);
if (skip == bvec->bv_len) {
bvec++;
skip = 0;
@@ -458,12 +594,10 @@ static size_t copy_page_to_iter_bvec(struct page *page, size_t offset, size_t by
return wanted - bytes;
}
-static size_t copy_page_from_iter_bvec(struct page *page, size_t offset, size_t bytes,
- struct iov_iter *i)
+static size_t copy_from_iter_bvec(void *to, size_t bytes, struct iov_iter *i)
{
size_t skip, copy, wanted;
const struct bio_vec *bvec;
- void *kaddr, *to;
if (unlikely(bytes > i->count))
bytes = i->count;
@@ -475,10 +609,6 @@ static size_t copy_page_from_iter_bvec(struct page *page, size_t offset, size_t
bvec = i->bvec;
skip = i->iov_offset;
- kaddr = kmap_atomic(page);
-
- to = kaddr + offset;
-
copy = min(bytes, bvec->bv_len - skip);
memcpy_from_page(to, bvec->bv_page, bvec->bv_offset + skip, copy);
@@ -495,7 +625,6 @@ static size_t copy_page_from_iter_bvec(struct page *page, size_t offset, size_t
to += copy;
bytes -= copy;
}
- kunmap_atomic(kaddr);
if (skip == bvec->bv_len) {
bvec++;
skip = 0;
@@ -507,6 +636,61 @@ static size_t copy_page_from_iter_bvec(struct page *page, size_t offset, size_t
return wanted;
}
+static size_t copy_page_to_iter_bvec(struct page *page, size_t offset,
+ size_t bytes, struct iov_iter *i)
+{
+ void *kaddr = kmap_atomic(page);
+ size_t wanted = copy_to_iter_bvec(kaddr + offset, bytes, i);
+ kunmap_atomic(kaddr);
+ return wanted;
+}
+
+static size_t copy_page_from_iter_bvec(struct page *page, size_t offset,
+ size_t bytes, struct iov_iter *i)
+{
+ void *kaddr = kmap_atomic(page);
+ size_t wanted = copy_from_iter_bvec(kaddr + offset, bytes, i);
+ kunmap_atomic(kaddr);
+ return wanted;
+}
+
+static size_t zero_bvec(size_t bytes, struct iov_iter *i)
+{
+ size_t skip, copy, wanted;
+ const struct bio_vec *bvec;
+
+ if (unlikely(bytes > i->count))
+ bytes = i->count;
+
+ if (unlikely(!bytes))
+ return 0;
+
+ wanted = bytes;
+ bvec = i->bvec;
+ skip = i->iov_offset;
+ copy = min_t(size_t, bytes, bvec->bv_len - skip);
+
+ memzero_page(bvec->bv_page, skip + bvec->bv_offset, copy);
+ skip += copy;
+ bytes -= copy;
+ while (bytes) {
+ bvec++;
+ copy = min(bytes, (size_t)bvec->bv_len);
+ memzero_page(bvec->bv_page, bvec->bv_offset, copy);
+ skip = copy;
+ bytes -= copy;
+ }
+ if (skip == bvec->bv_len) {
+ bvec++;
+ skip = 0;
+ }
+ i->count -= wanted - bytes;
+ i->nr_segs -= bvec - i->bvec;
+ i->bvec = bvec;
+ i->iov_offset = skip;
+ return wanted - bytes;
+}
+
static size_t copy_from_user_bvec(struct page *page,
struct iov_iter *i, unsigned long offset, size_t bytes)
{
@@ -672,6 +856,34 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
}
EXPORT_SYMBOL(copy_page_from_iter);
+size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i)
+{
+ if (i->type & ITER_BVEC)
+ return copy_to_iter_bvec(addr, bytes, i);
+ else
+ return copy_to_iter_iovec(addr, bytes, i);
+}
+EXPORT_SYMBOL(copy_to_iter);
+
+size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
+{
+ if (i->type & ITER_BVEC)
+ return copy_from_iter_bvec(addr, bytes, i);
+ else
+ return copy_from_iter_iovec(addr, bytes, i);
+}
+EXPORT_SYMBOL(copy_from_iter);
+
+size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
+{
+ if (i->type & ITER_BVEC) {
+ return zero_bvec(bytes, i);
+ } else {
+ return zero_iovec(bytes, i);
+ }
+}
+EXPORT_SYMBOL(iov_iter_zero);
+
size_t iov_iter_copy_from_user_atomic(struct page *page,
struct iov_iter *i, unsigned long offset, size_t bytes)
{
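A hypothetical consumer of the new helpers (dev_buf and dev_len are invented names; this is kernel context, not standalone code): a read_iter-style path that fills the caller's segments from a contiguous buffer and zero-pads the remainder, without caring whether the iov_iter is backed by iovecs or by bio_vecs.

static ssize_t dev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	size_t copied, padded;

	copied = copy_to_iter(dev_buf, dev_len, to);
	padded = iov_iter_zero(iov_iter_count(to), to);

	return copied + padded;
}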
diff --git a/mm/mlock.c b/mm/mlock.c
index 03aa8512723b..73cf0987088c 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -789,7 +789,7 @@ static int do_mlockall(int flags)
/* Ignore errors */
mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
- cond_resched();
+ cond_resched_rcu_qs();
}
out:
return 0;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c9710c9bbee2..736d8e1b6381 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4971,6 +4971,8 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
pgdat->node_start_pfn = node_start_pfn;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
+ printk(KERN_INFO "Initmem setup node %d [mem %#010Lx-%#010Lx]\n", nid,
+ (u64) start_pfn << PAGE_SHIFT, (u64) (end_pfn << PAGE_SHIFT) - 1);
#endif
calculate_node_totalpages(pgdat, start_pfn, end_pfn,
zones_size, zholes_size);
diff --git a/net/Kconfig b/net/Kconfig
index d6b138e2c263..6272420a721b 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -6,6 +6,7 @@ menuconfig NET
bool "Networking support"
select NLATTR
select GENERIC_NET_UTILS
+ select ANON_INODES
---help---
Unless you really know what you are doing, you should say Y here.
The reason is that some programs need kernel networking support even
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index ffeba8f9dda9..62fc5e7a9acf 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -476,7 +476,6 @@ struct key_type key_type_ceph = {
.preparse = ceph_key_preparse,
.free_preparse = ceph_key_free_preparse,
.instantiate = generic_key_instantiate,
- .match = user_match,
.destroy = ceph_key_destroy,
};
diff --git a/net/core/filter.c b/net/core/filter.c
index fcd3f6742a6a..647b12265e18 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -51,9 +51,9 @@
* @skb: buffer to filter
*
* Run the filter code and then cut skb->data to correct size returned by
- * sk_run_filter. If pkt_len is 0 we toss packet. If skb->len is smaller
+ * SK_RUN_FILTER. If pkt_len is 0 we toss packet. If skb->len is smaller
* than pkt_len we keep whole skb->data. This is the socket level
- * wrapper to sk_run_filter. It returns 0 if the packet should
+ * wrapper to SK_RUN_FILTER. It returns 0 if the packet should
* be accepted or -EPERM if the packet should be tossed.
*
*/
@@ -566,11 +566,8 @@ err:
/* Security:
*
- * A BPF program is able to use 16 cells of memory to store intermediate
- * values (check u32 mem[BPF_MEMWORDS] in sk_run_filter()).
- *
* As we dont want to clear mem[] array for each packet going through
- * sk_run_filter(), we check that filter loaded by user never try to read
+ * __bpf_prog_run(), we check that a filter loaded by the user never tries to read
* a cell if not previously written, and we check all branches to be sure
* a malicious user doesn't try to abuse us.
*/
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 8560dea58803..45084938c403 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -100,6 +100,13 @@ ip:
if (ip_is_fragment(iph))
ip_proto = 0;
+ /* skip the address processing if skb is NULL. The assumption
+ * here is that if there is no skb we are not looking for flow
+ * info but lengths and protocols.
+ */
+ if (!skb)
+ break;
+
iph_to_flow_copy_addrs(flow, iph);
break;
}
@@ -114,17 +121,15 @@ ipv6:
return false;
ip_proto = iph->nexthdr;
- flow->src = (__force __be32)ipv6_addr_hash(&iph->saddr);
- flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr);
nhoff += sizeof(struct ipv6hdr);
- /* skip the flow label processing if skb is NULL. The
- * assumption here is that if there is no skb we are not
- * looking for flow info as much as we are length.
- */
+ /* see comment above in IPv4 section */
if (!skb)
break;
+ flow->src = (__force __be32)ipv6_addr_hash(&iph->saddr);
+ flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr);
+
flow_label = ip6_flowlabel(iph);
if (flow_label) {
/* Awesome, IPv6 packet has a flow label so we can
@@ -231,9 +236,13 @@ ipv6:
flow->n_proto = proto;
flow->ip_proto = ip_proto;
- flow->ports = __skb_flow_get_ports(skb, nhoff, ip_proto, data, hlen);
flow->thoff = (u16) nhoff;
+ /* unless skb is set we don't need to record port info */
+ if (skb)
+ flow->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
+ data, hlen);
+
return true;
}
EXPORT_SYMBOL(__skb_flow_dissect);
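Hedged sketch (hypothetical helper, assuming a __skb_flow_dissect(skb, flow, data, proto, nhoff, hlen) signature consistent with this file): with skb == NULL the dissector now records only protocols and the transport header offset, skipping addresses, flow labels and ports, which is enough to size a header pull from a raw buffer:

static u32 raw_headlen_example(const void *data, unsigned int len,
			       __be16 proto)
{
	struct flow_keys keys;

	if (!__skb_flow_dissect(NULL, &keys, (void *)data, proto, 0, len))
		return 0;	/* dissection failed; pull nothing */

	/* keys.ports stays zero and no addresses were touched */
	return min_t(u32, keys.thoff, len);
}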
@@ -334,15 +343,16 @@ u32 __skb_get_poff(const struct sk_buff *skb, void *data,
switch (keys->ip_proto) {
case IPPROTO_TCP: {
- const struct tcphdr *tcph;
- struct tcphdr _tcph;
+ /* access doff as u8 to avoid unaligned access */
+ const u8 *doff;
+ u8 _doff;
- tcph = __skb_header_pointer(skb, poff, sizeof(_tcph),
- data, hlen, &_tcph);
- if (!tcph)
+ doff = __skb_header_pointer(skb, poff + 12, sizeof(_doff),
+ data, hlen, &_doff);
+ if (!doff)
return poff;
- poff += max_t(u32, sizeof(struct tcphdr), tcph->doff * 4);
+ poff += max_t(u32, sizeof(struct tcphdr), (*doff & 0xF0) >> 2);
break;
}
case IPPROTO_UDP:
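Worked example of the new doff extraction (values are illustrative): byte 12 of a TCP header carries the data offset in its high nibble, so masking and shifting by two yields the header length in bytes directly, with no unaligned 16-bit load:

static unsigned int tcp_hdrlen_from_byte12(u8 byte12)
{
	/* doff = 8 (a 32-byte header) is stored as 0x8 in the high
	 * nibble: (0x80 & 0xF0) >> 2 == 32 == doff * 4 */
	return (byte12 & 0xF0) >> 2;
}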
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 7b3df0d518ab..829d013745ab 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -360,18 +360,29 @@ refill:
goto end;
}
nc->frag.size = PAGE_SIZE << order;
-recycle:
- atomic_set(&nc->frag.page->_count, NETDEV_PAGECNT_MAX_BIAS);
+ /* Even if we own the page, we do not use atomic_set().
+ * This would break get_page_unless_zero() users.
+ */
+ atomic_add(NETDEV_PAGECNT_MAX_BIAS - 1,
+ &nc->frag.page->_count);
nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
nc->frag.offset = 0;
}
if (nc->frag.offset + fragsz > nc->frag.size) {
- /* avoid unnecessary locked operations if possible */
- if ((atomic_read(&nc->frag.page->_count) == nc->pagecnt_bias) ||
- atomic_sub_and_test(nc->pagecnt_bias, &nc->frag.page->_count))
- goto recycle;
- goto refill;
+ if (atomic_read(&nc->frag.page->_count) != nc->pagecnt_bias) {
+ if (!atomic_sub_and_test(nc->pagecnt_bias,
+ &nc->frag.page->_count))
+ goto refill;
+ /* OK, page count is 0, we can safely set it */
+ atomic_set(&nc->frag.page->_count,
+ NETDEV_PAGECNT_MAX_BIAS);
+ } else {
+ atomic_add(NETDEV_PAGECNT_MAX_BIAS - nc->pagecnt_bias,
+ &nc->frag.page->_count);
+ }
+ nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
+ nc->frag.offset = 0;
}
data = page_address(nc->frag.page) + nc->frag.offset;
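A hedged, self-contained sketch of the bias scheme this hunk preserves (structure and names are hypothetical): a large reference count is taken up front, fragments are handed out against a local bias, and a single atomic operation is paid when the page is exhausted. Per the comment in the hunk, atomic_add() is used instead of atomic_set() while the count is non-zero so a concurrent get_page_unless_zero() cannot have its reference silently overwritten:

struct frag_cache_example {
	struct page *page;
	unsigned int pagecnt_bias;	/* references we still hold locally */
};

static bool frag_recycle_example(struct frag_cache_example *c, int max_bias)
{
	if (atomic_read(&c->page->_count) == c->pagecnt_bias) {
		/* we hold every reference: top the count back up */
		atomic_add(max_bias - c->pagecnt_bias, &c->page->_count);
	} else if (atomic_sub_and_test(c->pagecnt_bias, &c->page->_count)) {
		/* count just hit zero, so a plain set is race-free */
		atomic_set(&c->page->_count, max_bias);
	} else {
		return false;	/* page still referenced elsewhere: refill */
	}
	c->pagecnt_bias = max_bias;
	return true;
}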
@@ -4126,11 +4137,11 @@ EXPORT_SYMBOL(skb_vlan_untag);
/**
* alloc_skb_with_frags - allocate skb with page frags
*
- * header_len: size of linear part
- * data_len: needed length in frags
- * max_page_order: max page order desired.
- * errcode: pointer to error code if any
- * gfp_mask: allocation mask
+ * @header_len: size of linear part
+ * @data_len: needed length in frags
+ * @max_page_order: max page order desired.
+ * @errcode: pointer to error code if any
+ * @gfp_mask: allocation mask
*
* This can be used to allocate a paged skb, given a maximal order for frags.
*/
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index f380b2c58178..31cd4fd75486 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -176,11 +176,11 @@ static void dns_resolver_free_preparse(struct key_preparsed_payload *prep)
* The domain name may be a simple name or an absolute domain name (which
* should end with a period). The domain name is case-independent.
*/
-static int
-dns_resolver_match(const struct key *key, const void *description)
+static bool dns_resolver_cmp(const struct key *key,
+ const struct key_match_data *match_data)
{
int slen, dlen, ret = 0;
- const char *src = key->description, *dsp = description;
+ const char *src = key->description, *dsp = match_data->raw_data;
kenter("%s,%s", src, dsp);
@@ -209,6 +209,16 @@ no_match:
}
/*
+ * Preparse the match criterion.
+ */
+static int dns_resolver_match_preparse(struct key_match_data *match_data)
+{
+ match_data->lookup_type = KEYRING_SEARCH_LOOKUP_ITERATE;
+ match_data->cmp = dns_resolver_cmp;
+ return 0;
+}
+
+/*
* Describe a DNS key
*/
static void dns_resolver_describe(const struct key *key, struct seq_file *m)
@@ -242,7 +252,7 @@ struct key_type key_type_dns_resolver = {
.preparse = dns_resolver_preparse,
.free_preparse = dns_resolver_free_preparse,
.instantiate = generic_key_instantiate,
- .match = dns_resolver_match,
+ .match_preparse = dns_resolver_match_preparse,
.revoke = user_revoke,
.destroy = user_destroy,
.describe = dns_resolver_describe,
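Hedged sketch of the new matching model for a hypothetical key type, mirroring the dns_resolver conversion above: instead of a .match method, the type supplies .match_preparse, which installs a comparison callback and the lookup strategy on the key_match_data:

static bool example_key_cmp(const struct key *key,
			    const struct key_match_data *match_data)
{
	/* case-insensitive description match, purely as an illustration */
	return strcasecmp(key->description, match_data->raw_data) == 0;
}

static int example_key_match_preparse(struct key_match_data *match_data)
{
	match_data->lookup_type = KEYRING_SEARCH_LOOKUP_ITERATE;
	match_data->cmp = example_key_cmp;
	return 0;	/* nothing allocated, so no .match_free needed */
}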
diff --git a/net/netfilter/nft_reject.c b/net/netfilter/nft_reject.c
index ec8a456092a7..57d3e1af5630 100644
--- a/net/netfilter/nft_reject.c
+++ b/net/netfilter/nft_reject.c
@@ -72,7 +72,7 @@ nla_put_failure:
}
EXPORT_SYMBOL_GPL(nft_reject_dump);
-static u8 icmp_code_v4[NFT_REJECT_ICMPX_MAX] = {
+static u8 icmp_code_v4[NFT_REJECT_ICMPX_MAX + 1] = {
[NFT_REJECT_ICMPX_NO_ROUTE] = ICMP_NET_UNREACH,
[NFT_REJECT_ICMPX_PORT_UNREACH] = ICMP_PORT_UNREACH,
[NFT_REJECT_ICMPX_HOST_UNREACH] = ICMP_HOST_UNREACH,
@@ -81,8 +81,7 @@ static u8 icmp_code_v4[NFT_REJECT_ICMPX_MAX] = {
int nft_reject_icmp_code(u8 code)
{
- if (code > NFT_REJECT_ICMPX_MAX)
- return -EINVAL;
+ BUG_ON(code > NFT_REJECT_ICMPX_MAX);
return icmp_code_v4[code];
}
@@ -90,7 +89,7 @@ int nft_reject_icmp_code(u8 code)
EXPORT_SYMBOL_GPL(nft_reject_icmp_code);
-static u8 icmp_code_v6[NFT_REJECT_ICMPX_MAX] = {
+static u8 icmp_code_v6[NFT_REJECT_ICMPX_MAX + 1] = {
[NFT_REJECT_ICMPX_NO_ROUTE] = ICMPV6_NOROUTE,
[NFT_REJECT_ICMPX_PORT_UNREACH] = ICMPV6_PORT_UNREACH,
[NFT_REJECT_ICMPX_HOST_UNREACH] = ICMPV6_ADDR_UNREACH,
@@ -99,8 +98,7 @@ static u8 icmp_code_v6[NFT_REJECT_ICMPX_MAX] = {
int nft_reject_icmpv6_code(u8 code)
{
- if (code > NFT_REJECT_ICMPX_MAX)
- return -EINVAL;
+ BUG_ON(code > NFT_REJECT_ICMPX_MAX);
return icmp_code_v6[code];
}
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index 0b4692dd1c5e..a845cd4cf21e 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -246,7 +246,6 @@ int netlbl_cfg_unlbl_static_add(struct net *net,
* @addr: IP address in network byte order (struct in[6]_addr)
* @mask: address mask in network byte order (struct in[6]_addr)
* @family: address family
- * @secid: LSM secid value for the entry
* @audit_info: NetLabel audit information
*
* Description:
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index c416725d28c4..7a186e74b1b3 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -715,7 +715,7 @@ static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
* after validation, the socket and the ring may only be used by a
* single process, otherwise we fall back to copying.
*/
- if (atomic_long_read(&sk->sk_socket->file->f_count) > 2 ||
+ if (atomic_long_read(&sk->sk_socket->file->f_count) > 1 ||
atomic_read(&nlk->mapped) > 1)
excl = false;
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index b3b16c070a7f..fa7cd792791c 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -329,7 +329,7 @@ static atomic_t rfkill_input_disabled = ATOMIC_INIT(0);
/**
* __rfkill_switch_all - Toggle state of all switches of given type
* @type: type of interfaces to be affected
- * @state: the new state
+ * @blocked: the new state
*
* This function sets the state of all switches of given type,
* unless a specific switch is claimed by userspace (in which case,
@@ -353,7 +353,7 @@ static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
/**
* rfkill_switch_all - Toggle state of all switches of given type
* @type: type of interfaces to be affected
- * @state: the new state
+ * @blocked: the new state
*
* Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @state).
* Please refer to __rfkill_switch_all() for details.
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
index 1b24191167f1..db0f39f5ef96 100644
--- a/net/rxrpc/ar-key.c
+++ b/net/rxrpc/ar-key.c
@@ -44,7 +44,6 @@ struct key_type key_type_rxrpc = {
.preparse = rxrpc_preparse,
.free_preparse = rxrpc_free_preparse,
.instantiate = generic_key_instantiate,
- .match = user_match,
.destroy = rxrpc_destroy,
.describe = rxrpc_describe,
.read = rxrpc_read,
@@ -61,7 +60,6 @@ struct key_type key_type_rxrpc_s = {
.preparse = rxrpc_preparse_s,
.free_preparse = rxrpc_free_preparse_s,
.instantiate = generic_key_instantiate,
- .match = user_match,
.destroy = rxrpc_destroy_s,
.describe = rxrpc_describe,
};
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 38d58e6cef07..6efca30894aa 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -57,7 +57,8 @@ static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
static void try_bulk_dequeue_skb(struct Qdisc *q,
struct sk_buff *skb,
- const struct netdev_queue *txq)
+ const struct netdev_queue *txq,
+ int *packets)
{
int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;
@@ -70,6 +71,7 @@ static void try_bulk_dequeue_skb(struct Qdisc *q,
bytelimit -= nskb->len; /* covers GSO len */
skb->next = nskb;
skb = nskb;
+ (*packets)++; /* GSO counts as one pkt */
}
skb->next = NULL;
}
@@ -77,11 +79,13 @@ static void try_bulk_dequeue_skb(struct Qdisc *q,
/* Note that dequeue_skb can possibly return a SKB list (via skb->next).
* A requeued skb (via q->gso_skb) can also be a SKB list.
*/
-static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate)
+static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
+ int *packets)
{
struct sk_buff *skb = q->gso_skb;
const struct netdev_queue *txq = q->dev_queue;
+ *packets = 1;
*validate = true;
if (unlikely(skb)) {
/* check the reason of requeuing without tx lock first */
@@ -98,7 +102,7 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate)
!netif_xmit_frozen_or_stopped(txq)) {
skb = q->dequeue(q);
if (skb && qdisc_may_bulk(q))
- try_bulk_dequeue_skb(q, skb, txq);
+ try_bulk_dequeue_skb(q, skb, txq, packets);
}
}
return skb;
@@ -204,7 +208,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
* >0 - queue is not empty.
*
*/
-static inline int qdisc_restart(struct Qdisc *q)
+static inline int qdisc_restart(struct Qdisc *q, int *packets)
{
struct netdev_queue *txq;
struct net_device *dev;
@@ -213,7 +217,7 @@ static inline int qdisc_restart(struct Qdisc *q)
bool validate;
/* Dequeue packet */
- skb = dequeue_skb(q, &validate);
+ skb = dequeue_skb(q, &validate, packets);
if (unlikely(!skb))
return 0;
@@ -227,14 +231,16 @@ static inline int qdisc_restart(struct Qdisc *q)
void __qdisc_run(struct Qdisc *q)
{
int quota = weight_p;
+ int packets;
- while (qdisc_restart(q)) {
+ while (qdisc_restart(q, &packets)) {
/*
* Ordered by possible occurrence: Postpone processing if
* 1. we've exceeded packet quota
* 2. another process needs the CPU;
*/
- if (--quota <= 0 || need_resched()) {
+ quota -= packets;
+ if (quota <= 0 || need_resched()) {
__netif_schedule(q);
break;
}
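A hedged, simplified model of the new quota accounting (all helper names are hypothetical): the quota now bounds packets sent rather than dequeue rounds, so a qdisc that bulk-dequeues several GSO segments per round yields the CPU proportionally sooner:

static void qdisc_run_model(int weight)
{
	int quota = weight;
	int packets;

	while (more_work()) {
		packets = restart_once();	/* e.g. 8 when bulking */
		quota -= packets;		/* was: --quota */
		if (quota <= 0 || should_yield()) {
			reschedule();
			break;
		}
	}
}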
diff --git a/net/socket.c b/net/socket.c
index ffd9cb46902b..fe20c319a0bb 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1065,7 +1065,8 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
err = -EFAULT;
if (get_user(pid, (int __user *)argp))
break;
- err = f_setown(sock->file, pid, 1);
+ f_setown(sock->file, pid, 1);
+ err = 0;
break;
case FIOGETOWN:
case SIOCGPGRP:
diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
index 650ecc83d7d7..001facfa5b74 100644
--- a/scripts/recordmcount.c
+++ b/scripts/recordmcount.c
@@ -388,10 +388,6 @@ do_file(char const *const fname)
"unrecognized ET_REL file: %s\n", fname);
fail_file();
}
- if (w2(ehdr->e_machine) == EM_S390) {
- reltype = R_390_32;
- mcount_adjust_32 = -4;
- }
if (w2(ehdr->e_machine) == EM_MIPS) {
reltype = R_MIPS_32;
is_fake_mcount32 = MIPS32_is_fake_mcount;
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index 397b6b84e8c5..d4b665610d67 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -241,13 +241,6 @@ if ($arch eq "x86_64") {
$objcopy .= " -O elf32-i386";
$cc .= " -m32";
-} elsif ($arch eq "s390" && $bits == 32) {
- $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_390_32\\s+_mcount\$";
- $mcount_adjust = -4;
- $alignment = 4;
- $ld .= " -m elf_s390";
- $cc .= " -m31";
-
} elsif ($arch eq "s390" && $bits == 64) {
$mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_390_(PC|PLT)32DBL\\s+_mcount\\+0x2\$";
$mcount_adjust = -8;
diff --git a/security/capability.c b/security/capability.c
index a74fde6a7468..d68c57a62bcf 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -343,9 +343,9 @@ static int cap_file_fcntl(struct file *file, unsigned int cmd,
return 0;
}
-static int cap_file_set_fowner(struct file *file)
+static void cap_file_set_fowner(struct file *file)
{
- return 0;
+ return;
}
static int cap_file_send_sigiotask(struct task_struct *tsk,
diff --git a/security/integrity/Kconfig b/security/integrity/Kconfig
index 245c6d92065b..b76235ae4786 100644
--- a/security/integrity/Kconfig
+++ b/security/integrity/Kconfig
@@ -1,11 +1,23 @@
#
config INTEGRITY
- def_bool y
- depends on IMA || EVM
+ bool "Integrity subsystem"
+ depends on SECURITY
+ default y
+ help
+ This option enables the integrity subsystem, which is comprised
+ of a number of different components including the Integrity
+ Measurement Architecture (IMA), Extended Verification Module
+ (EVM), IMA-appraisal extension, digital signature verification
+ extension and audit measurement log support.
+
+ Each of these components can be enabled/disabled separately.
+ Refer to the individual components for additional details.
+
+if INTEGRITY
config INTEGRITY_SIGNATURE
boolean "Digital signature verification using multiple keyrings"
- depends on INTEGRITY && KEYS
+ depends on KEYS
default n
select SIGNATURE
help
@@ -17,9 +29,21 @@ config INTEGRITY_SIGNATURE
This is useful for evm and module keyrings, when keys are
usually only added from initramfs.
+config INTEGRITY_ASYMMETRIC_KEYS
+ boolean "Enable asymmetric keys support"
+ depends on INTEGRITY_SIGNATURE
+ default n
+ select ASYMMETRIC_KEY_TYPE
+ select ASYMMETRIC_PUBLIC_KEY_SUBTYPE
+ select PUBLIC_KEY_ALGO_RSA
+ select X509_CERTIFICATE_PARSER
+ help
+ This option enables digital signature verification using
+ asymmetric keys.
+
config INTEGRITY_AUDIT
bool "Enables integrity auditing support "
- depends on INTEGRITY && AUDIT
+ depends on AUDIT
default y
help
In addition to enabling integrity auditing support, this
@@ -32,17 +56,7 @@ config INTEGRITY_AUDIT
be enabled by specifying 'integrity_audit=1' on the kernel
command line.
-config INTEGRITY_ASYMMETRIC_KEYS
- boolean "Enable asymmetric keys support"
- depends on INTEGRITY_SIGNATURE
- default n
- select ASYMMETRIC_KEY_TYPE
- select ASYMMETRIC_PUBLIC_KEY_SUBTYPE
- select PUBLIC_KEY_ALGO_RSA
- select X509_CERTIFICATE_PARSER
- help
- This option enables digital signature verification using
- asymmetric keys.
-
source security/integrity/ima/Kconfig
source security/integrity/evm/Kconfig
+
+endif # if INTEGRITY
diff --git a/security/integrity/Makefile b/security/integrity/Makefile
index 0793f4811cb7..8d1f4bf51087 100644
--- a/security/integrity/Makefile
+++ b/security/integrity/Makefile
@@ -3,11 +3,11 @@
#
obj-$(CONFIG_INTEGRITY) += integrity.o
-obj-$(CONFIG_INTEGRITY_AUDIT) += integrity_audit.o
-obj-$(CONFIG_INTEGRITY_SIGNATURE) += digsig.o
-obj-$(CONFIG_INTEGRITY_ASYMMETRIC_KEYS) += digsig_asymmetric.o
integrity-y := iint.o
+integrity-$(CONFIG_INTEGRITY_AUDIT) += integrity_audit.o
+integrity-$(CONFIG_INTEGRITY_SIGNATURE) += digsig.o
+integrity-$(CONFIG_INTEGRITY_ASYMMETRIC_KEYS) += digsig_asymmetric.o
subdir-$(CONFIG_IMA) += ima
obj-$(CONFIG_IMA) += ima/
diff --git a/security/integrity/digsig_asymmetric.c b/security/integrity/digsig_asymmetric.c
index 9eae4809006b..4fec1816a2b3 100644
--- a/security/integrity/digsig_asymmetric.c
+++ b/security/integrity/digsig_asymmetric.c
@@ -13,6 +13,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/err.h>
+#include <linux/ratelimit.h>
#include <linux/key-type.h>
#include <crypto/public_key.h>
#include <keys/asymmetric-type.h>
@@ -27,7 +28,7 @@ static struct key *request_asymmetric_key(struct key *keyring, uint32_t keyid)
struct key *key;
char name[12];
- sprintf(name, "id:%x", keyid);
+ sprintf(name, "id:%08x", keyid);
pr_debug("key search: \"%s\"\n", name);
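Worked example of the reformatted search string (the keyid value is illustrative): with %08x a 32-bit key id is always rendered as eight hex digits, so short ids keep their leading zeroes — presumably so lookups match the zero-padded form — and the 12-byte buffer ("id:" + 8 digits + NUL) is exactly filled:

char name[12];

sprintf(name, "id:%08x", 0x1234u);	/* "id:00001234" */
sprintf(name, "id:%x", 0x1234u);	/* old format: "id:1234" */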
@@ -45,8 +46,8 @@ static struct key *request_asymmetric_key(struct key *keyring, uint32_t keyid)
}
if (IS_ERR(key)) {
- pr_warn("Request for unknown key '%s' err %ld\n",
- name, PTR_ERR(key));
+ pr_err_ratelimited("Request for unknown key '%s' err %ld\n",
+ name, PTR_ERR(key));
switch (PTR_ERR(key)) {
/* Hide some search errors */
case -EACCES:
diff --git a/security/integrity/evm/Kconfig b/security/integrity/evm/Kconfig
index d606f3d12d6b..df586fa00ef1 100644
--- a/security/integrity/evm/Kconfig
+++ b/security/integrity/evm/Kconfig
@@ -1,6 +1,5 @@
config EVM
boolean "EVM support"
- depends on SECURITY
select KEYS
select ENCRYPTED_KEYS
select CRYPTO_HMAC
@@ -12,10 +11,6 @@ config EVM
If you are unsure how to answer this question, answer N.
-if EVM
-
-menu "EVM options"
-
config EVM_ATTR_FSUUID
bool "FSUUID (version 2)"
default y
@@ -47,6 +42,3 @@ config EVM_EXTRA_SMACK_XATTRS
additional info to the calculation, requires existing EVM
labeled file systems to be relabeled.
-endmenu
-
-endif
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
index 3bcb80df4d01..9685af330de5 100644
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -126,14 +126,15 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,
rc = vfs_getxattr_alloc(dentry, XATTR_NAME_EVM, (char **)&xattr_data, 0,
GFP_NOFS);
if (rc <= 0) {
- if (rc == 0)
- evm_status = INTEGRITY_FAIL; /* empty */
- else if (rc == -ENODATA) {
+ evm_status = INTEGRITY_FAIL;
+ if (rc == -ENODATA) {
rc = evm_find_protected_xattrs(dentry);
if (rc > 0)
evm_status = INTEGRITY_NOLABEL;
else if (rc == 0)
evm_status = INTEGRITY_NOXATTRS; /* new file */
+ } else if (rc == -EOPNOTSUPP) {
+ evm_status = INTEGRITY_UNKNOWN;
}
goto out;
}
@@ -284,6 +285,13 @@ static int evm_protect_xattr(struct dentry *dentry, const char *xattr_name,
goto out;
}
evm_status = evm_verify_current_integrity(dentry);
+ if (evm_status == INTEGRITY_NOXATTRS) {
+ struct integrity_iint_cache *iint;
+
+ iint = integrity_iint_find(dentry->d_inode);
+ if (iint && (iint->flags & IMA_NEW_FILE))
+ return 0;
+ }
out:
if (evm_status != INTEGRITY_PASS)
integrity_audit_msg(AUDIT_INTEGRITY_METADATA, dentry->d_inode,
@@ -352,7 +360,6 @@ void evm_inode_post_setxattr(struct dentry *dentry, const char *xattr_name,
return;
evm_update_evmxattr(dentry, xattr_name, xattr_value, xattr_value_len);
- return;
}
/**
@@ -372,7 +379,6 @@ void evm_inode_post_removexattr(struct dentry *dentry, const char *xattr_name)
mutex_lock(&inode->i_mutex);
evm_update_evmxattr(dentry, xattr_name, NULL, 0);
mutex_unlock(&inode->i_mutex);
- return;
}
/**
@@ -414,7 +420,6 @@ void evm_inode_post_setattr(struct dentry *dentry, int ia_valid)
if (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID))
evm_update_evmxattr(dentry, NULL, NULL, 0);
- return;
}
/*
diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
index 08758fbd496f..e099875643c5 100644
--- a/security/integrity/ima/Kconfig
+++ b/security/integrity/ima/Kconfig
@@ -2,8 +2,6 @@
#
config IMA
bool "Integrity Measurement Architecture(IMA)"
- depends on SECURITY
- select INTEGRITY
select SECURITYFS
select CRYPTO
select CRYPTO_HMAC
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index 57da4bd7ba0c..8ee997dff139 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -43,6 +43,9 @@ enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
#define IMA_TEMPLATE_IMA_NAME "ima"
#define IMA_TEMPLATE_IMA_FMT "d|n"
+/* current content of the policy */
+extern int ima_policy_flag;
+
/* set during initialization */
extern int ima_initialized;
extern int ima_used_chip;
@@ -90,10 +93,7 @@ extern struct list_head ima_measurements; /* list of all measurements */
/* Internal IMA function definitions */
int ima_init(void);
-void ima_cleanup(void);
int ima_fs_init(void);
-void ima_fs_cleanup(void);
-int ima_inode_alloc(struct inode *inode);
int ima_add_template_entry(struct ima_template_entry *entry, int violation,
const char *op, struct inode *inode,
const unsigned char *filename);
@@ -110,8 +110,6 @@ void ima_print_digest(struct seq_file *m, u8 *digest, int size);
struct ima_template_desc *ima_template_desc_current(void);
int ima_init_template(void);
-int ima_init_template(void);
-
/*
* used to protect h_table and sha_table
*/
@@ -151,12 +149,6 @@ int ima_store_template(struct ima_template_entry *entry, int violation,
void ima_free_template_entry(struct ima_template_entry *entry);
const char *ima_d_path(struct path *path, char **pathbuf);
-/* rbtree tree calls to lookup, insert, delete
- * integrity data associated with an inode.
- */
-struct integrity_iint_cache *integrity_iint_insert(struct inode *inode);
-struct integrity_iint_cache *integrity_iint_find(struct inode *inode);
-
/* IMA policy related functions */
enum ima_hooks { FILE_CHECK = 1, MMAP_CHECK, BPRM_CHECK, MODULE_CHECK, FIRMWARE_CHECK, POST_SETATTR };
@@ -164,20 +156,22 @@ int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask,
int flags);
void ima_init_policy(void);
void ima_update_policy(void);
+void ima_update_policy_flag(void);
ssize_t ima_parse_add_rule(char *);
void ima_delete_rules(void);
/* Appraise integrity measurements */
#define IMA_APPRAISE_ENFORCE 0x01
#define IMA_APPRAISE_FIX 0x02
-#define IMA_APPRAISE_MODULES 0x04
-#define IMA_APPRAISE_FIRMWARE 0x08
+#define IMA_APPRAISE_LOG 0x04
+#define IMA_APPRAISE_MODULES 0x08
+#define IMA_APPRAISE_FIRMWARE 0x10
#ifdef CONFIG_IMA_APPRAISE
int ima_appraise_measurement(int func, struct integrity_iint_cache *iint,
struct file *file, const unsigned char *filename,
struct evm_ima_xattr_data *xattr_value,
- int xattr_len);
+ int xattr_len, int opened);
int ima_must_appraise(struct inode *inode, int mask, enum ima_hooks func);
void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file);
enum integrity_status ima_get_cache_status(struct integrity_iint_cache *iint,
@@ -193,7 +187,7 @@ static inline int ima_appraise_measurement(int func,
struct file *file,
const unsigned char *filename,
struct evm_ima_xattr_data *xattr_value,
- int xattr_len)
+ int xattr_len, int opened)
{
return INTEGRITY_UNKNOWN;
}
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
index d9cd5ce14d2b..86885979918c 100644
--- a/security/integrity/ima/ima_api.c
+++ b/security/integrity/ima/ima_api.c
@@ -179,11 +179,6 @@ int ima_get_action(struct inode *inode, int mask, int function)
return ima_match_policy(inode, function, mask, flags);
}
-int ima_must_measure(struct inode *inode, int mask, int function)
-{
- return ima_match_policy(inode, function, mask, IMA_MEASURE);
-}
-
/*
* ima_collect_measurement - collect file measurement
*
@@ -330,10 +325,9 @@ const char *ima_d_path(struct path *path, char **pathbuf)
{
char *pathname = NULL;
- /* We will allow 11 spaces for ' (deleted)' to be appended */
- *pathbuf = kmalloc(PATH_MAX + 11, GFP_KERNEL);
+ *pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
if (*pathbuf) {
- pathname = d_path(path, *pathbuf, PATH_MAX + 11);
+ pathname = d_absolute_path(path, *pathbuf, PATH_MAX);
if (IS_ERR(pathname)) {
kfree(*pathbuf);
*pathbuf = NULL;
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
index 86bfd5c5df85..922685483bd3 100644
--- a/security/integrity/ima/ima_appraise.c
+++ b/security/integrity/ima/ima_appraise.c
@@ -23,6 +23,8 @@ static int __init default_appraise_setup(char *str)
{
if (strncmp(str, "off", 3) == 0)
ima_appraise = 0;
+ else if (strncmp(str, "log", 3) == 0)
+ ima_appraise = IMA_APPRAISE_LOG;
else if (strncmp(str, "fix", 3) == 0)
ima_appraise = IMA_APPRAISE_FIX;
return 1;
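Hedged usage note: with this setup hook extended, booting with

    ima_appraise=log

(alongside an appraisal policy such as ima_appraise_tcb) selects IMA_APPRAISE_LOG. As far as this series shows, that leaves IMA_APPRAISE_ENFORCE clear, so the enforcement check later in process_measurement() reports appraisal failures instead of returning -EACCES, and unlike "fix" mode no xattrs are rewritten.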
@@ -183,7 +185,7 @@ int ima_read_xattr(struct dentry *dentry,
int ima_appraise_measurement(int func, struct integrity_iint_cache *iint,
struct file *file, const unsigned char *filename,
struct evm_ima_xattr_data *xattr_value,
- int xattr_len)
+ int xattr_len, int opened)
{
static const char op[] = "appraise_data";
char *cause = "unknown";
@@ -192,8 +194,6 @@ int ima_appraise_measurement(int func, struct integrity_iint_cache *iint,
enum integrity_status status = INTEGRITY_UNKNOWN;
int rc = xattr_len, hash_start = 0;
- if (!ima_appraise)
- return 0;
if (!inode->i_op->getxattr)
return INTEGRITY_UNKNOWN;
@@ -202,8 +202,11 @@ int ima_appraise_measurement(int func, struct integrity_iint_cache *iint,
goto out;
cause = "missing-hash";
- status =
- (inode->i_size == 0) ? INTEGRITY_PASS : INTEGRITY_NOLABEL;
+ status = INTEGRITY_NOLABEL;
+ if (opened & FILE_CREATED) {
+ iint->flags |= IMA_NEW_FILE;
+ status = INTEGRITY_PASS;
+ }
goto out;
}
@@ -315,7 +318,7 @@ void ima_inode_post_setattr(struct dentry *dentry)
struct integrity_iint_cache *iint;
int must_appraise, rc;
- if (!ima_initialized || !ima_appraise || !S_ISREG(inode->i_mode)
+ if (!(ima_policy_flag & IMA_APPRAISE) || !S_ISREG(inode->i_mode)
|| !inode->i_op->removexattr)
return;
@@ -353,7 +356,7 @@ static void ima_reset_appraise_flags(struct inode *inode, int digsig)
{
struct integrity_iint_cache *iint;
- if (!ima_initialized || !ima_appraise || !S_ISREG(inode->i_mode))
+ if (!(ima_policy_flag & IMA_APPRAISE) || !S_ISREG(inode->i_mode))
return;
iint = integrity_iint_find(inode);
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index 0bd732843fe7..d34e7dfc1118 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -80,24 +80,24 @@ static int ima_kernel_read(struct file *file, loff_t offset,
{
mm_segment_t old_fs;
char __user *buf = addr;
- ssize_t ret;
+ ssize_t ret = -EINVAL;
if (!(file->f_mode & FMODE_READ))
return -EBADF;
- if (!file->f_op->read && !file->f_op->aio_read)
- return -EINVAL;
old_fs = get_fs();
set_fs(get_ds());
if (file->f_op->read)
ret = file->f_op->read(file, buf, count, &offset);
- else
+ else if (file->f_op->aio_read)
ret = do_sync_read(file, buf, count, &offset);
+ else if (file->f_op->read_iter)
+ ret = new_sync_read(file, buf, count, &offset);
set_fs(old_fs);
return ret;
}
-int ima_init_crypto(void)
+int __init ima_init_crypto(void)
{
long rc;
@@ -116,7 +116,10 @@ static struct crypto_shash *ima_alloc_tfm(enum hash_algo algo)
struct crypto_shash *tfm = ima_shash_tfm;
int rc;
- if (algo != ima_hash_algo && algo < HASH_ALGO__LAST) {
+ if (algo < 0 || algo >= HASH_ALGO__LAST)
+ algo = ima_hash_algo;
+
+ if (algo != ima_hash_algo) {
tfm = crypto_alloc_shash(hash_algo_name[algo], 0, 0);
if (IS_ERR(tfm)) {
rc = PTR_ERR(tfm);
@@ -200,7 +203,10 @@ static struct crypto_ahash *ima_alloc_atfm(enum hash_algo algo)
struct crypto_ahash *tfm = ima_ahash_tfm;
int rc;
- if ((algo != ima_hash_algo && algo < HASH_ALGO__LAST) || !tfm) {
+ if (algo < 0 || algo >= HASH_ALGO__LAST)
+ algo = ima_hash_algo;
+
+ if (algo != ima_hash_algo || !tfm) {
tfm = crypto_alloc_ahash(hash_algo_name[algo], 0, 0);
if (!IS_ERR(tfm)) {
if (algo == ima_hash_algo)
diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c
index e8f9d70a465d..9164fc8cac84 100644
--- a/security/integrity/ima/ima_init.c
+++ b/security/integrity/ima/ima_init.c
@@ -43,7 +43,7 @@ int ima_used_chip;
* a different value.) Violations add a zero entry to the measurement
* list and extend the aggregate PCR value with ff...ff's.
*/
-static void __init ima_add_boot_aggregate(void)
+static int __init ima_add_boot_aggregate(void)
{
static const char op[] = "add_boot_aggregate";
const char *audit_cause = "ENOMEM";
@@ -72,17 +72,23 @@ static void __init ima_add_boot_aggregate(void)
result = ima_alloc_init_template(iint, NULL, boot_aggregate_name,
NULL, 0, &entry);
- if (result < 0)
- return;
+ if (result < 0) {
+ audit_cause = "alloc_entry";
+ goto err_out;
+ }
result = ima_store_template(entry, violation, NULL,
boot_aggregate_name);
- if (result < 0)
+ if (result < 0) {
ima_free_template_entry(entry);
- return;
+ audit_cause = "store_entry";
+ goto err_out;
+ }
+ return 0;
err_out:
integrity_audit_msg(AUDIT_INTEGRITY_PCR, NULL, boot_aggregate_name, op,
audit_cause, result, 0);
+ return result;
}
int __init ima_init(void)
@@ -98,6 +104,10 @@ int __init ima_init(void)
if (!ima_used_chip)
pr_info("No TPM chip found, activating TPM-bypass!\n");
+ rc = ima_init_keyring(INTEGRITY_KEYRING_IMA);
+ if (rc)
+ return rc;
+
rc = ima_init_crypto();
if (rc)
return rc;
@@ -105,7 +115,10 @@ int __init ima_init(void)
if (rc != 0)
return rc;
- ima_add_boot_aggregate(); /* boot aggregate must be first entry */
+ rc = ima_add_boot_aggregate(); /* boot aggregate must be first entry */
+ if (rc != 0)
+ return rc;
+
ima_init_policy();
return ima_fs_init();
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 2917f980bf30..62f59eca32d3 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -77,42 +77,39 @@ __setup("ima_hash=", hash_setup);
* could result in a file measurement error.
*
*/
-static void ima_rdwr_violation_check(struct file *file)
+static void ima_rdwr_violation_check(struct file *file,
+ struct integrity_iint_cache *iint,
+ int must_measure,
+ char **pathbuf,
+ const char **pathname)
{
struct inode *inode = file_inode(file);
fmode_t mode = file->f_mode;
bool send_tomtou = false, send_writers = false;
- char *pathbuf = NULL;
- const char *pathname;
-
- if (!S_ISREG(inode->i_mode) || !ima_initialized)
- return;
if (mode & FMODE_WRITE) {
if (atomic_read(&inode->i_readcount) && IS_IMA(inode)) {
- struct integrity_iint_cache *iint;
- iint = integrity_iint_find(inode);
+ if (!iint)
+ iint = integrity_iint_find(inode);
/* IMA_MEASURE is set from reader side */
if (iint && (iint->flags & IMA_MEASURE))
send_tomtou = true;
}
} else {
- if ((atomic_read(&inode->i_writecount) > 0) &&
- ima_must_measure(inode, MAY_READ, FILE_CHECK))
+ if ((atomic_read(&inode->i_writecount) > 0) && must_measure)
send_writers = true;
}
if (!send_tomtou && !send_writers)
return;
- pathname = ima_d_path(&file->f_path, &pathbuf);
+ *pathname = ima_d_path(&file->f_path, pathbuf);
if (send_tomtou)
- ima_add_violation(file, pathname, "invalid_pcr", "ToMToU");
+ ima_add_violation(file, *pathname, "invalid_pcr", "ToMToU");
if (send_writers)
- ima_add_violation(file, pathname,
+ ima_add_violation(file, *pathname,
"invalid_pcr", "open_writers");
- kfree(pathbuf);
}
static void ima_check_last_writer(struct integrity_iint_cache *iint,
@@ -124,11 +121,13 @@ static void ima_check_last_writer(struct integrity_iint_cache *iint,
return;
mutex_lock(&inode->i_mutex);
- if (atomic_read(&inode->i_writecount) == 1 &&
- iint->version != inode->i_version) {
- iint->flags &= ~IMA_DONE_MASK;
- if (iint->flags & IMA_APPRAISE)
- ima_update_xattr(iint, file);
+ if (atomic_read(&inode->i_writecount) == 1) {
+ if ((iint->version != inode->i_version) ||
+ (iint->flags & IMA_NEW_FILE)) {
+ iint->flags &= ~(IMA_DONE_MASK | IMA_NEW_FILE);
+ if (iint->flags & IMA_APPRAISE)
+ ima_update_xattr(iint, file);
+ }
}
mutex_unlock(&inode->i_mutex);
}
@@ -154,19 +153,20 @@ void ima_file_free(struct file *file)
ima_check_last_writer(iint, inode, file);
}
-static int process_measurement(struct file *file, const char *filename,
- int mask, int function)
+static int process_measurement(struct file *file, int mask, int function,
+ int opened)
{
struct inode *inode = file_inode(file);
- struct integrity_iint_cache *iint;
+ struct integrity_iint_cache *iint = NULL;
struct ima_template_desc *template_desc;
char *pathbuf = NULL;
const char *pathname = NULL;
- int rc = -ENOMEM, action, must_appraise, _func;
+ int rc = -ENOMEM, action, must_appraise;
struct evm_ima_xattr_data *xattr_value = NULL, **xattr_ptr = NULL;
int xattr_len = 0;
+ bool violation_check;
- if (!ima_initialized || !S_ISREG(inode->i_mode))
+ if (!ima_policy_flag || !S_ISREG(inode->i_mode))
return 0;
/* Return an IMA_MEASURE, IMA_APPRAISE, IMA_AUDIT action
@@ -174,19 +174,33 @@ static int process_measurement(struct file *file, const char *filename,
* Included is the appraise submask.
*/
action = ima_get_action(inode, mask, function);
- if (!action)
+ violation_check = ((function == FILE_CHECK || function == MMAP_CHECK) &&
+ (ima_policy_flag & IMA_MEASURE));
+ if (!action && !violation_check)
return 0;
must_appraise = action & IMA_APPRAISE;
/* Is the appraise rule hook specific? */
- _func = (action & IMA_FILE_APPRAISE) ? FILE_CHECK : function;
+ if (action & IMA_FILE_APPRAISE)
+ function = FILE_CHECK;
mutex_lock(&inode->i_mutex);
- iint = integrity_inode_get(inode);
- if (!iint)
- goto out;
+ if (action) {
+ iint = integrity_inode_get(inode);
+ if (!iint)
+ goto out;
+ }
+
+ if (violation_check) {
+ ima_rdwr_violation_check(file, iint, action & IMA_MEASURE,
+ &pathbuf, &pathname);
+ if (!action) {
+ rc = 0;
+ goto out_free;
+ }
+ }
/* Determine if already appraised/measured based on bitmask
* (IMA_MEASURE, IMA_MEASURED, IMA_XXXX_APPRAISE, IMA_XXXX_APPRAISED,
@@ -199,15 +213,13 @@ static int process_measurement(struct file *file, const char *filename,
/* Nothing to do, just return existing appraised status */
if (!action) {
if (must_appraise)
- rc = ima_get_cache_status(iint, _func);
+ rc = ima_get_cache_status(iint, function);
goto out_digsig;
}
template_desc = ima_template_desc_current();
- if (strcmp(template_desc->name, IMA_TEMPLATE_IMA_NAME) == 0) {
- if (action & IMA_APPRAISE_SUBMASK)
- xattr_ptr = &xattr_value;
- } else
+ if ((action & IMA_APPRAISE_SUBMASK) ||
+ strcmp(template_desc->name, IMA_TEMPLATE_IMA_NAME) != 0)
xattr_ptr = &xattr_value;
rc = ima_collect_measurement(iint, file, xattr_ptr, &xattr_len);
@@ -217,23 +229,26 @@ static int process_measurement(struct file *file, const char *filename,
goto out_digsig;
}
- pathname = filename ?: ima_d_path(&file->f_path, &pathbuf);
+ if (!pathname) /* ima_rdwr_violation possibly pre-fetched */
+ pathname = ima_d_path(&file->f_path, &pathbuf);
if (action & IMA_MEASURE)
ima_store_measurement(iint, file, pathname,
xattr_value, xattr_len);
if (action & IMA_APPRAISE_SUBMASK)
- rc = ima_appraise_measurement(_func, iint, file, pathname,
- xattr_value, xattr_len);
+ rc = ima_appraise_measurement(function, iint, file, pathname,
+ xattr_value, xattr_len, opened);
if (action & IMA_AUDIT)
ima_audit_measurement(iint, pathname);
- kfree(pathbuf);
+
out_digsig:
if ((mask & MAY_WRITE) && (iint->flags & IMA_DIGSIG))
rc = -EACCES;
+ kfree(xattr_value);
+out_free:
+ kfree(pathbuf);
out:
mutex_unlock(&inode->i_mutex);
- kfree(xattr_value);
if ((rc && must_appraise) && (ima_appraise & IMA_APPRAISE_ENFORCE))
return -EACCES;
return 0;
@@ -253,7 +268,7 @@ out:
int ima_file_mmap(struct file *file, unsigned long prot)
{
if (file && (prot & PROT_EXEC))
- return process_measurement(file, NULL, MAY_EXEC, MMAP_CHECK);
+ return process_measurement(file, MAY_EXEC, MMAP_CHECK, 0);
return 0;
}
@@ -272,10 +287,7 @@ int ima_file_mmap(struct file *file, unsigned long prot)
*/
int ima_bprm_check(struct linux_binprm *bprm)
{
- return process_measurement(bprm->file,
- (strcmp(bprm->filename, bprm->interp) == 0) ?
- bprm->filename : bprm->interp,
- MAY_EXEC, BPRM_CHECK);
+ return process_measurement(bprm->file, MAY_EXEC, BPRM_CHECK, 0);
}
/**
@@ -288,12 +300,11 @@ int ima_bprm_check(struct linux_binprm *bprm)
* On success return 0. On integrity appraisal error, assuming the file
* is in policy and IMA-appraisal is in enforcing mode, return -EACCES.
*/
-int ima_file_check(struct file *file, int mask)
+int ima_file_check(struct file *file, int mask, int opened)
{
- ima_rdwr_violation_check(file);
- return process_measurement(file, NULL,
+ return process_measurement(file,
mask & (MAY_READ | MAY_WRITE | MAY_EXEC),
- FILE_CHECK);
+ FILE_CHECK, opened);
}
EXPORT_SYMBOL_GPL(ima_file_check);
@@ -316,7 +327,7 @@ int ima_module_check(struct file *file)
#endif
return 0; /* We rely on module signature checking */
}
- return process_measurement(file, NULL, MAY_EXEC, MODULE_CHECK);
+ return process_measurement(file, MAY_EXEC, MODULE_CHECK, 0);
}
int ima_fw_from_file(struct file *file, char *buf, size_t size)
@@ -327,7 +338,7 @@ int ima_fw_from_file(struct file *file, char *buf, size_t size)
return -EACCES; /* INTEGRITY_UNKNOWN */
return 0;
}
- return process_measurement(file, NULL, MAY_EXEC, FIRMWARE_CHECK);
+ return process_measurement(file, MAY_EXEC, FIRMWARE_CHECK, 0);
}
static int __init init_ima(void)
@@ -336,14 +347,10 @@ static int __init init_ima(void)
hash_setup(CONFIG_IMA_DEFAULT_HASH);
error = ima_init();
- if (error)
- goto out;
-
- error = ima_init_keyring(INTEGRITY_KEYRING_IMA);
- if (error)
- goto out;
- ima_initialized = 1;
-out:
+ if (!error) {
+ ima_initialized = 1;
+ ima_update_policy_flag();
+ }
return error;
}
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index 07099a8bc283..cdc620b2152f 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -35,6 +35,8 @@
#define DONT_APPRAISE 0x0008
#define AUDIT 0x0040
+int ima_policy_flag;
+
#define MAX_LSM_RULES 6
enum lsm_rule_types { LSM_OBJ_USER, LSM_OBJ_ROLE, LSM_OBJ_TYPE,
LSM_SUBJ_USER, LSM_SUBJ_ROLE, LSM_SUBJ_TYPE
@@ -295,6 +297,26 @@ int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask,
return action;
}
+/*
+ * Initialize the ima_policy_flag variable based on the currently
+ * loaded policy. Based on this flag, the decision to short circuit
+ * out of a function or not call the function in the first place
+ * can be made earlier.
+ */
+void ima_update_policy_flag(void)
+{
+ struct ima_rule_entry *entry;
+
+ ima_policy_flag = 0;
+ list_for_each_entry(entry, ima_rules, list) {
+ if (entry->action & IMA_DO_MASK)
+ ima_policy_flag |= entry->action;
+ }
+
+ if (!ima_appraise)
+ ima_policy_flag &= ~IMA_APPRAISE;
+}
+
/**
* ima_init_policy - initialize the default measure rules.
*
@@ -341,6 +363,7 @@ void ima_update_policy(void)
if (ima_rules == &ima_default_rules) {
ima_rules = &ima_policy_rules;
+ ima_update_policy_flag();
cause = "complete";
result = 0;
}
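Hedged sketch (hypothetical hook) of what the cached flag buys: hooks can now bail out before taking the inode mutex or allocating an iint when no loaded rule could possibly apply, the same short-circuit process_measurement() gains in ima_main.c:

static int example_ima_hook(struct file *file)
{
	/* ima_policy_flag is the OR of the IMA_DO_MASK bits of every
	 * loaded rule, minus IMA_APPRAISE when appraisal is disabled */
	if (!ima_policy_flag || !S_ISREG(file_inode(file)->i_mode))
		return 0;

	/* ... full measure/appraise/audit path ... */
	return 0;
}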
diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c
index a076a967ec47..e854862c9337 100644
--- a/security/integrity/ima/ima_template.c
+++ b/security/integrity/ima/ima_template.c
@@ -152,24 +152,6 @@ out:
return result;
}
-static int init_defined_templates(void)
-{
- int i = 0;
- int result = 0;
-
- /* Init defined templates. */
- for (i = 0; i < ARRAY_SIZE(defined_templates); i++) {
- struct ima_template_desc *template = &defined_templates[i];
-
- result = template_desc_init_fields(template->fmt,
- &(template->fields),
- &(template->num_fields));
- if (result < 0)
- return result;
- }
- return result;
-}
-
struct ima_template_desc *ima_template_desc_current(void)
{
if (!ima_template)
@@ -178,13 +160,11 @@ struct ima_template_desc *ima_template_desc_current(void)
return ima_template;
}
-int ima_init_template(void)
+int __init ima_init_template(void)
{
- int result;
-
- result = init_defined_templates();
- if (result < 0)
- return result;
+ struct ima_template_desc *template = ima_template_desc_current();
- return 0;
+ return template_desc_init_fields(template->fmt,
+ &(template->fields),
+ &(template->num_fields));
}
diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
index 19b8e314ca96..c0379d13dbe1 100644
--- a/security/integrity/integrity.h
+++ b/security/integrity/integrity.h
@@ -31,6 +31,7 @@
#define IMA_DIGSIG 0x01000000
#define IMA_DIGSIG_REQUIRED 0x02000000
#define IMA_PERMIT_DIRECTIO 0x04000000
+#define IMA_NEW_FILE 0x08000000
#define IMA_DO_MASK (IMA_MEASURE | IMA_APPRAISE | IMA_AUDIT | \
IMA_APPRAISE_SUBMASK)
@@ -116,7 +117,6 @@ struct integrity_iint_cache {
/* rbtree tree calls to lookup, insert, delete
* integrity data associated with an inode.
*/
-struct integrity_iint_cache *integrity_iint_insert(struct inode *inode);
struct integrity_iint_cache *integrity_iint_find(struct inode *inode);
#define INTEGRITY_KEYRING_EVM 0
diff --git a/security/keys/big_key.c b/security/keys/big_key.c
index c2f91a0cf889..b6adb94f6d52 100644
--- a/security/keys/big_key.c
+++ b/security/keys/big_key.c
@@ -33,11 +33,9 @@ MODULE_LICENSE("GPL");
*/
struct key_type key_type_big_key = {
.name = "big_key",
- .def_lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
.preparse = big_key_preparse,
.free_preparse = big_key_free_preparse,
.instantiate = generic_key_instantiate,
- .match = user_match,
.revoke = big_key_revoke,
.destroy = big_key_destroy,
.describe = big_key_describe,
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index 5fe443d120af..db9675db1026 100644
--- a/security/keys/encrypted-keys/encrypted.c
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -970,7 +970,6 @@ struct key_type key_type_encrypted = {
.name = "encrypted",
.instantiate = encrypted_instantiate,
.update = encrypted_update,
- .match = user_match,
.destroy = encrypted_destroy,
.describe = user_describe,
.read = encrypted_read,
diff --git a/security/keys/internal.h b/security/keys/internal.h
index 5f20da01fd8d..b8960c4959a5 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -107,20 +107,16 @@ extern int iterate_over_keyring(const struct key *keyring,
int (*func)(const struct key *key, void *data),
void *data);
-typedef int (*key_match_func_t)(const struct key *, const void *);
-
struct keyring_search_context {
struct keyring_index_key index_key;
const struct cred *cred;
- key_match_func_t match;
- const void *match_data;
+ struct key_match_data match_data;
unsigned flags;
-#define KEYRING_SEARCH_LOOKUP_TYPE 0x0001 /* [as type->def_lookup_type] */
-#define KEYRING_SEARCH_NO_STATE_CHECK 0x0002 /* Skip state checks */
-#define KEYRING_SEARCH_DO_STATE_CHECK 0x0004 /* Override NO_STATE_CHECK */
-#define KEYRING_SEARCH_NO_UPDATE_TIME 0x0008 /* Don't update times */
-#define KEYRING_SEARCH_NO_CHECK_PERM 0x0010 /* Don't check permissions */
-#define KEYRING_SEARCH_DETECT_TOO_DEEP 0x0020 /* Give an error on excessive depth */
+#define KEYRING_SEARCH_NO_STATE_CHECK 0x0001 /* Skip state checks */
+#define KEYRING_SEARCH_DO_STATE_CHECK 0x0002 /* Override NO_STATE_CHECK */
+#define KEYRING_SEARCH_NO_UPDATE_TIME 0x0004 /* Don't update times */
+#define KEYRING_SEARCH_NO_CHECK_PERM 0x0008 /* Don't check permissions */
+#define KEYRING_SEARCH_DETECT_TOO_DEEP 0x0010 /* Give an error on excessive depth */
int (*iterator)(const void *object, void *iterator_data);
@@ -131,6 +127,8 @@ struct keyring_search_context {
struct timespec now;
};
+extern bool key_default_cmp(const struct key *key,
+ const struct key_match_data *match_data);
extern key_ref_t keyring_search_aux(key_ref_t keyring_ref,
struct keyring_search_context *ctx);
@@ -152,7 +150,8 @@ extern struct key *request_key_and_link(struct key_type *type,
struct key *dest_keyring,
unsigned long flags);
-extern int lookup_user_key_possessed(const struct key *key, const void *target);
+extern bool lookup_user_key_possessed(const struct key *key,
+ const struct key_match_data *match_data);
extern key_ref_t lookup_user_key(key_serial_t id, unsigned long flags,
key_perm_t perm);
#define KEY_LOOKUP_CREATE 0x01
diff --git a/security/keys/key.c b/security/keys/key.c
index 6d0cad16f002..e17ba6aefdc0 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -799,7 +799,7 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
}
key_ref = ERR_PTR(-EINVAL);
- if (!index_key.type->match || !index_key.type->instantiate ||
+ if (!index_key.type->instantiate ||
(!index_key.description && !index_key.type->preparse))
goto error_put_type;
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index e26f860e5f2e..eff88a5f5d40 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -37,6 +37,8 @@ static int key_get_type_from_user(char *type,
return ret;
if (ret == 0 || ret >= len)
return -EINVAL;
+ if (type[0] == '.')
+ return -EPERM;
type[len - 1] = '\0';
return 0;
}
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 8314a7d2104d..8177010174f7 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -89,7 +89,6 @@ struct key_type key_type_keyring = {
.preparse = keyring_preparse,
.free_preparse = keyring_free_preparse,
.instantiate = keyring_instantiate,
- .match = user_match,
.revoke = keyring_revoke,
.destroy = keyring_destroy,
.describe = keyring_describe,
@@ -512,6 +511,15 @@ struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid,
EXPORT_SYMBOL(keyring_alloc);
/*
* By default, keys are matched by an exact comparison of their descriptions.
+ */
+bool key_default_cmp(const struct key *key,
+ const struct key_match_data *match_data)
+{
+ return strcmp(key->description, match_data->raw_data) == 0;
+}
+
+/*
* Iteration function to consider each key found.
*/
static int keyring_search_iterator(const void *object, void *iterator_data)
@@ -545,7 +553,7 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
}
/* keys that don't match */
- if (!ctx->match(key, ctx->match_data)) {
+ if (!ctx->match_data.cmp(key, &ctx->match_data)) {
kleave(" = 0 [!match]");
return 0;
}
@@ -585,8 +593,7 @@ skipped:
*/
static int search_keyring(struct key *keyring, struct keyring_search_context *ctx)
{
- if ((ctx->flags & KEYRING_SEARCH_LOOKUP_TYPE) ==
- KEYRING_SEARCH_LOOKUP_DIRECT) {
+ if (ctx->match_data.lookup_type == KEYRING_SEARCH_LOOKUP_DIRECT) {
const void *object;
object = assoc_array_find(&keyring->keys,
@@ -627,7 +634,7 @@ static bool search_nested_keyrings(struct key *keyring,
/* Check to see if this top-level keyring is what we are looking for
* and whether it is valid or not.
*/
- if (ctx->flags & KEYRING_SEARCH_LOOKUP_ITERATE ||
+ if (ctx->match_data.lookup_type == KEYRING_SEARCH_LOOKUP_ITERATE ||
keyring_compare_object(keyring, &ctx->index_key)) {
ctx->skipped_ret = 2;
ctx->flags |= KEYRING_SEARCH_DO_STATE_CHECK;
@@ -885,16 +892,25 @@ key_ref_t keyring_search(key_ref_t keyring,
.index_key.type = type,
.index_key.description = description,
.cred = current_cred(),
- .match = type->match,
- .match_data = description,
- .flags = (type->def_lookup_type |
- KEYRING_SEARCH_DO_STATE_CHECK),
+ .match_data.cmp = key_default_cmp,
+ .match_data.raw_data = description,
+ .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+ .flags = KEYRING_SEARCH_DO_STATE_CHECK,
};
+ key_ref_t key;
+ int ret;
- if (!ctx.match)
- return ERR_PTR(-ENOKEY);
+ if (type->match_preparse) {
+ ret = type->match_preparse(&ctx.match_data);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ }
- return keyring_search_aux(keyring, &ctx);
+ key = keyring_search_aux(keyring, &ctx);
+
+ if (type->match_free)
+ type->match_free(&ctx.match_data);
+ return key;
}
EXPORT_SYMBOL(keyring_search);
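Hedged caller sketch (the keyring and description are hypothetical): external users of keyring_search() are unchanged by this rework; a type without .match_preparse, such as key_type_user after the hunk below, goes through key_default_cmp transparently:

key_ref_t ref;

ref = keyring_search(make_key_ref(example_keyring, true),
		     &key_type_user, "example:desc");
if (IS_ERR(ref))
	return PTR_ERR(ref);
key_put(key_ref_to_ptr(ref));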
@@ -1014,7 +1030,7 @@ static int keyring_detect_cycle_iterator(const void *object,
/* We might get a keyring with matching index-key that is nonetheless a
* different keyring. */
- if (key != ctx->match_data)
+ if (key != ctx->match_data.raw_data)
return 0;
ctx->result = ERR_PTR(-EDEADLK);
@@ -1031,14 +1047,14 @@ static int keyring_detect_cycle_iterator(const void *object,
static int keyring_detect_cycle(struct key *A, struct key *B)
{
struct keyring_search_context ctx = {
- .index_key = A->index_key,
- .match_data = A,
- .iterator = keyring_detect_cycle_iterator,
- .flags = (KEYRING_SEARCH_LOOKUP_DIRECT |
- KEYRING_SEARCH_NO_STATE_CHECK |
- KEYRING_SEARCH_NO_UPDATE_TIME |
- KEYRING_SEARCH_NO_CHECK_PERM |
- KEYRING_SEARCH_DETECT_TOO_DEEP),
+ .index_key = A->index_key,
+ .match_data.raw_data = A,
+ .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+ .iterator = keyring_detect_cycle_iterator,
+ .flags = (KEYRING_SEARCH_NO_STATE_CHECK |
+ KEYRING_SEARCH_NO_UPDATE_TIME |
+ KEYRING_SEARCH_NO_CHECK_PERM |
+ KEYRING_SEARCH_DETECT_TOO_DEEP),
};
rcu_read_lock();
diff --git a/security/keys/proc.c b/security/keys/proc.c
index d3f6f2fd21db..972eeb336b81 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -194,10 +194,10 @@ static int proc_keys_show(struct seq_file *m, void *v)
.index_key.type = key->type,
.index_key.description = key->description,
.cred = current_cred(),
- .match = lookup_user_key_possessed,
- .match_data = key,
- .flags = (KEYRING_SEARCH_NO_STATE_CHECK |
- KEYRING_SEARCH_LOOKUP_DIRECT),
+ .match_data.cmp = lookup_user_key_possessed,
+ .match_data.raw_data = key,
+ .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+ .flags = KEYRING_SEARCH_NO_STATE_CHECK,
};
key_ref = make_key_ref(key, 0);
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 0cf8a130a267..bd536cb221e2 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -489,9 +489,10 @@ found:
/*
* See if the key we're looking at is the target key.
*/
-int lookup_user_key_possessed(const struct key *key, const void *target)
+bool lookup_user_key_possessed(const struct key *key,
+ const struct key_match_data *match_data)
{
- return key == target;
+ return key == match_data->raw_data;
}
/*
@@ -516,9 +517,9 @@ key_ref_t lookup_user_key(key_serial_t id, unsigned long lflags,
key_perm_t perm)
{
struct keyring_search_context ctx = {
- .match = lookup_user_key_possessed,
- .flags = (KEYRING_SEARCH_NO_STATE_CHECK |
- KEYRING_SEARCH_LOOKUP_DIRECT),
+ .match_data.cmp = lookup_user_key_possessed,
+ .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+ .flags = KEYRING_SEARCH_NO_STATE_CHECK,
};
struct request_key_auth *rka;
struct key *key;
@@ -673,7 +674,7 @@ try_again:
ctx.index_key.type = key->type;
ctx.index_key.description = key->description;
ctx.index_key.desc_len = strlen(key->description);
- ctx.match_data = key;
+ ctx.match_data.raw_data = key;
kdebug("check possessed");
skey_ref = search_process_keyrings(&ctx);
kdebug("possessed=%p", skey_ref);
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 26a94f18af94..bb4337c7ae1b 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -513,9 +513,9 @@ struct key *request_key_and_link(struct key_type *type,
.index_key.type = type,
.index_key.description = description,
.cred = current_cred(),
- .match = type->match,
- .match_data = description,
- .flags = KEYRING_SEARCH_LOOKUP_DIRECT,
+ .match_data.cmp = key_default_cmp,
+ .match_data.raw_data = description,
+ .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
};
struct key *key;
key_ref_t key_ref;
@@ -525,6 +525,14 @@ struct key *request_key_and_link(struct key_type *type,
ctx.index_key.type->name, ctx.index_key.description,
callout_info, callout_len, aux, dest_keyring, flags);
+ if (type->match_preparse) {
+ ret = type->match_preparse(&ctx.match_data);
+ if (ret < 0) {
+ key = ERR_PTR(ret);
+ goto error;
+ }
+ }
+
/* search all the process keyrings for a key */
key_ref = search_process_keyrings(&ctx);
@@ -537,7 +545,7 @@ struct key *request_key_and_link(struct key_type *type,
if (ret < 0) {
key_put(key);
key = ERR_PTR(ret);
- goto error;
+ goto error_free;
}
}
} else if (PTR_ERR(key_ref) != -EAGAIN) {
@@ -547,12 +555,15 @@ struct key *request_key_and_link(struct key_type *type,
* should consult userspace if we can */
key = ERR_PTR(-ENOKEY);
if (!callout_info)
- goto error;
+ goto error_free;
key = construct_key_and_link(&ctx, callout_info, callout_len,
aux, dest_keyring, flags);
}
+error_free:
+ if (type->match_free)
+ type->match_free(&ctx.match_data);
error:
kleave(" = %p", key);
return key;
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index 842e6f410d50..6639e2cb8853 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -44,12 +44,12 @@ struct key_type key_type_request_key_auth = {
.read = request_key_auth_read,
};
-int request_key_auth_preparse(struct key_preparsed_payload *prep)
+static int request_key_auth_preparse(struct key_preparsed_payload *prep)
{
return 0;
}
-void request_key_auth_free_preparse(struct key_preparsed_payload *prep)
+static void request_key_auth_free_preparse(struct key_preparsed_payload *prep)
{
}
@@ -246,9 +246,9 @@ struct key *key_get_instantiation_authkey(key_serial_t target_id)
.index_key.type = &key_type_request_key_auth,
.index_key.description = description,
.cred = current_cred(),
- .match = user_match,
- .match_data = description,
- .flags = KEYRING_SEARCH_LOOKUP_DIRECT,
+ .match_data.cmp = key_default_cmp,
+ .match_data.raw_data = description,
+ .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
};
struct key *authkey;
key_ref_t authkey_ref;
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
index 6b804aa4529a..c0594cb07ada 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted.c
@@ -1096,7 +1096,6 @@ struct key_type key_type_trusted = {
.name = "trusted",
.instantiate = trusted_instantiate,
.update = trusted_update,
- .match = user_match,
.destroy = trusted_destroy,
.describe = user_describe,
.read = trusted_read,
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
index eee340011f2b..36b47bbd3d8c 100644
--- a/security/keys/user_defined.c
+++ b/security/keys/user_defined.c
@@ -26,12 +26,10 @@ static int logon_vet_description(const char *desc);
*/
struct key_type key_type_user = {
.name = "user",
- .def_lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
.preparse = user_preparse,
.free_preparse = user_free_preparse,
.instantiate = generic_key_instantiate,
.update = user_update,
- .match = user_match,
.revoke = user_revoke,
.destroy = user_destroy,
.describe = user_describe,
@@ -48,12 +46,10 @@ EXPORT_SYMBOL_GPL(key_type_user);
*/
struct key_type key_type_logon = {
.name = "logon",
- .def_lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
.preparse = user_preparse,
.free_preparse = user_free_preparse,
.instantiate = generic_key_instantiate,
.update = user_update,
- .match = user_match,
.revoke = user_revoke,
.destroy = user_destroy,
.describe = user_describe,
@@ -139,16 +135,6 @@ error:
EXPORT_SYMBOL_GPL(user_update);
/*
- * match users on their name
- */
-int user_match(const struct key *key, const void *description)
-{
- return strcmp(key->description, description) == 0;
-}
-
-EXPORT_SYMBOL_GPL(user_match);
-
-/*
* dispose of the links from a revoked keyring
* - called with the key sem write-locked
*/
diff --git a/security/security.c b/security/security.c
index e41b1a8d7644..18b35c63fc0c 100644
--- a/security/security.c
+++ b/security/security.c
@@ -775,9 +775,9 @@ int security_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
return security_ops->file_fcntl(file, cmd, arg);
}
-int security_file_set_fowner(struct file *file)
+void security_file_set_fowner(struct file *file)
{
- return security_ops->file_set_fowner(file);
+ security_ops->file_set_fowner(file);
}
int security_file_send_sigiotask(struct task_struct *tsk,
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index b0e940497e23..8426a2aa8dce 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -2097,6 +2097,41 @@ static int selinux_vm_enough_memory(struct mm_struct *mm, long pages)
/* binprm security operations */
+static int check_nnp_nosuid(const struct linux_binprm *bprm,
+ const struct task_security_struct *old_tsec,
+ const struct task_security_struct *new_tsec)
+{
+ int nnp = (bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS);
+ int nosuid = (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID);
+ int rc;
+
+ if (!nnp && !nosuid)
+ return 0; /* neither NNP nor nosuid */
+
+ if (new_tsec->sid == old_tsec->sid)
+ return 0; /* No change in credentials */
+
+ /*
+ * The only transitions we permit under NNP or nosuid
+ * are transitions to bounded SIDs, i.e. SIDs that are
+ * guaranteed to only be allowed a subset of the permissions
+ * of the current SID.
+ */
+ rc = security_bounded_transition(old_tsec->sid, new_tsec->sid);
+ if (rc) {
+ /*
+ * On failure, preserve the errno values for NNP vs nosuid.
+ * NNP: Operation not permitted for caller.
+ * nosuid: Permission denied to file.
+ */
+ if (nnp)
+ return -EPERM;
+ else
+ return -EACCES;
+ }
+ return 0;
+}
+
static int selinux_bprm_set_creds(struct linux_binprm *bprm)
{
const struct task_security_struct *old_tsec;
@@ -2133,14 +2168,10 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm)
/* Reset exec SID on execve. */
new_tsec->exec_sid = 0;
- /*
- * Minimize confusion: if no_new_privs or nosuid and a
- * transition is explicitly requested, then fail the exec.
- */
- if (bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS)
- return -EPERM;
- if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
- return -EACCES;
+ /* Fail on NNP or nosuid if not an allowed transition. */
+ rc = check_nnp_nosuid(bprm, old_tsec, new_tsec);
+ if (rc)
+ return rc;
} else {
/* Check for a default transition on this program. */
rc = security_transition_sid(old_tsec->sid, isec->sid,
@@ -2148,15 +2179,19 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm)
&new_tsec->sid);
if (rc)
return rc;
+
+ /*
+ * Fallback to old SID on NNP or nosuid if not an allowed
+ * transition.
+ */
+ rc = check_nnp_nosuid(bprm, old_tsec, new_tsec);
+ if (rc)
+ new_tsec->sid = old_tsec->sid;
}
ad.type = LSM_AUDIT_DATA_PATH;
ad.u.path = bprm->file->f_path;
- if ((bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) ||
- (bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS))
- new_tsec->sid = old_tsec->sid;
-
if (new_tsec->sid == old_tsec->sid) {
rc = avc_has_perm(old_tsec->sid, isec->sid,
SECCLASS_FILE, FILE__EXECUTE_NO_TRANS, &ad);
@@ -3346,14 +3381,12 @@ static int selinux_file_fcntl(struct file *file, unsigned int cmd,
return err;
}
-static int selinux_file_set_fowner(struct file *file)
+static void selinux_file_set_fowner(struct file *file)
{
struct file_security_struct *fsec;
fsec = file->f_security;
fsec->fown_sid = current_sid();
-
- return 0;
}
static int selinux_file_send_sigiotask(struct task_struct *tsk,
@@ -4272,15 +4305,15 @@ static int selinux_socket_unix_may_send(struct socket *sock,
&ad);
}
-static int selinux_inet_sys_rcv_skb(int ifindex, char *addrp, u16 family,
- u32 peer_sid,
+static int selinux_inet_sys_rcv_skb(struct net *ns, int ifindex,
+ char *addrp, u16 family, u32 peer_sid,
struct common_audit_data *ad)
{
int err;
u32 if_sid;
u32 node_sid;
- err = sel_netif_sid(ifindex, &if_sid);
+ err = sel_netif_sid(ns, ifindex, &if_sid);
if (err)
return err;
err = avc_has_perm(peer_sid, if_sid,
@@ -4373,8 +4406,8 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
err = selinux_skb_peerlbl_sid(skb, family, &peer_sid);
if (err)
return err;
- err = selinux_inet_sys_rcv_skb(skb->skb_iif, addrp, family,
- peer_sid, &ad);
+ err = selinux_inet_sys_rcv_skb(sock_net(sk), skb->skb_iif,
+ addrp, family, peer_sid, &ad);
if (err) {
selinux_netlbl_err(skb, err, 0);
return err;
@@ -4692,10 +4725,9 @@ static int selinux_nlmsg_perm(struct sock *sk, struct sk_buff *skb)
err = selinux_nlmsg_lookup(sksec->sclass, nlh->nlmsg_type, &perm);
if (err) {
if (err == -EINVAL) {
- audit_log(current->audit_context, GFP_KERNEL, AUDIT_SELINUX_ERR,
- "SELinux: unrecognized netlink message"
- " type=%hu for sclass=%hu\n",
- nlh->nlmsg_type, sksec->sclass);
+ WARN_ONCE(1, "selinux_nlmsg_perm: unrecognized netlink message:"
+ " protocol=%hu nlmsg_type=%hu sclass=%hu\n",
+ sk->sk_protocol, nlh->nlmsg_type, sksec->sclass);
if (!selinux_enforcing || security_get_allow_unknown())
err = 0;
}
@@ -4713,7 +4745,8 @@ out:
#ifdef CONFIG_NETFILTER
-static unsigned int selinux_ip_forward(struct sk_buff *skb, int ifindex,
+static unsigned int selinux_ip_forward(struct sk_buff *skb,
+ const struct net_device *indev,
u16 family)
{
int err;
@@ -4739,14 +4772,14 @@ static unsigned int selinux_ip_forward(struct sk_buff *skb, int ifindex,
ad.type = LSM_AUDIT_DATA_NET;
ad.u.net = &net;
- ad.u.net->netif = ifindex;
+ ad.u.net->netif = indev->ifindex;
ad.u.net->family = family;
if (selinux_parse_skb(skb, &ad, &addrp, 1, NULL) != 0)
return NF_DROP;
if (peerlbl_active) {
- err = selinux_inet_sys_rcv_skb(ifindex, addrp, family,
- peer_sid, &ad);
+ err = selinux_inet_sys_rcv_skb(dev_net(indev), indev->ifindex,
+ addrp, family, peer_sid, &ad);
if (err) {
selinux_netlbl_err(skb, err, 1);
return NF_DROP;
@@ -4775,7 +4808,7 @@ static unsigned int selinux_ipv4_forward(const struct nf_hook_ops *ops,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
- return selinux_ip_forward(skb, in->ifindex, PF_INET);
+ return selinux_ip_forward(skb, in, PF_INET);
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -4785,7 +4818,7 @@ static unsigned int selinux_ipv6_forward(const struct nf_hook_ops *ops,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
- return selinux_ip_forward(skb, in->ifindex, PF_INET6);
+ return selinux_ip_forward(skb, in, PF_INET6);
}
#endif /* IPV6 */
@@ -4873,11 +4906,13 @@ static unsigned int selinux_ip_postroute_compat(struct sk_buff *skb,
return NF_ACCEPT;
}
-static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
+static unsigned int selinux_ip_postroute(struct sk_buff *skb,
+ const struct net_device *outdev,
u16 family)
{
u32 secmark_perm;
u32 peer_sid;
+ int ifindex = outdev->ifindex;
struct sock *sk;
struct common_audit_data ad;
struct lsm_network_audit net = {0,};
@@ -4958,6 +4993,7 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
case PF_INET6:
if (IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED)
return NF_ACCEPT;
+ break;
default:
return NF_DROP_ERR(-ECONNREFUSED);
}
@@ -4989,7 +5025,7 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
u32 if_sid;
u32 node_sid;
- if (sel_netif_sid(ifindex, &if_sid))
+ if (sel_netif_sid(dev_net(outdev), ifindex, &if_sid))
return NF_DROP;
if (avc_has_perm(peer_sid, if_sid,
SECCLASS_NETIF, NETIF__EGRESS, &ad))
@@ -5011,7 +5047,7 @@ static unsigned int selinux_ipv4_postroute(const struct nf_hook_ops *ops,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
- return selinux_ip_postroute(skb, out->ifindex, PF_INET);
+ return selinux_ip_postroute(skb, out, PF_INET);
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -5021,7 +5057,7 @@ static unsigned int selinux_ipv6_postroute(const struct nf_hook_ops *ops,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
- return selinux_ip_postroute(skb, out->ifindex, PF_INET6);
+ return selinux_ip_postroute(skb, out, PF_INET6);
}
#endif /* IPV6 */
@@ -6035,7 +6071,7 @@ security_initcall(selinux_init);
#if defined(CONFIG_NETFILTER)
-static struct nf_hook_ops selinux_ipv4_ops[] = {
+static struct nf_hook_ops selinux_nf_ops[] = {
{
.hook = selinux_ipv4_postroute,
.owner = THIS_MODULE,
@@ -6056,12 +6092,8 @@ static struct nf_hook_ops selinux_ipv4_ops[] = {
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP_PRI_SELINUX_FIRST,
- }
-};
-
+ },
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-
-static struct nf_hook_ops selinux_ipv6_ops[] = {
{
.hook = selinux_ipv6_postroute,
.owner = THIS_MODULE,
@@ -6075,32 +6107,24 @@ static struct nf_hook_ops selinux_ipv6_ops[] = {
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_FORWARD,
.priority = NF_IP6_PRI_SELINUX_FIRST,
- }
-};
-
+ },
#endif /* IPV6 */
+};
static int __init selinux_nf_ip_init(void)
{
- int err = 0;
+ int err;
if (!selinux_enabled)
- goto out;
+ return 0;
printk(KERN_DEBUG "SELinux: Registering netfilter hooks\n");
- err = nf_register_hooks(selinux_ipv4_ops, ARRAY_SIZE(selinux_ipv4_ops));
+ err = nf_register_hooks(selinux_nf_ops, ARRAY_SIZE(selinux_nf_ops));
if (err)
- panic("SELinux: nf_register_hooks for IPv4: error %d\n", err);
+ panic("SELinux: nf_register_hooks: error %d\n", err);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
- err = nf_register_hooks(selinux_ipv6_ops, ARRAY_SIZE(selinux_ipv6_ops));
- if (err)
- panic("SELinux: nf_register_hooks for IPv6: error %d\n", err);
-#endif /* IPV6 */
-
-out:
- return err;
+ return 0;
}
__initcall(selinux_nf_ip_init);
@@ -6110,10 +6134,7 @@ static void selinux_nf_ip_exit(void)
{
printk(KERN_DEBUG "SELinux: Unregistering netfilter hooks\n");
- nf_unregister_hooks(selinux_ipv4_ops, ARRAY_SIZE(selinux_ipv4_ops));
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
- nf_unregister_hooks(selinux_ipv6_ops, ARRAY_SIZE(selinux_ipv6_ops));
-#endif /* IPV6 */
+ nf_unregister_hooks(selinux_nf_ops, ARRAY_SIZE(selinux_nf_ops));
}
#endif
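
The hooks.c changes above relax the earlier blanket refusal: under no_new_privs or nosuid a domain transition is now allowed when the new SID is bounded by the old one, an explicitly requested but unbounded transition still fails (-EPERM for NNP, -EACCES for nosuid), and an unbounded default transition silently falls back to the old SID. A condensed standalone restatement of that decision flow, with a stubbed bounded check standing in for security_bounded_transition():

/* Hedged sketch of the NNP/nosuid decision flow above; the bounded
 * check is a stub, not SELinux's security_bounded_transition(). */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Stub: pretend SID 200 is bounded by SID 100, nothing else is. */
static bool bounded(unsigned old_sid, unsigned new_sid)
{
	return old_sid == 100 && new_sid == 200;
}

static int check_nnp_nosuid(bool nnp, bool nosuid,
			    unsigned old_sid, unsigned new_sid)
{
	if (!nnp && !nosuid)
		return 0;               /* neither restriction applies */
	if (new_sid == old_sid)
		return 0;               /* no change in credentials */
	if (bounded(old_sid, new_sid))
		return 0;               /* subset of current permissions */
	return nnp ? -EPERM : -EACCES;  /* keep the historical errnos */
}

int main(void)
{
	unsigned old_sid = 100, new_sid = 300;

	/* Explicitly requested transition: propagate the error. */
	int rc = check_nnp_nosuid(true, false, old_sid, new_sid);
	printf("explicit: rc=%d\n", rc);

	/* Default transition: fall back to the old SID instead. */
	if (check_nnp_nosuid(true, false, old_sid, new_sid))
		new_sid = old_sid;
	printf("default: new_sid=%u\n", new_sid);
	return 0;
}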
diff --git a/security/selinux/include/netif.h b/security/selinux/include/netif.h
index 57c6eae81eac..c72145444090 100644
--- a/security/selinux/include/netif.h
+++ b/security/selinux/include/netif.h
@@ -17,9 +17,11 @@
#ifndef _SELINUX_NETIF_H_
#define _SELINUX_NETIF_H_
+#include <net/net_namespace.h>
+
void sel_netif_flush(void);
-int sel_netif_sid(int ifindex, u32 *sid);
+int sel_netif_sid(struct net *ns, int ifindex, u32 *sid);
#endif /* _SELINUX_NETIF_H_ */
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index 078e553f52f2..81fa718d5cb3 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -24,6 +24,7 @@
#include <linux/binfmts.h>
#include <linux/in.h>
#include <linux/spinlock.h>
+#include <net/net_namespace.h>
#include "flask.h"
#include "avc.h"
@@ -78,6 +79,7 @@ struct ipc_security_struct {
};
struct netif_security_struct {
+ struct net *ns; /* network namespace */
int ifindex; /* device index */
u32 sid; /* SID for this interface */
};
diff --git a/security/selinux/netif.c b/security/selinux/netif.c
index 50ce177d71a0..e607b4473ef6 100644
--- a/security/selinux/netif.c
+++ b/security/selinux/netif.c
@@ -45,6 +45,7 @@ static struct list_head sel_netif_hash[SEL_NETIF_HASH_SIZE];
/**
* sel_netif_hashfn - Hashing function for the interface table
+ * @ns: the network namespace
* @ifindex: the network interface
*
* Description:
@@ -52,13 +53,14 @@ static struct list_head sel_netif_hash[SEL_NETIF_HASH_SIZE];
* bucket number for the given interface.
*
*/
-static inline u32 sel_netif_hashfn(int ifindex)
+static inline u32 sel_netif_hashfn(const struct net *ns, int ifindex)
{
- return (ifindex & (SEL_NETIF_HASH_SIZE - 1));
+ return (((uintptr_t)ns + ifindex) & (SEL_NETIF_HASH_SIZE - 1));
}
/**
* sel_netif_find - Search for an interface record
+ * @ns: the network namespace
* @ifindex: the network interface
*
* Description:
@@ -66,15 +68,15 @@ static inline u32 sel_netif_hashfn(int ifindex)
* If an entry can not be found in the table return NULL.
*
*/
-static inline struct sel_netif *sel_netif_find(int ifindex)
+static inline struct sel_netif *sel_netif_find(const struct net *ns,
+ int ifindex)
{
- int idx = sel_netif_hashfn(ifindex);
+ int idx = sel_netif_hashfn(ns, ifindex);
struct sel_netif *netif;
list_for_each_entry_rcu(netif, &sel_netif_hash[idx], list)
- /* all of the devices should normally fit in the hash, so we
- * optimize for that case */
- if (likely(netif->nsec.ifindex == ifindex))
+ if (net_eq(netif->nsec.ns, ns) &&
+ netif->nsec.ifindex == ifindex)
return netif;
return NULL;
@@ -96,7 +98,7 @@ static int sel_netif_insert(struct sel_netif *netif)
if (sel_netif_total >= SEL_NETIF_HASH_MAX)
return -ENOSPC;
- idx = sel_netif_hashfn(netif->nsec.ifindex);
+ idx = sel_netif_hashfn(netif->nsec.ns, netif->nsec.ifindex);
list_add_rcu(&netif->list, &sel_netif_hash[idx]);
sel_netif_total++;
@@ -120,6 +122,7 @@ static void sel_netif_destroy(struct sel_netif *netif)
/**
* sel_netif_sid_slow - Lookup the SID of a network interface using the policy
+ * @ns: the network namespace
* @ifindex: the network interface
* @sid: interface SID
*
@@ -130,7 +133,7 @@ static void sel_netif_destroy(struct sel_netif *netif)
* failure.
*
*/
-static int sel_netif_sid_slow(int ifindex, u32 *sid)
+static int sel_netif_sid_slow(struct net *ns, int ifindex, u32 *sid)
{
int ret;
struct sel_netif *netif;
@@ -140,7 +143,7 @@ static int sel_netif_sid_slow(int ifindex, u32 *sid)
/* NOTE: we always use init's network namespace since we don't
* currently support containers */
- dev = dev_get_by_index(&init_net, ifindex);
+ dev = dev_get_by_index(ns, ifindex);
if (unlikely(dev == NULL)) {
printk(KERN_WARNING
"SELinux: failure in sel_netif_sid_slow(),"
@@ -149,7 +152,7 @@ static int sel_netif_sid_slow(int ifindex, u32 *sid)
}
spin_lock_bh(&sel_netif_lock);
- netif = sel_netif_find(ifindex);
+ netif = sel_netif_find(ns, ifindex);
if (netif != NULL) {
*sid = netif->nsec.sid;
ret = 0;
@@ -163,6 +166,7 @@ static int sel_netif_sid_slow(int ifindex, u32 *sid)
ret = security_netif_sid(dev->name, &new->nsec.sid);
if (ret != 0)
goto out;
+ new->nsec.ns = ns;
new->nsec.ifindex = ifindex;
ret = sel_netif_insert(new);
if (ret != 0)
@@ -184,6 +188,7 @@ out:
/**
* sel_netif_sid - Lookup the SID of a network interface
+ * @ns: the network namespace
* @ifindex: the network interface
* @sid: interface SID
*
@@ -195,12 +200,12 @@ out:
* on failure.
*
*/
-int sel_netif_sid(int ifindex, u32 *sid)
+int sel_netif_sid(struct net *ns, int ifindex, u32 *sid)
{
struct sel_netif *netif;
rcu_read_lock();
- netif = sel_netif_find(ifindex);
+ netif = sel_netif_find(ns, ifindex);
if (likely(netif != NULL)) {
*sid = netif->nsec.sid;
rcu_read_unlock();
@@ -208,11 +213,12 @@ int sel_netif_sid(int ifindex, u32 *sid)
}
rcu_read_unlock();
- return sel_netif_sid_slow(ifindex, sid);
+ return sel_netif_sid_slow(ns, ifindex, sid);
}
/**
* sel_netif_kill - Remove an entry from the network interface table
+ * @ns: the network namespace
* @ifindex: the network interface
*
* Description:
@@ -220,13 +226,13 @@ int sel_netif_sid(int ifindex, u32 *sid)
* table if it exists.
*
*/
-static void sel_netif_kill(int ifindex)
+static void sel_netif_kill(const struct net *ns, int ifindex)
{
struct sel_netif *netif;
rcu_read_lock();
spin_lock_bh(&sel_netif_lock);
- netif = sel_netif_find(ifindex);
+ netif = sel_netif_find(ns, ifindex);
if (netif)
sel_netif_destroy(netif);
spin_unlock_bh(&sel_netif_lock);
@@ -257,11 +263,8 @@ static int sel_netif_netdev_notifier_handler(struct notifier_block *this,
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- if (dev_net(dev) != &init_net)
- return NOTIFY_DONE;
-
if (event == NETDEV_DOWN)
- sel_netif_kill(dev->ifindex);
+ sel_netif_kill(dev_net(dev), dev->ifindex);
return NOTIFY_DONE;
}
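
With the namespace folded into both the hash and the lookup key, the same ifindex seen in two different network namespaces no longer aliases to one cache entry. A small standalone illustration of the bucket selection and the two-field match (the struct here is a stand-in, not the kernel's sel_netif):

/* Hedged sketch of the (namespace, ifindex) keyed cache above;
 * 'struct ns' is a placeholder for struct net. */
#include <stdint.h>
#include <stdio.h>

#define HASH_SIZE 64   /* must stay a power of two for the mask below */

struct ns { int dummy; };

static unsigned hashfn(const struct ns *ns, int ifindex)
{
	/* Same recipe as sel_netif_hashfn(): fold the namespace
	 * pointer into the bucket index alongside the ifindex. */
	return ((uintptr_t)ns + ifindex) & (HASH_SIZE - 1);
}

struct entry {
	const struct ns *ns;
	int ifindex;
	unsigned sid;
};

static int match(const struct entry *e, const struct ns *ns, int ifindex)
{
	/* Lookups now compare the namespace as well as the index. */
	return e->ns == ns && e->ifindex == ifindex;
}

int main(void)
{
	struct ns a, b;
	struct entry e = { .ns = &a, .ifindex = 2, .sid = 42 };

	printf("bucket(ns=a, if=2) = %u\n", hashfn(&a, 2));
	printf("bucket(ns=b, if=2) = %u\n", hashfn(&b, 2));
	printf("match in a: %d, match in b: %d\n",
	       match(&e, &a, 2), match(&e, &b, 2));
	return 0;
}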
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 2aa9d172dc7e..a1d3944751b9 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -728,7 +728,7 @@ static int security_validtrans_handle_fail(struct context *ocontext,
if (context_struct_to_string(tcontext, &t, &tlen))
goto out;
audit_log(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR,
- "security_validate_transition: denied for"
+ "op=security_validate_transition seresult=denied"
" oldcontext=%s newcontext=%s taskcontext=%s tclass=%s",
o, n, t, sym_name(&policydb, SYM_CLASSES, tclass-1));
out:
@@ -877,7 +877,7 @@ int security_bounded_transition(u32 old_sid, u32 new_sid)
audit_log(current->audit_context,
GFP_ATOMIC, AUDIT_SELINUX_ERR,
"op=security_bounded_transition "
- "result=denied "
+ "seresult=denied "
"oldcontext=%s newcontext=%s",
old_name, new_name);
}
@@ -1351,8 +1351,8 @@ static int compute_sid_handle_invalid_context(
if (context_struct_to_string(newcontext, &n, &nlen))
goto out;
audit_log(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR,
- "security_compute_sid: invalid context %s"
- " for scontext=%s"
+ "op=security_compute_sid invalid_context=%s"
+ " scontext=%s"
" tcontext=%s"
" tclass=%s",
n, s, t, sym_name(&policydb, SYM_CLASSES, tclass-1));
@@ -2607,8 +2607,10 @@ int security_sid_mls_copy(u32 sid, u32 mls_sid, u32 *new_sid)
rc = convert_context_handle_invalid_context(&newcon);
if (rc) {
if (!context_struct_to_string(&newcon, &s, &len)) {
- audit_log(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR,
- "security_sid_mls_copy: invalid context %s", s);
+ audit_log(current->audit_context,
+ GFP_ATOMIC, AUDIT_SELINUX_ERR,
+ "op=security_sid_mls_copy "
+ "invalid_context=%s", s);
kfree(s);
}
goto out_unlock;
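
The services.c hunks converge the SELinux error records on searchable key=value fields (op=, seresult=, invalid_context=, scontext=, tcontext=, tclass=) instead of free-form sentences. For illustration only, the shape of such a record built in plain C; the field names come from the hunks above, the context values are invented:

/* Hedged sketch: format an audit-style key=value record as in the
 * hunks above; the context strings here are made-up examples. */
#include <stdio.h>

int main(void)
{
	char buf[256];

	snprintf(buf, sizeof(buf),
		 "op=security_bounded_transition seresult=denied "
		 "oldcontext=%s newcontext=%s",
		 "staff_u:staff_r:staff_t:s0",
		 "staff_u:staff_r:sandbox_t:s0");
	puts(buf);
	return 0;
}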
diff --git a/security/smack/Kconfig b/security/smack/Kconfig
index e69de9c642b7..b065f9789418 100644
--- a/security/smack/Kconfig
+++ b/security/smack/Kconfig
@@ -12,3 +12,19 @@ config SECURITY_SMACK
of other mandatory security schemes.
If you are unsure how to answer this question, answer N.
+config SECURITY_SMACK_BRINGUP
+ bool "Reporting on access granted by Smack rules"
+ depends on SECURITY_SMACK
+ default n
+ help
+ Enable the bring-up ("b") access mode in Smack rules.
+ When access is granted by a rule with the "b" mode a
+ message about the access requested is generated. The
+ intention is that a process can be granted a wide set
+ of access initially with the bringup mode set on the
+ rules. The developer can use the information to
+ identify which rules are necessary and what accesses
+ may be inappropriate. The developer can reduce the
+ access rule set once the behavior is well understood.
+ This is a superior mechanism to the oft abused
+ "permissive" mode of other systems.
diff --git a/security/smack/smack.h b/security/smack/smack.h
index 020307ef0972..b828a379377c 100644
--- a/security/smack/smack.h
+++ b/security/smack/smack.h
@@ -71,11 +71,11 @@ struct smack_known {
#define SMK_CIPSOLEN 24
struct superblock_smack {
- char *smk_root;
- char *smk_floor;
- char *smk_hat;
- char *smk_default;
- int smk_initialized;
+ struct smack_known *smk_root;
+ struct smack_known *smk_floor;
+ struct smack_known *smk_hat;
+ struct smack_known *smk_default;
+ int smk_initialized;
};
struct socket_smack {
@@ -88,7 +88,7 @@ struct socket_smack {
* Inode smack data
*/
struct inode_smack {
- char *smk_inode; /* label of the fso */
+ struct smack_known *smk_inode; /* label of the fso */
struct smack_known *smk_task; /* label of the task */
struct smack_known *smk_mmap; /* label of the mmap domain */
struct mutex smk_lock; /* initialization lock */
@@ -112,7 +112,7 @@ struct task_smack {
struct smack_rule {
struct list_head list;
struct smack_known *smk_subject;
- char *smk_object;
+ struct smack_known *smk_object;
int smk_access;
};
@@ -123,7 +123,7 @@ struct smk_netlbladdr {
struct list_head list;
struct sockaddr_in smk_host; /* network address */
struct in_addr smk_mask; /* network mask */
- char *smk_label; /* label */
+ struct smack_known *smk_label; /* label */
};
/*
@@ -191,6 +191,7 @@ struct smk_port_label {
*/
#define MAY_TRANSMUTE 0x00001000 /* Controls directory labeling */
#define MAY_LOCK 0x00002000 /* Locks should be writes, but ... */
+#define MAY_BRINGUP 0x00004000 /* Report use of this rule */
/*
* Just to make the common cases easier to deal with
@@ -200,9 +201,9 @@ struct smk_port_label {
#define MAY_NOT 0
/*
- * Number of access types used by Smack (rwxatl)
+ * Number of access types used by Smack (rwxatlb)
*/
-#define SMK_NUM_ACCESS_TYPE 6
+#define SMK_NUM_ACCESS_TYPE 7
/* SMACK data */
struct smack_audit_data {
@@ -226,23 +227,23 @@ struct smk_audit_info {
/*
* These functions are in smack_lsm.c
*/
-struct inode_smack *new_inode_smack(char *);
+struct inode_smack *new_inode_smack(struct smack_known *);
/*
* These functions are in smack_access.c
*/
int smk_access_entry(char *, char *, struct list_head *);
-int smk_access(struct smack_known *, char *, int, struct smk_audit_info *);
-int smk_tskacc(struct task_smack *, char *, u32, struct smk_audit_info *);
-int smk_curacc(char *, u32, struct smk_audit_info *);
+int smk_access(struct smack_known *, struct smack_known *,
+ int, struct smk_audit_info *);
+int smk_tskacc(struct task_smack *, struct smack_known *,
+ u32, struct smk_audit_info *);
+int smk_curacc(struct smack_known *, u32, struct smk_audit_info *);
struct smack_known *smack_from_secid(const u32);
char *smk_parse_smack(const char *string, int len);
int smk_netlbl_mls(int, char *, struct netlbl_lsm_secattr *, int);
-char *smk_import(const char *, int);
struct smack_known *smk_import_entry(const char *, int);
void smk_insert_entry(struct smack_known *skp);
struct smack_known *smk_find_entry(const char *);
-u32 smack_to_secid(const char *);
/*
* Shared data.
@@ -252,7 +253,7 @@ extern int smack_cipso_mapped;
extern struct smack_known *smack_net_ambient;
extern struct smack_known *smack_onlycap;
extern struct smack_known *smack_syslog_label;
-extern const char *smack_cipso_option;
+extern struct smack_known smack_cipso_option;
extern int smack_ptrace_rule;
extern struct smack_known smack_known_floor;
@@ -281,9 +282,9 @@ static inline int smk_inode_transmutable(const struct inode *isp)
}
/*
- * Present a pointer to the smack label in an inode blob.
+ * Present a pointer to the smack label entry in an inode blob.
*/
-static inline char *smk_of_inode(const struct inode *isp)
+static inline struct smack_known *smk_of_inode(const struct inode *isp)
{
struct inode_smack *sip = isp->i_security;
return sip->smk_inode;
diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
index f97d0842e621..5b970ffde024 100644
--- a/security/smack/smack_access.c
+++ b/security/smack/smack_access.c
@@ -94,7 +94,7 @@ int smk_access_entry(char *subject_label, char *object_label,
struct smack_rule *srp;
list_for_each_entry_rcu(srp, rule_list, list) {
- if (srp->smk_object == object_label &&
+ if (srp->smk_object->smk_known == object_label &&
srp->smk_subject->smk_known == subject_label) {
may = srp->smk_access;
break;
@@ -111,8 +111,8 @@ int smk_access_entry(char *subject_label, char *object_label,
/**
* smk_access - determine if a subject has a specific access to an object
- * @subject_known: a pointer to the subject's Smack label entry
- * @object_label: a pointer to the object's Smack label
+ * @subject: a pointer to the subject's Smack label entry
+ * @object: a pointer to the object's Smack label entry
* @request: the access requested, in "MAY" format
* @a : a pointer to the audit data
*
@@ -122,8 +122,8 @@ int smk_access_entry(char *subject_label, char *object_label,
*
* Smack labels are shared on smack_list
*/
-int smk_access(struct smack_known *subject_known, char *object_label,
- int request, struct smk_audit_info *a)
+int smk_access(struct smack_known *subject, struct smack_known *object,
+ int request, struct smk_audit_info *a)
{
int may = MAY_NOT;
int rc = 0;
@@ -133,7 +133,7 @@ int smk_access(struct smack_known *subject_known, char *object_label,
*
* A star subject can't access any object.
*/
- if (subject_known == &smack_known_star) {
+ if (subject == &smack_known_star) {
rc = -EACCES;
goto out_audit;
}
@@ -142,28 +142,28 @@ int smk_access(struct smack_known *subject_known, char *object_label,
* Tasks cannot be assigned the internet label.
* An internet subject can access any object.
*/
- if (object_label == smack_known_web.smk_known ||
- subject_known == &smack_known_web)
+ if (object == &smack_known_web ||
+ subject == &smack_known_web)
goto out_audit;
/*
* A star object can be accessed by any subject.
*/
- if (object_label == smack_known_star.smk_known)
+ if (object == &smack_known_star)
goto out_audit;
/*
* An object can be accessed in any way by a subject
* with the same label.
*/
- if (subject_known->smk_known == object_label)
+ if (subject->smk_known == object->smk_known)
goto out_audit;
/*
* A hat subject can read any object.
* A floor object can be read by any subject.
*/
if ((request & MAY_ANYREAD) == request) {
- if (object_label == smack_known_floor.smk_known)
+ if (object == &smack_known_floor)
goto out_audit;
- if (subject_known == &smack_known_hat)
+ if (subject == &smack_known_hat)
goto out_audit;
}
/*
@@ -174,27 +174,38 @@ int smk_access(struct smack_known *subject_known, char *object_label,
* indicates there is no entry for this pair.
*/
rcu_read_lock();
- may = smk_access_entry(subject_known->smk_known, object_label,
- &subject_known->smk_rules);
+ may = smk_access_entry(subject->smk_known, object->smk_known,
+ &subject->smk_rules);
rcu_read_unlock();
- if (may > 0 && (request & may) == request)
+ if (may <= 0 || (request & may) != request) {
+ rc = -EACCES;
goto out_audit;
+ }
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+ /*
+ * Return a positive value if using bringup mode.
+ * This allows the hooks to identify checks that
+ * succeed because of "b" rules.
+ */
+ if (may & MAY_BRINGUP)
+ rc = MAY_BRINGUP;
+#endif
- rc = -EACCES;
out_audit:
#ifdef CONFIG_AUDIT
if (a)
- smack_log(subject_known->smk_known, object_label, request,
- rc, a);
+ smack_log(subject->smk_known, object->smk_known,
+ request, rc, a);
#endif
+
return rc;
}
/**
* smk_tskacc - determine if a task has a specific access to an object
- * @tsp: a pointer to the subject task
- * @obj_label: a pointer to the object's Smack label
+ * @tsp: a pointer to the subject's task
+ * @obj_known: a pointer to the object's label entry
* @mode: the access requested, in "MAY" format
* @a : common audit data
*
@@ -203,24 +214,25 @@ out_audit:
* non zero otherwise. It allows that the task may have the capability
* to override the rules.
*/
-int smk_tskacc(struct task_smack *subject, char *obj_label,
+int smk_tskacc(struct task_smack *tsp, struct smack_known *obj_known,
u32 mode, struct smk_audit_info *a)
{
- struct smack_known *skp = smk_of_task(subject);
+ struct smack_known *sbj_known = smk_of_task(tsp);
int may;
int rc;
/*
* Check the global rule list
*/
- rc = smk_access(skp, obj_label, mode, NULL);
- if (rc == 0) {
+ rc = smk_access(sbj_known, obj_known, mode, NULL);
+ if (rc >= 0) {
/*
* If there is an entry in the task's rule list
* it can further restrict access.
*/
- may = smk_access_entry(skp->smk_known, obj_label,
- &subject->smk_rules);
+ may = smk_access_entry(sbj_known->smk_known,
+ obj_known->smk_known,
+ &tsp->smk_rules);
if (may < 0)
goto out_audit;
if ((mode & may) == mode)
@@ -237,14 +249,15 @@ int smk_tskacc(struct task_smack *subject, char *obj_label,
out_audit:
#ifdef CONFIG_AUDIT
if (a)
- smack_log(skp->smk_known, obj_label, mode, rc, a);
+ smack_log(sbj_known->smk_known, obj_known->smk_known,
+ mode, rc, a);
#endif
return rc;
}
/**
* smk_curacc - determine if current has a specific access to an object
- * @obj_label: a pointer to the object's Smack label
+ * @obj_known: a pointer to the object's Smack label entry
* @mode: the access requested, in "MAY" format
* @a : common audit data
*
@@ -253,11 +266,12 @@ out_audit:
* non zero otherwise. It allows that current may have the capability
* to override the rules.
*/
-int smk_curacc(char *obj_label, u32 mode, struct smk_audit_info *a)
+int smk_curacc(struct smack_known *obj_known,
+ u32 mode, struct smk_audit_info *a)
{
struct task_smack *tsp = current_security();
- return smk_tskacc(tsp, obj_label, mode, a);
+ return smk_tskacc(tsp, obj_known, mode, a);
}
#ifdef CONFIG_AUDIT
@@ -328,6 +342,13 @@ void smack_log(char *subject_label, char *object_label, int request,
struct smack_audit_data *sad;
struct common_audit_data *a = &ad->a;
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+ /*
+ * The result may be positive in bringup mode.
+ */
+ if (result > 0)
+ result = 0;
+#endif
/* check if we have to log the current event */
if (result != 0 && (log_policy & SMACK_AUDIT_DENIED) == 0)
return;
@@ -544,27 +565,6 @@ unlockout:
}
/**
- * smk_import - import a smack label
- * @string: a text string that might be a Smack label
- * @len: the maximum size, or zero if it is NULL terminated.
- *
- * Returns a pointer to the label in the label list that
- * matches the passed string, adding it if necessary.
- */
-char *smk_import(const char *string, int len)
-{
- struct smack_known *skp;
-
- /* labels cannot begin with a '-' */
- if (string[0] == '-')
- return NULL;
- skp = smk_import_entry(string, len);
- if (skp == NULL)
- return NULL;
- return skp->smk_known;
-}
-
-/**
* smack_from_secid - find the Smack label associated with a secid
* @secid: an integer that might be associated with a Smack label
*
@@ -590,19 +590,3 @@ struct smack_known *smack_from_secid(const u32 secid)
rcu_read_unlock();
return &smack_known_invalid;
}
-
-/**
- * smack_to_secid - find the secid associated with a Smack label
- * @smack: the Smack label
- *
- * Returns the appropriate secid if there is one,
- * otherwise 0
- */
-u32 smack_to_secid(const char *smack)
-{
- struct smack_known *skp = smk_find_entry(smack);
-
- if (skp == NULL)
- return 0;
- return skp->smk_secid;
-}
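
The Smack hunks above push struct smack_known pointers everywhere a bare label string used to travel: labels are interned once, so equality checks become pointer comparisons and the secid is read straight from the entry, which is why smk_import() and smack_to_secid() could be deleted. A standalone sketch of that interning idea (the table and types are illustrative, not Smack's label list):

/* Hedged sketch of label interning: one canonical entry per label,
 * so comparisons are pointer equality and the secid comes for free. */
#include <stdio.h>
#include <string.h>

struct known {
	const char *label;
	unsigned secid;
};

static struct known table[16];
static int nr_known;

/* Return the canonical entry for a label, adding it if necessary
 * (roughly what smk_import_entry() does for the real label list). */
static struct known *import_entry(const char *label)
{
	for (int i = 0; i < nr_known; i++)
		if (strcmp(table[i].label, label) == 0)
			return &table[i];
	if (nr_known == 16)
		return NULL;
	table[nr_known].label = label;
	table[nr_known].secid = 100 + nr_known;
	return &table[nr_known++];
}

int main(void)
{
	struct known *subject = import_entry("App::Media");
	struct known *object  = import_entry("App::Media");
	struct known *other   = import_entry("System::Log");

	/* Same label -> same entry, so a pointer compare is enough. */
	printf("same entry: %d\n", subject == object);
	printf("different:  %d\n", subject == other);
	printf("secid of %s is %u\n", other->label, other->secid);
	return 0;
}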
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index e6ab307ce86e..d515ec25ae9f 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -54,6 +54,151 @@
LIST_HEAD(smk_ipv6_port_list);
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+static void smk_bu_mode(int mode, char *s)
+{
+ int i = 0;
+
+ if (mode & MAY_READ)
+ s[i++] = 'r';
+ if (mode & MAY_WRITE)
+ s[i++] = 'w';
+ if (mode & MAY_EXEC)
+ s[i++] = 'x';
+ if (mode & MAY_APPEND)
+ s[i++] = 'a';
+ if (mode & MAY_TRANSMUTE)
+ s[i++] = 't';
+ if (mode & MAY_LOCK)
+ s[i++] = 'l';
+ if (i == 0)
+ s[i++] = '-';
+ s[i] = '\0';
+}
+#endif
+
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+static int smk_bu_note(char *note, struct smack_known *sskp,
+ struct smack_known *oskp, int mode, int rc)
+{
+ char acc[SMK_NUM_ACCESS_TYPE + 1];
+
+ if (rc <= 0)
+ return rc;
+
+ smk_bu_mode(mode, acc);
+ pr_info("Smack Bringup: (%s %s %s) %s\n",
+ sskp->smk_known, oskp->smk_known, acc, note);
+ return 0;
+}
+#else
+#define smk_bu_note(note, sskp, oskp, mode, RC) (RC)
+#endif
+
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+static int smk_bu_current(char *note, struct smack_known *oskp,
+ int mode, int rc)
+{
+ struct task_smack *tsp = current_security();
+ char acc[SMK_NUM_ACCESS_TYPE + 1];
+
+ if (rc <= 0)
+ return rc;
+
+ smk_bu_mode(mode, acc);
+ pr_info("Smack Bringup: (%s %s %s) %s %s\n",
+ tsp->smk_task->smk_known, oskp->smk_known,
+ acc, current->comm, note);
+ return 0;
+}
+#else
+#define smk_bu_current(note, oskp, mode, RC) (RC)
+#endif
+
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+static int smk_bu_task(struct task_struct *otp, int mode, int rc)
+{
+ struct task_smack *tsp = current_security();
+ struct task_smack *otsp = task_security(otp);
+ char acc[SMK_NUM_ACCESS_TYPE + 1];
+
+ if (rc <= 0)
+ return rc;
+
+ smk_bu_mode(mode, acc);
+ pr_info("Smack Bringup: (%s %s %s) %s to %s\n",
+ tsp->smk_task->smk_known, otsp->smk_task->smk_known, acc,
+ current->comm, otp->comm);
+ return 0;
+}
+#else
+#define smk_bu_task(otp, mode, RC) (RC)
+#endif
+
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+static int smk_bu_inode(struct inode *inode, int mode, int rc)
+{
+ struct task_smack *tsp = current_security();
+ char acc[SMK_NUM_ACCESS_TYPE + 1];
+
+ if (rc <= 0)
+ return rc;
+
+ smk_bu_mode(mode, acc);
+ pr_info("Smack Bringup: (%s %s %s) inode=(%s %ld) %s\n",
+ tsp->smk_task->smk_known, smk_of_inode(inode)->smk_known, acc,
+ inode->i_sb->s_id, inode->i_ino, current->comm);
+ return 0;
+}
+#else
+#define smk_bu_inode(inode, mode, RC) (RC)
+#endif
+
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+static int smk_bu_file(struct file *file, int mode, int rc)
+{
+ struct task_smack *tsp = current_security();
+ struct smack_known *sskp = tsp->smk_task;
+ struct inode *inode = file->f_inode;
+ char acc[SMK_NUM_ACCESS_TYPE + 1];
+
+ if (rc <= 0)
+ return rc;
+
+ smk_bu_mode(mode, acc);
+ pr_info("Smack Bringup: (%s %s %s) file=(%s %ld %s) %s\n",
+ sskp->smk_known, (char *)file->f_security, acc,
+ inode->i_sb->s_id, inode->i_ino, file->f_dentry->d_name.name,
+ current->comm);
+ return 0;
+}
+#else
+#define smk_bu_file(file, mode, RC) (RC)
+#endif
+
+#ifdef CONFIG_SECURITY_SMACK_BRINGUP
+static int smk_bu_credfile(const struct cred *cred, struct file *file,
+ int mode, int rc)
+{
+ struct task_smack *tsp = cred->security;
+ struct smack_known *sskp = tsp->smk_task;
+ struct inode *inode = file->f_inode;
+ char acc[SMK_NUM_ACCESS_TYPE + 1];
+
+ if (rc <= 0)
+ return rc;
+
+ smk_bu_mode(mode, acc);
+ pr_info("Smack Bringup: (%s %s %s) file=(%s %ld %s) %s\n",
+ sskp->smk_known, smk_of_inode(inode)->smk_known, acc,
+ inode->i_sb->s_id, inode->i_ino, file->f_dentry->d_name.name,
+ current->comm);
+ return 0;
+}
+#else
+#define smk_bu_credfile(cred, file, mode, RC) (RC)
+#endif
+
/**
* smk_fetch - Fetch the smack label from a file.
* @ip: a pointer to the inode
@@ -87,11 +232,11 @@ static struct smack_known *smk_fetch(const char *name, struct inode *ip,
/**
* new_inode_smack - allocate an inode security blob
- * @smack: a pointer to the Smack label to use in the blob
+ * @skp: a pointer to the Smack label entry to use in the blob
*
* Returns the new blob or NULL if there's no memory available
*/
-struct inode_smack *new_inode_smack(char *smack)
+struct inode_smack *new_inode_smack(struct smack_known *skp)
{
struct inode_smack *isp;
@@ -99,7 +244,7 @@ struct inode_smack *new_inode_smack(char *smack)
if (isp == NULL)
return NULL;
- isp->smk_inode = smack;
+ isp->smk_inode = skp;
isp->smk_flags = 0;
mutex_init(&isp->smk_lock);
@@ -178,20 +323,20 @@ static inline unsigned int smk_ptrace_mode(unsigned int mode)
/**
* smk_ptrace_rule_check - helper for ptrace access
* @tracer: tracer process
- * @tracee_label: label of the process that's about to be traced,
- * the pointer must originate from smack structures
+ * @tracee_known: label entry of the process that's about to be traced
* @mode: ptrace attachment mode (PTRACE_MODE_*)
* @func: name of the function that called us, used for audit
*
* Returns 0 on access granted, -error on error
*/
-static int smk_ptrace_rule_check(struct task_struct *tracer, char *tracee_label,
+static int smk_ptrace_rule_check(struct task_struct *tracer,
+ struct smack_known *tracee_known,
unsigned int mode, const char *func)
{
int rc;
struct smk_audit_info ad, *saip = NULL;
struct task_smack *tsp;
- struct smack_known *skp;
+ struct smack_known *tracer_known;
if ((mode & PTRACE_MODE_NOAUDIT) == 0) {
smk_ad_init(&ad, func, LSM_AUDIT_DATA_TASK);
@@ -200,12 +345,12 @@ static int smk_ptrace_rule_check(struct task_struct *tracer, char *tracee_label,
}
tsp = task_security(tracer);
- skp = smk_of_task(tsp);
+ tracer_known = smk_of_task(tsp);
if ((mode & PTRACE_MODE_ATTACH) &&
(smack_ptrace_rule == SMACK_PTRACE_EXACT ||
smack_ptrace_rule == SMACK_PTRACE_DRACONIAN)) {
- if (skp->smk_known == tracee_label)
+ if (tracer_known->smk_known == tracee_known->smk_known)
rc = 0;
else if (smack_ptrace_rule == SMACK_PTRACE_DRACONIAN)
rc = -EACCES;
@@ -215,13 +360,15 @@ static int smk_ptrace_rule_check(struct task_struct *tracer, char *tracee_label,
rc = -EACCES;
if (saip)
- smack_log(skp->smk_known, tracee_label, 0, rc, saip);
+ smack_log(tracer_known->smk_known,
+ tracee_known->smk_known,
+ 0, rc, saip);
return rc;
}
/* In case of rule==SMACK_PTRACE_DEFAULT or mode==PTRACE_MODE_READ */
- rc = smk_tskacc(tsp, tracee_label, smk_ptrace_mode(mode), saip);
+ rc = smk_tskacc(tsp, tracee_known, smk_ptrace_mode(mode), saip);
return rc;
}
@@ -250,7 +397,7 @@ static int smack_ptrace_access_check(struct task_struct *ctp, unsigned int mode)
skp = smk_of_task(task_security(ctp));
- rc = smk_ptrace_rule_check(current, skp->smk_known, mode, __func__);
+ rc = smk_ptrace_rule_check(current, skp, mode, __func__);
return rc;
}
@@ -273,8 +420,7 @@ static int smack_ptrace_traceme(struct task_struct *ptp)
skp = smk_of_task(current_security());
- rc = smk_ptrace_rule_check(ptp, skp->smk_known,
- PTRACE_MODE_ATTACH, __func__);
+ rc = smk_ptrace_rule_check(ptp, skp, PTRACE_MODE_ATTACH, __func__);
return rc;
}
@@ -318,10 +464,10 @@ static int smack_sb_alloc_security(struct super_block *sb)
if (sbsp == NULL)
return -ENOMEM;
- sbsp->smk_root = smack_known_floor.smk_known;
- sbsp->smk_default = smack_known_floor.smk_known;
- sbsp->smk_floor = smack_known_floor.smk_known;
- sbsp->smk_hat = smack_known_hat.smk_known;
+ sbsp->smk_root = &smack_known_floor;
+ sbsp->smk_default = &smack_known_floor;
+ sbsp->smk_floor = &smack_known_floor;
+ sbsp->smk_hat = &smack_known_hat;
/*
* smk_initialized will be zero from kzalloc.
*/
@@ -405,7 +551,6 @@ static int smack_sb_kern_mount(struct super_block *sb, int flags, void *data)
struct smack_known *skp;
char *op;
char *commap;
- char *nsp;
int transmute = 0;
int specified = 0;
@@ -421,38 +566,38 @@ static int smack_sb_kern_mount(struct super_block *sb, int flags, void *data)
if (strncmp(op, SMK_FSHAT, strlen(SMK_FSHAT)) == 0) {
op += strlen(SMK_FSHAT);
- nsp = smk_import(op, 0);
- if (nsp != NULL) {
- sp->smk_hat = nsp;
+ skp = smk_import_entry(op, 0);
+ if (skp != NULL) {
+ sp->smk_hat = skp;
specified = 1;
}
} else if (strncmp(op, SMK_FSFLOOR, strlen(SMK_FSFLOOR)) == 0) {
op += strlen(SMK_FSFLOOR);
- nsp = smk_import(op, 0);
- if (nsp != NULL) {
- sp->smk_floor = nsp;
+ skp = smk_import_entry(op, 0);
+ if (skp != NULL) {
+ sp->smk_floor = skp;
specified = 1;
}
} else if (strncmp(op, SMK_FSDEFAULT,
strlen(SMK_FSDEFAULT)) == 0) {
op += strlen(SMK_FSDEFAULT);
- nsp = smk_import(op, 0);
- if (nsp != NULL) {
- sp->smk_default = nsp;
+ skp = smk_import_entry(op, 0);
+ if (skp != NULL) {
+ sp->smk_default = skp;
specified = 1;
}
} else if (strncmp(op, SMK_FSROOT, strlen(SMK_FSROOT)) == 0) {
op += strlen(SMK_FSROOT);
- nsp = smk_import(op, 0);
- if (nsp != NULL) {
- sp->smk_root = nsp;
+ skp = smk_import_entry(op, 0);
+ if (skp != NULL) {
+ sp->smk_root = skp;
specified = 1;
}
} else if (strncmp(op, SMK_FSTRANS, strlen(SMK_FSTRANS)) == 0) {
op += strlen(SMK_FSTRANS);
- nsp = smk_import(op, 0);
- if (nsp != NULL) {
- sp->smk_root = nsp;
+ skp = smk_import_entry(op, 0);
+ if (skp != NULL) {
+ sp->smk_root = skp;
transmute = 1;
specified = 1;
}
@@ -469,8 +614,8 @@ static int smack_sb_kern_mount(struct super_block *sb, int flags, void *data)
* Unprivileged mounts get root and default from the caller.
*/
skp = smk_of_current();
- sp->smk_root = skp->smk_known;
- sp->smk_default = skp->smk_known;
+ sp->smk_root = skp;
+ sp->smk_default = skp;
}
/*
* Initialize the root inode.
@@ -507,6 +652,7 @@ static int smack_sb_statfs(struct dentry *dentry)
smk_ad_setfield_u_fs_path_dentry(&ad, dentry);
rc = smk_curacc(sbp->smk_floor, MAY_READ, &ad);
+ rc = smk_bu_current("statfs", sbp->smk_floor, MAY_READ, rc);
return rc;
}
@@ -546,7 +692,7 @@ static int smack_bprm_set_creds(struct linux_binprm *bprm)
tracer = ptrace_parent(current);
if (likely(tracer != NULL))
rc = smk_ptrace_rule_check(tracer,
- isp->smk_task->smk_known,
+ isp->smk_task,
PTRACE_MODE_ATTACH,
__func__);
rcu_read_unlock();
@@ -607,7 +753,7 @@ static int smack_inode_alloc_security(struct inode *inode)
{
struct smack_known *skp = smk_of_current();
- inode->i_security = new_inode_smack(skp->smk_known);
+ inode->i_security = new_inode_smack(skp);
if (inode->i_security == NULL)
return -ENOMEM;
return 0;
@@ -627,8 +773,8 @@ static void smack_inode_free_security(struct inode *inode)
/**
* smack_inode_init_security - copy out the smack from an inode
- * @inode: the inode
- * @dir: unused
+ * @inode: the newly created inode
+ * @dir: containing directory object
* @qstr: unused
* @name: where to put the attribute name
* @value: where to put the attribute value
@@ -642,8 +788,8 @@ static int smack_inode_init_security(struct inode *inode, struct inode *dir,
{
struct inode_smack *issp = inode->i_security;
struct smack_known *skp = smk_of_current();
- char *isp = smk_of_inode(inode);
- char *dsp = smk_of_inode(dir);
+ struct smack_known *isp = smk_of_inode(inode);
+ struct smack_known *dsp = smk_of_inode(dir);
int may;
if (name)
@@ -651,7 +797,8 @@ static int smack_inode_init_security(struct inode *inode, struct inode *dir,
if (value) {
rcu_read_lock();
- may = smk_access_entry(skp->smk_known, dsp, &skp->smk_rules);
+ may = smk_access_entry(skp->smk_known, dsp->smk_known,
+ &skp->smk_rules);
rcu_read_unlock();
/*
@@ -666,13 +813,13 @@ static int smack_inode_init_security(struct inode *inode, struct inode *dir,
issp->smk_flags |= SMK_INODE_CHANGED;
}
- *value = kstrdup(isp, GFP_NOFS);
+ *value = kstrdup(isp->smk_known, GFP_NOFS);
if (*value == NULL)
return -ENOMEM;
}
if (len)
- *len = strlen(isp) + 1;
+ *len = strlen(isp->smk_known);
return 0;
}
@@ -688,7 +835,7 @@ static int smack_inode_init_security(struct inode *inode, struct inode *dir,
static int smack_inode_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry)
{
- char *isp;
+ struct smack_known *isp;
struct smk_audit_info ad;
int rc;
@@ -697,11 +844,13 @@ static int smack_inode_link(struct dentry *old_dentry, struct inode *dir,
isp = smk_of_inode(old_dentry->d_inode);
rc = smk_curacc(isp, MAY_WRITE, &ad);
+ rc = smk_bu_inode(old_dentry->d_inode, MAY_WRITE, rc);
if (rc == 0 && new_dentry->d_inode != NULL) {
isp = smk_of_inode(new_dentry->d_inode);
smk_ad_setfield_u_fs_path_dentry(&ad, new_dentry);
rc = smk_curacc(isp, MAY_WRITE, &ad);
+ rc = smk_bu_inode(new_dentry->d_inode, MAY_WRITE, rc);
}
return rc;
@@ -728,6 +877,7 @@ static int smack_inode_unlink(struct inode *dir, struct dentry *dentry)
* You need write access to the thing you're unlinking
*/
rc = smk_curacc(smk_of_inode(ip), MAY_WRITE, &ad);
+ rc = smk_bu_inode(ip, MAY_WRITE, rc);
if (rc == 0) {
/*
* You also need write access to the containing directory
@@ -735,6 +885,7 @@ static int smack_inode_unlink(struct inode *dir, struct dentry *dentry)
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_INODE);
smk_ad_setfield_u_fs_inode(&ad, dir);
rc = smk_curacc(smk_of_inode(dir), MAY_WRITE, &ad);
+ rc = smk_bu_inode(dir, MAY_WRITE, rc);
}
return rc;
}
@@ -759,6 +910,7 @@ static int smack_inode_rmdir(struct inode *dir, struct dentry *dentry)
* You need write access to the thing you're removing
*/
rc = smk_curacc(smk_of_inode(dentry->d_inode), MAY_WRITE, &ad);
+ rc = smk_bu_inode(dentry->d_inode, MAY_WRITE, rc);
if (rc == 0) {
/*
* You also need write access to the containing directory
@@ -766,6 +918,7 @@ static int smack_inode_rmdir(struct inode *dir, struct dentry *dentry)
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_INODE);
smk_ad_setfield_u_fs_inode(&ad, dir);
rc = smk_curacc(smk_of_inode(dir), MAY_WRITE, &ad);
+ rc = smk_bu_inode(dir, MAY_WRITE, rc);
}
return rc;
@@ -773,10 +926,10 @@ static int smack_inode_rmdir(struct inode *dir, struct dentry *dentry)
/**
* smack_inode_rename - Smack check on rename
- * @old_inode: the old directory
- * @old_dentry: unused
- * @new_inode: the new directory
- * @new_dentry: unused
+ * @old_inode: unused
+ * @old_dentry: the old object
+ * @new_inode: unused
+ * @new_dentry: the new object
*
* Read and write access is required on both the old and
* new directories.
@@ -789,7 +942,7 @@ static int smack_inode_rename(struct inode *old_inode,
struct dentry *new_dentry)
{
int rc;
- char *isp;
+ struct smack_known *isp;
struct smk_audit_info ad;
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
@@ -797,11 +950,13 @@ static int smack_inode_rename(struct inode *old_inode,
isp = smk_of_inode(old_dentry->d_inode);
rc = smk_curacc(isp, MAY_READWRITE, &ad);
+ rc = smk_bu_inode(old_dentry->d_inode, MAY_READWRITE, rc);
if (rc == 0 && new_dentry->d_inode != NULL) {
isp = smk_of_inode(new_dentry->d_inode);
smk_ad_setfield_u_fs_path_dentry(&ad, new_dentry);
rc = smk_curacc(isp, MAY_READWRITE, &ad);
+ rc = smk_bu_inode(new_dentry->d_inode, MAY_READWRITE, rc);
}
return rc;
}
@@ -819,6 +974,7 @@ static int smack_inode_permission(struct inode *inode, int mask)
{
struct smk_audit_info ad;
int no_block = mask & MAY_NOT_BLOCK;
+ int rc;
mask &= (MAY_READ|MAY_WRITE|MAY_EXEC|MAY_APPEND);
/*
@@ -832,7 +988,9 @@ static int smack_inode_permission(struct inode *inode, int mask)
return -ECHILD;
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_INODE);
smk_ad_setfield_u_fs_inode(&ad, inode);
- return smk_curacc(smk_of_inode(inode), mask, &ad);
+ rc = smk_curacc(smk_of_inode(inode), mask, &ad);
+ rc = smk_bu_inode(inode, mask, rc);
+ return rc;
}
/**
@@ -845,6 +1003,8 @@ static int smack_inode_permission(struct inode *inode, int mask)
static int smack_inode_setattr(struct dentry *dentry, struct iattr *iattr)
{
struct smk_audit_info ad;
+ int rc;
+
/*
* Need to allow for clearing the setuid bit.
*/
@@ -853,12 +1013,14 @@ static int smack_inode_setattr(struct dentry *dentry, struct iattr *iattr)
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
smk_ad_setfield_u_fs_path_dentry(&ad, dentry);
- return smk_curacc(smk_of_inode(dentry->d_inode), MAY_WRITE, &ad);
+ rc = smk_curacc(smk_of_inode(dentry->d_inode), MAY_WRITE, &ad);
+ rc = smk_bu_inode(dentry->d_inode, MAY_WRITE, rc);
+ return rc;
}
/**
* smack_inode_getattr - Smack check for getting attributes
- * @mnt: unused
+ * @mnt: vfsmount of the object
* @dentry: the object
*
* Returns 0 if access is permitted, an error code otherwise
@@ -867,21 +1029,24 @@ static int smack_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
{
struct smk_audit_info ad;
struct path path;
+ int rc;
path.dentry = dentry;
path.mnt = mnt;
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
smk_ad_setfield_u_fs_path(&ad, path);
- return smk_curacc(smk_of_inode(dentry->d_inode), MAY_READ, &ad);
+ rc = smk_curacc(smk_of_inode(dentry->d_inode), MAY_READ, &ad);
+ rc = smk_bu_inode(dentry->d_inode, MAY_READ, rc);
+ return rc;
}
/**
* smack_inode_setxattr - Smack check for setting xattrs
* @dentry: the object
* @name: name of the attribute
- * @value: unused
- * @size: unused
+ * @value: value of the attribute
+ * @size: size of the value
* @flags: unused
*
* This protects the Smack attribute explicitly.
@@ -923,7 +1088,7 @@ static int smack_inode_setxattr(struct dentry *dentry, const char *name,
rc = -EPERM;
if (rc == 0 && check_import) {
- skp = smk_import_entry(value, size);
+ skp = size ? smk_import_entry(value, size) : NULL;
if (skp == NULL || (check_star &&
(skp == &smack_known_star || skp == &smack_known_web)))
rc = -EINVAL;
@@ -932,8 +1097,10 @@ static int smack_inode_setxattr(struct dentry *dentry, const char *name,
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
smk_ad_setfield_u_fs_path_dentry(&ad, dentry);
- if (rc == 0)
+ if (rc == 0) {
rc = smk_curacc(smk_of_inode(dentry->d_inode), MAY_WRITE, &ad);
+ rc = smk_bu_inode(dentry->d_inode, MAY_WRITE, rc);
+ }
return rc;
}
@@ -963,9 +1130,9 @@ static void smack_inode_post_setxattr(struct dentry *dentry, const char *name,
if (strcmp(name, XATTR_NAME_SMACK) == 0) {
skp = smk_import_entry(value, size);
if (skp != NULL)
- isp->smk_inode = skp->smk_known;
+ isp->smk_inode = skp;
else
- isp->smk_inode = smack_known_invalid.smk_known;
+ isp->smk_inode = &smack_known_invalid;
} else if (strcmp(name, XATTR_NAME_SMACKEXEC) == 0) {
skp = smk_import_entry(value, size);
if (skp != NULL)
@@ -993,11 +1160,14 @@ static void smack_inode_post_setxattr(struct dentry *dentry, const char *name,
static int smack_inode_getxattr(struct dentry *dentry, const char *name)
{
struct smk_audit_info ad;
+ int rc;
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
smk_ad_setfield_u_fs_path_dentry(&ad, dentry);
- return smk_curacc(smk_of_inode(dentry->d_inode), MAY_READ, &ad);
+ rc = smk_curacc(smk_of_inode(dentry->d_inode), MAY_READ, &ad);
+ rc = smk_bu_inode(dentry->d_inode, MAY_READ, rc);
+ return rc;
}
/**
@@ -1033,6 +1203,7 @@ static int smack_inode_removexattr(struct dentry *dentry, const char *name)
smk_ad_setfield_u_fs_path_dentry(&ad, dentry);
rc = smk_curacc(smk_of_inode(dentry->d_inode), MAY_WRITE, &ad);
+ rc = smk_bu_inode(dentry->d_inode, MAY_WRITE, rc);
if (rc != 0)
return rc;
@@ -1070,14 +1241,14 @@ static int smack_inode_getsecurity(const struct inode *inode,
struct socket *sock;
struct super_block *sbp;
struct inode *ip = (struct inode *)inode;
- char *isp;
+ struct smack_known *isp;
int ilen;
int rc = 0;
if (strcmp(name, XATTR_SMACK_SUFFIX) == 0) {
isp = smk_of_inode(inode);
- ilen = strlen(isp) + 1;
- *buffer = isp;
+ ilen = strlen(isp->smk_known);
+ *buffer = isp->smk_known;
return ilen;
}
@@ -1095,15 +1266,15 @@ static int smack_inode_getsecurity(const struct inode *inode,
ssp = sock->sk->sk_security;
if (strcmp(name, XATTR_SMACK_IPIN) == 0)
- isp = ssp->smk_in->smk_known;
+ isp = ssp->smk_in;
else if (strcmp(name, XATTR_SMACK_IPOUT) == 0)
- isp = ssp->smk_out->smk_known;
+ isp = ssp->smk_out;
else
return -EOPNOTSUPP;
- ilen = strlen(isp) + 1;
+ ilen = strlen(isp->smk_known);
if (rc == 0) {
- *buffer = isp;
+ *buffer = isp->smk_known;
rc = ilen;
}
@@ -1122,13 +1293,12 @@ static int smack_inode_getsecurity(const struct inode *inode,
static int smack_inode_listsecurity(struct inode *inode, char *buffer,
size_t buffer_size)
{
- int len = strlen(XATTR_NAME_SMACK);
+ int len = sizeof(XATTR_NAME_SMACK);
- if (buffer != NULL && len <= buffer_size) {
+ if (buffer != NULL && len <= buffer_size)
memcpy(buffer, XATTR_NAME_SMACK, len);
- return len;
- }
- return -EINVAL;
+
+ return len;
}
/**
@@ -1140,7 +1310,7 @@ static void smack_inode_getsecid(const struct inode *inode, u32 *secid)
{
struct inode_smack *isp = inode->i_security;
- *secid = smack_to_secid(isp->smk_inode);
+ *secid = isp->smk_inode->smk_secid;
}
/*
@@ -1179,7 +1349,7 @@ static int smack_file_alloc_security(struct file *file)
{
struct smack_known *skp = smk_of_current();
- file->f_security = skp->smk_known;
+ file->f_security = skp;
return 0;
}
@@ -1214,11 +1384,15 @@ static int smack_file_ioctl(struct file *file, unsigned int cmd,
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
smk_ad_setfield_u_fs_path(&ad, file->f_path);
- if (_IOC_DIR(cmd) & _IOC_WRITE)
+ if (_IOC_DIR(cmd) & _IOC_WRITE) {
rc = smk_curacc(file->f_security, MAY_WRITE, &ad);
+ rc = smk_bu_file(file, MAY_WRITE, rc);
+ }
- if (rc == 0 && (_IOC_DIR(cmd) & _IOC_READ))
+ if (rc == 0 && (_IOC_DIR(cmd) & _IOC_READ)) {
rc = smk_curacc(file->f_security, MAY_READ, &ad);
+ rc = smk_bu_file(file, MAY_READ, rc);
+ }
return rc;
}
@@ -1233,10 +1407,13 @@ static int smack_file_ioctl(struct file *file, unsigned int cmd,
static int smack_file_lock(struct file *file, unsigned int cmd)
{
struct smk_audit_info ad;
+ int rc;
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
smk_ad_setfield_u_fs_path(&ad, file->f_path);
- return smk_curacc(file->f_security, MAY_LOCK, &ad);
+ rc = smk_curacc(file->f_security, MAY_LOCK, &ad);
+ rc = smk_bu_file(file, MAY_LOCK, rc);
+ return rc;
}
/**
@@ -1266,12 +1443,14 @@ static int smack_file_fcntl(struct file *file, unsigned int cmd,
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
smk_ad_setfield_u_fs_path(&ad, file->f_path);
rc = smk_curacc(file->f_security, MAY_LOCK, &ad);
+ rc = smk_bu_file(file, MAY_LOCK, rc);
break;
case F_SETOWN:
case F_SETSIG:
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
smk_ad_setfield_u_fs_path(&ad, file->f_path);
rc = smk_curacc(file->f_security, MAY_WRITE, &ad);
+ rc = smk_bu_file(file, MAY_WRITE, rc);
break;
default:
break;
@@ -1298,7 +1477,7 @@ static int smack_mmap_file(struct file *file,
struct smack_known *mkp;
struct smack_rule *srp;
struct task_smack *tsp;
- char *osmack;
+ struct smack_known *okp;
struct inode_smack *isp;
int may;
int mmay;
@@ -1324,18 +1503,19 @@ static int smack_mmap_file(struct file *file,
* to that rule's object label.
*/
list_for_each_entry_rcu(srp, &skp->smk_rules, list) {
- osmack = srp->smk_object;
+ okp = srp->smk_object;
/*
* Matching labels always allows access.
*/
- if (mkp->smk_known == osmack)
+ if (mkp->smk_known == okp->smk_known)
continue;
/*
* If there is a matching local rule take
* that into account as well.
*/
- may = smk_access_entry(srp->smk_subject->smk_known, osmack,
- &tsp->smk_rules);
+ may = smk_access_entry(srp->smk_subject->smk_known,
+ okp->smk_known,
+ &tsp->smk_rules);
if (may == -ENOENT)
may = srp->smk_access;
else
@@ -1352,8 +1532,8 @@ static int smack_mmap_file(struct file *file,
* If there isn't one a SMACK64MMAP subject
* can't have as much access as current.
*/
- mmay = smk_access_entry(mkp->smk_known, osmack,
- &mkp->smk_rules);
+ mmay = smk_access_entry(mkp->smk_known, okp->smk_known,
+ &mkp->smk_rules);
if (mmay == -ENOENT) {
rc = -EACCES;
break;
@@ -1362,8 +1542,8 @@ static int smack_mmap_file(struct file *file,
* If there is a local entry it modifies the
* potential access, too.
*/
- tmay = smk_access_entry(mkp->smk_known, osmack,
- &tsp->smk_rules);
+ tmay = smk_access_entry(mkp->smk_known, okp->smk_known,
+ &tsp->smk_rules);
if (tmay != -ENOENT)
mmay &= tmay;
@@ -1390,12 +1570,11 @@ static int smack_mmap_file(struct file *file,
* Returns 0
* Further research may be required on this one.
*/
-static int smack_file_set_fowner(struct file *file)
+static void smack_file_set_fowner(struct file *file)
{
struct smack_known *skp = smk_of_current();
- file->f_security = skp->smk_known;
- return 0;
+ file->f_security = skp;
}
/**
@@ -1424,14 +1603,15 @@ static int smack_file_send_sigiotask(struct task_struct *tsk,
file = container_of(fown, struct file, f_owner);
/* we don't log here as rc can be overriden */
- skp = smk_find_entry(file->f_security);
- rc = smk_access(skp, tkp->smk_known, MAY_WRITE, NULL);
+ skp = file->f_security;
+ rc = smk_access(skp, tkp, MAY_WRITE, NULL);
+ rc = smk_bu_note("sigiotask", skp, tkp, MAY_WRITE, rc);
if (rc != 0 && has_capability(tsk, CAP_MAC_OVERRIDE))
rc = 0;
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
smk_ad_setfield_u_tsk(&ad, tsk);
- smack_log(file->f_security, tkp->smk_known, MAY_WRITE, rc, &ad);
+ smack_log(skp->smk_known, tkp->smk_known, MAY_WRITE, rc, &ad);
return rc;
}
@@ -1443,6 +1623,7 @@ static int smack_file_send_sigiotask(struct task_struct *tsk,
*/
static int smack_file_receive(struct file *file)
{
+ int rc;
int may = 0;
struct smk_audit_info ad;
@@ -1456,7 +1637,9 @@ static int smack_file_receive(struct file *file)
if (file->f_mode & FMODE_WRITE)
may |= MAY_WRITE;
- return smk_curacc(file->f_security, may, &ad);
+ rc = smk_curacc(file->f_security, may, &ad);
+ rc = smk_bu_file(file, may, rc);
+ return rc;
}
/**
@@ -1478,12 +1661,15 @@ static int smack_file_open(struct file *file, const struct cred *cred)
struct smk_audit_info ad;
int rc;
- if (smack_privileged(CAP_MAC_OVERRIDE))
+ if (smack_privileged(CAP_MAC_OVERRIDE)) {
+ file->f_security = isp->smk_inode;
return 0;
+ }
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
smk_ad_setfield_u_fs_path(&ad, file->f_path);
rc = smk_access(tsp->smk_task, isp->smk_inode, MAY_READ, &ad);
+ rc = smk_bu_credfile(cred, file, MAY_READ, rc);
if (rc == 0)
file->f_security = isp->smk_inode;
@@ -1622,7 +1808,7 @@ static int smack_kernel_create_files_as(struct cred *new,
struct inode_smack *isp = inode->i_security;
struct task_smack *tsp = new->security;
- tsp->smk_forked = smk_find_entry(isp->smk_inode);
+ tsp->smk_forked = isp->smk_inode;
tsp->smk_task = tsp->smk_forked;
return 0;
}
@@ -1640,10 +1826,13 @@ static int smk_curacc_on_task(struct task_struct *p, int access,
{
struct smk_audit_info ad;
struct smack_known *skp = smk_of_task(task_security(p));
+ int rc;
smk_ad_init(&ad, caller, LSM_AUDIT_DATA_TASK);
smk_ad_setfield_u_tsk(&ad, p);
- return smk_curacc(skp->smk_known, access, &ad);
+ rc = smk_curacc(skp, access, &ad);
+ rc = smk_bu_task(p, access, rc);
+ return rc;
}
/**
@@ -1797,6 +1986,7 @@ static int smack_task_kill(struct task_struct *p, struct siginfo *info,
struct smk_audit_info ad;
struct smack_known *skp;
struct smack_known *tkp = smk_of_task(task_security(p));
+ int rc;
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
smk_ad_setfield_u_tsk(&ad, p);
@@ -1804,15 +1994,20 @@ static int smack_task_kill(struct task_struct *p, struct siginfo *info,
* Sending a signal requires that the sender
* can write the receiver.
*/
- if (secid == 0)
- return smk_curacc(tkp->smk_known, MAY_WRITE, &ad);
+ if (secid == 0) {
+ rc = smk_curacc(tkp, MAY_WRITE, &ad);
+ rc = smk_bu_task(p, MAY_WRITE, rc);
+ return rc;
+ }
/*
* If the secid isn't 0 we're dealing with some USB IO
* specific behavior. This is not clean. For one thing
* we can't take privilege into account.
*/
skp = smack_from_secid(secid);
- return smk_access(skp, tkp->smk_known, MAY_WRITE, &ad);
+ rc = smk_access(skp, tkp, MAY_WRITE, &ad);
+ rc = smk_bu_note("USB signal", skp, tkp, MAY_WRITE, rc);
+ return rc;
}
/**
@@ -1846,7 +2041,7 @@ static void smack_task_to_inode(struct task_struct *p, struct inode *inode)
struct inode_smack *isp = inode->i_security;
struct smack_known *skp = smk_of_task(task_security(p));
- isp->smk_inode = skp->smk_known;
+ isp->smk_inode = skp;
}
/*
@@ -1904,7 +2099,7 @@ static void smack_sk_free_security(struct sock *sk)
*
* Returns the label of the far end or NULL if it's not special.
*/
-static char *smack_host_label(struct sockaddr_in *sip)
+static struct smack_known *smack_host_label(struct sockaddr_in *sip)
{
struct smk_netlbladdr *snp;
struct in_addr *siap = &sip->sin_addr;
@@ -1921,7 +2116,7 @@ static char *smack_host_label(struct sockaddr_in *sip)
if ((&snp->smk_host.sin_addr)->s_addr ==
(siap->s_addr & (&snp->smk_mask)->s_addr)) {
/* we have found the special CIPSO option */
- if (snp->smk_label == smack_cipso_option)
+ if (snp->smk_label == &smack_cipso_option)
return NULL;
return snp->smk_label;
}
@@ -1986,13 +2181,13 @@ static int smack_netlabel_send(struct sock *sk, struct sockaddr_in *sap)
struct smack_known *skp;
int rc;
int sk_lbl;
- char *hostsp;
+ struct smack_known *hkp;
struct socket_smack *ssp = sk->sk_security;
struct smk_audit_info ad;
rcu_read_lock();
- hostsp = smack_host_label(sap);
- if (hostsp != NULL) {
+ hkp = smack_host_label(sap);
+ if (hkp != NULL) {
#ifdef CONFIG_AUDIT
struct lsm_network_audit net;
@@ -2003,7 +2198,8 @@ static int smack_netlabel_send(struct sock *sk, struct sockaddr_in *sap)
#endif
sk_lbl = SMACK_UNLABELED_SOCKET;
skp = ssp->smk_out;
- rc = smk_access(skp, hostsp, MAY_WRITE, &ad);
+ rc = smk_access(skp, hkp, MAY_WRITE, &ad);
+ rc = smk_bu_note("IPv4 host check", skp, hkp, MAY_WRITE, rc);
} else {
sk_lbl = SMACK_CIPSO_SOCKET;
rc = 0;
@@ -2104,18 +2300,19 @@ static int smk_ipv6_port_check(struct sock *sk, struct sockaddr_in6 *address,
struct socket_smack *ssp = sk->sk_security;
struct smack_known *skp;
unsigned short port = 0;
- char *object;
+ struct smack_known *object;
struct smk_audit_info ad;
+ int rc;
#ifdef CONFIG_AUDIT
struct lsm_network_audit net;
#endif
if (act == SMK_RECEIVING) {
skp = smack_net_ambient;
- object = ssp->smk_in->smk_known;
+ object = ssp->smk_in;
} else {
skp = ssp->smk_out;
- object = smack_net_ambient->smk_known;
+ object = smack_net_ambient;
}
/*
@@ -2142,7 +2339,7 @@ static int smk_ipv6_port_check(struct sock *sk, struct sockaddr_in6 *address,
list_for_each_entry(spp, &smk_ipv6_port_list, list) {
if (spp->smk_port != port)
continue;
- object = spp->smk_in->smk_known;
+ object = spp->smk_in;
if (act == SMK_CONNECTING)
ssp->smk_packet = spp->smk_out;
break;
@@ -2159,7 +2356,9 @@ auditout:
else
ad.a.u.net->v6info.daddr = address->sin6_addr;
#endif
- return smk_access(skp, object, MAY_WRITE, &ad);
+ rc = smk_access(skp, object, MAY_WRITE, &ad);
+ rc = smk_bu_note("IPv6 port check", skp, object, MAY_WRITE, rc);
+ return rc;
}
/**
@@ -2191,7 +2390,7 @@ static int smack_inode_setsecurity(struct inode *inode, const char *name,
return -EINVAL;
if (strcmp(name, XATTR_SMACK_SUFFIX) == 0) {
- nsp->smk_inode = skp->smk_known;
+ nsp->smk_inode = skp;
nsp->smk_flags |= SMK_INODE_INSTANT;
return 0;
}
@@ -2333,7 +2532,7 @@ static int smack_msg_msg_alloc_security(struct msg_msg *msg)
{
struct smack_known *skp = smk_of_current();
- msg->security = skp->smk_known;
+ msg->security = skp;
return 0;
}
@@ -2354,9 +2553,9 @@ static void smack_msg_msg_free_security(struct msg_msg *msg)
*
* Returns a pointer to the smack value
*/
-static char *smack_of_shm(struct shmid_kernel *shp)
+static struct smack_known *smack_of_shm(struct shmid_kernel *shp)
{
- return (char *)shp->shm_perm.security;
+ return (struct smack_known *)shp->shm_perm.security;
}
/**
@@ -2370,7 +2569,7 @@ static int smack_shm_alloc_security(struct shmid_kernel *shp)
struct kern_ipc_perm *isp = &shp->shm_perm;
struct smack_known *skp = smk_of_current();
- isp->security = skp->smk_known;
+ isp->security = skp;
return 0;
}
@@ -2396,14 +2595,17 @@ static void smack_shm_free_security(struct shmid_kernel *shp)
*/
static int smk_curacc_shm(struct shmid_kernel *shp, int access)
{
- char *ssp = smack_of_shm(shp);
+ struct smack_known *ssp = smack_of_shm(shp);
struct smk_audit_info ad;
+ int rc;
#ifdef CONFIG_AUDIT
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_IPC);
ad.a.u.ipc_id = shp->shm_perm.id;
#endif
- return smk_curacc(ssp, access, &ad);
+ rc = smk_curacc(ssp, access, &ad);
+ rc = smk_bu_current("shm", ssp, access, rc);
+ return rc;
}
/**
@@ -2478,9 +2680,9 @@ static int smack_shm_shmat(struct shmid_kernel *shp, char __user *shmaddr,
*
* Returns a pointer to the smack value
*/
-static char *smack_of_sem(struct sem_array *sma)
+static struct smack_known *smack_of_sem(struct sem_array *sma)
{
- return (char *)sma->sem_perm.security;
+ return (struct smack_known *)sma->sem_perm.security;
}
/**
@@ -2494,7 +2696,7 @@ static int smack_sem_alloc_security(struct sem_array *sma)
struct kern_ipc_perm *isp = &sma->sem_perm;
struct smack_known *skp = smk_of_current();
- isp->security = skp->smk_known;
+ isp->security = skp;
return 0;
}
@@ -2520,14 +2722,17 @@ static void smack_sem_free_security(struct sem_array *sma)
*/
static int smk_curacc_sem(struct sem_array *sma, int access)
{
- char *ssp = smack_of_sem(sma);
+ struct smack_known *ssp = smack_of_sem(sma);
struct smk_audit_info ad;
+ int rc;
#ifdef CONFIG_AUDIT
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_IPC);
ad.a.u.ipc_id = sma->sem_perm.id;
#endif
- return smk_curacc(ssp, access, &ad);
+ rc = smk_curacc(ssp, access, &ad);
+ rc = smk_bu_current("sem", ssp, access, rc);
+ return rc;
}
/**
@@ -2613,7 +2818,7 @@ static int smack_msg_queue_alloc_security(struct msg_queue *msq)
struct kern_ipc_perm *kisp = &msq->q_perm;
struct smack_known *skp = smk_of_current();
- kisp->security = skp->smk_known;
+ kisp->security = skp;
return 0;
}
@@ -2634,11 +2839,11 @@ static void smack_msg_queue_free_security(struct msg_queue *msq)
* smack_of_msq - the smack pointer for the msq
* @msq: the object
*
- * Returns a pointer to the smack value
+ * Returns a pointer to the smack label entry
*/
-static char *smack_of_msq(struct msg_queue *msq)
+static struct smack_known *smack_of_msq(struct msg_queue *msq)
{
- return (char *)msq->q_perm.security;
+ return (struct smack_known *)msq->q_perm.security;
}
/**
@@ -2650,14 +2855,17 @@ static char *smack_of_msq(struct msg_queue *msq)
*/
static int smk_curacc_msq(struct msg_queue *msq, int access)
{
- char *msp = smack_of_msq(msq);
+ struct smack_known *msp = smack_of_msq(msq);
struct smk_audit_info ad;
+ int rc;
#ifdef CONFIG_AUDIT
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_IPC);
ad.a.u.ipc_id = msq->q_perm.id;
#endif
- return smk_curacc(msp, access, &ad);
+ rc = smk_curacc(msp, access, &ad);
+ rc = smk_bu_current("msq", msp, access, rc);
+ return rc;
}
/**
@@ -2750,15 +2958,18 @@ static int smack_msg_queue_msgrcv(struct msg_queue *msq, struct msg_msg *msg,
*/
static int smack_ipc_permission(struct kern_ipc_perm *ipp, short flag)
{
- char *isp = ipp->security;
+ struct smack_known *iskp = ipp->security;
int may = smack_flags_to_may(flag);
struct smk_audit_info ad;
+ int rc;
#ifdef CONFIG_AUDIT
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_IPC);
ad.a.u.ipc_id = ipp->id;
#endif
- return smk_curacc(isp, may, &ad);
+ rc = smk_curacc(iskp, may, &ad);
+ rc = smk_bu_current("svipc", iskp, may, rc);
+ return rc;
}
/**
@@ -2768,9 +2979,9 @@ static int smack_ipc_permission(struct kern_ipc_perm *ipp, short flag)
*/
static void smack_ipc_getsecid(struct kern_ipc_perm *ipp, u32 *secid)
{
- char *smack = ipp->security;
+ struct smack_known *iskp = ipp->security;
- *secid = smack_to_secid(smack);
+ *secid = iskp->smk_secid;
}
/**
@@ -2787,7 +2998,7 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
struct inode_smack *isp;
struct smack_known *skp;
struct smack_known *ckp = smk_of_current();
- char *final;
+ struct smack_known *final;
char trattr[TRANS_TRUE_SIZE];
int transflag = 0;
int rc;
@@ -2827,8 +3038,8 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
* so there's no opportunity to set the mount
* options.
*/
- sbsp->smk_root = smack_known_star.smk_known;
- sbsp->smk_default = smack_known_star.smk_known;
+ sbsp->smk_root = &smack_known_star;
+ sbsp->smk_default = &smack_known_star;
}
isp->smk_inode = sbsp->smk_root;
isp->smk_flags |= SMK_INODE_INSTANT;
@@ -2858,7 +3069,7 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
*
* Cgroupfs is special
*/
- final = smack_known_star.smk_known;
+ final = &smack_known_star;
break;
case DEVPTS_SUPER_MAGIC:
/*
@@ -2866,7 +3077,7 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
* Programs that change smack have to treat the
* pty with respect.
*/
- final = ckp->smk_known;
+ final = ckp;
break;
case PROC_SUPER_MAGIC:
/*
@@ -2880,7 +3091,7 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
	 * but watch out, because they're volatile,
* getting recreated on every reboot.
*/
- final = smack_known_star.smk_known;
+ final = &smack_known_star;
/*
* No break.
*
@@ -2899,7 +3110,7 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
* UNIX domain sockets use lower level socket data.
*/
if (S_ISSOCK(inode->i_mode)) {
- final = smack_known_star.smk_known;
+ final = &smack_known_star;
break;
}
/*
@@ -2916,7 +3127,7 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
dp = dget(opt_dentry);
skp = smk_fetch(XATTR_NAME_SMACK, inode, dp);
if (skp != NULL)
- final = skp->smk_known;
+ final = skp;
/*
* Transmuting directory
@@ -2965,7 +3176,7 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
}
if (final == NULL)
- isp->smk_inode = ckp->smk_known;
+ isp->smk_inode = ckp;
else
isp->smk_inode = final;
@@ -3090,9 +3301,13 @@ static int smack_unix_stream_connect(struct sock *sock,
smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
smk_ad_setfield_u_net_sk(&ad, other);
#endif
- rc = smk_access(skp, okp->smk_known, MAY_WRITE, &ad);
- if (rc == 0)
- rc = smk_access(okp, okp->smk_known, MAY_WRITE, NULL);
+ rc = smk_access(skp, okp, MAY_WRITE, &ad);
+ rc = smk_bu_note("UDS connect", skp, okp, MAY_WRITE, rc);
+ if (rc == 0) {
+ rc = smk_access(okp, skp, MAY_WRITE, NULL);
+ rc = smk_bu_note("UDS connect", okp, skp,
+ MAY_WRITE, rc);
+ }
}
/*
@@ -3118,8 +3333,8 @@ static int smack_unix_may_send(struct socket *sock, struct socket *other)
{
struct socket_smack *ssp = sock->sk->sk_security;
struct socket_smack *osp = other->sk->sk_security;
- struct smack_known *skp;
struct smk_audit_info ad;
+ int rc;
#ifdef CONFIG_AUDIT
struct lsm_network_audit net;
@@ -3131,8 +3346,9 @@ static int smack_unix_may_send(struct socket *sock, struct socket *other)
if (smack_privileged(CAP_MAC_OVERRIDE))
return 0;
- skp = ssp->smk_out;
- return smk_access(skp, osp->smk_in->smk_known, MAY_WRITE, &ad);
+ rc = smk_access(ssp->smk_out, osp->smk_in, MAY_WRITE, &ad);
+ rc = smk_bu_note("UDS send", ssp->smk_out, osp->smk_in, MAY_WRITE, rc);
+ return rc;
}
/**
@@ -3346,7 +3562,9 @@ static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
	 * This is the simplest possible security model
* for networking.
*/
- rc = smk_access(skp, ssp->smk_in->smk_known, MAY_WRITE, &ad);
+ rc = smk_access(skp, ssp->smk_in, MAY_WRITE, &ad);
+ rc = smk_bu_note("IPv4 delivery", skp, ssp->smk_in,
+ MAY_WRITE, rc);
if (rc != 0)
netlbl_skbuff_err(skb, rc, 0);
break;
@@ -3489,7 +3707,7 @@ static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb,
struct netlbl_lsm_secattr secattr;
struct sockaddr_in addr;
struct iphdr *hdr;
- char *hsp;
+ struct smack_known *hskp;
int rc;
struct smk_audit_info ad;
#ifdef CONFIG_AUDIT
@@ -3526,7 +3744,8 @@ static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb,
* Receiving a packet requires that the other end be able to write
* here. Read access is not required.
*/
- rc = smk_access(skp, ssp->smk_in->smk_known, MAY_WRITE, &ad);
+ rc = smk_access(skp, ssp->smk_in, MAY_WRITE, &ad);
+ rc = smk_bu_note("IPv4 connect", skp, ssp->smk_in, MAY_WRITE, rc);
if (rc != 0)
return rc;
@@ -3544,10 +3763,10 @@ static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb,
hdr = ip_hdr(skb);
addr.sin_addr.s_addr = hdr->saddr;
rcu_read_lock();
- hsp = smack_host_label(&addr);
+ hskp = smack_host_label(&addr);
rcu_read_unlock();
- if (hsp == NULL)
+ if (hskp == NULL)
rc = netlbl_req_setattr(req, &skp->smk_netlabel);
else
netlbl_req_delattr(req);
@@ -3599,7 +3818,7 @@ static int smack_key_alloc(struct key *key, const struct cred *cred,
{
struct smack_known *skp = smk_of_task(cred->security);
- key->security = skp->smk_known;
+ key->security = skp;
return 0;
}
@@ -3630,6 +3849,7 @@ static int smack_key_permission(key_ref_t key_ref,
struct smk_audit_info ad;
struct smack_known *tkp = smk_of_task(cred->security);
int request = 0;
+ int rc;
keyp = key_ref_to_ptr(key_ref);
if (keyp == NULL)
@@ -3654,7 +3874,9 @@ static int smack_key_permission(key_ref_t key_ref,
request = MAY_READ;
if (perm & (KEY_NEED_WRITE | KEY_NEED_LINK | KEY_NEED_SETATTR))
request = MAY_WRITE;
- return smk_access(tkp, keyp->security, request, &ad);
+ rc = smk_access(tkp, keyp->security, request, &ad);
+ rc = smk_bu_note("key access", tkp, keyp->security, request, rc);
+ return rc;
}
#endif /* CONFIG_KEYS */
@@ -3685,6 +3907,7 @@ static int smack_key_permission(key_ref_t key_ref,
*/
static int smack_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
{
+ struct smack_known *skp;
char **rule = (char **)vrule;
*rule = NULL;
@@ -3694,7 +3917,9 @@ static int smack_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
if (op != Audit_equal && op != Audit_not_equal)
return -EINVAL;
- *rule = smk_import(rulestr, 0);
+ skp = smk_import_entry(rulestr, 0);
+ if (skp)
+ *rule = skp->smk_known;
return 0;
}
@@ -3813,7 +4038,12 @@ static int smack_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
*/
static int smack_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
{
- *secid = smack_to_secid(secdata);
+ struct smack_known *skp = smk_find_entry(secdata);
+
+ if (skp)
+ *secid = skp->smk_secid;
+ else
+ *secid = 0;
return 0;
}
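Taken together, the smack_lsm.c hunks above make two repeating changes: label handles move from bare "char *" strings to struct smack_known pointers, and each access decision gains a matching smk_bu_*() call that records the result before it is returned; the new MAY_BRINGUP 'b' rule bit added in smackfs.c below belongs to the same bring-up feature. A minimal caller-side sketch of that pattern, using only the helpers visible in this diff; example_hook_check() itself is made up and the helper internals are not part of these hunks:

static int example_hook_check(struct smack_known *subject,
			      struct smack_known *object,
			      struct smk_audit_info *ad)
{
	int rc;

	rc = smk_access(subject, object, MAY_WRITE, ad);
	/* bring-up companion call added throughout this patch */
	rc = smk_bu_note("example hook", subject, object, MAY_WRITE, rc);
	return rc;
}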
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index 3c720ff10591..bce4e8f1b267 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -131,14 +131,17 @@ LIST_HEAD(smack_rule_list);
struct smack_parsed_rule {
struct smack_known *smk_subject;
- char *smk_object;
+ struct smack_known *smk_object;
int smk_access1;
int smk_access2;
};
static int smk_cipso_doi_value = SMACK_CIPSO_DOI_DEFAULT;
-const char *smack_cipso_option = SMACK_CIPSO_OPTION;
+struct smack_known smack_cipso_option = {
+ .smk_known = SMACK_CIPSO_OPTION,
+ .smk_secid = 0,
+};
/*
* Values for parsing cipso rules
@@ -304,6 +307,10 @@ static int smk_perm_from_str(const char *string)
case 'L':
perm |= MAY_LOCK;
break;
+ case 'b':
+ case 'B':
+ perm |= MAY_BRINGUP;
+ break;
default:
return perm;
}
@@ -335,7 +342,7 @@ static int smk_fill_rule(const char *subject, const char *object,
if (rule->smk_subject == NULL)
return -EINVAL;
- rule->smk_object = smk_import(object, len);
+ rule->smk_object = smk_import_entry(object, len);
if (rule->smk_object == NULL)
return -EINVAL;
} else {
@@ -355,7 +362,7 @@ static int smk_fill_rule(const char *subject, const char *object,
kfree(cp);
if (skp == NULL)
return -ENOENT;
- rule->smk_object = skp->smk_known;
+ rule->smk_object = skp;
}
rule->smk_access1 = smk_perm_from_str(access1);
@@ -594,13 +601,15 @@ static void smk_rule_show(struct seq_file *s, struct smack_rule *srp, int max)
* anything you read back.
*/
if (strlen(srp->smk_subject->smk_known) >= max ||
- strlen(srp->smk_object) >= max)
+ strlen(srp->smk_object->smk_known) >= max)
return;
if (srp->smk_access == 0)
return;
- seq_printf(s, "%s %s", srp->smk_subject->smk_known, srp->smk_object);
+ seq_printf(s, "%s %s",
+ srp->smk_subject->smk_known,
+ srp->smk_object->smk_known);
seq_putc(s, ' ');
@@ -616,6 +625,8 @@ static void smk_rule_show(struct seq_file *s, struct smack_rule *srp, int max)
seq_putc(s, 't');
if (srp->smk_access & MAY_LOCK)
seq_putc(s, 'l');
+ if (srp->smk_access & MAY_BRINGUP)
+ seq_putc(s, 'b');
seq_putc(s, '\n');
}
@@ -1067,7 +1078,7 @@ static int netlbladdr_seq_show(struct seq_file *s, void *v)
for (maskn = 0; temp_mask; temp_mask <<= 1, maskn++);
seq_printf(s, "%u.%u.%u.%u/%d %s\n",
- hp[0], hp[1], hp[2], hp[3], maskn, skp->smk_label);
+ hp[0], hp[1], hp[2], hp[3], maskn, skp->smk_label->smk_known);
return 0;
}
@@ -1147,10 +1158,10 @@ static void smk_netlbladdr_insert(struct smk_netlbladdr *new)
static ssize_t smk_write_netlbladdr(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- struct smk_netlbladdr *skp;
+ struct smk_netlbladdr *snp;
struct sockaddr_in newname;
char *smack;
- char *sp;
+ struct smack_known *skp;
char *data;
char *host = (char *)&newname.sin_addr.s_addr;
int rc;
@@ -1213,15 +1224,15 @@ static ssize_t smk_write_netlbladdr(struct file *file, const char __user *buf,
* If smack begins with '-', it is an option, don't import it
*/
if (smack[0] != '-') {
- sp = smk_import(smack, 0);
- if (sp == NULL) {
+ skp = smk_import_entry(smack, 0);
+ if (skp == NULL) {
rc = -EINVAL;
goto free_out;
}
} else {
/* check known options */
- if (strcmp(smack, smack_cipso_option) == 0)
- sp = (char *)smack_cipso_option;
+ if (strcmp(smack, smack_cipso_option.smk_known) == 0)
+ skp = &smack_cipso_option;
else {
rc = -EINVAL;
goto free_out;
@@ -1244,9 +1255,9 @@ static ssize_t smk_write_netlbladdr(struct file *file, const char __user *buf,
nsa = newname.sin_addr.s_addr;
/* try to find if the prefix is already in the list */
found = 0;
- list_for_each_entry_rcu(skp, &smk_netlbladdr_list, list) {
- if (skp->smk_host.sin_addr.s_addr == nsa &&
- skp->smk_mask.s_addr == mask.s_addr) {
+ list_for_each_entry_rcu(snp, &smk_netlbladdr_list, list) {
+ if (snp->smk_host.sin_addr.s_addr == nsa &&
+ snp->smk_mask.s_addr == mask.s_addr) {
found = 1;
break;
}
@@ -1254,26 +1265,26 @@ static ssize_t smk_write_netlbladdr(struct file *file, const char __user *buf,
smk_netlabel_audit_set(&audit_info);
if (found == 0) {
- skp = kzalloc(sizeof(*skp), GFP_KERNEL);
- if (skp == NULL)
+ snp = kzalloc(sizeof(*snp), GFP_KERNEL);
+ if (snp == NULL)
rc = -ENOMEM;
else {
rc = 0;
- skp->smk_host.sin_addr.s_addr = newname.sin_addr.s_addr;
- skp->smk_mask.s_addr = mask.s_addr;
- skp->smk_label = sp;
- smk_netlbladdr_insert(skp);
+ snp->smk_host.sin_addr.s_addr = newname.sin_addr.s_addr;
+ snp->smk_mask.s_addr = mask.s_addr;
+ snp->smk_label = skp;
+ smk_netlbladdr_insert(snp);
}
} else {
/* we delete the unlabeled entry, only if the previous label
* wasn't the special CIPSO option */
- if (skp->smk_label != smack_cipso_option)
+ if (snp->smk_label != &smack_cipso_option)
rc = netlbl_cfg_unlbl_static_del(&init_net, NULL,
- &skp->smk_host.sin_addr, &skp->smk_mask,
+ &snp->smk_host.sin_addr, &snp->smk_mask,
PF_INET, &audit_info);
else
rc = 0;
- skp->smk_label = sp;
+ snp->smk_label = skp;
}
/*
@@ -1281,10 +1292,10 @@ static ssize_t smk_write_netlbladdr(struct file *file, const char __user *buf,
* this host so that incoming packets get labeled.
* but only if we didn't get the special CIPSO option
*/
- if (rc == 0 && sp != smack_cipso_option)
+ if (rc == 0 && skp != &smack_cipso_option)
rc = netlbl_cfg_unlbl_static_add(&init_net, NULL,
- &skp->smk_host.sin_addr, &skp->smk_mask, PF_INET,
- smack_to_secid(skp->smk_label), &audit_info);
+ &snp->smk_host.sin_addr, &snp->smk_mask, PF_INET,
+ snp->smk_label->smk_secid, &audit_info);
if (rc == 0)
rc = count;
@@ -1677,7 +1688,7 @@ static ssize_t smk_write_onlycap(struct file *file, const char __user *buf,
if (smack_onlycap != NULL && smack_onlycap != skp)
return -EPERM;
- data = kzalloc(count, GFP_KERNEL);
+ data = kzalloc(count + 1, GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
@@ -1880,7 +1891,10 @@ static ssize_t smk_user_access(struct file *file, const char __user *buf,
else if (res != -ENOENT)
return -EINVAL;
- data[0] = res == 0 ? '1' : '0';
+ /*
+ * smk_access() can return a value > 0 in the "bringup" case.
+ */
+ data[0] = res >= 0 ? '1' : '0';
data[1] = '\0';
simple_transaction_set(file, 2);
@@ -2228,7 +2242,7 @@ static ssize_t smk_write_syslog(struct file *file, const char __user *buf,
if (!smack_privileged(CAP_MAC_ADMIN))
return -EPERM;
- data = kzalloc(count, GFP_KERNEL);
+ data = kzalloc(count + 1, GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
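Both smk_write_onlycap() and smk_write_syslog() now allocate count + 1 bytes, so the extra zero-filled byte terminates whatever gets copied in from user space before it is parsed as a string. The copy and parse code sits outside the visible hunks; the usual shape of such a write handler is sketched below (example_write() is made up, the kzalloc()/copy_from_user() idiom is the point):

static ssize_t example_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	char *data;

	data = kzalloc(count + 1, GFP_KERNEL);	/* extra byte stays 0: NUL terminator */
	if (data == NULL)
		return -ENOMEM;

	if (copy_from_user(data, buf, count) != 0) {
		kfree(data);
		return -EFAULT;
	}
	/* data can now be handed to strcmp()/smk_import_entry()-style parsing */
	kfree(data);
	return count;
}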
diff --git a/sound/core/misc.c b/sound/core/misc.c
index 30e027ecf4da..f2e8226c88fb 100644
--- a/sound/core/misc.c
+++ b/sound/core/misc.c
@@ -145,6 +145,8 @@ EXPORT_SYMBOL(snd_pci_quirk_lookup_id);
const struct snd_pci_quirk *
snd_pci_quirk_lookup(struct pci_dev *pci, const struct snd_pci_quirk *list)
{
+ if (!pci)
+ return NULL;
return snd_pci_quirk_lookup_id(pci->subsystem_vendor,
pci->subsystem_device,
list);
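The NULL check lets callers hand snd_pci_quirk_lookup() a device that has no PCI parent and simply get "no quirk" back instead of an oops. A hedged usage sketch; example_chip, its pci member and quirk_table are placeholders, only snd_pci_quirk_lookup() comes from the hunk above:

static void example_apply_quirks(struct example_chip *chip,
				 const struct snd_pci_quirk *quirk_table)
{
	const struct snd_pci_quirk *q;

	q = snd_pci_quirk_lookup(chip->pci, quirk_table);	/* chip->pci may be NULL */
	if (!q)
		return;	/* no PCI device, or no matching entry: keep defaults */
	/* ... otherwise apply q->value ... */
}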
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index 43932e8dce66..42ded997b223 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -215,6 +215,7 @@ static char *snd_pcm_format_names[] = {
FORMAT(G723_40_1B),
FORMAT(DSD_U8),
FORMAT(DSD_U16_LE),
+ FORMAT(DSD_U32_LE),
};
const char *snd_pcm_format_name(snd_pcm_format_t format)
@@ -698,6 +699,7 @@ int snd_pcm_new_stream(struct snd_pcm *pcm, int stream, int substream_count)
}
substream->group = &substream->self_group;
spin_lock_init(&substream->self_group.lock);
+ mutex_init(&substream->self_group.mutex);
INIT_LIST_HEAD(&substream->self_group.substreams);
list_add_tail(&substream->link_list, &substream->self_group.substreams);
atomic_set(&substream->mmap_count, 0);
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 0032278567ad..dfc28542a007 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -1113,18 +1113,20 @@ int snd_interval_list(struct snd_interval *i, unsigned int count,
EXPORT_SYMBOL(snd_interval_list);
-static int snd_interval_step(struct snd_interval *i, unsigned int min, unsigned int step)
+static int snd_interval_step(struct snd_interval *i, unsigned int step)
{
unsigned int n;
int changed = 0;
- n = (i->min - min) % step;
+ n = i->min % step;
if (n != 0 || i->openmin) {
i->min += step - n;
+ i->openmin = 0;
changed = 1;
}
- n = (i->max - min) % step;
+ n = i->max % step;
if (n != 0 || i->openmax) {
i->max -= n;
+ i->openmax = 0;
changed = 1;
}
if (snd_interval_checkempty(i)) {
@@ -1427,7 +1429,7 @@ static int snd_pcm_hw_rule_step(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
unsigned long step = (unsigned long) rule->private;
- return snd_interval_step(hw_param_interval(params, rule->var), 0, step);
+ return snd_interval_step(hw_param_interval(params, rule->var), step);
}
/**
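snd_interval_step() drops its min argument, which the single caller shown here always passed as 0, and now also clears openmin/openmax once it has pulled an endpoint onto the step grid. The arithmetic, restated outside the ALSA interval structs:

/* Restates the rounding above for a closed interval [min, max]. */
static void step_interval(unsigned int *min, unsigned int *max, unsigned int step)
{
	unsigned int n;

	n = *min % step;
	if (n != 0)
		*min += step - n;	/* e.g. min 44099, step 100 -> 44100 */

	n = *max % step;
	if (n != 0)
		*max -= n;		/* e.g. max 48050, step 100 -> 48000 */
}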
diff --git a/sound/core/pcm_misc.c b/sound/core/pcm_misc.c
index 2c6fd80e0bd1..ae7a0feb3b76 100644
--- a/sound/core/pcm_misc.c
+++ b/sound/core/pcm_misc.c
@@ -148,6 +148,10 @@ static struct pcm_format_data pcm_formats[(INT)SNDRV_PCM_FORMAT_LAST+1] = {
.width = 16, .phys = 16, .le = 1, .signd = 0,
.silence = { 0x69, 0x69 },
},
+ [SNDRV_PCM_FORMAT_DSD_U32_LE] = {
+ .width = 32, .phys = 32, .le = 1, .signd = 0,
+ .silence = { 0x69, 0x69, 0x69, 0x69 },
+ },
/* FIXME: the following three formats are not defined properly yet */
[SNDRV_PCM_FORMAT_MPEG] = {
.le = -1, .signd = -1,
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 8cd2f930ad0b..85fe1a216225 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -74,11 +74,68 @@ static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream);
*
*/
-DEFINE_RWLOCK(snd_pcm_link_rwlock);
-EXPORT_SYMBOL(snd_pcm_link_rwlock);
-
+static DEFINE_RWLOCK(snd_pcm_link_rwlock);
static DECLARE_RWSEM(snd_pcm_link_rwsem);
+void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
+{
+ if (substream->pcm->nonatomic) {
+ down_read(&snd_pcm_link_rwsem);
+ mutex_lock(&substream->self_group.mutex);
+ } else {
+ read_lock(&snd_pcm_link_rwlock);
+ spin_lock(&substream->self_group.lock);
+ }
+}
+EXPORT_SYMBOL_GPL(snd_pcm_stream_lock);
+
+void snd_pcm_stream_unlock(struct snd_pcm_substream *substream)
+{
+ if (substream->pcm->nonatomic) {
+ mutex_unlock(&substream->self_group.mutex);
+ up_read(&snd_pcm_link_rwsem);
+ } else {
+ spin_unlock(&substream->self_group.lock);
+ read_unlock(&snd_pcm_link_rwlock);
+ }
+}
+EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock);
+
+void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
+{
+ if (!substream->pcm->nonatomic)
+ local_irq_disable();
+ snd_pcm_stream_lock(substream);
+}
+EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq);
+
+void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream)
+{
+ snd_pcm_stream_unlock(substream);
+ if (!substream->pcm->nonatomic)
+ local_irq_enable();
+}
+EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irq);
+
+unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream)
+{
+ unsigned long flags = 0;
+ if (!substream->pcm->nonatomic)
+ local_irq_save(flags);
+ snd_pcm_stream_lock(substream);
+ return flags;
+}
+EXPORT_SYMBOL_GPL(_snd_pcm_stream_lock_irqsave);
+
+void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
+ unsigned long flags)
+{
+ snd_pcm_stream_unlock(substream);
+ if (!substream->pcm->nonatomic)
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irqrestore);
+
static inline mm_segment_t snd_enter_user(void)
{
mm_segment_t fs = get_fs();
@@ -727,9 +784,14 @@ static int snd_pcm_action_group(struct action_ops *ops,
int res = 0;
snd_pcm_group_for_each_entry(s, substream) {
- if (do_lock && s != substream)
- spin_lock_nested(&s->self_group.lock,
- SINGLE_DEPTH_NESTING);
+ if (do_lock && s != substream) {
+ if (s->pcm->nonatomic)
+ mutex_lock_nested(&s->self_group.mutex,
+ SINGLE_DEPTH_NESTING);
+ else
+ spin_lock_nested(&s->self_group.lock,
+ SINGLE_DEPTH_NESTING);
+ }
res = ops->pre_action(s, state);
if (res < 0)
goto _unlock;
@@ -755,8 +817,12 @@ static int snd_pcm_action_group(struct action_ops *ops,
if (do_lock) {
/* unlock streams */
snd_pcm_group_for_each_entry(s1, substream) {
- if (s1 != substream)
- spin_unlock(&s1->self_group.lock);
+ if (s1 != substream) {
+ if (s->pcm->nonatomic)
+ mutex_unlock(&s1->self_group.mutex);
+ else
+ spin_unlock(&s1->self_group.lock);
+ }
if (s1 == s) /* end */
break;
}
@@ -784,6 +850,27 @@ static int snd_pcm_action_single(struct action_ops *ops,
return res;
}
+/* call in mutex-protected context */
+static int snd_pcm_action_mutex(struct action_ops *ops,
+ struct snd_pcm_substream *substream,
+ int state)
+{
+ int res;
+
+ if (snd_pcm_stream_linked(substream)) {
+ if (!mutex_trylock(&substream->group->mutex)) {
+ mutex_unlock(&substream->self_group.mutex);
+ mutex_lock(&substream->group->mutex);
+ mutex_lock(&substream->self_group.mutex);
+ }
+ res = snd_pcm_action_group(ops, substream, state, 1);
+ mutex_unlock(&substream->group->mutex);
+ } else {
+ res = snd_pcm_action_single(ops, substream, state);
+ }
+ return res;
+}
+
/*
* Note: call with stream lock
*/
@@ -793,6 +880,9 @@ static int snd_pcm_action(struct action_ops *ops,
{
int res;
+ if (substream->pcm->nonatomic)
+ return snd_pcm_action_mutex(ops, substream, state);
+
if (snd_pcm_stream_linked(substream)) {
if (!spin_trylock(&substream->group->lock)) {
spin_unlock(&substream->self_group.lock);
@@ -807,6 +897,29 @@ static int snd_pcm_action(struct action_ops *ops,
return res;
}
+static int snd_pcm_action_lock_mutex(struct action_ops *ops,
+ struct snd_pcm_substream *substream,
+ int state)
+{
+ int res;
+
+ down_read(&snd_pcm_link_rwsem);
+ if (snd_pcm_stream_linked(substream)) {
+ mutex_lock(&substream->group->mutex);
+ mutex_lock_nested(&substream->self_group.mutex,
+ SINGLE_DEPTH_NESTING);
+ res = snd_pcm_action_group(ops, substream, state, 1);
+ mutex_unlock(&substream->self_group.mutex);
+ mutex_unlock(&substream->group->mutex);
+ } else {
+ mutex_lock(&substream->self_group.mutex);
+ res = snd_pcm_action_single(ops, substream, state);
+ mutex_unlock(&substream->self_group.mutex);
+ }
+ up_read(&snd_pcm_link_rwsem);
+ return res;
+}
+
/*
* Note: don't use any locks before
*/
@@ -816,6 +929,9 @@ static int snd_pcm_action_lock_irq(struct action_ops *ops,
{
int res;
+ if (substream->pcm->nonatomic)
+ return snd_pcm_action_lock_mutex(ops, substream, state);
+
read_lock_irq(&snd_pcm_link_rwlock);
if (snd_pcm_stream_linked(substream)) {
spin_lock(&substream->group->lock);
@@ -1634,7 +1750,8 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
down_write(&snd_pcm_link_rwsem);
write_lock_irq(&snd_pcm_link_rwlock);
if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN ||
- substream->runtime->status->state != substream1->runtime->status->state) {
+ substream->runtime->status->state != substream1->runtime->status->state ||
+ substream->pcm->nonatomic != substream1->pcm->nonatomic) {
res = -EBADFD;
goto _end;
}
@@ -1646,6 +1763,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
substream->group = group;
group = NULL;
spin_lock_init(&substream->group->lock);
+ mutex_init(&substream->group->mutex);
INIT_LIST_HEAD(&substream->group->substreams);
list_add_tail(&substream->link_list, &substream->group->substreams);
substream->group->count = 1;
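The new snd_pcm_stream_lock*() helpers pick between the old rwlock/spinlock pair and a rwsem/mutex pair based on the new pcm->nonatomic flag, and snd_pcm_link() now refuses to link an atomic substream with a non-atomic one. For a driver the opt-in is one flag at PCM creation time; a sketch (example_pcm_new() and the substream counts are made up, snd_pcm_new() is the existing API):

static int example_pcm_new(struct snd_card *card, struct snd_pcm **rpcm)
{
	struct snd_pcm *pcm;
	int err;

	err = snd_pcm_new(card, "example", 0, 1, 1, &pcm);
	if (err < 0)
		return err;
	/* ops now run under a mutex-based stream lock and may sleep */
	pcm->nonatomic = true;
	*rpcm = pcm;
	return 0;
}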
diff --git a/sound/drivers/vx/vx_core.c b/sound/drivers/vx/vx_core.c
index 83596891cde4..e8cc16993903 100644
--- a/sound/drivers/vx/vx_core.c
+++ b/sound/drivers/vx/vx_core.c
@@ -117,7 +117,7 @@ static int vx_reset_chk(struct vx_core *chip)
*
* returns 0 if successful, or a negative error code.
* the error code can be VX-specific, retrieved via vx_get_error().
- * NB: call with spinlock held!
+ * NB: call with mutex held!
*/
static int vx_transfer_end(struct vx_core *chip, int cmd)
{
@@ -155,7 +155,7 @@ static int vx_transfer_end(struct vx_core *chip, int cmd)
*
* returns 0 if successful, or a negative error code.
* the error code can be VX-specific, retrieved via vx_get_error().
- * NB: call with spinlock held!
+ * NB: call with mutex held!
*/
static int vx_read_status(struct vx_core *chip, struct vx_rmh *rmh)
{
@@ -236,7 +236,7 @@ static int vx_read_status(struct vx_core *chip, struct vx_rmh *rmh)
* returns 0 if successful, or a negative error code.
* the error code can be VX-specific, retrieved via vx_get_error().
*
- * this function doesn't call spinlock at all.
+ * this function doesn't call mutex lock at all.
*/
int vx_send_msg_nolock(struct vx_core *chip, struct vx_rmh *rmh)
{
@@ -337,7 +337,7 @@ int vx_send_msg_nolock(struct vx_core *chip, struct vx_rmh *rmh)
/*
- * vx_send_msg - send a DSP message with spinlock
+ * vx_send_msg - send a DSP message with mutex
* @rmh: the rmh record to send and receive
*
* returns 0 if successful, or a negative error code.
@@ -345,12 +345,11 @@ int vx_send_msg_nolock(struct vx_core *chip, struct vx_rmh *rmh)
*/
int vx_send_msg(struct vx_core *chip, struct vx_rmh *rmh)
{
- unsigned long flags;
int err;
- spin_lock_irqsave(&chip->lock, flags);
+ mutex_lock(&chip->lock);
err = vx_send_msg_nolock(chip, rmh);
- spin_unlock_irqrestore(&chip->lock, flags);
+ mutex_unlock(&chip->lock);
return err;
}
@@ -362,7 +361,7 @@ int vx_send_msg(struct vx_core *chip, struct vx_rmh *rmh)
* returns 0 if successful, or a negative error code.
* the error code can be VX-specific, retrieved via vx_get_error().
*
- * this function doesn't call spinlock at all.
+ * this function doesn't call mutex at all.
*
* unlike RMH, no command is sent to DSP.
*/
@@ -398,19 +397,18 @@ int vx_send_rih_nolock(struct vx_core *chip, int cmd)
/*
- * vx_send_rih - send an RIH with spinlock
+ * vx_send_rih - send an RIH with mutex
* @cmd: the command to send
*
* see vx_send_rih_nolock().
*/
int vx_send_rih(struct vx_core *chip, int cmd)
{
- unsigned long flags;
int err;
- spin_lock_irqsave(&chip->lock, flags);
+ mutex_lock(&chip->lock);
err = vx_send_rih_nolock(chip, cmd);
- spin_unlock_irqrestore(&chip->lock, flags);
+ mutex_unlock(&chip->lock);
return err;
}
@@ -482,30 +480,30 @@ static int vx_test_irq_src(struct vx_core *chip, unsigned int *ret)
int err;
vx_init_rmh(&chip->irq_rmh, CMD_TEST_IT);
- spin_lock(&chip->lock);
+ mutex_lock(&chip->lock);
err = vx_send_msg_nolock(chip, &chip->irq_rmh);
if (err < 0)
*ret = 0;
else
*ret = chip->irq_rmh.Stat[0];
- spin_unlock(&chip->lock);
+ mutex_unlock(&chip->lock);
return err;
}
/*
- * vx_interrupt - soft irq handler
+ * snd_vx_threaded_irq_handler - threaded irq handler
*/
-static void vx_interrupt(unsigned long private_data)
+irqreturn_t snd_vx_threaded_irq_handler(int irq, void *dev)
{
- struct vx_core *chip = (struct vx_core *) private_data;
+ struct vx_core *chip = dev;
unsigned int events;
if (chip->chip_status & VX_STAT_IS_STALE)
- return;
+ return IRQ_HANDLED;
if (vx_test_irq_src(chip, &events) < 0)
- return;
+ return IRQ_HANDLED;
#if 0
if (events & 0x000800)
@@ -519,7 +517,7 @@ static void vx_interrupt(unsigned long private_data)
*/
if (events & FATAL_DSP_ERROR) {
snd_printk(KERN_ERR "vx_core: fatal DSP error!!\n");
- return;
+ return IRQ_HANDLED;
}
/* The start on time code conditions are filled (ie the time code
@@ -534,8 +532,9 @@ static void vx_interrupt(unsigned long private_data)
/* update the pcm streams */
vx_pcm_update_intr(chip, events);
+ return IRQ_HANDLED;
}
-
+EXPORT_SYMBOL(snd_vx_threaded_irq_handler);
/**
* snd_vx_irq_handler - interrupt handler
@@ -548,8 +547,8 @@ irqreturn_t snd_vx_irq_handler(int irq, void *dev)
(chip->chip_status & VX_STAT_IS_STALE))
return IRQ_NONE;
if (! vx_test_and_ack(chip))
- tasklet_schedule(&chip->tq);
- return IRQ_HANDLED;
+ return IRQ_WAKE_THREAD;
+ return IRQ_NONE;
}
EXPORT_SYMBOL(snd_vx_irq_handler);
@@ -790,13 +789,11 @@ struct vx_core *snd_vx_create(struct snd_card *card, struct snd_vx_hardware *hw,
snd_printk(KERN_ERR "vx_core: no memory\n");
return NULL;
}
- spin_lock_init(&chip->lock);
- spin_lock_init(&chip->irq_lock);
+ mutex_init(&chip->lock);
chip->irq = -1;
chip->hw = hw;
chip->type = hw->type;
chip->ops = ops;
- tasklet_init(&chip->tq, vx_interrupt, (unsigned long)chip);
mutex_init(&chip->mixer_mutex);
chip->card = card;
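With chip->lock turned into a mutex and the tasklet removed, the interrupt path is split: the hard handler only acknowledges the chip and returns IRQ_WAKE_THREAD, while the new snd_vx_threaded_irq_handler() does the sleepable work. The request_irq() call lives in the card drivers rather than in these hunks; the registration they would need looks roughly like this (irq and chip are the card driver's own variables):

	err = request_threaded_irq(irq, snd_vx_irq_handler,
				   snd_vx_threaded_irq_handler,
				   IRQF_SHARED, KBUILD_MODNAME, chip);
	if (err < 0)
		return err;
	chip->irq = irq;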
diff --git a/sound/drivers/vx/vx_mixer.c b/sound/drivers/vx/vx_mixer.c
index c71b8d148d7f..3b6823fc0606 100644
--- a/sound/drivers/vx/vx_mixer.c
+++ b/sound/drivers/vx/vx_mixer.c
@@ -32,17 +32,15 @@
*/
static void vx_write_codec_reg(struct vx_core *chip, int codec, unsigned int data)
{
- unsigned long flags;
-
if (snd_BUG_ON(!chip->ops->write_codec))
return;
if (chip->chip_status & VX_STAT_IS_STALE)
return;
- spin_lock_irqsave(&chip->lock, flags);
+ mutex_lock(&chip->lock);
chip->ops->write_codec(chip, codec, data);
- spin_unlock_irqrestore(&chip->lock, flags);
+ mutex_unlock(&chip->lock);
}
/*
@@ -178,14 +176,12 @@ void vx_reset_codec(struct vx_core *chip, int cold_reset)
*/
static void vx_change_audio_source(struct vx_core *chip, int src)
{
- unsigned long flags;
-
if (chip->chip_status & VX_STAT_IS_STALE)
return;
- spin_lock_irqsave(&chip->lock, flags);
+ mutex_lock(&chip->lock);
chip->ops->change_audio_source(chip, src);
- spin_unlock_irqrestore(&chip->lock, flags);
+ mutex_unlock(&chip->lock);
}
diff --git a/sound/drivers/vx/vx_pcm.c b/sound/drivers/vx/vx_pcm.c
index deed5efff33c..11467272089e 100644
--- a/sound/drivers/vx/vx_pcm.c
+++ b/sound/drivers/vx/vx_pcm.c
@@ -229,7 +229,7 @@ static int vx_get_pipe_state(struct vx_core *chip, struct vx_pipe *pipe, int *st
vx_init_rmh(&rmh, CMD_PIPE_STATE);
vx_set_pipe_cmd_params(&rmh, pipe->is_capture, pipe->number, 0);
- err = vx_send_msg_nolock(chip, &rmh); /* no lock needed for trigger */
+ err = vx_send_msg(chip, &rmh);
if (! err)
*state = (rmh.Stat[0] & (1 << pipe->number)) ? 1 : 0;
return err;
@@ -280,7 +280,7 @@ static int vx_pipe_can_start(struct vx_core *chip, struct vx_pipe *pipe)
vx_set_pipe_cmd_params(&rmh, pipe->is_capture, pipe->number, 0);
rmh.Cmd[0] |= 1;
- err = vx_send_msg_nolock(chip, &rmh); /* no lock needed for trigger */
+ err = vx_send_msg(chip, &rmh);
if (! err) {
if (rmh.Stat[0])
err = 1;
@@ -300,7 +300,7 @@ static int vx_conf_pipe(struct vx_core *chip, struct vx_pipe *pipe)
if (pipe->is_capture)
rmh.Cmd[0] |= COMMAND_RECORD_MASK;
rmh.Cmd[1] = 1 << pipe->number;
- return vx_send_msg_nolock(chip, &rmh); /* no lock needed for trigger */
+ return vx_send_msg(chip, &rmh);
}
/*
@@ -311,7 +311,7 @@ static int vx_send_irqa(struct vx_core *chip)
struct vx_rmh rmh;
vx_init_rmh(&rmh, CMD_SEND_IRQA);
- return vx_send_msg_nolock(chip, &rmh); /* no lock needed for trigger */
+ return vx_send_msg(chip, &rmh);
}
@@ -389,7 +389,7 @@ static int vx_stop_pipe(struct vx_core *chip, struct vx_pipe *pipe)
struct vx_rmh rmh;
vx_init_rmh(&rmh, CMD_STOP_PIPE);
vx_set_pipe_cmd_params(&rmh, pipe->is_capture, pipe->number, 0);
- return vx_send_msg_nolock(chip, &rmh); /* no lock needed for trigger */
+ return vx_send_msg(chip, &rmh);
}
@@ -477,7 +477,7 @@ static int vx_start_stream(struct vx_core *chip, struct vx_pipe *pipe)
vx_init_rmh(&rmh, CMD_START_ONE_STREAM);
vx_set_stream_cmd_params(&rmh, pipe->is_capture, pipe->number);
vx_set_differed_time(chip, &rmh, pipe);
- return vx_send_msg_nolock(chip, &rmh); /* no lock needed for trigger */
+ return vx_send_msg(chip, &rmh);
}
@@ -492,7 +492,7 @@ static int vx_stop_stream(struct vx_core *chip, struct vx_pipe *pipe)
vx_init_rmh(&rmh, CMD_STOP_STREAM);
vx_set_stream_cmd_params(&rmh, pipe->is_capture, pipe->number);
- return vx_send_msg_nolock(chip, &rmh); /* no lock needed for trigger */
+ return vx_send_msg(chip, &rmh);
}
@@ -520,8 +520,6 @@ static struct snd_pcm_hardware vx_pcm_playback_hw = {
};
-static void vx_pcm_delayed_start(unsigned long arg);
-
/*
* vx_pcm_playback_open - open callback for playback
*/
@@ -553,7 +551,6 @@ static int vx_pcm_playback_open(struct snd_pcm_substream *subs)
pipe->references++;
pipe->substream = subs;
- tasklet_init(&pipe->start_tq, vx_pcm_delayed_start, (unsigned long)subs);
chip->playback_pipes[audio] = pipe;
runtime->hw = vx_pcm_playback_hw;
@@ -646,12 +643,12 @@ static int vx_pcm_playback_transfer_chunk(struct vx_core *chip,
/* we don't need irqsave here, because this function
* is called from either trigger callback or irq handler
*/
- spin_lock(&chip->lock);
+ mutex_lock(&chip->lock);
vx_pseudo_dma_write(chip, runtime, pipe, size);
err = vx_notify_end_of_buffer(chip, pipe);
/* disconnect the host, SIZE_HBUF command always switches to the stream mode */
vx_send_rih_nolock(chip, IRQ_CONNECT_STREAM_NEXT);
- spin_unlock(&chip->lock);
+ mutex_unlock(&chip->lock);
return err;
}
@@ -728,31 +725,6 @@ static void vx_pcm_playback_update(struct vx_core *chip,
}
/*
- * start the stream and pipe.
- * this function is called from tasklet, which is invoked by the trigger
- * START callback.
- */
-static void vx_pcm_delayed_start(unsigned long arg)
-{
- struct snd_pcm_substream *subs = (struct snd_pcm_substream *)arg;
- struct vx_core *chip = subs->pcm->private_data;
- struct vx_pipe *pipe = subs->runtime->private_data;
- int err;
-
- /* printk( KERN_DEBUG "DDDD tasklet delayed start jiffies = %ld\n", jiffies);*/
-
- if ((err = vx_start_stream(chip, pipe)) < 0) {
- snd_printk(KERN_ERR "vx: cannot start stream\n");
- return;
- }
- if ((err = vx_toggle_pipe(chip, pipe, 1)) < 0) {
- snd_printk(KERN_ERR "vx: cannot start pipe\n");
- return;
- }
- /* printk( KERN_DEBUG "dddd tasklet delayed start jiffies = %ld \n", jiffies);*/
-}
-
-/*
* vx_pcm_playback_trigger - trigger callback for playback
*/
static int vx_pcm_trigger(struct snd_pcm_substream *subs, int cmd)
@@ -769,11 +741,17 @@ static int vx_pcm_trigger(struct snd_pcm_substream *subs, int cmd)
case SNDRV_PCM_TRIGGER_RESUME:
if (! pipe->is_capture)
vx_pcm_playback_transfer(chip, subs, pipe, 2);
- /* FIXME:
- * we trigger the pipe using tasklet, so that the interrupts are
- * issued surely after the trigger is completed.
- */
- tasklet_schedule(&pipe->start_tq);
+ err = vx_start_stream(chip, pipe);
+ if (err < 0) {
+ pr_debug("vx: cannot start stream\n");
+ return err;
+ }
+ err = vx_toggle_pipe(chip, pipe, 1);
+ if (err < 0) {
+ pr_debug("vx: cannot start pipe\n");
+ vx_stop_stream(chip, pipe);
+ return err;
+ }
chip->pcm_running++;
pipe->running = 1;
break;
@@ -955,7 +933,6 @@ static int vx_pcm_capture_open(struct snd_pcm_substream *subs)
if (err < 0)
return err;
pipe->substream = subs;
- tasklet_init(&pipe->start_tq, vx_pcm_delayed_start, (unsigned long)subs);
chip->capture_pipes[audio] = pipe;
/* check if monitoring is needed */
@@ -1082,7 +1059,7 @@ static void vx_pcm_capture_update(struct vx_core *chip, struct snd_pcm_substream
count -= 3;
}
/* disconnect the host, SIZE_HBUF command always switches to the stream mode */
- vx_send_rih_nolock(chip, IRQ_CONNECT_STREAM_NEXT);
+ vx_send_rih(chip, IRQ_CONNECT_STREAM_NEXT);
/* read the last pending 6 bytes */
count = DMA_READ_ALIGN;
while (count > 0) {
@@ -1099,7 +1076,7 @@ static void vx_pcm_capture_update(struct vx_core *chip, struct snd_pcm_substream
_error:
/* disconnect the host, SIZE_HBUF command always switches to the stream mode */
- vx_send_rih_nolock(chip, IRQ_CONNECT_STREAM_NEXT);
+ vx_send_rih(chip, IRQ_CONNECT_STREAM_NEXT);
return;
}
@@ -1275,6 +1252,7 @@ int snd_vx_pcm_new(struct vx_core *chip)
pcm->private_data = chip;
pcm->private_free = snd_vx_pcm_free;
pcm->info_flags = 0;
+ pcm->nonatomic = true;
strcpy(pcm->name, chip->card->shortname);
chip->pcm[i] = pcm;
}
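Because snd_vx_pcm_new() above marks the PCM non-atomic, the trigger callback now runs in a sleepable context, so vx_pcm_trigger() can start the stream and pipe inline through the mutex-taking vx_send_msg() path instead of deferring to the deleted vx_pcm_delayed_start() tasklet. The start path from that hunk, factored out only to show the rollback ordering (example_start() is not a real function in this patch):

static int example_start(struct vx_core *chip, struct vx_pipe *pipe)
{
	int err;

	err = vx_start_stream(chip, pipe);	/* may sleep: takes chip->lock */
	if (err < 0)
		return err;

	err = vx_toggle_pipe(chip, pipe, 1);
	if (err < 0) {
		vx_stop_stream(chip, pipe);	/* undo the stream start */
		return err;
	}
	return 0;
}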
diff --git a/sound/drivers/vx/vx_uer.c b/sound/drivers/vx/vx_uer.c
index b0560fec6bba..ef0b40c0a594 100644
--- a/sound/drivers/vx/vx_uer.c
+++ b/sound/drivers/vx/vx_uer.c
@@ -60,9 +60,9 @@ static int vx_modify_board_inputs(struct vx_core *chip)
*/
static int vx_read_one_cbit(struct vx_core *chip, int index)
{
- unsigned long flags;
int val;
- spin_lock_irqsave(&chip->lock, flags);
+
+ mutex_lock(&chip->lock);
if (chip->type >= VX_TYPE_VXPOCKET) {
vx_outb(chip, CSUER, 1); /* read */
vx_outb(chip, RUER, index & XX_UER_CBITS_OFFSET_MASK);
@@ -72,7 +72,7 @@ static int vx_read_one_cbit(struct vx_core *chip, int index)
vx_outl(chip, RUER, index & XX_UER_CBITS_OFFSET_MASK);
val = (vx_inl(chip, RUER) >> 7) & 0x01;
}
- spin_unlock_irqrestore(&chip->lock, flags);
+ mutex_unlock(&chip->lock);
return val;
}
@@ -83,9 +83,8 @@ static int vx_read_one_cbit(struct vx_core *chip, int index)
*/
static void vx_write_one_cbit(struct vx_core *chip, int index, int val)
{
- unsigned long flags;
val = !!val; /* 0 or 1 */
- spin_lock_irqsave(&chip->lock, flags);
+ mutex_lock(&chip->lock);
if (vx_is_pcmcia(chip)) {
vx_outb(chip, CSUER, 0); /* write */
vx_outb(chip, RUER, (val << 7) | (index & XX_UER_CBITS_OFFSET_MASK));
@@ -93,7 +92,7 @@ static void vx_write_one_cbit(struct vx_core *chip, int index, int val)
vx_outl(chip, CSUER, 0); /* write */
vx_outl(chip, RUER, (val << 7) | (index & XX_UER_CBITS_OFFSET_MASK));
}
- spin_unlock_irqrestore(&chip->lock, flags);
+ mutex_unlock(&chip->lock);
}
/*
@@ -190,14 +189,12 @@ static int vx_calc_clock_from_freq(struct vx_core *chip, int freq)
*/
static void vx_change_clock_source(struct vx_core *chip, int source)
{
- unsigned long flags;
-
/* we mute DAC to prevent clicks */
vx_toggle_dac_mute(chip, 1);
- spin_lock_irqsave(&chip->lock, flags);
+ mutex_lock(&chip->lock);
chip->ops->set_clock_source(chip, source);
chip->clock_source = source;
- spin_unlock_irqrestore(&chip->lock, flags);
+ mutex_unlock(&chip->lock);
/* unmute */
vx_toggle_dac_mute(chip, 0);
}
@@ -209,11 +206,11 @@ static void vx_change_clock_source(struct vx_core *chip, int source)
void vx_set_internal_clock(struct vx_core *chip, unsigned int freq)
{
int clock;
- unsigned long flags;
+
/* Get real clock value */
clock = vx_calc_clock_from_freq(chip, freq);
snd_printdd(KERN_DEBUG "set internal clock to 0x%x from freq %d\n", clock, freq);
- spin_lock_irqsave(&chip->lock, flags);
+ mutex_lock(&chip->lock);
if (vx_is_pcmcia(chip)) {
vx_outb(chip, HIFREQ, (clock >> 8) & 0x0f);
vx_outb(chip, LOFREQ, clock & 0xff);
@@ -221,7 +218,7 @@ void vx_set_internal_clock(struct vx_core *chip, unsigned int freq)
vx_outl(chip, HIFREQ, (clock >> 8) & 0x0f);
vx_outl(chip, LOFREQ, clock & 0xff);
}
- spin_unlock_irqrestore(&chip->lock, flags);
+ mutex_unlock(&chip->lock);
}
diff --git a/sound/pci/au88x0/au88x0.c b/sound/pci/au88x0/au88x0.c
index afb1b44b741e..21ce31f636bc 100644
--- a/sound/pci/au88x0/au88x0.c
+++ b/sound/pci/au88x0/au88x0.c
@@ -48,10 +48,10 @@ static void vortex_fix_latency(struct pci_dev *vortex)
{
int rc;
if (!(rc = pci_write_config_byte(vortex, 0x40, 0xff))) {
- printk(KERN_INFO CARD_NAME
+ pr_info( CARD_NAME
": vortex latency is 0xff\n");
} else {
- printk(KERN_WARNING CARD_NAME
+ pr_warn( CARD_NAME
": could not set vortex latency: pci error 0x%x\n", rc);
}
}
@@ -70,10 +70,10 @@ static void vortex_fix_agp_bridge(struct pci_dev *via)
if (!(rc = pci_read_config_byte(via, 0x42, &value))
&& ((value & 0x10)
|| !(rc = pci_write_config_byte(via, 0x42, value | 0x10)))) {
- printk(KERN_INFO CARD_NAME
+ pr_info( CARD_NAME
": bridge config is 0x%x\n", value | 0x10);
} else {
- printk(KERN_WARNING CARD_NAME
+ pr_warn( CARD_NAME
": could not set vortex latency: pci error 0x%x\n", rc);
}
}
@@ -97,7 +97,7 @@ static void snd_vortex_workaround(struct pci_dev *vortex, int fix)
PCI_DEVICE_ID_AMD_FE_GATE_7007, NULL);
}
if (via) {
- printk(KERN_INFO CARD_NAME ": Activating latency workaround...\n");
+ pr_info( CARD_NAME ": Activating latency workaround...\n");
vortex_fix_latency(vortex);
vortex_fix_agp_bridge(via);
}
@@ -153,7 +153,7 @@ snd_vortex_create(struct snd_card *card, struct pci_dev *pci, vortex_t ** rchip)
return err;
if (pci_set_dma_mask(pci, DMA_BIT_MASK(32)) < 0 ||
pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(32)) < 0) {
- printk(KERN_ERR "error to set DMA mask\n");
+ pr_err( "error to set DMA mask\n");
pci_disable_device(pci);
return -ENXIO;
}
@@ -182,7 +182,7 @@ snd_vortex_create(struct snd_card *card, struct pci_dev *pci, vortex_t ** rchip)
chip->mmio = pci_ioremap_bar(pci, 0);
if (!chip->mmio) {
- printk(KERN_ERR "MMIO area remap failed.\n");
+ pr_err( "MMIO area remap failed.\n");
err = -ENOMEM;
goto ioremap_out;
}
@@ -191,14 +191,14 @@ snd_vortex_create(struct snd_card *card, struct pci_dev *pci, vortex_t ** rchip)
* This must be done before we do request_irq otherwise we can get spurious
* interrupts that we do not handle properly and make a mess of things */
if ((err = vortex_core_init(chip)) != 0) {
- printk(KERN_ERR "hw core init failed\n");
+ pr_err( "hw core init failed\n");
goto core_out;
}
if ((err = request_irq(pci->irq, vortex_interrupt,
IRQF_SHARED, KBUILD_MODNAME,
chip)) != 0) {
- printk(KERN_ERR "cannot grab irq\n");
+ pr_err( "cannot grab irq\n");
goto irq_out;
}
chip->irq = pci->irq;
@@ -342,10 +342,10 @@ snd_vortex_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
chip->rev = pci->revision;
#ifdef CHIP_AU8830
if ((chip->rev) != 0xfe && (chip->rev) != 0xfa) {
- printk(KERN_ALERT
+ pr_alert(
"vortex: The revision (%x) of your card has not been seen before.\n",
chip->rev);
- printk(KERN_ALERT
+ pr_alert(
"vortex: Please email the results of 'lspci -vv' to openvortex-dev@nongnu.org.\n");
snd_card_free(card);
err = -ENODEV;
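This and the remaining au88x0 hunks are a mechanical conversion from printk(KERN_LEVEL ...) to the matching pr_err()/pr_warn()/pr_info()/pr_alert() calls; the message text, including the CARD_NAME and "vortex:" prefixes, stays unchanged. A pr_fmt() definition is the usual follow-up that makes such prefixes automatic, but it is not part of this patch; as an illustration only (CARD_NAME is the driver's existing macro):

/* Hypothetical follow-up, not in this patch: pr_fmt() must precede the includes. */
#define pr_fmt(fmt) CARD_NAME ": " fmt
#include <linux/printk.h>

static void example_report(int rc)
{
	/* the prefix now comes from pr_fmt(), not from each format string */
	pr_err("could not set vortex latency: pci error 0x%x\n", rc);
}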
diff --git a/sound/pci/au88x0/au88x0_a3d.c b/sound/pci/au88x0/au88x0_a3d.c
index aad831acbb17..30f760e3d2c0 100644
--- a/sound/pci/au88x0/au88x0_a3d.c
+++ b/sound/pci/au88x0/au88x0_a3d.c
@@ -463,7 +463,7 @@ static void a3dsrc_ZeroSliceIO(a3dsrc_t * a)
static void a3dsrc_ZeroState(a3dsrc_t * a)
{
/*
- printk(KERN_DEBUG "vortex: ZeroState slice: %d, source %d\n",
+ pr_debug( "vortex: ZeroState slice: %d, source %d\n",
a->slice, a->source);
*/
a3dsrc_SetAtmosState(a, 0, 0, 0, 0);
@@ -489,7 +489,7 @@ static void a3dsrc_ZeroStateA3D(a3dsrc_t * a)
int i, var, var2;
if ((a->vortex) == NULL) {
- printk(KERN_ERR "vortex: ZeroStateA3D: ERROR: a->vortex is NULL\n");
+ pr_err( "vortex: ZeroStateA3D: ERROR: a->vortex is NULL\n");
return;
}
@@ -628,14 +628,14 @@ static void vortex_Vort3D_connect(vortex_t * v, int en)
v->mixxtlk[0] =
vortex_adb_checkinout(v, v->fixed_res, en, VORTEX_RESOURCE_MIXIN);
if (v->mixxtlk[0] < 0) {
- printk
+ pr_warn
("vortex: vortex_Vort3D: ERROR: not enough free mixer resources.\n");
return;
}
v->mixxtlk[1] =
vortex_adb_checkinout(v, v->fixed_res, en, VORTEX_RESOURCE_MIXIN);
if (v->mixxtlk[1] < 0) {
- printk
+ pr_warn
("vortex: vortex_Vort3D: ERROR: not enough free mixer resources.\n");
return;
}
@@ -679,7 +679,7 @@ static void vortex_Vort3D_connect(vortex_t * v, int en)
static void vortex_Vort3D_InitializeSource(a3dsrc_t * a, int en)
{
if (a->vortex == NULL) {
- printk
+ pr_warn
("vortex: Vort3D_InitializeSource: A3D source not initialized\n");
return;
}
diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c
index ae59dbaa53d9..72e81286b70e 100644
--- a/sound/pci/au88x0/au88x0_core.c
+++ b/sound/pci/au88x0/au88x0_core.c
@@ -285,7 +285,7 @@ vortex_mixer_addWTD(vortex_t * vortex, unsigned char mix, unsigned char ch)
temp = hwread(vortex->mmio, prev);
//printk(KERN_INFO "vortex: mixAddWTD: while addr=%x, val=%x\n", prev, temp);
if ((++lifeboat) > 0xf) {
- printk(KERN_ERR
+ pr_err(
"vortex_mixer_addWTD: lifeboat overflow\n");
return 0;
}
@@ -303,7 +303,7 @@ vortex_mixer_delWTD(vortex_t * vortex, unsigned char mix, unsigned char ch)
eax = hwread(vortex->mmio, VORTEX_MIXER_SR);
if (((1 << ch) & eax) == 0) {
- printk(KERN_ERR "mix ALARM %x\n", eax);
+ pr_err( "mix ALARM %x\n", eax);
return 0;
}
ebp = VORTEX_MIXER_CHNBASE + (ch << 2);
@@ -324,7 +324,7 @@ vortex_mixer_delWTD(vortex_t * vortex, unsigned char mix, unsigned char ch)
//printk(KERN_INFO "vortex: mixdelWTD: 1 addr=%x, val=%x, src=%x\n", ebx, edx, src);
while ((edx & 0xf) != mix) {
if ((esi) > 0xf) {
- printk(KERN_ERR
+ pr_err(
"vortex: mixdelWTD: error lifeboat overflow\n");
return 0;
}
@@ -492,7 +492,7 @@ vortex_src_persist_convratio(vortex_t * vortex, unsigned char src, int ratio)
hwwrite(vortex->mmio, VORTEX_SRC_CONVRATIO + (src << 2), ratio);
temp = hwread(vortex->mmio, VORTEX_SRC_CONVRATIO + (src << 2));
if ((++lifeboat) > 0x9) {
- printk(KERN_ERR "Vortex: Src cvr fail\n");
+ pr_err( "Vortex: Src cvr fail\n");
break;
}
}
@@ -545,7 +545,7 @@ vortex_src_checkratio(vortex_t * vortex, unsigned char src,
hwwrite(vortex->mmio, VORTEX_SRC_CONVRATIO + (src << 2), desired_ratio);
if ((lifeboat++) > 15) {
- printk(KERN_ERR "Vortex: could not set src-%d from %d to %d\n",
+ pr_err( "Vortex: could not set src-%d from %d to %d\n",
src, hw_ratio, desired_ratio);
break;
}
@@ -684,7 +684,7 @@ vortex_src_addWTD(vortex_t * vortex, unsigned char src, unsigned char ch)
temp = hwread(vortex->mmio, prev);
//printk(KERN_INFO "vortex: srcAddWTD: while addr=%x, val=%x\n", prev, temp);
if ((++lifeboat) > 0xf) {
- printk(KERN_ERR
+ pr_err(
"vortex_src_addWTD: lifeboat overflow\n");
return 0;
}
@@ -703,7 +703,7 @@ vortex_src_delWTD(vortex_t * vortex, unsigned char src, unsigned char ch)
eax = hwread(vortex->mmio, VORTEX_SRCBLOCK_SR);
if (((1 << ch) & eax) == 0) {
- printk(KERN_ERR "src alarm\n");
+ pr_err( "src alarm\n");
return 0;
}
ebp = VORTEX_SRC_CHNBASE + (ch << 2);
@@ -724,7 +724,7 @@ vortex_src_delWTD(vortex_t * vortex, unsigned char src, unsigned char ch)
//printk(KERN_INFO "vortex: srcdelWTD: 1 addr=%x, val=%x, src=%x\n", ebx, edx, src);
while ((edx & 0xf) != src) {
if ((esi) > 0xf) {
- printk
+ pr_warn
("vortex: srcdelWTD: error, lifeboat overflow\n");
return 0;
}
@@ -819,7 +819,7 @@ vortex_fifo_setadbctrl(vortex_t * vortex, int fifo, int stereo, int priority,
do {
temp = hwread(vortex->mmio, VORTEX_FIFO_ADBCTRL + (fifo << 2));
if (lifeboat++ > 0xbb8) {
- printk(KERN_ERR
+ pr_err(
"Vortex: vortex_fifo_setadbctrl fail\n");
break;
}
@@ -915,7 +915,7 @@ vortex_fifo_setwtctrl(vortex_t * vortex, int fifo, int ctrl, int priority,
do {
temp = hwread(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2));
if (lifeboat++ > 0xbb8) {
- printk(KERN_ERR "Vortex: vortex_fifo_setwtctrl fail\n");
+ pr_err( "Vortex: vortex_fifo_setwtctrl fail\n");
break;
}
}
@@ -970,7 +970,7 @@ vortex_fifo_setwtctrl(vortex_t * vortex, int fifo, int ctrl, int priority,
do {
temp = hwread(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2));
if (lifeboat++ > 0xbb8) {
- printk(KERN_ERR "Vortex: vortex_fifo_setwtctrl fail (hanging)\n");
+ pr_err( "Vortex: vortex_fifo_setwtctrl fail (hanging)\n");
break;
}
} while ((temp & FIFO_RDONLY)&&(temp & FIFO_VALID)&&(temp != 0xFFFFFFFF));
@@ -1042,7 +1042,7 @@ static void vortex_fifo_init(vortex_t * vortex)
for (x = NR_ADB - 1; x >= 0; x--) {
hwwrite(vortex->mmio, addr, (FIFO_U0 | FIFO_U1));
if (hwread(vortex->mmio, addr) != (FIFO_U0 | FIFO_U1))
- printk(KERN_ERR "bad adb fifo reset!");
+ pr_err( "bad adb fifo reset!");
vortex_fifo_clearadbdata(vortex, x, FIFO_SIZE);
addr -= 4;
}
@@ -1053,7 +1053,7 @@ static void vortex_fifo_init(vortex_t * vortex)
for (x = NR_WT - 1; x >= 0; x--) {
hwwrite(vortex->mmio, addr, FIFO_U0);
if (hwread(vortex->mmio, addr) != FIFO_U0)
- printk(KERN_ERR
+ pr_err(
"bad wt fifo reset (0x%08x, 0x%08x)!\n",
addr, hwread(vortex->mmio, addr));
vortex_fifo_clearwtdata(vortex, x, FIFO_SIZE);
@@ -1136,7 +1136,7 @@ vortex_adbdma_setbuffers(vortex_t * vortex, int adbdma,
break;
}
/*
- printk(KERN_DEBUG "vortex: cfg0 = 0x%x\nvortex: cfg1=0x%x\n",
+ pr_debug( "vortex: cfg0 = 0x%x\nvortex: cfg1=0x%x\n",
dma->cfg0, dma->cfg1);
*/
hwwrite(vortex->mmio, VORTEX_ADBDMA_BUFCFG0 + (adbdma << 3), dma->cfg0);
@@ -1213,7 +1213,7 @@ static int vortex_adbdma_bufshift(vortex_t * vortex, int adbdma)
if (dma->period_virt >= dma->nr_periods)
dma->period_virt -= dma->nr_periods;
if (delta != 1)
- printk(KERN_INFO "vortex: %d virt=%d, real=%d, delta=%d\n",
+ pr_info( "vortex: %d virt=%d, real=%d, delta=%d\n",
adbdma, dma->period_virt, dma->period_real, delta);
return delta;
@@ -1482,7 +1482,7 @@ static int vortex_wtdma_bufshift(vortex_t * vortex, int wtdma)
dma->period_real = page;
if (delta != 1)
- printk(KERN_WARNING "vortex: wt virt = %d, delta = %d\n",
+ pr_warn( "vortex: wt virt = %d, delta = %d\n",
dma->period_virt, delta);
return delta;
@@ -1667,7 +1667,7 @@ vortex_adb_addroutes(vortex_t * vortex, unsigned char channel,
hwread(vortex->mmio,
VORTEX_ADB_RTBASE + (temp << 2)) & ADB_MASK;
if ((lifeboat++) > ADB_MASK) {
- printk(KERN_ERR
+ pr_err(
"vortex_adb_addroutes: unending route! 0x%x\n",
*route);
return;
@@ -1703,7 +1703,7 @@ vortex_adb_delroutes(vortex_t * vortex, unsigned char channel,
hwread(vortex->mmio,
VORTEX_ADB_RTBASE + (prev << 2)) & ADB_MASK;
if (((lifeboat++) > ADB_MASK) || (temp == ADB_MASK)) {
- printk(KERN_ERR
+ pr_err(
"vortex_adb_delroutes: route not found! 0x%x\n",
route0);
return;
@@ -1967,7 +1967,7 @@ vortex_connect_codecplay(vortex_t * vortex, int en, unsigned char mixers[])
ADB_CODECOUT(0 + 4));
vortex_connection_mix_adb(vortex, en, 0x11, mixers[3],
ADB_CODECOUT(1 + 4));
- /* printk(KERN_DEBUG "SDAC detected "); */
+ /* pr_debug( "SDAC detected "); */
}
#else
// Use plain direct output to codec.
@@ -2022,7 +2022,7 @@ vortex_adb_checkinout(vortex_t * vortex, int resmap[], int out, int restype)
else
vortex->dma_adb[i].resources[restype] |= (1 << i);
/*
- printk(KERN_DEBUG
+ pr_debug(
"vortex: ResManager: type %d out %d\n",
restype, i);
*/
@@ -2037,7 +2037,7 @@ vortex_adb_checkinout(vortex_t * vortex, int resmap[], int out, int restype)
if (resmap[restype] & (1 << i)) {
resmap[restype] &= ~(1 << i);
/*
- printk(KERN_DEBUG
+ pr_debug(
"vortex: ResManager: type %d in %d\n",
restype, i);
*/
@@ -2045,7 +2045,7 @@ vortex_adb_checkinout(vortex_t * vortex, int resmap[], int out, int restype)
}
}
}
- printk(KERN_ERR "vortex: FATAL: ResManager: resource type %d exhausted.\n", restype);
+ pr_err( "vortex: FATAL: ResManager: resource type %d exhausted.\n", restype);
return -ENOMEM;
}
@@ -2173,7 +2173,7 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir,
memset(stream->resources, 0,
sizeof(unsigned char) *
VORTEX_RESOURCE_LAST);
- printk(KERN_ERR "vortex: out of A3D sources. Sorry\n");
+ pr_err( "vortex: out of A3D sources. Sorry\n");
return -EBUSY;
}
/* (De)Initialize A3D hardware source. */
@@ -2421,7 +2421,7 @@ static irqreturn_t vortex_interrupt(int irq, void *dev_id)
hwread(vortex->mmio, VORTEX_IRQ_SOURCE);
// Is at least one IRQ flag set?
if (source == 0) {
- printk(KERN_ERR "vortex: missing irq source\n");
+ pr_err( "vortex: missing irq source\n");
return IRQ_NONE;
}
@@ -2429,19 +2429,19 @@ static irqreturn_t vortex_interrupt(int irq, void *dev_id)
// Attend every interrupt source.
if (unlikely(source & IRQ_ERR_MASK)) {
if (source & IRQ_FATAL) {
- printk(KERN_ERR "vortex: IRQ fatal error\n");
+ pr_err( "vortex: IRQ fatal error\n");
}
if (source & IRQ_PARITY) {
- printk(KERN_ERR "vortex: IRQ parity error\n");
+ pr_err( "vortex: IRQ parity error\n");
}
if (source & IRQ_REG) {
- printk(KERN_ERR "vortex: IRQ reg error\n");
+ pr_err( "vortex: IRQ reg error\n");
}
if (source & IRQ_FIFO) {
- printk(KERN_ERR "vortex: IRQ fifo error\n");
+ pr_err( "vortex: IRQ fifo error\n");
}
if (source & IRQ_DMA) {
- printk(KERN_ERR "vortex: IRQ dma error\n");
+ pr_err( "vortex: IRQ dma error\n");
}
handled = 1;
}
@@ -2489,7 +2489,7 @@ static irqreturn_t vortex_interrupt(int irq, void *dev_id)
}
if (!handled) {
- printk(KERN_ERR "vortex: unknown irq source %x\n", source);
+ pr_err("vortex: unknown irq source %x\n", source);
}
return IRQ_RETVAL(handled);
}
@@ -2546,7 +2546,7 @@ vortex_codec_write(struct snd_ac97 * codec, unsigned short addr, unsigned short
while (!(hwread(card->mmio, VORTEX_CODEC_CTRL) & 0x100)) {
udelay(100);
if (lifeboat++ > POLL_COUNT) {
- printk(KERN_ERR "vortex: ac97 codec stuck busy\n");
+ pr_err("vortex: ac97 codec stuck busy\n");
return;
}
}
@@ -2572,7 +2572,7 @@ static unsigned short vortex_codec_read(struct snd_ac97 * codec, unsigned short
while (!(hwread(card->mmio, VORTEX_CODEC_CTRL) & 0x100)) {
udelay(100);
if (lifeboat++ > POLL_COUNT) {
- printk(KERN_ERR "vortex: ac97 codec stuck busy\n");
+ pr_err("vortex: ac97 codec stuck busy\n");
return 0xffff;
}
}
@@ -2586,7 +2586,7 @@ static unsigned short vortex_codec_read(struct snd_ac97 * codec, unsigned short
udelay(100);
data = hwread(card->mmio, VORTEX_CODEC_IO);
if (lifeboat++ > POLL_COUNT) {
- printk(KERN_ERR "vortex: ac97 address never arrived\n");
+ pr_err("vortex: ac97 address never arrived\n");
return 0xffff;
}
} while ((data & VORTEX_CODEC_ADDMASK) !=
@@ -2683,7 +2683,7 @@ static void vortex_spdif_init(vortex_t * vortex, int spdif_sr, int spdif_mode)
static int vortex_core_init(vortex_t *vortex)
{
- printk(KERN_INFO "Vortex: init.... ");
+ pr_info("Vortex: init.... ");
/* Hardware Init. */
hwwrite(vortex->mmio, VORTEX_CTRL, 0xffffffff);
msleep(5);
@@ -2728,7 +2728,7 @@ static int vortex_core_init(vortex_t *vortex)
//vortex_enable_timer_int(vortex);
//vortex_disable_timer_int(vortex);
- printk(KERN_INFO "done.\n");
+ pr_info("done.\n");
spin_lock_init(&vortex->lock);
return 0;
@@ -2737,7 +2737,7 @@ static int vortex_core_init(vortex_t *vortex)
static int vortex_core_shutdown(vortex_t * vortex)
{
- printk(KERN_INFO "Vortex: shutdown...");
+ pr_info("Vortex: shutdown...");
#ifndef CHIP_AU8820
vortex_eq_free(vortex);
vortex_Vort3D_disable(vortex);
@@ -2759,7 +2759,7 @@ static int vortex_core_shutdown(vortex_t * vortex)
msleep(5);
hwwrite(vortex->mmio, VORTEX_IRQ_SOURCE, 0xffff);
- printk(KERN_INFO "done.\n");
+ pr_info("done.\n");
return 0;
}
@@ -2793,7 +2793,7 @@ static int vortex_alsafmt_aspfmt(int alsafmt)
break;
default:
fmt = 0x8;
- printk(KERN_ERR "vortex: format unsupported %d\n", alsafmt);
+ pr_err("vortex: format unsupported %d\n", alsafmt);
break;
}
return fmt;
diff --git a/sound/pci/au88x0/au88x0_eq.c b/sound/pci/au88x0/au88x0_eq.c
index e7220533ecfc..9404ba73eaf6 100644
--- a/sound/pci/au88x0/au88x0_eq.c
+++ b/sound/pci/au88x0/au88x0_eq.c
@@ -845,7 +845,7 @@ snd_vortex_peaks_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *u
vortex_Eqlzr_GetAllPeaks(vortex, peaks, &count);
if (count != 20) {
- printk(KERN_ERR "vortex: peak count error 20 != %d \n", count);
+ pr_err("vortex: peak count error 20 != %d\n", count);
return -1;
}
for (i = 0; i < 20; i++)
diff --git a/sound/pci/au88x0/au88x0_game.c b/sound/pci/au88x0/au88x0_game.c
index 280f86de2230..72daf6cf8169 100644
--- a/sound/pci/au88x0/au88x0_game.c
+++ b/sound/pci/au88x0/au88x0_game.c
@@ -98,7 +98,7 @@ static int vortex_gameport_register(vortex_t *vortex)
vortex->gameport = gp = gameport_allocate_port();
if (!gp) {
- printk(KERN_ERR "vortex: cannot allocate memory for gameport\n");
+ pr_err("vortex: cannot allocate memory for gameport\n");
return -ENOMEM;
}
diff --git a/sound/pci/au88x0/au88x0_mpu401.c b/sound/pci/au88x0/au88x0_mpu401.c
index 29e5945eef60..328c1943c0c3 100644
--- a/sound/pci/au88x0/au88x0_mpu401.c
+++ b/sound/pci/au88x0/au88x0_mpu401.c
@@ -73,7 +73,7 @@ static int snd_vortex_midi(vortex_t *vortex)
/* Check if anything is OK. */
temp = hwread(vortex->mmio, VORTEX_MIDI_DATA);
if (temp != MPU401_ACK /*0xfe */ ) {
- printk(KERN_ERR "midi port doesn't acknowledge!\n");
+ pr_err("midi port doesn't acknowledge!\n");
return -ENODEV;
}
/* Enable MPU401 interrupts. */
diff --git a/sound/pci/au88x0/au88x0_pcm.c b/sound/pci/au88x0/au88x0_pcm.c
index 9fb03b4ea925..5adc6b92ffab 100644
--- a/sound/pci/au88x0/au88x0_pcm.c
+++ b/sound/pci/au88x0/au88x0_pcm.c
@@ -227,11 +227,11 @@ snd_vortex_pcm_hw_params(struct snd_pcm_substream *substream,
err =
snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
if (err < 0) {
- printk(KERN_ERR "Vortex: pcm page alloc failed!\n");
+ pr_err("Vortex: pcm page alloc failed!\n");
return err;
}
/*
- printk(KERN_INFO "Vortex: periods %d, period_bytes %d, channels = %d\n", params_periods(hw_params),
+ pr_info("Vortex: periods %d, period_bytes %d, channels = %d\n", params_periods(hw_params),
params_period_bytes(hw_params), params_channels(hw_params));
*/
spin_lock_irq(&chip->lock);
@@ -371,7 +371,7 @@ static int snd_vortex_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
}
#ifndef CHIP_AU8810
else {
- printk(KERN_INFO "vortex: wt start %d\n", dma);
+ pr_info("vortex: wt start %d\n", dma);
vortex_wtdma_startfifo(chip, dma);
}
#endif
@@ -384,7 +384,7 @@ static int snd_vortex_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
vortex_adbdma_stopfifo(chip, dma);
#ifndef CHIP_AU8810
else {
- printk(KERN_INFO "vortex: wt stop %d\n", dma);
+ pr_info("vortex: wt stop %d\n", dma);
vortex_wtdma_stopfifo(chip, dma);
}
#endif
diff --git a/sound/pci/au88x0/au88x0_synth.c b/sound/pci/au88x0/au88x0_synth.c
index 922a84bba2ef..f094bac24291 100644
--- a/sound/pci/au88x0/au88x0_synth.c
+++ b/sound/pci/au88x0/au88x0_synth.c
@@ -90,7 +90,7 @@ static int vortex_wt_allocroute(vortex_t * vortex, int wt, int nr_ch)
hwwrite(vortex->mmio, WT_PARM(wt, 2), 0);
temp = hwread(vortex->mmio, WT_PARM(wt, 3));
- printk(KERN_DEBUG "vortex: WT PARM3: %x\n", temp);
+ pr_debug("vortex: WT PARM3: %x\n", temp);
//hwwrite(vortex->mmio, WT_PARM(wt, 3), temp);
hwwrite(vortex->mmio, WT_DELAY(wt, 0), 0);
@@ -98,7 +98,7 @@ static int vortex_wt_allocroute(vortex_t * vortex, int wt, int nr_ch)
hwwrite(vortex->mmio, WT_DELAY(wt, 2), 0);
hwwrite(vortex->mmio, WT_DELAY(wt, 3), 0);
- printk(KERN_DEBUG "vortex: WT GMODE: %x\n", hwread(vortex->mmio, WT_GMODE(wt)));
+ pr_debug("vortex: WT GMODE: %x\n", hwread(vortex->mmio, WT_GMODE(wt)));
hwwrite(vortex->mmio, WT_PARM(wt, 2), 0xffffffff);
hwwrite(vortex->mmio, WT_PARM(wt, 3), 0xcff1c810);
@@ -106,7 +106,7 @@ static int vortex_wt_allocroute(vortex_t * vortex, int wt, int nr_ch)
voice->parm0 = voice->parm1 = 0xcfb23e2f;
hwwrite(vortex->mmio, WT_PARM(wt, 0), voice->parm0);
hwwrite(vortex->mmio, WT_PARM(wt, 1), voice->parm1);
- printk(KERN_DEBUG "vortex: WT GMODE 2 : %x\n", hwread(vortex->mmio, WT_GMODE(wt)));
+ pr_debug("vortex: WT GMODE 2 : %x\n", hwread(vortex->mmio, WT_GMODE(wt)));
return 0;
}
@@ -196,14 +196,14 @@ vortex_wt_SetReg(vortex_t * vortex, unsigned char reg, int wt,
if ((reg == 5) || ((reg >= 7) && (reg <= 10)) || (reg == 0xc)) {
if (wt >= (NR_WT / NR_WT_PB)) {
- printk
+ pr_warn
("vortex: WT SetReg: bank out of range. reg=0x%x, wt=%d\n",
reg, wt);
return 0;
}
} else {
if (wt >= NR_WT) {
- printk(KERN_ERR "vortex: WT SetReg: voice out of range\n");
+ pr_err("vortex: WT SetReg: voice out of range\n");
return 0;
}
}
@@ -214,42 +214,42 @@ vortex_wt_SetReg(vortex_t * vortex, unsigned char reg, int wt,
/* Voice specific parameters */
case 0: /* running */
/*
- printk(KERN_DEBUG "vortex: WT SetReg(0x%x) = 0x%08x\n",
+ pr_debug("vortex: WT SetReg(0x%x) = 0x%08x\n",
WT_RUN(wt), (int)val);
*/
hwwrite(vortex->mmio, WT_RUN(wt), val);
return 0xc;
case 1: /* param 0 */
/*
- printk(KERN_DEBUG "vortex: WT SetReg(0x%x) = 0x%08x\n",
+ pr_debug("vortex: WT SetReg(0x%x) = 0x%08x\n",
WT_PARM(wt,0), (int)val);
*/
hwwrite(vortex->mmio, WT_PARM(wt, 0), val);
return 0xc;
case 2: /* param 1 */
/*
- printk(KERN_DEBUG "vortex: WT SetReg(0x%x) = 0x%08x\n",
+ pr_debug("vortex: WT SetReg(0x%x) = 0x%08x\n",
WT_PARM(wt,1), (int)val);
*/
hwwrite(vortex->mmio, WT_PARM(wt, 1), val);
return 0xc;
case 3: /* param 2 */
/*
- printk(KERN_DEBUG "vortex: WT SetReg(0x%x) = 0x%08x\n",
+ pr_debug("vortex: WT SetReg(0x%x) = 0x%08x\n",
WT_PARM(wt,2), (int)val);
*/
hwwrite(vortex->mmio, WT_PARM(wt, 2), val);
return 0xc;
case 4: /* param 3 */
/*
- printk(KERN_DEBUG "vortex: WT SetReg(0x%x) = 0x%08x\n",
+ pr_debug("vortex: WT SetReg(0x%x) = 0x%08x\n",
WT_PARM(wt,3), (int)val);
*/
hwwrite(vortex->mmio, WT_PARM(wt, 3), val);
return 0xc;
case 6: /* mute */
/*
- printk(KERN_DEBUG "vortex: WT SetReg(0x%x) = 0x%08x\n",
+ pr_debug("vortex: WT SetReg(0x%x) = 0x%08x\n",
WT_MUTE(wt), (int)val);
*/
hwwrite(vortex->mmio, WT_MUTE(wt), val);
@@ -257,7 +257,7 @@ vortex_wt_SetReg(vortex_t * vortex, unsigned char reg, int wt,
case 0xb:
/* delay */
/*
- printk(KERN_DEBUG "vortex: WT SetReg(0x%x) = 0x%08x\n",
+ pr_debug("vortex: WT SetReg(0x%x) = 0x%08x\n",
WT_DELAY(wt,0), (int)val);
*/
hwwrite(vortex->mmio, WT_DELAY(wt, 3), val);
@@ -285,7 +285,7 @@ vortex_wt_SetReg(vortex_t * vortex, unsigned char reg, int wt,
return 0;
}
/*
- printk(KERN_DEBUG "vortex: WT SetReg(0x%x) = 0x%08x\n", ecx, (int)val);
+ pr_debug("vortex: WT SetReg(0x%x) = 0x%08x\n", ecx, (int)val);
*/
hwwrite(vortex->mmio, ecx, val);
return 1;
diff --git a/sound/pci/ctxfi/ctamixer.c b/sound/pci/ctxfi/ctamixer.c
index fee35cfc0c7f..c7dc38d41b7f 100644
--- a/sound/pci/ctxfi/ctamixer.c
+++ b/sound/pci/ctxfi/ctamixer.c
@@ -258,7 +258,8 @@ static int get_amixer_rsc(struct amixer_mgr *mgr,
}
spin_unlock_irqrestore(&mgr->mgr_lock, flags);
if (err) {
- printk(KERN_ERR "ctxfi: Can't meet AMIXER resource request!\n");
+ dev_err(mgr->card->dev,
+ "Can't meet AMIXER resource request!\n");
goto error;
}
@@ -296,7 +297,7 @@ static int put_amixer_rsc(struct amixer_mgr *mgr, struct amixer *amixer)
return 0;
}
-int amixer_mgr_create(void *hw, struct amixer_mgr **ramixer_mgr)
+int amixer_mgr_create(struct hw *hw, struct amixer_mgr **ramixer_mgr)
{
int err;
struct amixer_mgr *amixer_mgr;
@@ -314,6 +315,7 @@ int amixer_mgr_create(void *hw, struct amixer_mgr **ramixer_mgr)
amixer_mgr->get_amixer = get_amixer_rsc;
amixer_mgr->put_amixer = put_amixer_rsc;
+ amixer_mgr->card = hw->card;
*ramixer_mgr = amixer_mgr;
@@ -411,7 +413,8 @@ static int get_sum_rsc(struct sum_mgr *mgr,
}
spin_unlock_irqrestore(&mgr->mgr_lock, flags);
if (err) {
- printk(KERN_ERR "ctxfi: Can't meet SUM resource request!\n");
+ dev_err(mgr->card->dev,
+ "Can't meet SUM resource request!\n");
goto error;
}
@@ -449,7 +452,7 @@ static int put_sum_rsc(struct sum_mgr *mgr, struct sum *sum)
return 0;
}
-int sum_mgr_create(void *hw, struct sum_mgr **rsum_mgr)
+int sum_mgr_create(struct hw *hw, struct sum_mgr **rsum_mgr)
{
int err;
struct sum_mgr *sum_mgr;
@@ -467,6 +470,7 @@ int sum_mgr_create(void *hw, struct sum_mgr **rsum_mgr)
sum_mgr->get_sum = get_sum_rsc;
sum_mgr->put_sum = put_sum_rsc;
+ sum_mgr->card = hw->card;
*rsum_mgr = sum_mgr;
diff --git a/sound/pci/ctxfi/ctamixer.h b/sound/pci/ctxfi/ctamixer.h
index cc49e5ab4750..72f42f27434e 100644
--- a/sound/pci/ctxfi/ctamixer.h
+++ b/sound/pci/ctxfi/ctamixer.h
@@ -21,6 +21,7 @@
#include "ctresource.h"
#include <linux/spinlock.h>
+#include <sound/core.h>
/* Define the descriptor of a summation node resource */
struct sum {
@@ -35,6 +36,7 @@ struct sum_desc {
struct sum_mgr {
struct rsc_mgr mgr; /* Basic resource manager info */
+ struct snd_card *card; /* pointer to this card */
spinlock_t mgr_lock;
/* request one sum resource */
@@ -45,7 +47,7 @@ struct sum_mgr {
};
/* Constructor and destructor of daio resource manager */
-int sum_mgr_create(void *hw, struct sum_mgr **rsum_mgr);
+int sum_mgr_create(struct hw *hw, struct sum_mgr **rsum_mgr);
int sum_mgr_destroy(struct sum_mgr *sum_mgr);
/* Define the descriptor of a amixer resource */
@@ -79,6 +81,7 @@ struct amixer_desc {
struct amixer_mgr {
struct rsc_mgr mgr; /* Basic resource manager info */
+ struct snd_card *card; /* pointer to this card */
spinlock_t mgr_lock;
/* request one amixer resource */
@@ -90,7 +93,7 @@ struct amixer_mgr {
};
/* Constructor and destructor of amixer resource manager */
-int amixer_mgr_create(void *hw, struct amixer_mgr **ramixer_mgr);
+int amixer_mgr_create(struct hw *hw, struct amixer_mgr **ramixer_mgr);
int amixer_mgr_destroy(struct amixer_mgr *amixer_mgr);
#endif /* CTAMIXER_H */
diff --git a/sound/pci/ctxfi/ctatc.c b/sound/pci/ctxfi/ctatc.c
index af632bd08323..454659074390 100644
--- a/sound/pci/ctxfi/ctatc.c
+++ b/sound/pci/ctxfi/ctatc.c
@@ -106,11 +106,11 @@ static struct {
.public_name = "Mixer"}
};
-typedef int (*create_t)(void *, void **);
+typedef int (*create_t)(struct hw *, void **);
typedef int (*destroy_t)(void *);
static struct {
- int (*create)(void *hw, void **rmgr);
+ int (*create)(struct hw *hw, void **rmgr);
int (*destroy)(void *mgr);
} rsc_mgr_funcs[NUM_RSCTYP] = {
[SRC] = { .create = (create_t)src_mgr_create,
@@ -171,7 +171,8 @@ static unsigned long atc_get_ptp_phys(struct ct_atc *atc, int index)
return atc->vm->get_ptp_phys(atc->vm, index);
}
-static unsigned int convert_format(snd_pcm_format_t snd_format)
+static unsigned int convert_format(snd_pcm_format_t snd_format,
+ struct snd_card *card)
{
switch (snd_format) {
case SNDRV_PCM_FORMAT_U8:
@@ -185,7 +186,7 @@ static unsigned int convert_format(snd_pcm_format_t snd_format)
case SNDRV_PCM_FORMAT_FLOAT_LE:
return SRC_SF_F32;
default:
- printk(KERN_ERR "ctxfi: not recognized snd format is %d \n",
+ dev_err(card->dev, "not recognized snd format is %d\n",
snd_format);
return SRC_SF_S16;
}
@@ -268,7 +269,8 @@ static int atc_pcm_playback_prepare(struct ct_atc *atc, struct ct_atc_pcm *apcm)
src = apcm->src;
src->ops->set_pitch(src, pitch);
src->ops->set_rom(src, select_rom(pitch));
- src->ops->set_sf(src, convert_format(apcm->substream->runtime->format));
+ src->ops->set_sf(src, convert_format(apcm->substream->runtime->format,
+ atc->card));
src->ops->set_pm(src, (src->ops->next_interleave(src) != NULL));
/* Get AMIXER resource */
@@ -738,7 +740,8 @@ static int atc_pcm_capture_start(struct ct_atc *atc, struct ct_atc_pcm *apcm)
/* Set up recording SRC */
src = apcm->src;
- src->ops->set_sf(src, convert_format(apcm->substream->runtime->format));
+ src->ops->set_sf(src, convert_format(apcm->substream->runtime->format,
+ atc->card));
src->ops->set_sa(src, apcm->vm_block->addr);
src->ops->set_la(src, apcm->vm_block->addr + apcm->vm_block->size);
src->ops->set_ca(src, apcm->vm_block->addr);
@@ -807,7 +810,8 @@ static int spdif_passthru_playback_get_resources(struct ct_atc *atc,
src = apcm->src;
src->ops->set_pitch(src, pitch);
src->ops->set_rom(src, select_rom(pitch));
- src->ops->set_sf(src, convert_format(apcm->substream->runtime->format));
+ src->ops->set_sf(src, convert_format(apcm->substream->runtime->format,
+ atc->card));
src->ops->set_pm(src, (src->ops->next_interleave(src) != NULL));
src->ops->set_bp(src, 1);
@@ -1235,7 +1239,7 @@ static int ct_atc_destroy(struct ct_atc *atc)
}
if (atc->hw)
- destroy_hw_obj((struct hw *)atc->hw);
+ destroy_hw_obj(atc->hw);
/* Destroy device virtual memory manager object */
if (atc->vm) {
@@ -1282,9 +1286,9 @@ static int atc_identify_card(struct ct_atc *atc, unsigned int ssid)
p = snd_pci_quirk_lookup_id(vendor_id, device_id, list);
if (p) {
if (p->value < 0) {
- printk(KERN_ERR "ctxfi: "
- "Device %04x:%04x is black-listed\n",
- vendor_id, device_id);
+ dev_err(atc->card->dev,
+ "Device %04x:%04x is black-listed\n",
+ vendor_id, device_id);
return -ENOENT;
}
atc->model = p->value;
@@ -1315,8 +1319,8 @@ int ct_atc_create_alsa_devs(struct ct_atc *atc)
err = alsa_dev_funcs[i].create(atc, i,
alsa_dev_funcs[i].public_name);
if (err) {
- printk(KERN_ERR "ctxfi: "
- "Creating alsa device %d failed!\n", i);
+ dev_err(atc->card->dev,
+ "Creating alsa device %d failed!\n", i);
return err;
}
}
@@ -1332,9 +1336,10 @@ static int atc_create_hw_devs(struct ct_atc *atc)
err = create_hw_obj(atc->pci, atc->chip_type, atc->model, &hw);
if (err) {
- printk(KERN_ERR "Failed to create hw obj!!!\n");
+ dev_err(atc->card->dev, "Failed to create hw obj!!!\n");
return err;
}
+ hw->card = atc->card;
atc->hw = hw;
/* Initialize card hardware. */
@@ -1351,8 +1356,8 @@ static int atc_create_hw_devs(struct ct_atc *atc)
err = rsc_mgr_funcs[i].create(atc->hw, &atc->rsc_mgrs[i]);
if (err) {
- printk(KERN_ERR "ctxfi: "
- "Failed to create rsc_mgr %d!!!\n", i);
+ dev_err(atc->card->dev,
+ "Failed to create rsc_mgr %d!!!\n", i);
return err;
}
}
@@ -1399,8 +1404,9 @@ static int atc_get_resources(struct ct_atc *atc)
err = daio_mgr->get_daio(daio_mgr, &da_desc,
(struct daio **)&atc->daios[i]);
if (err) {
- printk(KERN_ERR "ctxfi: Failed to get DAIO "
- "resource %d!!!\n", i);
+ dev_err(atc->card->dev,
+ "Failed to get DAIO resource %d!!!\n",
+ i);
return err;
}
atc->n_daio++;
@@ -1603,8 +1609,8 @@ static int atc_resume(struct ct_atc *atc)
/* Do hardware resume. */
err = atc_hw_resume(atc);
if (err < 0) {
- printk(KERN_ERR "ctxfi: pci_enable_device failed, "
- "disabling device\n");
+ dev_err(atc->card->dev,
+ "pci_enable_device failed, disabling device\n");
snd_card_disconnect(atc->card);
return err;
}
@@ -1701,7 +1707,7 @@ int ct_atc_create(struct snd_card *card, struct pci_dev *pci,
/* Find card model */
err = atc_identify_card(atc, ssid);
if (err < 0) {
- printk(KERN_ERR "ctatc: Card not recognised\n");
+ dev_err(card->dev, "ctatc: Card not recognised\n");
goto error1;
}
@@ -1717,7 +1723,7 @@ int ct_atc_create(struct snd_card *card, struct pci_dev *pci,
err = ct_mixer_create(atc, (struct ct_mixer **)&atc->mixer);
if (err) {
- printk(KERN_ERR "ctxfi: Failed to create mixer obj!!!\n");
+ dev_err(card->dev, "Failed to create mixer obj!!!\n");
goto error1;
}
@@ -1744,6 +1750,6 @@ int ct_atc_create(struct snd_card *card, struct pci_dev *pci,
error1:
ct_atc_destroy(atc);
- printk(KERN_ERR "ctxfi: Something wrong!!!\n");
+ dev_err(card->dev, "Something wrong!!!\n");
return err;
}
diff --git a/sound/pci/ctxfi/ctatc.h b/sound/pci/ctxfi/ctatc.h
index 5f11ca22fcde..56413343a9e8 100644
--- a/sound/pci/ctxfi/ctatc.h
+++ b/sound/pci/ctxfi/ctatc.h
@@ -131,7 +131,7 @@ struct ct_atc {
/* Don't touch! Used for internal object. */
void *rsc_mgrs[NUM_RSCTYP]; /* chip resource managers */
void *mixer; /* internal mixer object */
- void *hw; /* chip specific hardware access object */
+ struct hw *hw; /* chip specific hardware access object */
void **daios; /* digital audio io resources */
void **pcm; /* SUMs for collecting all pcm stream */
void **srcs; /* Sample Rate Converters for input signal */
diff --git a/sound/pci/ctxfi/ctdaio.c b/sound/pci/ctxfi/ctdaio.c
index 84f86bf63b8f..c1c3f8816fff 100644
--- a/sound/pci/ctxfi/ctdaio.c
+++ b/sound/pci/ctxfi/ctdaio.c
@@ -140,19 +140,19 @@ static int dao_rsc_reinit(struct dao *dao, const struct dao_desc *desc);
static int dao_spdif_get_spos(struct dao *dao, unsigned int *spos)
{
- ((struct hw *)dao->hw)->dao_get_spos(dao->ctrl_blk, spos);
+ dao->hw->dao_get_spos(dao->ctrl_blk, spos);
return 0;
}
static int dao_spdif_set_spos(struct dao *dao, unsigned int spos)
{
- ((struct hw *)dao->hw)->dao_set_spos(dao->ctrl_blk, spos);
+ dao->hw->dao_set_spos(dao->ctrl_blk, spos);
return 0;
}
static int dao_commit_write(struct dao *dao)
{
- ((struct hw *)dao->hw)->dao_commit_write(dao->hw,
+ dao->hw->dao_commit_write(dao->hw,
daio_device_index(dao->daio.type, dao->hw), dao->ctrl_blk);
return 0;
}
@@ -277,16 +277,14 @@ static struct dao_rsc_ops dao_ops = {
static int dai_set_srt_srcl(struct dai *dai, struct rsc *src)
{
src->ops->master(src);
- ((struct hw *)dai->hw)->dai_srt_set_srcm(dai->ctrl_blk,
- src->ops->index(src));
+ dai->hw->dai_srt_set_srcm(dai->ctrl_blk, src->ops->index(src));
return 0;
}
static int dai_set_srt_srcr(struct dai *dai, struct rsc *src)
{
src->ops->master(src);
- ((struct hw *)dai->hw)->dai_srt_set_srco(dai->ctrl_blk,
- src->ops->index(src));
+ dai->hw->dai_srt_set_srco(dai->ctrl_blk, src->ops->index(src));
return 0;
}
@@ -297,25 +295,25 @@ static int dai_set_srt_msr(struct dai *dai, unsigned int msr)
for (rsr = 0; msr > 1; msr >>= 1)
rsr++;
- ((struct hw *)dai->hw)->dai_srt_set_rsr(dai->ctrl_blk, rsr);
+ dai->hw->dai_srt_set_rsr(dai->ctrl_blk, rsr);
return 0;
}
static int dai_set_enb_src(struct dai *dai, unsigned int enb)
{
- ((struct hw *)dai->hw)->dai_srt_set_ec(dai->ctrl_blk, enb);
+ dai->hw->dai_srt_set_ec(dai->ctrl_blk, enb);
return 0;
}
static int dai_set_enb_srt(struct dai *dai, unsigned int enb)
{
- ((struct hw *)dai->hw)->dai_srt_set_et(dai->ctrl_blk, enb);
+ dai->hw->dai_srt_set_et(dai->ctrl_blk, enb);
return 0;
}
static int dai_commit_write(struct dai *dai)
{
- ((struct hw *)dai->hw)->dai_commit_write(dai->hw,
+ dai->hw->dai_commit_write(dai->hw,
daio_device_index(dai->daio.type, dai->hw), dai->ctrl_blk);
return 0;
}
@@ -331,12 +329,12 @@ static struct dai_rsc_ops dai_ops = {
static int daio_rsc_init(struct daio *daio,
const struct daio_desc *desc,
- void *hw)
+ struct hw *hw)
{
int err;
unsigned int idx_l, idx_r;
- switch (((struct hw *)hw)->chip_type) {
+ switch (hw->chip_type) {
case ATC20K1:
idx_l = idx_20k1[desc->type].left;
idx_r = idx_20k1[desc->type].right;
@@ -360,7 +358,7 @@ static int daio_rsc_init(struct daio *daio,
if (desc->type <= DAIO_OUT_MAX) {
daio->rscl.ops = daio->rscr.ops = &daio_out_rsc_ops;
} else {
- switch (((struct hw *)hw)->chip_type) {
+ switch (hw->chip_type) {
case ATC20K1:
daio->rscl.ops = daio->rscr.ops = &daio_in_rsc_ops_20k1;
break;
@@ -445,7 +443,7 @@ static int dao_rsc_uninit(struct dao *dao)
kfree(dao->imappers);
dao->imappers = NULL;
}
- ((struct hw *)dao->hw)->dao_put_ctrl_blk(dao->ctrl_blk);
+ dao->hw->dao_put_ctrl_blk(dao->ctrl_blk);
dao->hw = dao->ctrl_blk = NULL;
daio_rsc_uninit(&dao->daio);
@@ -502,7 +500,7 @@ error1:
static int dai_rsc_uninit(struct dai *dai)
{
- ((struct hw *)dai->hw)->dai_put_ctrl_blk(dai->ctrl_blk);
+ dai->hw->dai_put_ctrl_blk(dai->ctrl_blk);
dai->hw = dai->ctrl_blk = NULL;
daio_rsc_uninit(&dai->daio);
return 0;
@@ -541,7 +539,8 @@ static int get_daio_rsc(struct daio_mgr *mgr,
err = daio_mgr_get_rsc(&mgr->mgr, desc->type);
spin_unlock_irqrestore(&mgr->mgr_lock, flags);
if (err) {
- printk(KERN_ERR "Can't meet DAIO resource request!\n");
+ dev_err(mgr->card->dev,
+ "Can't meet DAIO resource request!\n");
return err;
}
@@ -692,7 +691,7 @@ static int daio_mgr_commit_write(struct daio_mgr *mgr)
return 0;
}
-int daio_mgr_create(void *hw, struct daio_mgr **rdaio_mgr)
+int daio_mgr_create(struct hw *hw, struct daio_mgr **rdaio_mgr)
{
int err, i;
struct daio_mgr *daio_mgr;
@@ -727,12 +726,13 @@ int daio_mgr_create(void *hw, struct daio_mgr **rdaio_mgr)
daio_mgr->imap_add = daio_imap_add;
daio_mgr->imap_delete = daio_imap_delete;
daio_mgr->commit_write = daio_mgr_commit_write;
+ daio_mgr->card = hw->card;
for (i = 0; i < 8; i++) {
- ((struct hw *)hw)->daio_mgr_dsb_dao(daio_mgr->mgr.ctrl_blk, i);
- ((struct hw *)hw)->daio_mgr_dsb_dai(daio_mgr->mgr.ctrl_blk, i);
+ hw->daio_mgr_dsb_dao(daio_mgr->mgr.ctrl_blk, i);
+ hw->daio_mgr_dsb_dai(daio_mgr->mgr.ctrl_blk, i);
}
- ((struct hw *)hw)->daio_mgr_commit_write(hw, daio_mgr->mgr.ctrl_blk);
+ hw->daio_mgr_commit_write(hw, daio_mgr->mgr.ctrl_blk);
*rdaio_mgr = daio_mgr;
diff --git a/sound/pci/ctxfi/ctdaio.h b/sound/pci/ctxfi/ctdaio.h
index 85ccb6ee1ab4..0ebbf350f51a 100644
--- a/sound/pci/ctxfi/ctdaio.h
+++ b/sound/pci/ctxfi/ctdaio.h
@@ -23,6 +23,7 @@
#include "ctimap.h"
#include <linux/spinlock.h>
#include <linux/list.h>
+#include <sound/core.h>
/* Define the descriptor of a daio resource */
enum DAIOTYP {
@@ -53,14 +54,14 @@ struct dao {
struct dao_rsc_ops *ops; /* DAO specific operations */
struct imapper **imappers;
struct daio_mgr *mgr;
- void *hw;
+ struct hw *hw;
void *ctrl_blk;
};
struct dai {
struct daio daio;
struct dai_rsc_ops *ops; /* DAI specific operations */
- void *hw;
+ struct hw *hw;
void *ctrl_blk;
};
@@ -98,6 +99,7 @@ struct daio_desc {
struct daio_mgr {
struct rsc_mgr mgr; /* Basic resource manager info */
+ struct snd_card *card; /* pointer to this card */
spinlock_t mgr_lock;
spinlock_t imap_lock;
struct list_head imappers;
@@ -117,7 +119,7 @@ struct daio_mgr {
};
/* Constructor and destructor of daio resource manager */
-int daio_mgr_create(void *hw, struct daio_mgr **rdaio_mgr);
+int daio_mgr_create(struct hw *hw, struct daio_mgr **rdaio_mgr);
int daio_mgr_destroy(struct daio_mgr *daio_mgr);
#endif /* CTDAIO_H */
diff --git a/sound/pci/ctxfi/cthardware.h b/sound/pci/ctxfi/cthardware.h
index 5977e9a24b5c..54cc9cb75f00 100644
--- a/sound/pci/ctxfi/cthardware.h
+++ b/sound/pci/ctxfi/cthardware.h
@@ -20,6 +20,7 @@
#include <linux/types.h>
#include <linux/pci.h>
+#include <sound/core.h>
enum CHIPTYP {
ATC20K1,
@@ -184,9 +185,10 @@ struct hw {
void *irq_callback_data;
struct pci_dev *pci; /* the pci kernel structure of this card */
+ struct snd_card *card; /* pointer to this card */
int irq;
unsigned long io_base;
- unsigned long mem_base;
+ void __iomem *mem_base;
enum CHIPTYP chip_type;
enum CTCARDS model;
diff --git a/sound/pci/ctxfi/cthw20k1.c b/sound/pci/ctxfi/cthw20k1.c
index 6ac40beb49da..b425aa8ee578 100644
--- a/sound/pci/ctxfi/cthw20k1.c
+++ b/sound/pci/ctxfi/cthw20k1.c
@@ -1268,7 +1268,8 @@ static int hw_trn_init(struct hw *hw, const struct trn_conf *info)
/* Set up device page table */
if ((~0UL) == info->vm_pgt_phys) {
- printk(KERN_ERR "Wrong device page table page address!\n");
+ dev_err(hw->card->dev,
+ "Wrong device page table page address!\n");
return -1;
}
@@ -1327,7 +1328,7 @@ static int hw_pll_init(struct hw *hw, unsigned int rsr)
mdelay(40);
}
if (i >= 3) {
- printk(KERN_ALERT "PLL initialization failed!!!\n");
+ dev_alert(hw->card->dev, "PLL initialization failed!!!\n");
return -EBUSY;
}
@@ -1351,7 +1352,7 @@ static int hw_auto_init(struct hw *hw)
break;
}
if (!get_field(gctl, GCTL_AID)) {
- printk(KERN_ALERT "Card Auto-init failed!!!\n");
+ dev_alert(hw->card->dev, "Card Auto-init failed!!!\n");
return -EBUSY;
}
@@ -1802,7 +1803,7 @@ static int uaa_to_xfi(struct pci_dev *pci)
unsigned int is_uaa;
unsigned int data[4] = {0};
unsigned int io_base;
- void *mem_base;
+ void __iomem *mem_base;
int i;
const u32 CTLX = CTLBITS('C', 'T', 'L', 'X');
const u32 CTL_ = CTLBITS('C', 'T', 'L', '-');
@@ -1911,9 +1912,9 @@ static int hw_card_start(struct hw *hw)
/* Set DMA transfer mask */
if (pci_set_dma_mask(pci, CT_XFI_DMA_MASK) < 0 ||
pci_set_consistent_dma_mask(pci, CT_XFI_DMA_MASK) < 0) {
- printk(KERN_ERR "architecture does not support PCI "
- "busmaster DMA with mask 0x%llx\n",
- CT_XFI_DMA_MASK);
+ dev_err(hw->card->dev,
+ "architecture does not support PCI busmaster DMA with mask 0x%llx\n",
+ CT_XFI_DMA_MASK);
err = -ENXIO;
goto error1;
}
@@ -1942,7 +1943,8 @@ static int hw_card_start(struct hw *hw)
err = request_irq(pci->irq, ct_20k1_interrupt, IRQF_SHARED,
KBUILD_MODNAME, hw);
if (err < 0) {
- printk(KERN_ERR "XFi: Cannot get irq %d\n", pci->irq);
+ dev_err(hw->card->dev,
+ "XFi: Cannot get irq %d\n", pci->irq);
goto error2;
}
hw->irq = pci->irq;
@@ -1985,9 +1987,9 @@ static int hw_card_shutdown(struct hw *hw)
hw->irq = -1;
if (hw->mem_base)
- iounmap((void *)hw->mem_base);
+ iounmap(hw->mem_base);
- hw->mem_base = (unsigned long)NULL;
+ hw->mem_base = NULL;
if (hw->io_base)
pci_release_regions(hw->pci);
diff --git a/sound/pci/ctxfi/cthw20k2.c b/sound/pci/ctxfi/cthw20k2.c
index b1438861d38a..253899d13790 100644
--- a/sound/pci/ctxfi/cthw20k2.c
+++ b/sound/pci/ctxfi/cthw20k2.c
@@ -1187,7 +1187,8 @@ static int hw_daio_init(struct hw *hw, const struct daio_conf *info)
hw_write_20kx(hw, AUDIO_IO_TX_BLRCLK, 0x21212121);
hw_write_20kx(hw, AUDIO_IO_RX_BLRCLK, 0);
} else {
- printk(KERN_ALERT "ctxfi: ERROR!!! Invalid sampling rate!!!\n");
+ dev_alert(hw->card->dev,
+ "ERROR!!! Invalid sampling rate!!!\n");
return -EINVAL;
}
@@ -1246,8 +1247,8 @@ static int hw_trn_init(struct hw *hw, const struct trn_conf *info)
/* Set up device page table */
if ((~0UL) == info->vm_pgt_phys) {
- printk(KERN_ALERT "ctxfi: "
- "Wrong device page table page address!!!\n");
+ dev_alert(hw->card->dev,
+ "Wrong device page table page address!!!\n");
return -1;
}
@@ -1352,7 +1353,8 @@ static int hw_pll_init(struct hw *hw, unsigned int rsr)
break;
}
if (i >= 1000) {
- printk(KERN_ALERT "ctxfi: PLL initialization failed!!!\n");
+ dev_alert(hw->card->dev,
+ "PLL initialization failed!!!\n");
return -EBUSY;
}
@@ -1376,7 +1378,7 @@ static int hw_auto_init(struct hw *hw)
break;
}
if (!get_field(gctl, GCTL_AID)) {
- printk(KERN_ALERT "ctxfi: Card Auto-init failed!!!\n");
+ dev_alert(hw->card->dev, "Card Auto-init failed!!!\n");
return -EBUSY;
}
@@ -1847,7 +1849,7 @@ static int hw_adc_init(struct hw *hw, const struct adc_conf *info)
/* Initialize I2C */
err = hw20k2_i2c_init(hw, 0x1A, 1, 1);
if (err < 0) {
- printk(KERN_ALERT "ctxfi: Failure to acquire I2C!!!\n");
+ dev_alert(hw->card->dev, "Failure to acquire I2C!!!\n");
goto error;
}
@@ -1890,8 +1892,9 @@ static int hw_adc_init(struct hw *hw, const struct adc_conf *info)
hw20k2_i2c_write(hw, MAKE_WM8775_ADDR(WM8775_MMC, 0x0A),
MAKE_WM8775_DATA(0x0A));
} else {
- printk(KERN_ALERT "ctxfi: Invalid master sampling "
- "rate (msr %d)!!!\n", info->msr);
+ dev_alert(hw->card->dev,
+ "Invalid master sampling rate (msr %d)!!!\n",
+ info->msr);
err = -EINVAL;
goto error;
}
@@ -2034,8 +2037,9 @@ static int hw_card_start(struct hw *hw)
/* Set DMA transfer mask */
if (pci_set_dma_mask(pci, CT_XFI_DMA_MASK) < 0 ||
pci_set_consistent_dma_mask(pci, CT_XFI_DMA_MASK) < 0) {
- printk(KERN_ERR "ctxfi: architecture does not support PCI "
- "busmaster DMA with mask 0x%llx\n", CT_XFI_DMA_MASK);
+ dev_err(hw->card->dev,
+ "architecture does not support PCI busmaster DMA with mask 0x%llx\n",
+ CT_XFI_DMA_MASK);
err = -ENXIO;
goto error1;
}
@@ -2046,8 +2050,8 @@ static int hw_card_start(struct hw *hw)
goto error1;
hw->io_base = pci_resource_start(hw->pci, 2);
- hw->mem_base = (unsigned long)ioremap(hw->io_base,
- pci_resource_len(hw->pci, 2));
+ hw->mem_base = ioremap(hw->io_base,
+ pci_resource_len(hw->pci, 2));
if (!hw->mem_base) {
err = -ENOENT;
goto error2;
@@ -2063,7 +2067,8 @@ static int hw_card_start(struct hw *hw)
err = request_irq(pci->irq, ct_20k2_interrupt, IRQF_SHARED,
KBUILD_MODNAME, hw);
if (err < 0) {
- printk(KERN_ERR "XFi: Cannot get irq %d\n", pci->irq);
+ dev_err(hw->card->dev,
+ "XFi: Cannot get irq %d\n", pci->irq);
goto error2;
}
hw->irq = pci->irq;
@@ -2107,9 +2112,9 @@ static int hw_card_shutdown(struct hw *hw)
hw->irq = -1;
if (hw->mem_base)
- iounmap((void *)hw->mem_base);
+ iounmap(hw->mem_base);
- hw->mem_base = (unsigned long)NULL;
+ hw->mem_base = NULL;
if (hw->io_base)
pci_release_regions(hw->pci);
@@ -2229,12 +2234,12 @@ static int hw_resume(struct hw *hw, struct card_conf *info)
static u32 hw_read_20kx(struct hw *hw, u32 reg)
{
- return readl((void *)(hw->mem_base + reg));
+ return readl(hw->mem_base + reg);
}
static void hw_write_20kx(struct hw *hw, u32 reg, u32 data)
{
- writel(data, (void *)(hw->mem_base + reg));
+ writel(data, hw->mem_base + reg);
}
static struct hw ct20k2_preset = {
diff --git a/sound/pci/ctxfi/ctmixer.c b/sound/pci/ctxfi/ctmixer.c
index 48fe0e39c2be..4f4a2a5dedb8 100644
--- a/sound/pci/ctxfi/ctmixer.c
+++ b/sound/pci/ctxfi/ctmixer.c
@@ -854,8 +854,8 @@ static int ct_mixer_get_resources(struct ct_mixer *mixer)
for (i = 0; i < (NUM_CT_SUMS * CHN_NUM); i++) {
err = sum_mgr->get_sum(sum_mgr, &sum_desc, &sum);
if (err) {
- printk(KERN_ERR "ctxfi:Failed to get sum resources for "
- "front output!\n");
+ dev_err(mixer->atc->card->dev,
+ "Failed to get sum resources for front output!\n");
break;
}
mixer->sums[i] = sum;
@@ -869,8 +869,8 @@ static int ct_mixer_get_resources(struct ct_mixer *mixer)
for (i = 0; i < (NUM_CT_AMIXERS * CHN_NUM); i++) {
err = amixer_mgr->get_amixer(amixer_mgr, &am_desc, &amixer);
if (err) {
- printk(KERN_ERR "ctxfi:Failed to get amixer resources "
- "for mixer obj!\n");
+ dev_err(mixer->atc->card->dev,
+ "Failed to get amixer resources for mixer obj!\n");
break;
}
mixer->amixers[i] = amixer;
diff --git a/sound/pci/ctxfi/ctpcm.c b/sound/pci/ctxfi/ctpcm.c
index e8a4feb1ed86..d86c474ca5b6 100644
--- a/sound/pci/ctxfi/ctpcm.c
+++ b/sound/pci/ctxfi/ctpcm.c
@@ -217,7 +217,8 @@ static int ct_pcm_playback_prepare(struct snd_pcm_substream *substream)
err = atc->pcm_playback_prepare(atc, apcm);
if (err < 0) {
- printk(KERN_ERR "ctxfi: Preparing pcm playback failed!!!\n");
+ dev_err(atc->card->dev,
+ "Preparing pcm playback failed!!!\n");
return err;
}
@@ -324,7 +325,8 @@ static int ct_pcm_capture_prepare(struct snd_pcm_substream *substream)
err = atc->pcm_capture_prepare(atc, apcm);
if (err < 0) {
- printk(KERN_ERR "ctxfi: Preparing pcm capture failed!!!\n");
+ dev_err(atc->card->dev,
+ "Preparing pcm capture failed!!!\n");
return err;
}
@@ -435,7 +437,8 @@ int ct_alsa_pcm_create(struct ct_atc *atc,
err = snd_pcm_new(atc->card, "ctxfi", device,
playback_count, capture_count, &pcm);
if (err < 0) {
- printk(KERN_ERR "ctxfi: snd_pcm_new failed!! Err=%d\n", err);
+ dev_err(atc->card->dev, "snd_pcm_new failed!! Err=%d\n",
+ err);
return err;
}
diff --git a/sound/pci/ctxfi/ctresource.c b/sound/pci/ctxfi/ctresource.c
index 7dfaf67344d4..1a97e406d8ec 100644
--- a/sound/pci/ctxfi/ctresource.c
+++ b/sound/pci/ctxfi/ctresource.c
@@ -134,7 +134,8 @@ static struct rsc_ops rsc_generic_ops = {
.next_conj = rsc_next_conj,
};
-int rsc_init(struct rsc *rsc, u32 idx, enum RSCTYP type, u32 msr, void *hw)
+int
+rsc_init(struct rsc *rsc, u32 idx, enum RSCTYP type, u32 msr, struct hw *hw)
{
int err = 0;
@@ -151,25 +152,24 @@ int rsc_init(struct rsc *rsc, u32 idx, enum RSCTYP type, u32 msr, void *hw)
switch (type) {
case SRC:
- err = ((struct hw *)hw)->src_rsc_get_ctrl_blk(&rsc->ctrl_blk);
+ err = hw->src_rsc_get_ctrl_blk(&rsc->ctrl_blk);
break;
case AMIXER:
- err = ((struct hw *)hw)->
- amixer_rsc_get_ctrl_blk(&rsc->ctrl_blk);
+ err = hw->amixer_rsc_get_ctrl_blk(&rsc->ctrl_blk);
break;
case SRCIMP:
case SUM:
case DAIO:
break;
default:
- printk(KERN_ERR
- "ctxfi: Invalid resource type value %d!\n", type);
+ dev_err(((struct hw *)hw)->card->dev,
+ "Invalid resource type value %d!\n", type);
return -EINVAL;
}
if (err) {
- printk(KERN_ERR
- "ctxfi: Failed to get resource control block!\n");
+ dev_err(((struct hw *)hw)->card->dev,
+ "Failed to get resource control block!\n");
return err;
}
@@ -181,19 +181,18 @@ int rsc_uninit(struct rsc *rsc)
if ((NULL != rsc->hw) && (NULL != rsc->ctrl_blk)) {
switch (rsc->type) {
case SRC:
- ((struct hw *)rsc->hw)->
- src_rsc_put_ctrl_blk(rsc->ctrl_blk);
+ rsc->hw->src_rsc_put_ctrl_blk(rsc->ctrl_blk);
break;
case AMIXER:
- ((struct hw *)rsc->hw)->
- amixer_rsc_put_ctrl_blk(rsc->ctrl_blk);
+ rsc->hw->amixer_rsc_put_ctrl_blk(rsc->ctrl_blk);
break;
case SUM:
case DAIO:
break;
default:
- printk(KERN_ERR "ctxfi: "
- "Invalid resource type value %d!\n", rsc->type);
+ dev_err(((struct hw *)rsc->hw)->card->dev,
+ "Invalid resource type value %d!\n",
+ rsc->type);
break;
}
@@ -208,10 +207,9 @@ int rsc_uninit(struct rsc *rsc)
}
int rsc_mgr_init(struct rsc_mgr *mgr, enum RSCTYP type,
- unsigned int amount, void *hw_obj)
+ unsigned int amount, struct hw *hw)
{
int err = 0;
- struct hw *hw = hw_obj;
mgr->type = NUM_RSCTYP;
@@ -235,15 +233,15 @@ int rsc_mgr_init(struct rsc_mgr *mgr, enum RSCTYP type,
case SUM:
break;
default:
- printk(KERN_ERR
- "ctxfi: Invalid resource type value %d!\n", type);
+ dev_err(hw->card->dev,
+ "Invalid resource type value %d!\n", type);
err = -EINVAL;
goto error;
}
if (err) {
- printk(KERN_ERR
- "ctxfi: Failed to get manager control block!\n");
+ dev_err(hw->card->dev,
+ "Failed to get manager control block!\n");
goto error;
}
@@ -268,26 +266,23 @@ int rsc_mgr_uninit(struct rsc_mgr *mgr)
if ((NULL != mgr->hw) && (NULL != mgr->ctrl_blk)) {
switch (mgr->type) {
case SRC:
- ((struct hw *)mgr->hw)->
- src_mgr_put_ctrl_blk(mgr->ctrl_blk);
+ mgr->hw->src_mgr_put_ctrl_blk(mgr->ctrl_blk);
break;
case SRCIMP:
- ((struct hw *)mgr->hw)->
- srcimp_mgr_put_ctrl_blk(mgr->ctrl_blk);
+ mgr->hw->srcimp_mgr_put_ctrl_blk(mgr->ctrl_blk);
break;
case AMIXER:
- ((struct hw *)mgr->hw)->
- amixer_mgr_put_ctrl_blk(mgr->ctrl_blk);
+ mgr->hw->amixer_mgr_put_ctrl_blk(mgr->ctrl_blk);
break;
case DAIO:
- ((struct hw *)mgr->hw)->
- daio_mgr_put_ctrl_blk(mgr->ctrl_blk);
+ mgr->hw->daio_mgr_put_ctrl_blk(mgr->ctrl_blk);
break;
case SUM:
break;
default:
- printk(KERN_ERR "ctxfi: "
- "Invalid resource type value %d!\n", mgr->type);
+ dev_err(((struct hw *)mgr->hw)->card->dev,
+ "Invalid resource type value %d!\n",
+ mgr->type);
break;
}
diff --git a/sound/pci/ctxfi/ctresource.h b/sound/pci/ctxfi/ctresource.h
index 0838c2e84f8b..9b746c3719e6 100644
--- a/sound/pci/ctxfi/ctresource.h
+++ b/sound/pci/ctxfi/ctresource.h
@@ -38,7 +38,7 @@ struct rsc {
u32 conj:12; /* Current conjugate index */
u32 msr:4; /* The Master Sample Rate a resource working on */
void *ctrl_blk; /* Chip specific control info block for a resource */
- void *hw; /* Chip specific object for hardware access means */
+ struct hw *hw; /* Chip specific object for hardware access means */
struct rsc_ops *ops; /* Generic resource operations */
};
@@ -50,7 +50,8 @@ struct rsc_ops {
int (*output_slot)(const struct rsc *rsc);
};
-int rsc_init(struct rsc *rsc, u32 idx, enum RSCTYP type, u32 msr, void *hw);
+int
+rsc_init(struct rsc *rsc, u32 idx, enum RSCTYP type, u32 msr, struct hw *hw);
int rsc_uninit(struct rsc *rsc);
struct rsc_mgr {
@@ -59,12 +60,12 @@ struct rsc_mgr {
unsigned int avail; /* The amount of currently available resources */
unsigned char *rscs; /* The bit-map for resource allocation */
void *ctrl_blk; /* Chip specific control info block */
- void *hw; /* Chip specific object for hardware access */
+ struct hw *hw; /* Chip specific object for hardware access */
};
/* Resource management is based on bit-map mechanism */
int rsc_mgr_init(struct rsc_mgr *mgr, enum RSCTYP type,
- unsigned int amount, void *hw);
+ unsigned int amount, struct hw *hw);
int rsc_mgr_uninit(struct rsc_mgr *mgr);
int mgr_get_resource(struct rsc_mgr *mgr, unsigned int n, unsigned int *ridx);
int mgr_put_resource(struct rsc_mgr *mgr, unsigned int n, unsigned int idx);
diff --git a/sound/pci/ctxfi/ctsrc.c b/sound/pci/ctxfi/ctsrc.c
index 6e77e86307c2..ec1f08464d93 100644
--- a/sound/pci/ctxfi/ctsrc.c
+++ b/sound/pci/ctxfi/ctsrc.c
@@ -431,7 +431,8 @@ get_src_rsc(struct src_mgr *mgr, const struct src_desc *desc, struct src **rsrc)
spin_unlock_irqrestore(&mgr->mgr_lock, flags);
if (err) {
- printk(KERN_ERR "ctxfi: Can't meet SRC resource request!\n");
+ dev_err(mgr->card->dev,
+ "Can't meet SRC resource request!\n");
return err;
}
@@ -543,7 +544,7 @@ static int src_mgr_commit_write(struct src_mgr *mgr)
return 0;
}
-int src_mgr_create(void *hw, struct src_mgr **rsrc_mgr)
+int src_mgr_create(struct hw *hw, struct src_mgr **rsrc_mgr)
{
int err, i;
struct src_mgr *src_mgr;
@@ -558,7 +559,7 @@ int src_mgr_create(void *hw, struct src_mgr **rsrc_mgr)
goto error1;
spin_lock_init(&src_mgr->mgr_lock);
- conj_mask = ((struct hw *)hw)->src_dirty_conj_mask();
+ conj_mask = hw->src_dirty_conj_mask();
src_mgr->get_src = get_src_rsc;
src_mgr->put_src = put_src_rsc;
@@ -566,12 +567,13 @@ int src_mgr_create(void *hw, struct src_mgr **rsrc_mgr)
src_mgr->src_enable = src_enable;
src_mgr->src_disable = src_disable;
src_mgr->commit_write = src_mgr_commit_write;
+ src_mgr->card = hw->card;
/* Disable all SRC resources. */
for (i = 0; i < 256; i++)
- ((struct hw *)hw)->src_mgr_dsb_src(src_mgr->mgr.ctrl_blk, i);
+ hw->src_mgr_dsb_src(src_mgr->mgr.ctrl_blk, i);
- ((struct hw *)hw)->src_mgr_commit_write(hw, src_mgr->mgr.ctrl_blk);
+ hw->src_mgr_commit_write(hw, src_mgr->mgr.ctrl_blk);
*rsrc_mgr = src_mgr;
@@ -739,7 +741,8 @@ static int get_srcimp_rsc(struct srcimp_mgr *mgr,
}
spin_unlock_irqrestore(&mgr->mgr_lock, flags);
if (err) {
- printk(KERN_ERR "ctxfi: Can't meet SRCIMP resource request!\n");
+ dev_err(mgr->card->dev,
+ "Can't meet SRCIMP resource request!\n");
goto error1;
}
@@ -825,7 +828,7 @@ static int srcimp_imap_delete(struct srcimp_mgr *mgr, struct imapper *entry)
return err;
}
-int srcimp_mgr_create(void *hw, struct srcimp_mgr **rsrcimp_mgr)
+int srcimp_mgr_create(struct hw *hw, struct srcimp_mgr **rsrcimp_mgr)
{
int err;
struct srcimp_mgr *srcimp_mgr;
@@ -857,6 +860,7 @@ int srcimp_mgr_create(void *hw, struct srcimp_mgr **rsrcimp_mgr)
srcimp_mgr->put_srcimp = put_srcimp_rsc;
srcimp_mgr->imap_add = srcimp_imap_add;
srcimp_mgr->imap_delete = srcimp_imap_delete;
+ srcimp_mgr->card = hw->card;
*rsrcimp_mgr = srcimp_mgr;
diff --git a/sound/pci/ctxfi/ctsrc.h b/sound/pci/ctxfi/ctsrc.h
index 259366aabcac..da7573c5db9b 100644
--- a/sound/pci/ctxfi/ctsrc.h
+++ b/sound/pci/ctxfi/ctsrc.h
@@ -23,6 +23,7 @@
#include "ctimap.h"
#include <linux/spinlock.h>
#include <linux/list.h>
+#include <sound/core.h>
#define SRC_STATE_OFF 0x0
#define SRC_STATE_INIT 0x4
@@ -85,6 +86,7 @@ struct src_desc {
/* Define src manager object */
struct src_mgr {
struct rsc_mgr mgr; /* Basic resource manager info */
+ struct snd_card *card; /* pointer to this card */
spinlock_t mgr_lock;
/* request src resource */
@@ -123,6 +125,7 @@ struct srcimp_desc {
struct srcimp_mgr {
struct rsc_mgr mgr; /* Basic resource manager info */
+ struct snd_card *card; /* pointer to this card */
spinlock_t mgr_lock;
spinlock_t imap_lock;
struct list_head imappers;
@@ -140,10 +143,10 @@ struct srcimp_mgr {
};
/* Constructor and destructor of SRC resource manager */
-int src_mgr_create(void *hw, struct src_mgr **rsrc_mgr);
+int src_mgr_create(struct hw *hw, struct src_mgr **rsrc_mgr);
int src_mgr_destroy(struct src_mgr *src_mgr);
/* Constructor and destructor of SRCIMP resource manager */
-int srcimp_mgr_create(void *hw, struct srcimp_mgr **rsrc_mgr);
+int srcimp_mgr_create(struct hw *hw, struct srcimp_mgr **rsrc_mgr);
int srcimp_mgr_destroy(struct srcimp_mgr *srcimp_mgr);
#endif /* CTSRC_H */
diff --git a/sound/pci/ctxfi/ctvmem.c b/sound/pci/ctxfi/ctvmem.c
index 6109490b83e8..419306ef825f 100644
--- a/sound/pci/ctxfi/ctvmem.c
+++ b/sound/pci/ctxfi/ctvmem.c
@@ -16,6 +16,7 @@
*/
#include "ctvmem.h"
+#include "ctatc.h"
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/io.h>
@@ -29,15 +30,15 @@
* @size must be page aligned.
* */
static struct ct_vm_block *
-get_vm_block(struct ct_vm *vm, unsigned int size)
+get_vm_block(struct ct_vm *vm, unsigned int size, struct ct_atc *atc)
{
struct ct_vm_block *block = NULL, *entry;
struct list_head *pos;
size = CT_PAGE_ALIGN(size);
if (size > vm->size) {
- printk(KERN_ERR "ctxfi: Fail! No sufficient device virtual "
- "memory space available!\n");
+ dev_err(atc->card->dev,
+ "Fail! No sufficient device virtual memory space available!\n");
return NULL;
}
@@ -129,11 +130,12 @@ ct_vm_map(struct ct_vm *vm, struct snd_pcm_substream *substream, int size)
unsigned int pte_start;
unsigned i, pages;
unsigned long *ptp;
+ struct ct_atc *atc = snd_pcm_substream_chip(substream);
- block = get_vm_block(vm, size);
+ block = get_vm_block(vm, size, atc);
if (block == NULL) {
- printk(KERN_ERR "ctxfi: No virtual memory block that is big "
- "enough to allocate!\n");
+ dev_err(atc->card->dev,
+ "No virtual memory block that is big enough to allocate!\n");
return NULL;
}
diff --git a/sound/pci/ctxfi/xfi.c b/sound/pci/ctxfi/xfi.c
index 8f8b566a1b35..f2f32779de98 100644
--- a/sound/pci/ctxfi/xfi.c
+++ b/sound/pci/ctxfi/xfi.c
@@ -76,17 +76,18 @@ ct_card_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
if (err)
return err;
if ((reference_rate != 48000) && (reference_rate != 44100)) {
- printk(KERN_ERR "ctxfi: Invalid reference_rate value %u!!!\n",
- reference_rate);
- printk(KERN_ERR "ctxfi: The valid values for reference_rate "
- "are 48000 and 44100, Value 48000 is assumed.\n");
+ dev_err(card->dev,
+ "Invalid reference_rate value %u!!!\n",
+ reference_rate);
+ dev_err(card->dev,
+ "The valid values for reference_rate are 48000 and 44100, Value 48000 is assumed.\n");
reference_rate = 48000;
}
if ((multiple != 1) && (multiple != 2) && (multiple != 4)) {
- printk(KERN_ERR "ctxfi: Invalid multiple value %u!!!\n",
- multiple);
- printk(KERN_ERR "ctxfi: The valid values for multiple are "
- "1, 2 and 4, Value 2 is assumed.\n");
+ dev_err(card->dev, "Invalid multiple value %u!!!\n",
+ multiple);
+ dev_err(card->dev,
+ "The valid values for multiple are 1, 2 and 4, Value 2 is assumed.\n");
multiple = 2;
}
err = ct_atc_create(card, pci, reference_rate, multiple,
diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
index 51dea49aadd4..fcc5e478c9a1 100644
--- a/sound/pci/hda/hda_auto_parser.c
+++ b/sound/pci/hda/hda_auto_parser.c
@@ -57,12 +57,14 @@ static void sort_pins_by_sequence(hda_nid_t *pins, struct auto_out_pin *list,
/* add the found input-pin to the cfg->inputs[] table */
-static void add_auto_cfg_input_pin(struct auto_pin_cfg *cfg, hda_nid_t nid,
- int type)
+static void add_auto_cfg_input_pin(struct hda_codec *codec, struct auto_pin_cfg *cfg,
+ hda_nid_t nid, int type)
{
if (cfg->num_inputs < AUTO_CFG_MAX_INS) {
cfg->inputs[cfg->num_inputs].pin = nid;
cfg->inputs[cfg->num_inputs].type = type;
+ cfg->inputs[cfg->num_inputs].has_boost_on_pin =
+ nid_has_volume(codec, nid, HDA_INPUT);
cfg->num_inputs++;
}
}
@@ -71,7 +73,12 @@ static int compare_input_type(const void *ap, const void *bp)
{
const struct auto_pin_cfg_item *a = ap;
const struct auto_pin_cfg_item *b = bp;
- return (int)(a->type - b->type);
+ if (a->type != b->type)
+ return (int)(a->type - b->type);
+
+ /* In case one has boost and the other one has not,
+ pick the one with boost first. */
+ return (int)(b->has_boost_on_pin - a->has_boost_on_pin);
}
/* Reorder the surround channels
@@ -268,16 +275,16 @@ int snd_hda_parse_pin_defcfg(struct hda_codec *codec,
cfg->hp_outs++;
break;
case AC_JACK_MIC_IN:
- add_auto_cfg_input_pin(cfg, nid, AUTO_PIN_MIC);
+ add_auto_cfg_input_pin(codec, cfg, nid, AUTO_PIN_MIC);
break;
case AC_JACK_LINE_IN:
- add_auto_cfg_input_pin(cfg, nid, AUTO_PIN_LINE_IN);
+ add_auto_cfg_input_pin(codec, cfg, nid, AUTO_PIN_LINE_IN);
break;
case AC_JACK_CD:
- add_auto_cfg_input_pin(cfg, nid, AUTO_PIN_CD);
+ add_auto_cfg_input_pin(codec, cfg, nid, AUTO_PIN_CD);
break;
case AC_JACK_AUX:
- add_auto_cfg_input_pin(cfg, nid, AUTO_PIN_AUX);
+ add_auto_cfg_input_pin(codec, cfg, nid, AUTO_PIN_AUX);
break;
case AC_JACK_SPDIF_OUT:
case AC_JACK_DIG_OTHER_OUT:
diff --git a/sound/pci/hda/hda_auto_parser.h b/sound/pci/hda/hda_auto_parser.h
index e941f604f5e5..2b8e29fd73e7 100644
--- a/sound/pci/hda/hda_auto_parser.h
+++ b/sound/pci/hda/hda_auto_parser.h
@@ -38,6 +38,7 @@ struct auto_pin_cfg_item {
int type;
unsigned int is_headset_mic:1;
unsigned int is_headphone_mic:1; /* Mic-only in headphone jack */
+ unsigned int has_boost_on_pin:1;
};
struct auto_pin_cfg;
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index ec6a7d0d1886..15e0089492f7 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -2002,6 +2002,26 @@ u32 query_amp_caps(struct hda_codec *codec, hda_nid_t nid, int direction)
EXPORT_SYMBOL_GPL(query_amp_caps);
/**
+ * snd_hda_check_amp_caps - query AMP capabilities
+ * @codec: the HD-audio codec
+ * @nid: the NID to query
+ * @dir: either #HDA_INPUT or #HDA_OUTPUT
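+ * @bits: the amp capability bits to check (e.g. #AC_AMPCAP_MUTE or #AC_AMPCAP_NUM_STEPS)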
+ *
+ * Check whether the widget has the given amp capability for the direction.
+ */
+bool snd_hda_check_amp_caps(struct hda_codec *codec, hda_nid_t nid,
+ int dir, unsigned int bits)
+{
+ if (!nid)
+ return false;
+ if (get_wcaps(codec, nid) & (1 << (dir + 1)))
+ if (query_amp_caps(codec, nid, dir) & bits)
+ return true;
+ return false;
+}
+EXPORT_SYMBOL_GPL(snd_hda_check_amp_caps);
+
+/**
* snd_hda_override_amp_caps - Override the AMP capabilities
* @codec: the CODEC to clean up
* @nid: the NID to clean up
@@ -4817,121 +4837,6 @@ int snd_hda_build_pcms(struct hda_bus *bus)
EXPORT_SYMBOL_GPL(snd_hda_build_pcms);
/**
- * snd_hda_check_board_config - compare the current codec with the config table
- * @codec: the HDA codec
- * @num_configs: number of config enums
- * @models: array of model name strings
- * @tbl: configuration table, terminated by null entries
- *
- * Compares the modelname or PCI subsystem id of the current codec with the
- * given configuration table. If a matching entry is found, returns its
- * config value (supposed to be 0 or positive).
- *
- * If no entries are matching, the function returns a negative value.
- */
-int snd_hda_check_board_config(struct hda_codec *codec,
- int num_configs, const char * const *models,
- const struct snd_pci_quirk *tbl)
-{
- if (codec->modelname && models) {
- int i;
- for (i = 0; i < num_configs; i++) {
- if (models[i] &&
- !strcmp(codec->modelname, models[i])) {
- codec_info(codec, "model '%s' is selected\n",
- models[i]);
- return i;
- }
- }
- }
-
- if (!codec->bus->pci || !tbl)
- return -1;
-
- tbl = snd_pci_quirk_lookup(codec->bus->pci, tbl);
- if (!tbl)
- return -1;
- if (tbl->value >= 0 && tbl->value < num_configs) {
-#ifdef CONFIG_SND_DEBUG_VERBOSE
- char tmp[10];
- const char *model = NULL;
- if (models)
- model = models[tbl->value];
- if (!model) {
- sprintf(tmp, "#%d", tbl->value);
- model = tmp;
- }
- codec_info(codec, "model '%s' is selected for config %x:%x (%s)\n",
- model, tbl->subvendor, tbl->subdevice,
- (tbl->name ? tbl->name : "Unknown device"));
-#endif
- return tbl->value;
- }
- return -1;
-}
-EXPORT_SYMBOL_GPL(snd_hda_check_board_config);
-
-/**
- * snd_hda_check_board_codec_sid_config - compare the current codec
- subsystem ID with the
- config table
-
- This is important for Gateway notebooks with SB450 HDA Audio
- where the vendor ID of the PCI device is:
- ATI Technologies Inc SB450 HDA Audio [1002:437b]
- and the vendor/subvendor are found only at the codec.
-
- * @codec: the HDA codec
- * @num_configs: number of config enums
- * @models: array of model name strings
- * @tbl: configuration table, terminated by null entries
- *
- * Compares the modelname or PCI subsystem id of the current codec with the
- * given configuration table. If a matching entry is found, returns its
- * config value (supposed to be 0 or positive).
- *
- * If no entries are matching, the function returns a negative value.
- */
-int snd_hda_check_board_codec_sid_config(struct hda_codec *codec,
- int num_configs, const char * const *models,
- const struct snd_pci_quirk *tbl)
-{
- const struct snd_pci_quirk *q;
-
- /* Search for codec ID */
- for (q = tbl; q->subvendor; q++) {
- unsigned int mask = 0xffff0000 | q->subdevice_mask;
- unsigned int id = (q->subdevice | (q->subvendor << 16)) & mask;
- if ((codec->subsystem_id & mask) == id)
- break;
- }
-
- if (!q->subvendor)
- return -1;
-
- tbl = q;
-
- if (tbl->value >= 0 && tbl->value < num_configs) {
-#ifdef CONFIG_SND_DEBUG_VERBOSE
- char tmp[10];
- const char *model = NULL;
- if (models)
- model = models[tbl->value];
- if (!model) {
- sprintf(tmp, "#%d", tbl->value);
- model = tmp;
- }
- codec_info(codec, "model '%s' is selected for config %x:%x (%s)\n",
- model, tbl->subvendor, tbl->subdevice,
- (tbl->name ? tbl->name : "Unknown device"));
-#endif
- return tbl->value;
- }
- return -1;
-}
-EXPORT_SYMBOL_GPL(snd_hda_check_board_codec_sid_config);
-
-/**
* snd_hda_add_new_ctls - create controls from the array
* @codec: the HDA codec
* @knew: the array of struct snd_kcontrol_new
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index bbc5a1392c75..9c8820f21f94 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -687,6 +687,4 @@ snd_hda_codec_load_dsp_cleanup(struct hda_codec *codec,
struct snd_dma_buffer *dmab) {}
#endif
-#define EXPORT_SYMBOL_HDA(sym) EXPORT_SYMBOL_GPL(sym)
-
#endif /* __SOUND_HDA_CODEC_H */
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index b956449ddada..64220c08bd98 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -31,6 +31,7 @@
#include <linux/module.h>
#include <sound/core.h>
#include <sound/jack.h>
+#include <sound/tlv.h>
#include "hda_codec.h"
#include "hda_local.h"
#include "hda_auto_parser.h"
@@ -518,18 +519,6 @@ static unsigned int amp_val_replace_channels(unsigned int val, unsigned int chs)
return val;
}
-/* check whether the widget has the given amp capability for the direction */
-static bool check_amp_caps(struct hda_codec *codec, hda_nid_t nid,
- int dir, unsigned int bits)
-{
- if (!nid)
- return false;
- if (get_wcaps(codec, nid) & (1 << (dir + 1)))
- if (query_amp_caps(codec, nid, dir) & bits)
- return true;
- return false;
-}
-
static bool same_amp_caps(struct hda_codec *codec, hda_nid_t nid1,
hda_nid_t nid2, int dir)
{
@@ -539,11 +528,6 @@ static bool same_amp_caps(struct hda_codec *codec, hda_nid_t nid1,
query_amp_caps(codec, nid2, dir));
}
-#define nid_has_mute(codec, nid, dir) \
- check_amp_caps(codec, nid, dir, (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE))
-#define nid_has_volume(codec, nid, dir) \
- check_amp_caps(codec, nid, dir, AC_AMPCAP_NUM_STEPS)
-
/* look for a widget suitable for assigning a mute switch in the path */
static hda_nid_t look_for_out_mute_nid(struct hda_codec *codec,
struct nid_path *path)
@@ -1105,6 +1089,7 @@ enum {
*/
static int assign_out_path_ctls(struct hda_codec *codec, struct nid_path *path)
{
+ struct hda_gen_spec *spec = codec->spec;
hda_nid_t nid;
unsigned int val;
int badness = 0;
@@ -1119,6 +1104,8 @@ static int assign_out_path_ctls(struct hda_codec *codec, struct nid_path *path)
nid = look_for_out_vol_nid(codec, path);
if (nid) {
val = HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_OUTPUT);
+ if (spec->dac_min_mute)
+ val |= HDA_AMP_VAL_MIN_MUTE;
if (is_ctl_used(codec, val, NID_PATH_VOL_CTL))
badness += BAD_SHARED_VOL;
else
@@ -1880,9 +1867,12 @@ static int parse_output_paths(struct hda_codec *codec)
path = snd_hda_get_path_from_idx(codec, spec->out_paths[0]);
if (path)
spec->vmaster_nid = look_for_out_vol_nid(codec, path);
- if (spec->vmaster_nid)
+ if (spec->vmaster_nid) {
snd_hda_set_vmaster_tlv(codec, spec->vmaster_nid,
HDA_OUTPUT, spec->vmaster_tlv);
+ if (spec->dac_min_mute)
+ spec->vmaster_tlv[3] |= TLV_DB_SCALE_MUTE;
+ }
}
/* set initial pinctl targets */
@@ -2025,7 +2015,8 @@ static int create_speaker_out_ctls(struct hda_codec *codec)
* independent HP controls
*/
-static void call_hp_automute(struct hda_codec *codec, struct hda_jack_tbl *jack);
+static void call_hp_automute(struct hda_codec *codec,
+ struct hda_jack_callback *jack);
static int indep_hp_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
@@ -3941,7 +3932,8 @@ static void call_update_outputs(struct hda_codec *codec)
}
/* standard HP-automute helper */
-void snd_hda_gen_hp_automute(struct hda_codec *codec, struct hda_jack_tbl *jack)
+void snd_hda_gen_hp_automute(struct hda_codec *codec,
+ struct hda_jack_callback *jack)
{
struct hda_gen_spec *spec = codec->spec;
hda_nid_t *pins = spec->autocfg.hp_pins;
@@ -3961,7 +3953,8 @@ void snd_hda_gen_hp_automute(struct hda_codec *codec, struct hda_jack_tbl *jack)
EXPORT_SYMBOL_GPL(snd_hda_gen_hp_automute);
/* standard line-out-automute helper */
-void snd_hda_gen_line_automute(struct hda_codec *codec, struct hda_jack_tbl *jack)
+void snd_hda_gen_line_automute(struct hda_codec *codec,
+ struct hda_jack_callback *jack)
{
struct hda_gen_spec *spec = codec->spec;
@@ -3981,7 +3974,8 @@ void snd_hda_gen_line_automute(struct hda_codec *codec, struct hda_jack_tbl *jac
EXPORT_SYMBOL_GPL(snd_hda_gen_line_automute);
/* standard mic auto-switch helper */
-void snd_hda_gen_mic_autoswitch(struct hda_codec *codec, struct hda_jack_tbl *jack)
+void snd_hda_gen_mic_autoswitch(struct hda_codec *codec,
+ struct hda_jack_callback *jack)
{
struct hda_gen_spec *spec = codec->spec;
int i;
@@ -4004,7 +3998,8 @@ void snd_hda_gen_mic_autoswitch(struct hda_codec *codec, struct hda_jack_tbl *ja
EXPORT_SYMBOL_GPL(snd_hda_gen_mic_autoswitch);
/* call appropriate hooks */
-static void call_hp_automute(struct hda_codec *codec, struct hda_jack_tbl *jack)
+static void call_hp_automute(struct hda_codec *codec,
+ struct hda_jack_callback *jack)
{
struct hda_gen_spec *spec = codec->spec;
if (spec->hp_automute_hook)
@@ -4014,7 +4009,7 @@ static void call_hp_automute(struct hda_codec *codec, struct hda_jack_tbl *jack)
}
static void call_line_automute(struct hda_codec *codec,
- struct hda_jack_tbl *jack)
+ struct hda_jack_callback *jack)
{
struct hda_gen_spec *spec = codec->spec;
if (spec->line_automute_hook)
@@ -4024,7 +4019,7 @@ static void call_line_automute(struct hda_codec *codec,
}
static void call_mic_autoswitch(struct hda_codec *codec,
- struct hda_jack_tbl *jack)
+ struct hda_jack_callback *jack)
{
struct hda_gen_spec *spec = codec->spec;
if (spec->mic_autoswitch_hook)
@@ -4173,7 +4168,7 @@ static int check_auto_mute_availability(struct hda_codec *codec)
if (!is_jack_detectable(codec, nid))
continue;
codec_dbg(codec, "Enable HP auto-muting on NID 0x%x\n", nid);
- snd_hda_jack_detect_enable_callback(codec, nid, HDA_GEN_HP_EVENT,
+ snd_hda_jack_detect_enable_callback(codec, nid,
call_hp_automute);
spec->detect_hp = 1;
}
@@ -4186,7 +4181,6 @@ static int check_auto_mute_availability(struct hda_codec *codec)
continue;
codec_dbg(codec, "Enable Line-Out auto-muting on NID 0x%x\n", nid);
snd_hda_jack_detect_enable_callback(codec, nid,
- HDA_GEN_FRONT_EVENT,
call_line_automute);
spec->detect_lo = 1;
}
@@ -4228,7 +4222,6 @@ static bool auto_mic_check_imux(struct hda_codec *codec)
for (i = 1; i < spec->am_num_entries; i++)
snd_hda_jack_detect_enable_callback(codec,
spec->am_entry[i].pin,
- HDA_GEN_MIC_EVENT,
call_mic_autoswitch);
return true;
}
diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
index bb2dea743986..61dd5153f512 100644
--- a/sound/pci/hda/hda_generic.h
+++ b/sound/pci/hda/hda_generic.h
@@ -12,12 +12,6 @@
#ifndef __SOUND_HDA_GENERIC_H
#define __SOUND_HDA_GENERIC_H
-/* unsol event tags */
-enum {
- HDA_GEN_HP_EVENT = 1, HDA_GEN_FRONT_EVENT, HDA_GEN_MIC_EVENT,
- HDA_GEN_LAST_EVENT = HDA_GEN_MIC_EVENT
-};
-
/* table entry for multi-io paths */
struct hda_multi_io {
hda_nid_t pin; /* multi-io widget pin NID */
@@ -231,6 +225,7 @@ struct hda_gen_spec {
unsigned int add_stereo_mix_input:1; /* add aamix as a capture src */
unsigned int add_jack_modes:1; /* add i/o jack mode enum ctls */
unsigned int power_down_unused:1; /* power down unused widgets */
+ unsigned int dac_min_mute:1; /* minimal = mute for DACs */
/* other internal flags */
unsigned int no_analog:1; /* digital I/O only */
@@ -289,11 +284,11 @@ struct hda_gen_spec {
/* automute / autoswitch hooks */
void (*hp_automute_hook)(struct hda_codec *codec,
- struct hda_jack_tbl *tbl);
+ struct hda_jack_callback *cb);
void (*line_automute_hook)(struct hda_codec *codec,
- struct hda_jack_tbl *tbl);
+ struct hda_jack_callback *cb);
void (*mic_autoswitch_hook)(struct hda_codec *codec,
- struct hda_jack_tbl *tbl);
+ struct hda_jack_callback *cb);
};
int snd_hda_gen_spec_init(struct hda_gen_spec *spec);
@@ -325,11 +320,11 @@ int snd_hda_gen_build_pcms(struct hda_codec *codec);
/* standard jack event callbacks */
void snd_hda_gen_hp_automute(struct hda_codec *codec,
- struct hda_jack_tbl *jack);
+ struct hda_jack_callback *jack);
void snd_hda_gen_line_automute(struct hda_codec *codec,
- struct hda_jack_tbl *jack);
+ struct hda_jack_callback *jack);
void snd_hda_gen_mic_autoswitch(struct hda_codec *codec,
- struct hda_jack_tbl *jack);
+ struct hda_jack_callback *jack);
void snd_hda_gen_update_outputs(struct hda_codec *codec);
#ifdef CONFIG_PM
diff --git a/sound/pci/hda/hda_jack.c b/sound/pci/hda/hda_jack.c
index 9746d73cec52..f56765ae73a7 100644
--- a/sound/pci/hda/hda_jack.c
+++ b/sound/pci/hda/hda_jack.c
@@ -94,7 +94,7 @@ EXPORT_SYMBOL_GPL(snd_hda_jack_tbl_get_from_tag);
/**
* snd_hda_jack_tbl_new - create a jack-table entry for the given NID
*/
-struct hda_jack_tbl *
+static struct hda_jack_tbl *
snd_hda_jack_tbl_new(struct hda_codec *codec, hda_nid_t nid)
{
struct hda_jack_tbl *jack = snd_hda_jack_tbl_get(codec, nid);
@@ -108,21 +108,24 @@ snd_hda_jack_tbl_new(struct hda_codec *codec, hda_nid_t nid)
jack->tag = codec->jacktbl.used;
return jack;
}
-EXPORT_SYMBOL_GPL(snd_hda_jack_tbl_new);
void snd_hda_jack_tbl_clear(struct hda_codec *codec)
{
+ struct hda_jack_tbl *jack = codec->jacktbl.list;
+ int i;
+
+ for (i = 0; i < codec->jacktbl.used; i++, jack++) {
+ struct hda_jack_callback *cb, *next;
#ifdef CONFIG_SND_HDA_INPUT_JACK
- /* free jack instances manually when clearing/reconfiguring */
- if (!codec->bus->shutdown && codec->jacktbl.list) {
- struct hda_jack_tbl *jack = codec->jacktbl.list;
- int i;
- for (i = 0; i < codec->jacktbl.used; i++, jack++) {
- if (jack->jack)
- snd_device_free(codec->bus->card, jack->jack);
+ /* free jack instances manually when clearing/reconfiguring */
+ if (!codec->bus->shutdown && jack->jack)
+ snd_device_free(codec->bus->card, jack->jack);
+#endif
+ for (cb = jack->callback; cb; cb = next) {
+ next = cb->next;
+ kfree(cb);
}
}
-#endif
snd_array_free(&codec->jacktbl);
}
@@ -215,33 +218,49 @@ EXPORT_SYMBOL_GPL(snd_hda_jack_detect_state);
/**
* snd_hda_jack_detect_enable - enable the jack-detection
+ *
+ * In the case of error, the return value will be a pointer embedded with
+ * errno. Check and handle the return value appropriately with standard
+ * macros such as @IS_ERR() and @PTR_ERR().
*/
-int snd_hda_jack_detect_enable_callback(struct hda_codec *codec, hda_nid_t nid,
- unsigned char action,
- hda_jack_callback cb)
+struct hda_jack_callback *
+snd_hda_jack_detect_enable_callback(struct hda_codec *codec, hda_nid_t nid,
+ hda_jack_callback_fn func)
{
- struct hda_jack_tbl *jack = snd_hda_jack_tbl_new(codec, nid);
+ struct hda_jack_tbl *jack;
+ struct hda_jack_callback *callback = NULL;
+ int err;
+
+ jack = snd_hda_jack_tbl_new(codec, nid);
if (!jack)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
+ if (func) {
+ callback = kzalloc(sizeof(*callback), GFP_KERNEL);
+ if (!callback)
+ return ERR_PTR(-ENOMEM);
+ callback->func = func;
+ callback->tbl = jack;
+ callback->next = jack->callback;
+ jack->callback = callback;
+ }
+
if (jack->jack_detect)
- return 0; /* already registered */
+ return callback; /* already registered */
jack->jack_detect = 1;
- if (action)
- jack->action = action;
- if (cb)
- jack->callback = cb;
if (codec->jackpoll_interval > 0)
- return 0; /* No unsol if we're polling instead */
- return snd_hda_codec_write_cache(codec, nid, 0,
+ return callback; /* No unsol if we're polling instead */
+ err = snd_hda_codec_write_cache(codec, nid, 0,
AC_VERB_SET_UNSOLICITED_ENABLE,
AC_USRSP_EN | jack->tag);
+ if (err < 0)
+ return ERR_PTR(err);
+ return callback;
}
EXPORT_SYMBOL_GPL(snd_hda_jack_detect_enable_callback);
-int snd_hda_jack_detect_enable(struct hda_codec *codec, hda_nid_t nid,
- unsigned char action)
+int snd_hda_jack_detect_enable(struct hda_codec *codec, hda_nid_t nid)
{
- return snd_hda_jack_detect_enable_callback(codec, nid, action, NULL);
+ return PTR_ERR_OR_ZERO(snd_hda_jack_detect_enable_callback(codec, nid, NULL));
}
EXPORT_SYMBOL_GPL(snd_hda_jack_detect_enable);
@@ -431,7 +450,7 @@ static int add_jack_kctl(struct hda_codec *codec, hda_nid_t nid,
return err;
if (!phantom_jack)
- return snd_hda_jack_detect_enable(codec, nid, 0);
+ return snd_hda_jack_detect_enable(codec, nid);
return 0;
}
@@ -498,13 +517,17 @@ EXPORT_SYMBOL_GPL(snd_hda_jack_add_kctls);
static void call_jack_callback(struct hda_codec *codec,
struct hda_jack_tbl *jack)
{
- if (jack->callback)
- jack->callback(codec, jack);
+ struct hda_jack_callback *cb;
+
+ for (cb = jack->callback; cb; cb = cb->next)
+ cb->func(codec, cb);
if (jack->gated_jack) {
struct hda_jack_tbl *gated =
snd_hda_jack_tbl_get(codec, jack->gated_jack);
- if (gated && gated->callback)
- gated->callback(codec, gated);
+ if (gated) {
+ for (cb = gated->callback; cb; cb = cb->next)
+ cb->func(codec, cb);
+ }
}
}
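
A minimal caller sketch of the reworked enable-callback API above (my_hp_event() and MY_HP_NID are illustrative names, not part of the patch): snd_hda_jack_detect_enable_callback() now returns either a struct hda_jack_callback pointer or an ERR_PTR-encoded errno, so a caller that cares about failure checks the result with IS_ERR()/PTR_ERR() instead of a plain int:

/* Sketch only: my_hp_event() and MY_HP_NID are hypothetical, for illustration. */
static void my_hp_event(struct hda_codec *codec, struct hda_jack_callback *cb)
{
	/* the owning jack-table entry is reachable through cb->tbl */
	codec_dbg(codec, "jack event on NID 0x%x\n", cb->tbl->nid);
}

static int my_enable_hp_detect(struct hda_codec *codec)
{
	struct hda_jack_callback *cb;

	cb = snd_hda_jack_detect_enable_callback(codec, MY_HP_NID, my_hp_event);
	if (IS_ERR(cb))
		return PTR_ERR(cb);	/* kzalloc() or the unsol verb write failed */
	return 0;
}
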
diff --git a/sound/pci/hda/hda_jack.h b/sound/pci/hda/hda_jack.h
index 46e1ea83ce3c..13cb375454f6 100644
--- a/sound/pci/hda/hda_jack.h
+++ b/sound/pci/hda/hda_jack.h
@@ -12,17 +12,25 @@
#ifndef __SOUND_HDA_JACK_H
#define __SOUND_HDA_JACK_H
+#include <linux/err.h>
+
struct auto_pin_cfg;
struct hda_jack_tbl;
+struct hda_jack_callback;
+
+typedef void (*hda_jack_callback_fn) (struct hda_codec *, struct hda_jack_callback *);
-typedef void (*hda_jack_callback) (struct hda_codec *, struct hda_jack_tbl *);
+struct hda_jack_callback {
+ struct hda_jack_tbl *tbl;
+ hda_jack_callback_fn func;
+ unsigned int private_data; /* arbitrary data */
+ struct hda_jack_callback *next;
+};
struct hda_jack_tbl {
hda_nid_t nid;
- unsigned char action; /* event action (0 = none) */
unsigned char tag; /* unsol event tag */
- unsigned int private_data; /* arbitrary data */
- hda_jack_callback callback;
+ struct hda_jack_callback *callback;
/* jack-detection stuff */
unsigned int pin_sense; /* cached pin-sense value */
unsigned int jack_detect:1; /* capable of jack-detection? */
@@ -43,34 +51,14 @@ snd_hda_jack_tbl_get(struct hda_codec *codec, hda_nid_t nid);
struct hda_jack_tbl *
snd_hda_jack_tbl_get_from_tag(struct hda_codec *codec, unsigned char tag);
-struct hda_jack_tbl *
-snd_hda_jack_tbl_new(struct hda_codec *codec, hda_nid_t nid);
void snd_hda_jack_tbl_clear(struct hda_codec *codec);
-/**
- * snd_hda_jack_get_action - get jack-tbl entry for the tag
- *
- * Call this from the unsol event handler to get the assigned action for the
- * event. This will mark the dirty flag for the later reporting, too.
- */
-static inline unsigned char
-snd_hda_jack_get_action(struct hda_codec *codec, unsigned int tag)
-{
- struct hda_jack_tbl *jack = snd_hda_jack_tbl_get_from_tag(codec, tag);
- if (jack) {
- jack->jack_dirty = 1;
- return jack->action;
- }
- return 0;
-}
-
void snd_hda_jack_set_dirty_all(struct hda_codec *codec);
-int snd_hda_jack_detect_enable(struct hda_codec *codec, hda_nid_t nid,
- unsigned char action);
-int snd_hda_jack_detect_enable_callback(struct hda_codec *codec, hda_nid_t nid,
- unsigned char action,
- hda_jack_callback cb);
+int snd_hda_jack_detect_enable(struct hda_codec *codec, hda_nid_t nid);
+struct hda_jack_callback *
+snd_hda_jack_detect_enable_callback(struct hda_codec *codec, hda_nid_t nid,
+ hda_jack_callback_fn cb);
int snd_hda_jack_set_gating_jack(struct hda_codec *codec, hda_nid_t gated_nid,
hda_nid_t gating_nid);
diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
index 364bb413e02a..7eb44e78e141 100644
--- a/sound/pci/hda/hda_local.h
+++ b/sound/pci/hda/hda_local.h
@@ -371,12 +371,6 @@ void snd_print_pcm_bits(int pcm, char *buf, int buflen);
/*
* Misc
*/
-int snd_hda_check_board_config(struct hda_codec *codec, int num_configs,
- const char * const *modelnames,
- const struct snd_pci_quirk *pci_list);
-int snd_hda_check_board_codec_sid_config(struct hda_codec *codec,
- int num_configs, const char * const *models,
- const struct snd_pci_quirk *tbl);
int snd_hda_add_new_ctls(struct hda_codec *codec,
const struct snd_kcontrol_new *knew);
@@ -609,6 +603,14 @@ int snd_hda_override_amp_caps(struct hda_codec *codec, hda_nid_t nid, int dir,
u32 snd_hda_query_pin_caps(struct hda_codec *codec, hda_nid_t nid);
int snd_hda_override_pin_caps(struct hda_codec *codec, hda_nid_t nid,
unsigned int caps);
+bool snd_hda_check_amp_caps(struct hda_codec *codec, hda_nid_t nid,
+ int dir, unsigned int bits);
+
+#define nid_has_mute(codec, nid, dir) \
+ snd_hda_check_amp_caps(codec, nid, dir, (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE))
+#define nid_has_volume(codec, nid, dir) \
+ snd_hda_check_amp_caps(codec, nid, dir, AC_AMPCAP_NUM_STEPS)
+
/* flags for hda_nid_item */
#define HDA_NID_ITEM_AMP (1<<0)
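
A short usage sketch of the two helpers exported above (my_dac_is_controllable() is a hypothetical caller): nid_has_volume() and nid_has_mute() simply wrap snd_hda_check_amp_caps() to test whether a widget's amp advertises volume steps, or a mute bit (including the min-equals-mute quirk), before a mixer control is built on it:

/* Sketch: dac is a hypothetical output widget NID picked by the parser. */
static bool my_dac_is_controllable(struct hda_codec *codec, hda_nid_t dac)
{
	/* a volume control only makes sense if the amp has volume steps ... */
	if (!nid_has_volume(codec, dac, HDA_OUTPUT))
		return false;
	/* ... and a mute switch needs either a mute bit or the min=mute quirk */
	return nid_has_mute(codec, dac, HDA_OUTPUT);
}
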
diff --git a/sound/pci/hda/hda_sysfs.c b/sound/pci/hda/hda_sysfs.c
index e2079090ca6f..9b49f156a12e 100644
--- a/sound/pci/hda/hda_sysfs.c
+++ b/sound/pci/hda/hda_sysfs.c
@@ -514,7 +514,7 @@ enum {
static inline int strmatch(const char *a, const char *b)
{
- return strnicmp(a, b, strlen(b)) == 0;
+ return strncasecmp(a, b, strlen(b)) == 0;
}
/* parse the contents after the line "[codec]"
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 5d8455e2dacd..4f7ffa8c4a0d 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -3224,8 +3224,14 @@ static void ca0132_unsol_hp_delayed(struct work_struct *work)
{
struct ca0132_spec *spec = container_of(
to_delayed_work(work), struct ca0132_spec, unsol_hp_work);
+ struct hda_jack_tbl *jack;
+
ca0132_select_out(spec->codec);
- snd_hda_jack_report_sync(spec->codec);
+ jack = snd_hda_jack_tbl_get(spec->codec, UNSOL_TAG_HP);
+ if (jack) {
+ jack->block_report = 0;
+ snd_hda_jack_report_sync(spec->codec);
+ }
}
static void ca0132_set_dmic(struct hda_codec *codec, int enable);
@@ -4114,12 +4120,6 @@ static void init_input(struct hda_codec *codec, hda_nid_t pin, hda_nid_t adc)
}
}
-static void ca0132_init_unsol(struct hda_codec *codec)
-{
- snd_hda_jack_detect_enable(codec, UNSOL_TAG_HP, UNSOL_TAG_HP);
- snd_hda_jack_detect_enable(codec, UNSOL_TAG_AMIC1, UNSOL_TAG_AMIC1);
-}
-
static void refresh_amp_caps(struct hda_codec *codec, hda_nid_t nid, int dir)
{
unsigned int caps;
@@ -4390,7 +4390,8 @@ static void ca0132_download_dsp(struct hda_codec *codec)
ca0132_set_dsp_msr(codec, true);
}
-static void ca0132_process_dsp_response(struct hda_codec *codec)
+static void ca0132_process_dsp_response(struct hda_codec *codec,
+ struct hda_jack_callback *callback)
{
struct ca0132_spec *spec = codec->spec;
@@ -4403,36 +4404,31 @@ static void ca0132_process_dsp_response(struct hda_codec *codec)
dspio_clear_response_queue(codec);
}
-static void ca0132_unsol_event(struct hda_codec *codec, unsigned int res)
+static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
{
struct ca0132_spec *spec = codec->spec;
- if (((res >> AC_UNSOL_RES_TAG_SHIFT) & 0x3f) == UNSOL_TAG_DSP) {
- ca0132_process_dsp_response(codec);
- } else {
- res = snd_hda_jack_get_action(codec,
- (res >> AC_UNSOL_RES_TAG_SHIFT) & 0x3f);
-
- codec_dbg(codec, "snd_hda_jack_get_action: 0x%x\n", res);
-
- switch (res) {
- case UNSOL_TAG_HP:
- /* Delay enabling the HP amp, to let the mic-detection
- * state machine run.
- */
- cancel_delayed_work_sync(&spec->unsol_hp_work);
- queue_delayed_work(codec->bus->workq,
- &spec->unsol_hp_work,
- msecs_to_jiffies(500));
- break;
- case UNSOL_TAG_AMIC1:
- ca0132_select_mic(codec);
- snd_hda_jack_report_sync(codec);
- break;
- default:
- break;
- }
- }
+ /* Delay enabling the HP amp, to let the mic-detection
+ * state machine run.
+ */
+ cancel_delayed_work_sync(&spec->unsol_hp_work);
+ queue_delayed_work(codec->bus->workq, &spec->unsol_hp_work,
+ msecs_to_jiffies(500));
+ cb->tbl->block_report = 1;
+}
+
+static void amic_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
+{
+ ca0132_select_mic(codec);
+}
+
+static void ca0132_init_unsol(struct hda_codec *codec)
+{
+ snd_hda_jack_detect_enable_callback(codec, UNSOL_TAG_HP, hp_callback);
+ snd_hda_jack_detect_enable_callback(codec, UNSOL_TAG_AMIC1,
+ amic_callback);
+ snd_hda_jack_detect_enable_callback(codec, UNSOL_TAG_DSP,
+ ca0132_process_dsp_response);
}
/*
@@ -4443,8 +4439,6 @@ static void ca0132_unsol_event(struct hda_codec *codec, unsigned int res)
static struct hda_verb ca0132_base_init_verbs[] = {
/*enable ct extension*/
{0x15, VENDOR_CHIPIO_CT_EXTENSIONS_ENABLE, 0x1},
- /*enable DSP node unsol, needed for DSP download*/
- {0x16, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | UNSOL_TAG_DSP},
{}
};
@@ -4561,6 +4555,8 @@ static int ca0132_init(struct hda_codec *codec)
snd_hda_power_up(codec);
+ ca0132_init_unsol(codec);
+
ca0132_init_params(codec);
ca0132_init_flags(codec);
snd_hda_sequence_write(codec, spec->base_init_verbs);
@@ -4583,8 +4579,6 @@ static int ca0132_init(struct hda_codec *codec)
for (i = 0; i < spec->num_init_verbs; i++)
snd_hda_sequence_write(codec, spec->init_verbs[i]);
- ca0132_init_unsol(codec);
-
ca0132_select_out(codec);
ca0132_select_mic(codec);
@@ -4612,7 +4606,7 @@ static struct hda_codec_ops ca0132_patch_ops = {
.build_pcms = ca0132_build_pcms,
.init = ca0132_init,
.free = ca0132_free,
- .unsol_event = ca0132_unsol_event,
+ .unsol_event = snd_hda_jack_unsol_event,
};
static void ca0132_config(struct hda_codec *codec)
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 3db724eaa53c..1589c9bcce3e 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -135,8 +135,6 @@ enum {
#define CS421X_IDX_DAC_CFG 0x03
#define CS421X_IDX_SPK_CTL 0x04
-#define SPDIF_EVENT 0x04
-
/* Cirrus Logic CS4213 is like CS4210 but does not have SPDIF input/output */
#define CS4213_VENDOR_NID 0x09
@@ -984,7 +982,7 @@ static void cs4210_pinmux_init(struct hda_codec *codec)
}
static void cs4210_spdif_automute(struct hda_codec *codec,
- struct hda_jack_tbl *tbl)
+ struct hda_jack_callback *tbl)
{
struct cs_spec *spec = codec->spec;
bool spdif_present = false;
@@ -1019,7 +1017,6 @@ static void parse_cs421x_digital(struct hda_codec *codec)
if (get_wcaps(codec, nid) & AC_WCAP_UNSOL_CAP) {
spec->spdif_detect = 1;
snd_hda_jack_detect_enable_callback(codec, nid,
- SPDIF_EVENT,
cs4210_spdif_automute);
}
}
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 47ccb8f44adb..71e4bad06345 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -26,7 +26,6 @@
#include <linux/module.h>
#include <sound/core.h>
#include <sound/jack.h>
-#include <sound/tlv.h>
#include "hda_codec.h"
#include "hda_local.h"
@@ -394,7 +393,8 @@ static void olpc_xo_update_mic_pins(struct hda_codec *codec)
}
/* mic_autoswitch hook */
-static void olpc_xo_automic(struct hda_codec *codec, struct hda_jack_tbl *jack)
+static void olpc_xo_automic(struct hda_codec *codec,
+ struct hda_jack_callback *jack)
{
struct conexant_spec *spec = codec->spec;
int saved_cached_write = codec->cached_write;
@@ -752,6 +752,7 @@ static const struct hda_model_fixup cxt5051_fixup_models[] = {
static const struct snd_pci_quirk cxt5066_fixups[] = {
SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC),
+ SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC),
SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO),
SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
@@ -787,6 +788,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = {
*/
static void add_cx5051_fake_mutes(struct hda_codec *codec)
{
+ struct conexant_spec *spec = codec->spec;
static hda_nid_t out_nids[] = {
0x10, 0x11, 0
};
@@ -796,6 +798,7 @@ static void add_cx5051_fake_mutes(struct hda_codec *codec)
snd_hda_override_amp_caps(codec, *p, HDA_OUTPUT,
AC_AMPCAP_MIN_MUTE |
query_amp_caps(codec, *p, HDA_OUTPUT));
+ spec->gen.dac_min_mute = true;
}
static int patch_conexant_auto(struct hda_codec *codec)
@@ -868,11 +871,6 @@ static int patch_conexant_auto(struct hda_codec *codec)
if (err < 0)
goto error;
- if (codec->vendor_id == 0x14f15051) {
- /* minimum value is actually mute */
- spec->gen.vmaster_tlv[3] |= TLV_DB_SCALE_MUTE;
- }
-
codec->patch_ops = cx_auto_patch_ops;
/* Some laptops with Conexant chips show stalls in S3 resume,
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 99d7d7fecaad..39862e98551c 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1163,17 +1163,23 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll);
-static void jack_callback(struct hda_codec *codec, struct hda_jack_tbl *jack)
+static void check_presence_and_report(struct hda_codec *codec, hda_nid_t nid)
{
struct hdmi_spec *spec = codec->spec;
- int pin_idx = pin_nid_to_pin_index(codec, jack->nid);
+ int pin_idx = pin_nid_to_pin_index(codec, nid);
+
if (pin_idx < 0)
return;
-
if (hdmi_present_sense(get_pin(spec, pin_idx), 1))
snd_hda_jack_report_sync(codec);
}
+static void jack_callback(struct hda_codec *codec,
+ struct hda_jack_callback *jack)
+{
+ check_presence_and_report(codec, jack->tbl->nid);
+}
+
static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
{
int tag = res >> AC_UNSOL_RES_TAG_SHIFT;
@@ -1190,7 +1196,7 @@ static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
codec->addr, jack->nid, dev_entry, !!(res & AC_UNSOL_RES_IA),
!!(res & AC_UNSOL_RES_PD), !!(res & AC_UNSOL_RES_ELDV));
- jack_callback(codec, jack);
+ check_presence_and_report(codec, jack->nid);
}
static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res)
@@ -2165,7 +2171,7 @@ static int generic_hdmi_init(struct hda_codec *codec)
hda_nid_t pin_nid = per_pin->pin_nid;
hdmi_init_pin(codec, pin_nid);
- snd_hda_jack_detect_enable_callback(codec, pin_nid, pin_nid,
+ snd_hda_jack_detect_enable_callback(codec, pin_nid,
codec->jackpoll_interval > 0 ? jack_callback : NULL);
}
return 0;
@@ -2428,7 +2434,7 @@ static int simple_playback_init(struct hda_codec *codec)
if (get_wcaps(codec, pin) & AC_WCAP_OUT_AMP)
snd_hda_codec_write(codec, pin, 0, AC_VERB_SET_AMP_GAIN_MUTE,
AMP_OUT_UNMUTE);
- snd_hda_jack_detect_enable(codec, pin, pin);
+ snd_hda_jack_detect_enable(codec, pin);
return 0;
}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 1ba22fb527c2..bc86c36b4bfa 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -40,9 +40,6 @@
/* keep halting ALC5505 DSP, for power saving */
#define HALT_REALTEK_ALC5505
-/* unsol event tags */
-#define ALC_DCVOL_EVENT 0x08
-
/* for GPIO Poll */
#define GPIO_MASK 0x03
@@ -93,11 +90,6 @@ struct alc_spec {
struct alc_customize_define cdefine;
unsigned int parse_flags; /* flag for snd_hda_parse_pin_defcfg() */
- /* inverted dmic fix */
- unsigned int inv_dmic_fixup:1; /* has inverted digital-mic workaround */
- unsigned int inv_dmic_muted:1; /* R-ch of inv d-mic is muted? */
- hda_nid_t inv_dmic_pin;
-
/* mute LED for HP laptops, see alc269_fixup_mic_mute_hook() */
int mute_led_polarity;
hda_nid_t mute_led_nid;
@@ -129,6 +121,83 @@ struct alc_spec {
};
/*
+ * COEF access helper functions
+ */
+
+static int alc_read_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
+ unsigned int coef_idx)
+{
+ unsigned int val;
+
+ snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_COEF_INDEX, coef_idx);
+ val = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_PROC_COEF, 0);
+ return val;
+}
+
+#define alc_read_coef_idx(codec, coef_idx) \
+ alc_read_coefex_idx(codec, 0x20, coef_idx)
+
+static void alc_write_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
+ unsigned int coef_idx, unsigned int coef_val)
+{
+ snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_COEF_INDEX, coef_idx);
+ snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_PROC_COEF, coef_val);
+}
+
+#define alc_write_coef_idx(codec, coef_idx, coef_val) \
+ alc_write_coefex_idx(codec, 0x20, coef_idx, coef_val)
+
+static void alc_update_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
+ unsigned int coef_idx, unsigned int mask,
+ unsigned int bits_set)
+{
+ unsigned int val = alc_read_coefex_idx(codec, nid, coef_idx);
+
+ if (val != -1)
+ alc_write_coefex_idx(codec, nid, coef_idx,
+ (val & ~mask) | bits_set);
+}
+
+#define alc_update_coef_idx(codec, coef_idx, mask, bits_set) \
+ alc_update_coefex_idx(codec, 0x20, coef_idx, mask, bits_set)
+
+/* a special bypass for COEF 0; read the cached value at the second time */
+static unsigned int alc_get_coef0(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+
+ if (!spec->coef0)
+ spec->coef0 = alc_read_coef_idx(codec, 0);
+ return spec->coef0;
+}
+
+/* coef writes/updates batch */
+struct coef_fw {
+ unsigned char nid;
+ unsigned char idx;
+ unsigned short mask;
+ unsigned short val;
+};
+
+#define UPDATE_COEFEX(_nid, _idx, _mask, _val) \
+ { .nid = (_nid), .idx = (_idx), .mask = (_mask), .val = (_val) }
+#define WRITE_COEFEX(_nid, _idx, _val) UPDATE_COEFEX(_nid, _idx, -1, _val)
+#define WRITE_COEF(_idx, _val) WRITE_COEFEX(0x20, _idx, _val)
+#define UPDATE_COEF(_idx, _mask, _val) UPDATE_COEFEX(0x20, _idx, _mask, _val)
+
+static void alc_process_coef_fw(struct hda_codec *codec,
+ const struct coef_fw *fw)
+{
+ for (; fw->nid; fw++) {
+ if (fw->mask == (unsigned short)-1)
+ alc_write_coefex_idx(codec, fw->nid, fw->idx, fw->val);
+ else
+ alc_update_coefex_idx(codec, fw->nid, fw->idx,
+ fw->mask, fw->val);
+ }
+}
+
+/*
* Append the given mixer and verb elements for the later use
* The mixer array is referred in build_controls(), and init_verbs are
 * called in init().
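
As a quick illustration of the batch helpers introduced in this hunk (my_coefs is a hypothetical table; the index/value pairs are borrowed from fixups elsewhere in this patch): a coef_fw table is a zero-terminated list of WRITE_COEF()/UPDATE_COEF() entries that alc_process_coef_fw() walks, doing a plain write when the mask is -1 and a read-modify-write otherwise:

/* Sketch: a hypothetical two-entry COEF batch on the usual 0x20 vendor widget. */
static const struct coef_fw my_coefs[] = {
	WRITE_COEF(0x1e, 0x0080),		/* plain write (mask == -1 internally) */
	UPDATE_COEF(0x04, 1 << 11, 1 << 11),	/* set one bit, preserve the rest */
	{}					/* .nid == 0 terminates the walk */
};

static void my_apply_coefs(struct hda_codec *codec)
{
	alc_process_coef_fw(codec, my_coefs);
}
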
@@ -173,20 +242,10 @@ static const struct hda_verb alc_gpio3_init_verbs[] = {
static void alc_fix_pll(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- unsigned int val;
- if (!spec->pll_nid)
- return;
- snd_hda_codec_write(codec, spec->pll_nid, 0, AC_VERB_SET_COEF_INDEX,
- spec->pll_coef_idx);
- val = snd_hda_codec_read(codec, spec->pll_nid, 0,
- AC_VERB_GET_PROC_COEF, 0);
- if (val == -1)
- return;
- snd_hda_codec_write(codec, spec->pll_nid, 0, AC_VERB_SET_COEF_INDEX,
- spec->pll_coef_idx);
- snd_hda_codec_write(codec, spec->pll_nid, 0, AC_VERB_SET_PROC_COEF,
- val & ~(1 << spec->pll_coef_bit));
+ if (spec->pll_nid)
+ alc_update_coefex_idx(codec, spec->pll_nid, spec->pll_coef_idx,
+ 1 << spec->pll_coef_bit, 0);
}
static void alc_fix_pll_init(struct hda_codec *codec, hda_nid_t nid,
@@ -200,7 +259,8 @@ static void alc_fix_pll_init(struct hda_codec *codec, hda_nid_t nid,
}
/* update the master volume per volume-knob's unsol event */
-static void alc_update_knob_master(struct hda_codec *codec, struct hda_jack_tbl *jack)
+static void alc_update_knob_master(struct hda_codec *codec,
+ struct hda_jack_callback *jack)
{
unsigned int val;
struct snd_kcontrol *kctl;
@@ -212,7 +272,7 @@ static void alc_update_knob_master(struct hda_codec *codec, struct hda_jack_tbl
uctl = kzalloc(sizeof(*uctl), GFP_KERNEL);
if (!uctl)
return;
- val = snd_hda_codec_read(codec, jack->nid, 0,
+ val = snd_hda_codec_read(codec, jack->tbl->nid, 0,
AC_VERB_GET_VOLUME_KNOB_CONTROL, 0);
val &= HDA_AMP_VOLMASK;
uctl->value.integer.value[0] = val;
@@ -231,30 +291,18 @@ static void alc880_unsol_event(struct hda_codec *codec, unsigned int res)
/* additional initialization for ALC888 variants */
static void alc888_coef_init(struct hda_codec *codec)
{
- unsigned int tmp;
-
- snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_COEF_INDEX, 0);
- tmp = snd_hda_codec_read(codec, 0x20, 0, AC_VERB_GET_PROC_COEF, 0);
- snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_COEF_INDEX, 7);
- if ((tmp & 0xf0) == 0x20)
+ if (alc_get_coef0(codec) == 0x20)
/* alc888S-VC */
- snd_hda_codec_read(codec, 0x20, 0,
- AC_VERB_SET_PROC_COEF, 0x830);
+ alc_write_coef_idx(codec, 7, 0x830);
else
/* alc888-VB */
- snd_hda_codec_read(codec, 0x20, 0,
- AC_VERB_SET_PROC_COEF, 0x3030);
+ alc_write_coef_idx(codec, 7, 0x3030);
}
/* additional initialization for ALC889 variants */
static void alc889_coef_init(struct hda_codec *codec)
{
- unsigned int tmp;
-
- snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_COEF_INDEX, 7);
- tmp = snd_hda_codec_read(codec, 0x20, 0, AC_VERB_GET_PROC_COEF, 0);
- snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_COEF_INDEX, 7);
- snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_PROC_COEF, tmp|0x2010);
+ alc_update_coef_idx(codec, 7, 0, 0x2010);
}
/* turn on/off EAPD control (only if available) */
@@ -295,8 +343,6 @@ static void alc_eapd_shutup(struct hda_codec *codec)
/* generic EAPD initialization */
static void alc_auto_init_amp(struct hda_codec *codec, int type)
{
- unsigned int tmp;
-
alc_auto_setup_eapd(codec, true);
switch (type) {
case ALC_INIT_GPIO1:
@@ -311,15 +357,7 @@ static void alc_auto_init_amp(struct hda_codec *codec, int type)
case ALC_INIT_DEFAULT:
switch (codec->vendor_id) {
case 0x10ec0260:
- snd_hda_codec_write(codec, 0x1a, 0,
- AC_VERB_SET_COEF_INDEX, 7);
- tmp = snd_hda_codec_read(codec, 0x1a, 0,
- AC_VERB_GET_PROC_COEF, 0);
- snd_hda_codec_write(codec, 0x1a, 0,
- AC_VERB_SET_COEF_INDEX, 7);
- snd_hda_codec_write(codec, 0x1a, 0,
- AC_VERB_SET_PROC_COEF,
- tmp | 0x2010);
+ alc_update_coefex_idx(codec, 0x1a, 7, 0, 0x2010);
break;
case 0x10ec0262:
case 0x10ec0880:
@@ -337,15 +375,7 @@ static void alc_auto_init_amp(struct hda_codec *codec, int type)
#if 0 /* XXX: This may cause the silent output on speaker on some machines */
case 0x10ec0267:
case 0x10ec0268:
- snd_hda_codec_write(codec, 0x20, 0,
- AC_VERB_SET_COEF_INDEX, 7);
- tmp = snd_hda_codec_read(codec, 0x20, 0,
- AC_VERB_GET_PROC_COEF, 0);
- snd_hda_codec_write(codec, 0x20, 0,
- AC_VERB_SET_COEF_INDEX, 7);
- snd_hda_codec_write(codec, 0x20, 0,
- AC_VERB_SET_PROC_COEF,
- tmp | 0x3000);
+ alc_update_coef_idx(codec, 7, 0, 0x3000);
break;
#endif /* XXX */
}
@@ -588,190 +618,14 @@ static void alc_ssid_check(struct hda_codec *codec, const hda_nid_t *ports)
}
/*
- * COEF access helper functions
- */
-
-static int alc_read_coefex_idx(struct hda_codec *codec,
- hda_nid_t nid,
- unsigned int coef_idx)
-{
- unsigned int val;
- snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_COEF_INDEX,
- coef_idx);
- val = snd_hda_codec_read(codec, nid, 0,
- AC_VERB_GET_PROC_COEF, 0);
- return val;
-}
-
-#define alc_read_coef_idx(codec, coef_idx) \
- alc_read_coefex_idx(codec, 0x20, coef_idx)
-
-static void alc_write_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
- unsigned int coef_idx,
- unsigned int coef_val)
-{
- snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_COEF_INDEX,
- coef_idx);
- snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_PROC_COEF,
- coef_val);
-}
-
-#define alc_write_coef_idx(codec, coef_idx, coef_val) \
- alc_write_coefex_idx(codec, 0x20, coef_idx, coef_val)
-
-/* a special bypass for COEF 0; read the cached value at the second time */
-static unsigned int alc_get_coef0(struct hda_codec *codec)
-{
- struct alc_spec *spec = codec->spec;
- if (!spec->coef0)
- spec->coef0 = alc_read_coef_idx(codec, 0);
- return spec->coef0;
-}
-
-/*
*/
-static hda_nid_t get_adc_nid(struct hda_codec *codec, int adc_idx, int imux_idx)
-{
- struct hda_gen_spec *spec = codec->spec;
- if (spec->dyn_adc_switch)
- adc_idx = spec->dyn_adc_idx[imux_idx];
- return spec->adc_nids[adc_idx];
-}
-
-static void alc_inv_dmic_sync_adc(struct hda_codec *codec, int adc_idx)
-{
- struct alc_spec *spec = codec->spec;
- struct hda_input_mux *imux = &spec->gen.input_mux;
- struct nid_path *path;
- hda_nid_t nid;
- int i, dir, parm;
- unsigned int val;
-
- for (i = 0; i < imux->num_items; i++) {
- if (spec->gen.imux_pins[i] == spec->inv_dmic_pin)
- break;
- }
- if (i >= imux->num_items)
- return;
-
- path = snd_hda_get_nid_path(codec, spec->inv_dmic_pin,
- get_adc_nid(codec, adc_idx, i));
- val = path->ctls[NID_PATH_MUTE_CTL];
- if (!val)
- return;
- nid = get_amp_nid_(val);
- dir = get_amp_direction_(val);
- parm = AC_AMP_SET_RIGHT |
- (dir == HDA_OUTPUT ? AC_AMP_SET_OUTPUT : AC_AMP_SET_INPUT);
-
- /* flush all cached amps at first */
- snd_hda_codec_flush_cache(codec);
-
- /* we care only right channel */
- val = snd_hda_codec_amp_read(codec, nid, 1, dir, 0);
- if (val & 0x80) /* if already muted, we don't need to touch */
- return;
- val |= 0x80;
- snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_AMP_GAIN_MUTE,
- parm | val);
-}
-
-/*
- * Inverted digital-mic handling
- *
- * First off, it's a bit tricky. The "Inverted Internal Mic Capture Switch"
- * gives the additional mute only to the right channel of the digital mic
- * capture stream. This is a workaround for avoiding the almost silence
- * by summing the stereo stream from some (known to be ForteMedia)
- * digital mic unit.
- *
- * The logic is to call alc_inv_dmic_sync() after each action (possibly)
- * modifying ADC amp. When the mute flag is set, it mutes the R-channel
- * without caching so that the cache can still keep the original value.
- * The cached value is then restored when the flag is set off or any other
- * than d-mic is used as the current input source.
- */
-static void alc_inv_dmic_sync(struct hda_codec *codec, bool force)
-{
- struct alc_spec *spec = codec->spec;
- int src, nums;
-
- if (!spec->inv_dmic_fixup)
- return;
- if (!spec->inv_dmic_muted && !force)
- return;
- nums = spec->gen.dyn_adc_switch ? 1 : spec->gen.num_adc_nids;
- for (src = 0; src < nums; src++) {
- bool dmic_fixup = false;
-
- if (spec->inv_dmic_muted &&
- spec->gen.imux_pins[spec->gen.cur_mux[src]] == spec->inv_dmic_pin)
- dmic_fixup = true;
- if (!dmic_fixup && !force)
- continue;
- alc_inv_dmic_sync_adc(codec, src);
- }
-}
-
-static void alc_inv_dmic_hook(struct hda_codec *codec,
- struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- alc_inv_dmic_sync(codec, false);
-}
-
-static int alc_inv_dmic_sw_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
- struct alc_spec *spec = codec->spec;
-
- ucontrol->value.integer.value[0] = !spec->inv_dmic_muted;
- return 0;
-}
-
-static int alc_inv_dmic_sw_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
- struct alc_spec *spec = codec->spec;
- unsigned int val = !ucontrol->value.integer.value[0];
-
- if (val == spec->inv_dmic_muted)
- return 0;
- spec->inv_dmic_muted = val;
- alc_inv_dmic_sync(codec, true);
- return 0;
-}
-
-static const struct snd_kcontrol_new alc_inv_dmic_sw = {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Inverted Internal Mic Capture Switch",
- .info = snd_ctl_boolean_mono_info,
- .get = alc_inv_dmic_sw_get,
- .put = alc_inv_dmic_sw_put,
-};
-
-static int alc_add_inv_dmic_mixer(struct hda_codec *codec, hda_nid_t nid)
+static void alc_fixup_inv_dmic(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
{
struct alc_spec *spec = codec->spec;
- if (!snd_hda_gen_add_kctl(&spec->gen, NULL, &alc_inv_dmic_sw))
- return -ENOMEM;
- spec->inv_dmic_fixup = 1;
- spec->inv_dmic_muted = 0;
- spec->inv_dmic_pin = nid;
- spec->gen.cap_sync_hook = alc_inv_dmic_hook;
- return 0;
-}
-
-/* typically the digital mic is put at node 0x12 */
-static void alc_fixup_inv_dmic_0x12(struct hda_codec *codec,
- const struct hda_fixup *fix, int action)
-{
- if (action == HDA_FIXUP_ACT_PROBE)
- alc_add_inv_dmic_mixer(codec, 0x12);
+ spec->gen.inv_dmic_split = 1;
}
@@ -880,7 +734,6 @@ static int alc_resume(struct hda_codec *codec)
codec->patch_ops.init(codec);
snd_hda_codec_resume_amp(codec);
snd_hda_codec_resume_cache(codec);
- alc_inv_dmic_sync(codec, true);
hda_call_check_power_status(codec, 0x01);
return 0;
}
@@ -1134,7 +987,8 @@ static void alc880_fixup_vol_knob(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
if (action == HDA_FIXUP_ACT_PROBE)
- snd_hda_jack_detect_enable_callback(codec, 0x21, ALC_DCVOL_EVENT, alc_update_knob_master);
+ snd_hda_jack_detect_enable_callback(codec, 0x21,
+ alc_update_knob_master);
}
static const struct hda_fixup alc880_fixups[] = {
@@ -1597,7 +1451,7 @@ static void alc260_fixup_gpio1_toggle(struct hda_codec *codec,
spec->gen.detect_hp = 1;
spec->gen.automute_speaker = 1;
spec->gen.autocfg.hp_pins[0] = 0x0f; /* copy it for automute */
- snd_hda_jack_detect_enable_callback(codec, 0x0f, HDA_GEN_HP_EVENT,
+ snd_hda_jack_detect_enable_callback(codec, 0x0f,
snd_hda_gen_hp_automute);
snd_hda_add_verbs(codec, alc_gpio1_init_verbs);
}
@@ -2222,7 +2076,7 @@ static const struct hda_fixup alc882_fixups[] = {
},
[ALC882_FIXUP_INV_DMIC] = {
.type = HDA_FIXUP_FUNC,
- .v.func = alc_fixup_inv_dmic_0x12,
+ .v.func = alc_fixup_inv_dmic,
},
[ALC882_FIXUP_NO_PRIMARY_HP] = {
.type = HDA_FIXUP_FUNC,
@@ -2473,7 +2327,7 @@ static const struct hda_fixup alc262_fixups[] = {
},
[ALC262_FIXUP_INV_DMIC] = {
.type = HDA_FIXUP_FUNC,
- .v.func = alc_fixup_inv_dmic_0x12,
+ .v.func = alc_fixup_inv_dmic,
},
[ALC262_FIXUP_INTEL_BAYLEYBAY] = {
.type = HDA_FIXUP_FUNC,
@@ -2517,13 +2371,7 @@ static int patch_alc262(struct hda_codec *codec)
/* pshou 07/11/05 set a zero PCM sample to DAC when FIFO is
* under-run
*/
- {
- int tmp;
- snd_hda_codec_write(codec, 0x1a, 0, AC_VERB_SET_COEF_INDEX, 7);
- tmp = snd_hda_codec_read(codec, 0x20, 0, AC_VERB_GET_PROC_COEF, 0);
- snd_hda_codec_write(codec, 0x1a, 0, AC_VERB_SET_COEF_INDEX, 7);
- snd_hda_codec_write(codec, 0x1a, 0, AC_VERB_SET_PROC_COEF, tmp | 0x80);
- }
+ alc_update_coefex_idx(codec, 0x1a, 7, 0, 0x80);
#endif
alc_fix_pll_init(codec, 0x20, 0x0a, 10);
@@ -2592,7 +2440,7 @@ enum {
static const struct hda_fixup alc268_fixups[] = {
[ALC268_FIXUP_INV_DMIC] = {
.type = HDA_FIXUP_FUNC,
- .v.func = alc_fixup_inv_dmic_0x12,
+ .v.func = alc_fixup_inv_dmic,
},
[ALC268_FIXUP_HP_EAPD] = {
.type = HDA_FIXUP_VERBS,
@@ -2809,14 +2657,7 @@ static void alc286_shutup(struct hda_codec *codec)
static void alc269vb_toggle_power_output(struct hda_codec *codec, int power_up)
{
- int val = alc_read_coef_idx(codec, 0x04);
- if (val == -1)
- return;
- if (power_up)
- val |= 1 << 11;
- else
- val &= ~(1 << 11);
- alc_write_coef_idx(codec, 0x04, val);
+ alc_update_coef_idx(codec, 0x04, 1 << 11, power_up ? (1 << 11) : 0);
}
static void alc269_shutup(struct hda_codec *codec)
@@ -2832,79 +2673,42 @@ static void alc269_shutup(struct hda_codec *codec)
snd_hda_shutup_pins(codec);
}
+static struct coef_fw alc282_coefs[] = {
+ WRITE_COEF(0x03, 0x0002), /* Power Down Control */
+ WRITE_COEF(0x05, 0x0700), /* FIFO and filter clock */
+ WRITE_COEF(0x07, 0x0200), /* DMIC control */
+ UPDATE_COEF(0x06, 0x00f0, 0), /* Analog clock */
+ UPDATE_COEF(0x08, 0xfffc, 0x0c2c), /* JD */
+ WRITE_COEF(0x0a, 0xcccc), /* JD offset1 */
+ WRITE_COEF(0x0b, 0xcccc), /* JD offset2 */
+ WRITE_COEF(0x0e, 0x6e00), /* LDO1/2/3, DAC/ADC */
+ UPDATE_COEF(0x0f, 0xf800, 0x1000), /* JD */
+ UPDATE_COEF(0x10, 0xfc00, 0x0c00), /* Capless */
+ WRITE_COEF(0x6f, 0x0), /* Class D test 4 */
+ UPDATE_COEF(0x0c, 0xfe00, 0), /* IO power down directly */
+ WRITE_COEF(0x34, 0xa0c0), /* ANC */
+ UPDATE_COEF(0x16, 0x0008, 0), /* AGC MUX */
+ UPDATE_COEF(0x1d, 0x00e0, 0), /* DAC simple content protection */
+ UPDATE_COEF(0x1f, 0x00e0, 0), /* ADC simple content protection */
+ WRITE_COEF(0x21, 0x8804), /* DAC ADC Zero Detection */
+ WRITE_COEF(0x63, 0x2902), /* PLL */
+ WRITE_COEF(0x68, 0xa080), /* capless control 2 */
+ WRITE_COEF(0x69, 0x3400), /* capless control 3 */
+ WRITE_COEF(0x6a, 0x2f3e), /* capless control 4 */
+ WRITE_COEF(0x6b, 0x0), /* capless control 5 */
+ UPDATE_COEF(0x6d, 0x0fff, 0x0900), /* class D test 2 */
+ WRITE_COEF(0x6e, 0x110a), /* class D test 3 */
+ UPDATE_COEF(0x70, 0x00f8, 0x00d8), /* class D test 5 */
+ WRITE_COEF(0x71, 0x0014), /* class D test 6 */
+ WRITE_COEF(0x72, 0xc2ba), /* classD OCP */
+ UPDATE_COEF(0x77, 0x0f80, 0), /* classD pure DC test */
+ WRITE_COEF(0x6c, 0xfc06), /* Class D amp control */
+ {}
+};
+
static void alc282_restore_default_value(struct hda_codec *codec)
{
- int val;
-
- /* Power Down Control */
- alc_write_coef_idx(codec, 0x03, 0x0002);
- /* FIFO and filter clock */
- alc_write_coef_idx(codec, 0x05, 0x0700);
- /* DMIC control */
- alc_write_coef_idx(codec, 0x07, 0x0200);
- /* Analog clock */
- val = alc_read_coef_idx(codec, 0x06);
- alc_write_coef_idx(codec, 0x06, (val & ~0x00f0) | 0x0);
- /* JD */
- val = alc_read_coef_idx(codec, 0x08);
- alc_write_coef_idx(codec, 0x08, (val & ~0xfffc) | 0x0c2c);
- /* JD offset1 */
- alc_write_coef_idx(codec, 0x0a, 0xcccc);
- /* JD offset2 */
- alc_write_coef_idx(codec, 0x0b, 0xcccc);
- /* LDO1/2/3, DAC/ADC */
- alc_write_coef_idx(codec, 0x0e, 0x6e00);
- /* JD */
- val = alc_read_coef_idx(codec, 0x0f);
- alc_write_coef_idx(codec, 0x0f, (val & ~0xf800) | 0x1000);
- /* Capless */
- val = alc_read_coef_idx(codec, 0x10);
- alc_write_coef_idx(codec, 0x10, (val & ~0xfc00) | 0x0c00);
- /* Class D test 4 */
- alc_write_coef_idx(codec, 0x6f, 0x0);
- /* IO power down directly */
- val = alc_read_coef_idx(codec, 0x0c);
- alc_write_coef_idx(codec, 0x0c, (val & ~0xfe00) | 0x0);
- /* ANC */
- alc_write_coef_idx(codec, 0x34, 0xa0c0);
- /* AGC MUX */
- val = alc_read_coef_idx(codec, 0x16);
- alc_write_coef_idx(codec, 0x16, (val & ~0x0008) | 0x0);
- /* DAC simple content protection */
- val = alc_read_coef_idx(codec, 0x1d);
- alc_write_coef_idx(codec, 0x1d, (val & ~0x00e0) | 0x0);
- /* ADC simple content protection */
- val = alc_read_coef_idx(codec, 0x1f);
- alc_write_coef_idx(codec, 0x1f, (val & ~0x00e0) | 0x0);
- /* DAC ADC Zero Detection */
- alc_write_coef_idx(codec, 0x21, 0x8804);
- /* PLL */
- alc_write_coef_idx(codec, 0x63, 0x2902);
- /* capless control 2 */
- alc_write_coef_idx(codec, 0x68, 0xa080);
- /* capless control 3 */
- alc_write_coef_idx(codec, 0x69, 0x3400);
- /* capless control 4 */
- alc_write_coef_idx(codec, 0x6a, 0x2f3e);
- /* capless control 5 */
- alc_write_coef_idx(codec, 0x6b, 0x0);
- /* class D test 2 */
- val = alc_read_coef_idx(codec, 0x6d);
- alc_write_coef_idx(codec, 0x6d, (val & ~0x0fff) | 0x0900);
- /* class D test 3 */
- alc_write_coef_idx(codec, 0x6e, 0x110a);
- /* class D test 5 */
- val = alc_read_coef_idx(codec, 0x70);
- alc_write_coef_idx(codec, 0x70, (val & ~0x00f8) | 0x00d8);
- /* class D test 6 */
- alc_write_coef_idx(codec, 0x71, 0x0014);
- /* classD OCP */
- alc_write_coef_idx(codec, 0x72, 0xc2ba);
- /* classD pure DC test */
- val = alc_read_coef_idx(codec, 0x77);
- alc_write_coef_idx(codec, 0x77, (val & ~0x0f80) | 0x0);
- /* Class D amp control */
- alc_write_coef_idx(codec, 0x6c, 0xfc06);
+ alc_process_coef_fw(codec, alc282_coefs);
}
static void alc282_init(struct hda_codec *codec)
@@ -2980,87 +2784,45 @@ static void alc282_shutup(struct hda_codec *codec)
alc_write_coef_idx(codec, 0x78, coef78);
}
+static struct coef_fw alc283_coefs[] = {
+ WRITE_COEF(0x03, 0x0002), /* Power Down Control */
+ WRITE_COEF(0x05, 0x0700), /* FIFO and filter clock */
+ WRITE_COEF(0x07, 0x0200), /* DMIC control */
+ UPDATE_COEF(0x06, 0x00f0, 0), /* Analog clock */
+ UPDATE_COEF(0x08, 0xfffc, 0x0c2c), /* JD */
+ WRITE_COEF(0x0a, 0xcccc), /* JD offset1 */
+ WRITE_COEF(0x0b, 0xcccc), /* JD offset2 */
+ WRITE_COEF(0x0e, 0x6fc0), /* LDO1/2/3, DAC/ADC */
+ UPDATE_COEF(0x0f, 0xf800, 0x1000), /* JD */
+ UPDATE_COEF(0x10, 0xfc00, 0x0c00), /* Capless */
+ WRITE_COEF(0x3a, 0x0), /* Class D test 4 */
+ UPDATE_COEF(0x0c, 0xfe00, 0x0), /* IO power down directly */
+ WRITE_COEF(0x22, 0xa0c0), /* ANC */
+ UPDATE_COEFEX(0x53, 0x01, 0x000f, 0x0008), /* AGC MUX */
+ UPDATE_COEF(0x1d, 0x00e0, 0), /* DAC simple content protection */
+ UPDATE_COEF(0x1f, 0x00e0, 0), /* ADC simple content protection */
+ WRITE_COEF(0x21, 0x8804), /* DAC ADC Zero Detection */
+ WRITE_COEF(0x2e, 0x2902), /* PLL */
+ WRITE_COEF(0x33, 0xa080), /* capless control 2 */
+ WRITE_COEF(0x34, 0x3400), /* capless control 3 */
+ WRITE_COEF(0x35, 0x2f3e), /* capless control 4 */
+ WRITE_COEF(0x36, 0x0), /* capless control 5 */
+ UPDATE_COEF(0x38, 0x0fff, 0x0900), /* class D test 2 */
+ WRITE_COEF(0x39, 0x110a), /* class D test 3 */
+ UPDATE_COEF(0x3b, 0x00f8, 0x00d8), /* class D test 5 */
+ WRITE_COEF(0x3c, 0x0014), /* class D test 6 */
+ WRITE_COEF(0x3d, 0xc2ba), /* classD OCP */
+ UPDATE_COEF(0x42, 0x0f80, 0x0), /* classD pure DC test */
+ WRITE_COEF(0x49, 0x0), /* test mode */
+ UPDATE_COEF(0x40, 0xf800, 0x9800), /* Class D DC enable */
+ UPDATE_COEF(0x42, 0xf000, 0x2000), /* DC offset */
+ WRITE_COEF(0x37, 0xfc06), /* Class D amp control */
+ {}
+};
+
static void alc283_restore_default_value(struct hda_codec *codec)
{
- int val;
-
- /* Power Down Control */
- alc_write_coef_idx(codec, 0x03, 0x0002);
- /* FIFO and filter clock */
- alc_write_coef_idx(codec, 0x05, 0x0700);
- /* DMIC control */
- alc_write_coef_idx(codec, 0x07, 0x0200);
- /* Analog clock */
- val = alc_read_coef_idx(codec, 0x06);
- alc_write_coef_idx(codec, 0x06, (val & ~0x00f0) | 0x0);
- /* JD */
- val = alc_read_coef_idx(codec, 0x08);
- alc_write_coef_idx(codec, 0x08, (val & ~0xfffc) | 0x0c2c);
- /* JD offset1 */
- alc_write_coef_idx(codec, 0x0a, 0xcccc);
- /* JD offset2 */
- alc_write_coef_idx(codec, 0x0b, 0xcccc);
- /* LDO1/2/3, DAC/ADC */
- alc_write_coef_idx(codec, 0x0e, 0x6fc0);
- /* JD */
- val = alc_read_coef_idx(codec, 0x0f);
- alc_write_coef_idx(codec, 0x0f, (val & ~0xf800) | 0x1000);
- /* Capless */
- val = alc_read_coef_idx(codec, 0x10);
- alc_write_coef_idx(codec, 0x10, (val & ~0xfc00) | 0x0c00);
- /* Class D test 4 */
- alc_write_coef_idx(codec, 0x3a, 0x0);
- /* IO power down directly */
- val = alc_read_coef_idx(codec, 0x0c);
- alc_write_coef_idx(codec, 0x0c, (val & ~0xfe00) | 0x0);
- /* ANC */
- alc_write_coef_idx(codec, 0x22, 0xa0c0);
- /* AGC MUX */
- val = alc_read_coefex_idx(codec, 0x53, 0x01);
- alc_write_coefex_idx(codec, 0x53, 0x01, (val & ~0x000f) | 0x0008);
- /* DAC simple content protection */
- val = alc_read_coef_idx(codec, 0x1d);
- alc_write_coef_idx(codec, 0x1d, (val & ~0x00e0) | 0x0);
- /* ADC simple content protection */
- val = alc_read_coef_idx(codec, 0x1f);
- alc_write_coef_idx(codec, 0x1f, (val & ~0x00e0) | 0x0);
- /* DAC ADC Zero Detection */
- alc_write_coef_idx(codec, 0x21, 0x8804);
- /* PLL */
- alc_write_coef_idx(codec, 0x2e, 0x2902);
- /* capless control 2 */
- alc_write_coef_idx(codec, 0x33, 0xa080);
- /* capless control 3 */
- alc_write_coef_idx(codec, 0x34, 0x3400);
- /* capless control 4 */
- alc_write_coef_idx(codec, 0x35, 0x2f3e);
- /* capless control 5 */
- alc_write_coef_idx(codec, 0x36, 0x0);
- /* class D test 2 */
- val = alc_read_coef_idx(codec, 0x38);
- alc_write_coef_idx(codec, 0x38, (val & ~0x0fff) | 0x0900);
- /* class D test 3 */
- alc_write_coef_idx(codec, 0x39, 0x110a);
- /* class D test 5 */
- val = alc_read_coef_idx(codec, 0x3b);
- alc_write_coef_idx(codec, 0x3b, (val & ~0x00f8) | 0x00d8);
- /* class D test 6 */
- alc_write_coef_idx(codec, 0x3c, 0x0014);
- /* classD OCP */
- alc_write_coef_idx(codec, 0x3d, 0xc2ba);
- /* classD pure DC test */
- val = alc_read_coef_idx(codec, 0x42);
- alc_write_coef_idx(codec, 0x42, (val & ~0x0f80) | 0x0);
- /* test mode */
- alc_write_coef_idx(codec, 0x49, 0x0);
- /* Class D DC enable */
- val = alc_read_coef_idx(codec, 0x40);
- alc_write_coef_idx(codec, 0x40, (val & ~0xf800) | 0x9800);
- /* DC offset */
- val = alc_read_coef_idx(codec, 0x42);
- alc_write_coef_idx(codec, 0x42, (val & ~0xf000) | 0x2000);
- /* Class D amp control */
- alc_write_coef_idx(codec, 0x37, 0xfc06);
+ alc_process_coef_fw(codec, alc283_coefs);
}
static void alc283_init(struct hda_codec *codec)
@@ -3068,7 +2830,6 @@ static void alc283_init(struct hda_codec *codec)
struct alc_spec *spec = codec->spec;
hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
bool hp_pin_sense;
- int val;
if (!spec->gen.autocfg.hp_outs) {
if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT)
@@ -3098,8 +2859,7 @@ static void alc283_init(struct hda_codec *codec)
msleep(85);
/* Index 0x46 Combo jack auto switch control 2 */
/* 3k pull low control for Headset jack. */
- val = alc_read_coef_idx(codec, 0x46);
- alc_write_coef_idx(codec, 0x46, val & ~(3 << 12));
+ alc_update_coef_idx(codec, 0x46, 3 << 12, 0);
/* Headphone capless set to normal mode */
alc_write_coef_idx(codec, 0x43, 0x9614);
}
@@ -3109,7 +2869,6 @@ static void alc283_shutup(struct hda_codec *codec)
struct alc_spec *spec = codec->spec;
hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
bool hp_pin_sense;
- int val;
if (!spec->gen.autocfg.hp_outs) {
if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT)
@@ -3134,8 +2893,7 @@ static void alc283_shutup(struct hda_codec *codec)
snd_hda_codec_write(codec, hp_pin, 0,
AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
- val = alc_read_coef_idx(codec, 0x46);
- alc_write_coef_idx(codec, 0x46, val | (3 << 12));
+ alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
if (hp_pin_sense)
msleep(100);
@@ -3268,7 +3026,6 @@ static int alc269_resume(struct hda_codec *codec)
snd_hda_codec_resume_amp(codec);
snd_hda_codec_resume_cache(codec);
- alc_inv_dmic_sync(codec, true);
hda_call_check_power_status(codec, 0x01);
/* on some machine, the BIOS will clear the codec gpio data when enter
@@ -3298,12 +3055,8 @@ static void alc269_fixup_pincfg_no_hp_to_lineout(struct hda_codec *codec,
static void alc269_fixup_hweq(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
- int coef;
-
- if (action != HDA_FIXUP_ACT_INIT)
- return;
- coef = alc_read_coef_idx(codec, 0x1e);
- alc_write_coef_idx(codec, 0x1e, coef | 0x80);
+ if (action == HDA_FIXUP_ACT_INIT)
+ alc_update_coef_idx(codec, 0x1e, 0, 0x80);
}
static void alc269_fixup_headset_mic(struct hda_codec *codec,
@@ -3351,32 +3104,21 @@ static void alc269_fixup_pcm_44k(struct hda_codec *codec,
static void alc269_fixup_stereo_dmic(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
- int coef;
-
- if (action != HDA_FIXUP_ACT_INIT)
- return;
/* The digital-mic unit sends PDM (differential signal) instead of
* the standard PCM, thus you can't record a valid mono stream as is.
* Below is a workaround specific to ALC269 to control the dmic
* signal source as mono.
*/
- coef = alc_read_coef_idx(codec, 0x07);
- alc_write_coef_idx(codec, 0x07, coef | 0x80);
+ if (action == HDA_FIXUP_ACT_INIT)
+ alc_update_coef_idx(codec, 0x07, 0, 0x80);
}
static void alc269_quanta_automute(struct hda_codec *codec)
{
snd_hda_gen_update_outputs(codec);
- snd_hda_codec_write(codec, 0x20, 0,
- AC_VERB_SET_COEF_INDEX, 0x0c);
- snd_hda_codec_write(codec, 0x20, 0,
- AC_VERB_SET_PROC_COEF, 0x680);
-
- snd_hda_codec_write(codec, 0x20, 0,
- AC_VERB_SET_COEF_INDEX, 0x0c);
- snd_hda_codec_write(codec, 0x20, 0,
- AC_VERB_SET_PROC_COEF, 0x480);
+ alc_write_coef_idx(codec, 0x0c, 0x680);
+ alc_write_coef_idx(codec, 0x0c, 0x480);
}
static void alc269_fixup_quanta_mute(struct hda_codec *codec,
@@ -3389,7 +3131,7 @@ static void alc269_fixup_quanta_mute(struct hda_codec *codec,
}
static void alc269_x101_hp_automute_hook(struct hda_codec *codec,
- struct hda_jack_tbl *jack)
+ struct hda_jack_callback *jack)
{
struct alc_spec *spec = codec->spec;
int vref;
@@ -3622,61 +3364,62 @@ static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec,
static void alc_headset_mode_unplugged(struct hda_codec *codec)
{
- int val;
+ static struct coef_fw coef0255[] = {
+ WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */
+ WRITE_COEF(0x45, 0xd089), /* UAJ function set to manual mode */
+ UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/
+ WRITE_COEF(0x06, 0x6104), /* Set MIC2 Vref gate with HP */
+ WRITE_COEFEX(0x57, 0x03, 0x8aa6), /* Direct Drive HP Amp control */
+ {}
+ };
+ static struct coef_fw coef0233[] = {
+ WRITE_COEF(0x1b, 0x0c0b),
+ WRITE_COEF(0x45, 0xc429),
+ UPDATE_COEF(0x35, 0x4000, 0),
+ WRITE_COEF(0x06, 0x2104),
+ WRITE_COEF(0x1a, 0x0001),
+ WRITE_COEF(0x26, 0x0004),
+ WRITE_COEF(0x32, 0x42a3),
+ {}
+ };
+ static struct coef_fw coef0292[] = {
+ WRITE_COEF(0x76, 0x000e),
+ WRITE_COEF(0x6c, 0x2400),
+ WRITE_COEF(0x18, 0x7308),
+ WRITE_COEF(0x6b, 0xc429),
+ {}
+ };
+ static struct coef_fw coef0293[] = {
+ UPDATE_COEF(0x10, 7<<8, 6<<8), /* SET Line1 JD to 0 */
+ UPDATE_COEFEX(0x57, 0x05, 1<<15|1<<13, 0x0), /* SET charge pump by verb */
+ UPDATE_COEFEX(0x57, 0x03, 1<<10, 1<<10), /* SET EN_OSW to 1 */
+ UPDATE_COEF(0x1a, 1<<3, 1<<3), /* Combo JD gating with LINE1-VREFO */
+ WRITE_COEF(0x45, 0xc429), /* Set to TRS type */
+ UPDATE_COEF(0x4a, 0x000f, 0x000e), /* Combo Jack auto detect */
+ {}
+ };
+ static struct coef_fw coef0668[] = {
+ WRITE_COEF(0x15, 0x0d40),
+ WRITE_COEF(0xb7, 0x802b),
+ {}
+ };
switch (codec->vendor_id) {
case 0x10ec0255:
- /* LDO and MISC control */
- alc_write_coef_idx(codec, 0x1b, 0x0c0b);
- /* UAJ function set to menual mode */
- alc_write_coef_idx(codec, 0x45, 0xd089);
- /* Direct Drive HP Amp control(Set to verb control)*/
- val = alc_read_coefex_idx(codec, 0x57, 0x05);
- alc_write_coefex_idx(codec, 0x57, 0x05, val & ~(1<<14));
- /* Set MIC2 Vref gate with HP */
- alc_write_coef_idx(codec, 0x06, 0x6104);
- /* Direct Drive HP Amp control */
- alc_write_coefex_idx(codec, 0x57, 0x03, 0x8aa6);
+ alc_process_coef_fw(codec, coef0255);
break;
case 0x10ec0233:
case 0x10ec0283:
- alc_write_coef_idx(codec, 0x1b, 0x0c0b);
- alc_write_coef_idx(codec, 0x45, 0xc429);
- val = alc_read_coef_idx(codec, 0x35);
- alc_write_coef_idx(codec, 0x35, val & 0xbfff);
- alc_write_coef_idx(codec, 0x06, 0x2104);
- alc_write_coef_idx(codec, 0x1a, 0x0001);
- alc_write_coef_idx(codec, 0x26, 0x0004);
- alc_write_coef_idx(codec, 0x32, 0x42a3);
+ alc_process_coef_fw(codec, coef0233);
break;
case 0x10ec0292:
- alc_write_coef_idx(codec, 0x76, 0x000e);
- alc_write_coef_idx(codec, 0x6c, 0x2400);
- alc_write_coef_idx(codec, 0x18, 0x7308);
- alc_write_coef_idx(codec, 0x6b, 0xc429);
+ alc_process_coef_fw(codec, coef0292);
break;
case 0x10ec0293:
- /* SET Line1 JD to 0 */
- val = alc_read_coef_idx(codec, 0x10);
- alc_write_coef_idx(codec, 0x10, (val & ~(7<<8)) | 6<<8);
- /* SET charge pump by verb */
- val = alc_read_coefex_idx(codec, 0x57, 0x05);
- alc_write_coefex_idx(codec, 0x57, 0x05, (val & ~(1<<15|1<<13)) | 0x0);
- /* SET EN_OSW to 1 */
- val = alc_read_coefex_idx(codec, 0x57, 0x03);
- alc_write_coefex_idx(codec, 0x57, 0x03, (val & ~(1<<10)) | (1<<10) );
- /* Combo JD gating with LINE1-VREFO */
- val = alc_read_coef_idx(codec, 0x1a);
- alc_write_coef_idx(codec, 0x1a, (val & ~(1<<3)) | (1<<3));
- /* Set to TRS type */
- alc_write_coef_idx(codec, 0x45, 0xc429);
- /* Combo Jack auto detect */
- val = alc_read_coef_idx(codec, 0x4a);
- alc_write_coef_idx(codec, 0x4a, (val & 0xfff0) | 0x000e);
+ alc_process_coef_fw(codec, coef0293);
break;
case 0x10ec0668:
- alc_write_coef_idx(codec, 0x15, 0x0d40);
- alc_write_coef_idx(codec, 0xb7, 0x802b);
+ alc_process_coef_fw(codec, coef0668);
break;
}
codec_dbg(codec, "Headset jack set to unplugged mode.\n");
@@ -3686,55 +3429,65 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
hda_nid_t mic_pin)
{
- int val;
+ static struct coef_fw coef0255[] = {
+ WRITE_COEFEX(0x57, 0x03, 0x8aa6),
+ WRITE_COEF(0x06, 0x6100), /* Set MIC2 Vref gate to normal */
+ {}
+ };
+ static struct coef_fw coef0233[] = {
+ UPDATE_COEF(0x35, 0, 1<<14),
+ WRITE_COEF(0x06, 0x2100),
+ WRITE_COEF(0x1a, 0x0021),
+ WRITE_COEF(0x26, 0x008c),
+ {}
+ };
+ static struct coef_fw coef0292[] = {
+ WRITE_COEF(0x19, 0xa208),
+ WRITE_COEF(0x2e, 0xacf0),
+ {}
+ };
+ static struct coef_fw coef0293[] = {
+ UPDATE_COEFEX(0x57, 0x05, 0, 1<<15|1<<13), /* SET charge pump by verb */
+ UPDATE_COEFEX(0x57, 0x03, 1<<10, 0), /* SET EN_OSW to 0 */
+ UPDATE_COEF(0x1a, 1<<3, 0), /* Combo JD gating without LINE1-VREFO */
+ {}
+ };
+ static struct coef_fw coef0688[] = {
+ WRITE_COEF(0xb7, 0x802b),
+ WRITE_COEF(0xb5, 0x1040),
+ UPDATE_COEF(0xc3, 0, 1<<12),
+ {}
+ };
switch (codec->vendor_id) {
case 0x10ec0255:
alc_write_coef_idx(codec, 0x45, 0xc489);
snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
- alc_write_coefex_idx(codec, 0x57, 0x03, 0x8aa6);
- /* Set MIC2 Vref gate to normal */
- alc_write_coef_idx(codec, 0x06, 0x6100);
+ alc_process_coef_fw(codec, coef0255);
snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
break;
case 0x10ec0233:
case 0x10ec0283:
alc_write_coef_idx(codec, 0x45, 0xc429);
snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
- val = alc_read_coef_idx(codec, 0x35);
- alc_write_coef_idx(codec, 0x35, val | 1<<14);
- alc_write_coef_idx(codec, 0x06, 0x2100);
- alc_write_coef_idx(codec, 0x1a, 0x0021);
- alc_write_coef_idx(codec, 0x26, 0x008c);
+ alc_process_coef_fw(codec, coef0233);
snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
break;
case 0x10ec0292:
snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
- alc_write_coef_idx(codec, 0x19, 0xa208);
- alc_write_coef_idx(codec, 0x2e, 0xacf0);
+ alc_process_coef_fw(codec, coef0292);
break;
case 0x10ec0293:
/* Set to TRS mode */
alc_write_coef_idx(codec, 0x45, 0xc429);
snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
- /* SET charge pump by verb */
- val = alc_read_coefex_idx(codec, 0x57, 0x05);
- alc_write_coefex_idx(codec, 0x57, 0x05, (val & ~(1<<15|1<<13)) | (1<<15|1<<13));
- /* SET EN_OSW to 0 */
- val = alc_read_coefex_idx(codec, 0x57, 0x03);
- alc_write_coefex_idx(codec, 0x57, 0x03, (val & ~(1<<10)) | 0x0);
- /* Combo JD gating without LINE1-VREFO */
- val = alc_read_coef_idx(codec, 0x1a);
- alc_write_coef_idx(codec, 0x1a, (val & ~(1<<3)) | 0x0);
+ alc_process_coef_fw(codec, coef0293);
snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
break;
case 0x10ec0668:
alc_write_coef_idx(codec, 0x11, 0x0001);
snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
- alc_write_coef_idx(codec, 0xb7, 0x802b);
- alc_write_coef_idx(codec, 0xb5, 0x1040);
- val = alc_read_coef_idx(codec, 0xc3);
- alc_write_coef_idx(codec, 0xc3, val | 1<<12);
+ alc_process_coef_fw(codec, coef0688);
snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
break;
}
@@ -3743,40 +3496,54 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
static void alc_headset_mode_default(struct hda_codec *codec)
{
- int val;
+ static struct coef_fw coef0255[] = {
+ WRITE_COEF(0x45, 0xc089),
+ WRITE_COEF(0x45, 0xc489),
+ WRITE_COEFEX(0x57, 0x03, 0x8ea6),
+ WRITE_COEF(0x49, 0x0049),
+ {}
+ };
+ static struct coef_fw coef0233[] = {
+ WRITE_COEF(0x06, 0x2100),
+ WRITE_COEF(0x32, 0x4ea3),
+ {}
+ };
+ static struct coef_fw coef0292[] = {
+ WRITE_COEF(0x76, 0x000e),
+ WRITE_COEF(0x6c, 0x2400),
+ WRITE_COEF(0x6b, 0xc429),
+ WRITE_COEF(0x18, 0x7308),
+ {}
+ };
+ static struct coef_fw coef0293[] = {
+ UPDATE_COEF(0x4a, 0x000f, 0x000e), /* Combo Jack auto detect */
+ WRITE_COEF(0x45, 0xC429), /* Set to TRS type */
+ UPDATE_COEF(0x1a, 1<<3, 0), /* Combo JD gating without LINE1-VREFO */
+ {}
+ };
+ static struct coef_fw coef0688[] = {
+ WRITE_COEF(0x11, 0x0041),
+ WRITE_COEF(0x15, 0x0d40),
+ WRITE_COEF(0xb7, 0x802b),
+ {}
+ };
switch (codec->vendor_id) {
case 0x10ec0255:
- alc_write_coef_idx(codec, 0x45, 0xc089);
- alc_write_coef_idx(codec, 0x45, 0xc489);
- alc_write_coefex_idx(codec, 0x57, 0x03, 0x8ea6);
- alc_write_coef_idx(codec, 0x49, 0x0049);
+ alc_process_coef_fw(codec, coef0255);
break;
case 0x10ec0233:
case 0x10ec0283:
- alc_write_coef_idx(codec, 0x06, 0x2100);
- alc_write_coef_idx(codec, 0x32, 0x4ea3);
+ alc_process_coef_fw(codec, coef0233);
break;
case 0x10ec0292:
- alc_write_coef_idx(codec, 0x76, 0x000e);
- alc_write_coef_idx(codec, 0x6c, 0x2400);
- alc_write_coef_idx(codec, 0x6b, 0xc429);
- alc_write_coef_idx(codec, 0x18, 0x7308);
+ alc_process_coef_fw(codec, coef0292);
break;
case 0x10ec0293:
- /* Combo Jack auto detect */
- val = alc_read_coef_idx(codec, 0x4a);
- alc_write_coef_idx(codec, 0x4a, (val & 0xfff0) | 0x000e);
- /* Set to TRS type */
- alc_write_coef_idx(codec, 0x45, 0xC429);
- /* Combo JD gating without LINE1-VREFO */
- val = alc_read_coef_idx(codec, 0x1a);
- alc_write_coef_idx(codec, 0x1a, (val & ~(1<<3)) | 0x0);
+ alc_process_coef_fw(codec, coef0293);
break;
case 0x10ec0668:
- alc_write_coef_idx(codec, 0x11, 0x0041);
- alc_write_coef_idx(codec, 0x15, 0x0d40);
- alc_write_coef_idx(codec, 0xb7, 0x802b);
+ alc_process_coef_fw(codec, coef0688);
break;
}
codec_dbg(codec, "Headset jack set to headphone (default) mode.\n");
@@ -3785,37 +3552,52 @@ static void alc_headset_mode_default(struct hda_codec *codec)
/* Iphone type */
static void alc_headset_mode_ctia(struct hda_codec *codec)
{
- int val;
+ static struct coef_fw coef0255[] = {
+ WRITE_COEF(0x45, 0xd489), /* Set to CTIA type */
+ WRITE_COEF(0x1b, 0x0c2b),
+ WRITE_COEFEX(0x57, 0x03, 0x8ea6),
+ {}
+ };
+ static struct coef_fw coef0233[] = {
+ WRITE_COEF(0x45, 0xd429),
+ WRITE_COEF(0x1b, 0x0c2b),
+ WRITE_COEF(0x32, 0x4ea3),
+ {}
+ };
+ static struct coef_fw coef0292[] = {
+ WRITE_COEF(0x6b, 0xd429),
+ WRITE_COEF(0x76, 0x0008),
+ WRITE_COEF(0x18, 0x7388),
+ {}
+ };
+ static struct coef_fw coef0293[] = {
+ WRITE_COEF(0x45, 0xd429), /* Set to ctia type */
+ UPDATE_COEF(0x10, 7<<8, 7<<8), /* SET Line1 JD to 1 */
+ {}
+ };
+ static struct coef_fw coef0688[] = {
+ WRITE_COEF(0x11, 0x0001),
+ WRITE_COEF(0x15, 0x0d60),
+ WRITE_COEF(0xc3, 0x0000),
+ {}
+ };
switch (codec->vendor_id) {
case 0x10ec0255:
- /* Set to CTIA type */
- alc_write_coef_idx(codec, 0x45, 0xd489);
- alc_write_coef_idx(codec, 0x1b, 0x0c2b);
- alc_write_coefex_idx(codec, 0x57, 0x03, 0x8ea6);
+ alc_process_coef_fw(codec, coef0255);
break;
case 0x10ec0233:
case 0x10ec0283:
- alc_write_coef_idx(codec, 0x45, 0xd429);
- alc_write_coef_idx(codec, 0x1b, 0x0c2b);
- alc_write_coef_idx(codec, 0x32, 0x4ea3);
+ alc_process_coef_fw(codec, coef0233);
break;
case 0x10ec0292:
- alc_write_coef_idx(codec, 0x6b, 0xd429);
- alc_write_coef_idx(codec, 0x76, 0x0008);
- alc_write_coef_idx(codec, 0x18, 0x7388);
+ alc_process_coef_fw(codec, coef0292);
break;
case 0x10ec0293:
- /* Set to ctia type */
- alc_write_coef_idx(codec, 0x45, 0xd429);
- /* SET Line1 JD to 1 */
- val = alc_read_coef_idx(codec, 0x10);
- alc_write_coef_idx(codec, 0x10, (val & ~(7<<8)) | 7<<8);
+ alc_process_coef_fw(codec, coef0293);
break;
case 0x10ec0668:
- alc_write_coef_idx(codec, 0x11, 0x0001);
- alc_write_coef_idx(codec, 0x15, 0x0d60);
- alc_write_coef_idx(codec, 0xc3, 0x0000);
+ alc_process_coef_fw(codec, coef0688);
break;
}
codec_dbg(codec, "Headset jack set to iPhone-style headset mode.\n");
@@ -3824,37 +3606,52 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
/* Nokia type */
static void alc_headset_mode_omtp(struct hda_codec *codec)
{
- int val;
+ static struct coef_fw coef0255[] = {
+ WRITE_COEF(0x45, 0xe489), /* Set to OMTP Type */
+ WRITE_COEF(0x1b, 0x0c2b),
+ WRITE_COEFEX(0x57, 0x03, 0x8ea6),
+ {}
+ };
+ static struct coef_fw coef0233[] = {
+ WRITE_COEF(0x45, 0xe429),
+ WRITE_COEF(0x1b, 0x0c2b),
+ WRITE_COEF(0x32, 0x4ea3),
+ {}
+ };
+ static struct coef_fw coef0292[] = {
+ WRITE_COEF(0x6b, 0xe429),
+ WRITE_COEF(0x76, 0x0008),
+ WRITE_COEF(0x18, 0x7388),
+ {}
+ };
+ static struct coef_fw coef0293[] = {
+ WRITE_COEF(0x45, 0xe429), /* Set to omtp type */
+ UPDATE_COEF(0x10, 7<<8, 7<<8), /* SET Line1 JD to 1 */
+ {}
+ };
+ static struct coef_fw coef0688[] = {
+ WRITE_COEF(0x11, 0x0001),
+ WRITE_COEF(0x15, 0x0d50),
+ WRITE_COEF(0xc3, 0x0000),
+ {}
+ };
switch (codec->vendor_id) {
case 0x10ec0255:
- /* Set to OMTP Type */
- alc_write_coef_idx(codec, 0x45, 0xe489);
- alc_write_coef_idx(codec, 0x1b, 0x0c2b);
- alc_write_coefex_idx(codec, 0x57, 0x03, 0x8ea6);
+ alc_process_coef_fw(codec, coef0255);
break;
case 0x10ec0233:
case 0x10ec0283:
- alc_write_coef_idx(codec, 0x45, 0xe429);
- alc_write_coef_idx(codec, 0x1b, 0x0c2b);
- alc_write_coef_idx(codec, 0x32, 0x4ea3);
+ alc_process_coef_fw(codec, coef0233);
break;
case 0x10ec0292:
- alc_write_coef_idx(codec, 0x6b, 0xe429);
- alc_write_coef_idx(codec, 0x76, 0x0008);
- alc_write_coef_idx(codec, 0x18, 0x7388);
+ alc_process_coef_fw(codec, coef0292);
break;
case 0x10ec0293:
- /* Set to omtp type */
- alc_write_coef_idx(codec, 0x45, 0xe429);
- /* SET Line1 JD to 1 */
- val = alc_read_coef_idx(codec, 0x10);
- alc_write_coef_idx(codec, 0x10, (val & ~(7<<8)) | 7<<8);
+ alc_process_coef_fw(codec, coef0293);
break;
case 0x10ec0668:
- alc_write_coef_idx(codec, 0x11, 0x0001);
- alc_write_coef_idx(codec, 0x15, 0x0d50);
- alc_write_coef_idx(codec, 0xc3, 0x0000);
+ alc_process_coef_fw(codec, coef0688);
break;
}
codec_dbg(codec, "Headset jack set to Nokia-style headset mode.\n");
@@ -3865,13 +3662,28 @@ static void alc_determine_headset_type(struct hda_codec *codec)
int val;
bool is_ctia = false;
struct alc_spec *spec = codec->spec;
+ static struct coef_fw coef0255[] = {
+ WRITE_COEF(0x45, 0xd089), /* combo jack auto switch control (Check type) */
+ WRITE_COEF(0x49, 0x0149), /* combo jack auto switch control (Vref control) */
+ {}
+ };
+ static struct coef_fw coef0293[] = {
+ UPDATE_COEF(0x4a, 0x000f, 0x0008), /* Combo Jack auto detect */
+ WRITE_COEF(0x45, 0xD429), /* Set to ctia type */
+ {}
+ };
+ static struct coef_fw coef0688[] = {
+ WRITE_COEF(0x11, 0x0001),
+ WRITE_COEF(0xb7, 0x802b),
+ WRITE_COEF(0x15, 0x0d60),
+ WRITE_COEF(0xc3, 0x0c00),
+ {}
+ };
switch (codec->vendor_id) {
case 0x10ec0255:
- /* combo jack auto switch control(Check type)*/
- alc_write_coef_idx(codec, 0x45, 0xd089);
- /* combo jack auto switch control(Vref conteol) */
- alc_write_coef_idx(codec, 0x49, 0x0149);
+ alc_process_coef_fw(codec, coef0255);
msleep(300);
val = alc_read_coef_idx(codec, 0x46);
is_ctia = (val & 0x0070) == 0x0070;
@@ -3890,20 +3702,13 @@ static void alc_determine_headset_type(struct hda_codec *codec)
is_ctia = (val & 0x001c) == 0x001c;
break;
case 0x10ec0293:
- /* Combo Jack auto detect */
- val = alc_read_coef_idx(codec, 0x4a);
- alc_write_coef_idx(codec, 0x4a, (val & 0xfff0) | 0x0008);
- /* Set to ctia type */
- alc_write_coef_idx(codec, 0x45, 0xD429);
+ alc_process_coef_fw(codec, coef0293);
msleep(300);
val = alc_read_coef_idx(codec, 0x46);
is_ctia = (val & 0x0070) == 0x0070;
break;
case 0x10ec0668:
- alc_write_coef_idx(codec, 0x11, 0x0001);
- alc_write_coef_idx(codec, 0xb7, 0x802b);
- alc_write_coef_idx(codec, 0x15, 0x0d60);
- alc_write_coef_idx(codec, 0xc3, 0x0c00);
+ alc_process_coef_fw(codec, coef0688);
msleep(300);
val = alc_read_coef_idx(codec, 0xbe);
is_ctia = (val & 0x1c02) == 0x1c02;
@@ -3980,7 +3785,8 @@ static void alc_update_headset_mode_hook(struct hda_codec *codec,
alc_update_headset_mode(codec);
}
-static void alc_update_headset_jack_cb(struct hda_codec *codec, struct hda_jack_tbl *jack)
+static void alc_update_headset_jack_cb(struct hda_codec *codec,
+ struct hda_jack_callback *jack)
{
struct alc_spec *spec = codec->spec;
spec->current_headset_type = ALC_HEADSET_TYPE_UNKNOWN;
@@ -4039,11 +3845,15 @@ static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec,
static void alc255_set_default_jack_type(struct hda_codec *codec)
{
/* Set to iphone type */
- alc_write_coef_idx(codec, 0x1b, 0x880b);
- alc_write_coef_idx(codec, 0x45, 0xd089);
- alc_write_coef_idx(codec, 0x1b, 0x080b);
- alc_write_coef_idx(codec, 0x46, 0x0004);
- alc_write_coef_idx(codec, 0x1b, 0x0c0b);
+ static struct coef_fw fw[] = {
+ WRITE_COEF(0x1b, 0x880b),
+ WRITE_COEF(0x45, 0xd089),
+ WRITE_COEF(0x1b, 0x080b),
+ WRITE_COEF(0x46, 0x0004),
+ WRITE_COEF(0x1b, 0x0c0b),
+ {}
+ };
+ alc_process_coef_fw(codec, fw);
msleep(30);
}
@@ -4138,10 +3948,8 @@ static void alc_fixup_headset_mode_alc668(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
if (action == HDA_FIXUP_ACT_PRE_PROBE) {
- int val;
alc_write_coef_idx(codec, 0xc4, 0x8000);
- val = alc_read_coef_idx(codec, 0xc2);
- alc_write_coef_idx(codec, 0xc2, val & 0xfe);
+ alc_update_coef_idx(codec, 0xc2, ~0xfe, 0);
snd_hda_set_pin_ctl_cache(codec, 0x18, 0);
}
alc_fixup_headset_mode(codec, fix, action);
@@ -4218,7 +4026,7 @@ static void alc269_fixup_limit_int_mic_boost(struct hda_codec *codec,
}
static void alc283_hp_automute_hook(struct hda_codec *codec,
- struct hda_jack_tbl *jack)
+ struct hda_jack_callback *jack)
{
struct alc_spec *spec = codec->spec;
int vref;
@@ -4237,7 +4045,6 @@ static void alc283_fixup_chromebook(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
struct alc_spec *spec = codec->spec;
- int val;
switch (action) {
case HDA_FIXUP_ACT_PRE_PROBE:
@@ -4248,11 +4055,9 @@ static void alc283_fixup_chromebook(struct hda_codec *codec,
case HDA_FIXUP_ACT_INIT:
/* MIC2-VREF control */
/* Set to manual mode */
- val = alc_read_coef_idx(codec, 0x06);
- alc_write_coef_idx(codec, 0x06, val & ~0x000c);
+ alc_update_coef_idx(codec, 0x06, 0x000c, 0);
/* Enable Line1 input control by verb */
- val = alc_read_coef_idx(codec, 0x1a);
- alc_write_coef_idx(codec, 0x1a, val | (1 << 4));
+ alc_update_coef_idx(codec, 0x1a, 0, 1 << 4);
break;
}
}
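[Note] Several hunks in this file also fold read/mask/write pairs into a single alc_update_coef_idx(idx, mask, bits_set) call. The sketch below shows that read-modify-write helper pattern against a stand-in register array; the helper body here is an assumption for illustration, not copied from the driver.

#include <stdio.h>

static unsigned int coef_regs[256];

static unsigned int read_coef_idx(unsigned int idx)
{
    return coef_regs[idx];
}

static void write_coef_idx(unsigned int idx, unsigned int val)
{
    coef_regs[idx] = val;
}

/* Clear the bits in @mask, then set the bits in @bits_set. */
static void update_coef_idx(unsigned int idx, unsigned int mask,
                            unsigned int bits_set)
{
    unsigned int val = read_coef_idx(idx);

    write_coef_idx(idx, (val & ~mask) | bits_set);
}

int main(void)
{
    coef_regs[0x06] = 0x210d;

    /* equivalent of: val = read(0x06); write(0x06, val & ~0x000c); */
    update_coef_idx(0x06, 0x000c, 0);

    /* equivalent of: val = read(0x1a); write(0x1a, val | (1 << 4)); */
    update_coef_idx(0x1a, 0, 1 << 4);

    printf("0x06=0x%04x 0x1a=0x%04x\n", coef_regs[0x06], coef_regs[0x1a]);
    return 0;
}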
@@ -4261,7 +4066,6 @@ static void alc283_fixup_sense_combo_jack(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
struct alc_spec *spec = codec->spec;
- int val;
switch (action) {
case HDA_FIXUP_ACT_PRE_PROBE:
@@ -4270,8 +4074,7 @@ static void alc283_fixup_sense_combo_jack(struct hda_codec *codec,
case HDA_FIXUP_ACT_INIT:
/* MIC2-VREF control */
/* Set to manual mode */
- val = alc_read_coef_idx(codec, 0x06);
- alc_write_coef_idx(codec, 0x06, val & ~0x000c);
+ alc_update_coef_idx(codec, 0x06, 0x000c, 0);
break;
}
}
@@ -4309,7 +4112,6 @@ static void alc282_fixup_asus_tx300(struct hda_codec *codec,
spec->gen.auto_mute_via_amp = 1;
spec->gen.automute_hook = asus_tx300_automute;
snd_hda_jack_detect_enable_callback(codec, 0x1b,
- HDA_GEN_HP_EVENT,
snd_hda_gen_hp_automute);
break;
case HDA_FIXUP_ACT_BUILD:
@@ -4576,7 +4378,7 @@ static const struct hda_fixup alc269_fixups[] = {
},
[ALC269_FIXUP_INV_DMIC] = {
.type = HDA_FIXUP_FUNC,
- .v.func = alc_fixup_inv_dmic_0x12,
+ .v.func = alc_fixup_inv_dmic,
},
[ALC269_FIXUP_NO_SHUTUP] = {
.type = HDA_FIXUP_FUNC,
@@ -4887,122 +4689,45 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
- SND_PCI_QUIRK(0x1028, 0x05bd, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1028, 0x05be, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1028, 0x05c4, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1028, 0x05c5, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1028, 0x05c6, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1028, 0x05c7, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1028, 0x05c8, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1028, 0x05c9, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1028, 0x05ca, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1028, 0x05cb, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1028, 0x05cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1028, 0x05cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x05da, "Dell Vostro 5460", ALC290_FIXUP_SUBWOOFER),
- SND_PCI_QUIRK(0x1028, 0x05de, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1028, 0x05e0, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1028, 0x05e9, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1028, 0x05ea, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1028, 0x05eb, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1028, 0x05ec, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1028, 0x05ed, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1028, 0x05ee, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1028, 0x05f3, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x05f4, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x05f5, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x05f6, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1028, 0x05f8, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1028, 0x05f9, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1028, 0x05fb, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1028, 0x0606, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1028, 0x0608, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1028, 0x0609, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0610, "Dell", ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED),
- SND_PCI_QUIRK(0x1028, 0x0613, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1028, 0x0614, "Dell Inspiron 3135", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0615, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK),
SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK),
SND_PCI_QUIRK(0x1028, 0x061f, "Dell", ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED),
SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS_HSJACK),
- SND_PCI_QUIRK(0x1028, 0x063f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1028, 0x0668, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1028, 0x0669, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1028, 0x0684, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
SND_PCI_QUIRK(0x103c, 0x18e6, "HP", ALC269_FIXUP_HP_GPIO_LED),
- SND_PCI_QUIRK(0x103c, 0x1973, "HP Pavilion", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x1983, "HP Pavilion", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x218b, "HP", ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED),
/* ALC282 */
- SND_PCI_QUIRK(0x103c, 0x21f8, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x21f9, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x220d, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x220e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x220f, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x2210, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x2211, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x2212, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x2213, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x2214, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x2234, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
- SND_PCI_QUIRK(0x103c, 0x2235, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
SND_PCI_QUIRK(0x103c, 0x2236, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
SND_PCI_QUIRK(0x103c, 0x2237, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
SND_PCI_QUIRK(0x103c, 0x2238, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
SND_PCI_QUIRK(0x103c, 0x2239, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
- SND_PCI_QUIRK(0x103c, 0x2246, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
- SND_PCI_QUIRK(0x103c, 0x2247, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
- SND_PCI_QUIRK(0x103c, 0x2248, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
- SND_PCI_QUIRK(0x103c, 0x2249, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
- SND_PCI_QUIRK(0x103c, 0x224a, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
SND_PCI_QUIRK(0x103c, 0x224b, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
- SND_PCI_QUIRK(0x103c, 0x224c, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
- SND_PCI_QUIRK(0x103c, 0x224d, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
- SND_PCI_QUIRK(0x103c, 0x2266, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x2267, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x2268, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x2269, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x226a, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x226b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x226c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x226d, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x226e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x226f, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x227a, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x227b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x229e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x22a0, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x22b2, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x22b7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x22bf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x22c0, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x22c1, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x22c2, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x22cd, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x22ce, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x22cf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x22d0, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x22da, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
- SND_PCI_QUIRK(0x103c, 0x22db, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
SND_PCI_QUIRK(0x103c, 0x22dc, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
SND_PCI_QUIRK(0x103c, 0x22fb, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
SND_PCI_QUIRK(0x103c, 0x8004, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
/* ALC290 */
SND_PCI_QUIRK(0x103c, 0x221b, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
- SND_PCI_QUIRK(0x103c, 0x221c, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
- SND_PCI_QUIRK(0x103c, 0x221d, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
- SND_PCI_QUIRK(0x103c, 0x2220, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
SND_PCI_QUIRK(0x103c, 0x2221, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
- SND_PCI_QUIRK(0x103c, 0x2222, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
- SND_PCI_QUIRK(0x103c, 0x2223, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
- SND_PCI_QUIRK(0x103c, 0x2224, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
SND_PCI_QUIRK(0x103c, 0x2225, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
SND_PCI_QUIRK(0x103c, 0x2246, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
SND_PCI_QUIRK(0x103c, 0x2247, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
@@ -5017,8 +4742,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x2259, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
SND_PCI_QUIRK(0x103c, 0x2260, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x2261, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x2262, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x2263, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x2264, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x2265, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
@@ -5026,23 +4749,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x2273, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
SND_PCI_QUIRK(0x103c, 0x2277, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
SND_PCI_QUIRK(0x103c, 0x2278, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
- SND_PCI_QUIRK(0x103c, 0x227d, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x227e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x227f, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x2280, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x2281, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x2282, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x2289, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x228a, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x228b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x228c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x228d, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x228e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x22c5, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x22c6, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x22c7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x22c8, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- SND_PCI_QUIRK(0x103c, 0x22c3, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x22c4, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x2334, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x2335, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
@@ -5070,6 +4783,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x104d, 0x9099, "Sony VAIO S13", ALC275_FIXUP_SONY_DISABLE_AAMIX),
SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK),
SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
+ SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_BXBT2807_MIC),
SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
@@ -5085,12 +4799,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad T440s", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x220e, "Thinkpad T440p", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2210, "Thinkpad T540p", ALC292_FIXUP_TPT440_DOCK),
- SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
+ SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
@@ -5173,28 +4888,58 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{}
};
+#define ALC255_STANDARD_PINS \
+ {0x18, 0x411111f0}, \
+ {0x19, 0x411111f0}, \
+ {0x1a, 0x411111f0}, \
+ {0x1b, 0x411111f0}, \
+ {0x1e, 0x411111f0}
+
+#define ALC282_STANDARD_PINS \
+ {0x14, 0x90170110}, \
+ {0x18, 0x411111f0}, \
+ {0x1a, 0x411111f0}, \
+ {0x1b, 0x411111f0}, \
+ {0x1e, 0x411111f0}
+
+#define ALC290_STANDARD_PINS \
+ {0x12, 0x99a30130}, \
+ {0x13, 0x40000000}, \
+ {0x16, 0x411111f0}, \
+ {0x17, 0x411111f0}, \
+ {0x19, 0x411111f0}, \
+ {0x1b, 0x411111f0}, \
+ {0x1e, 0x411111f0}
+
+#define ALC292_STANDARD_PINS \
+ {0x14, 0x90170110}, \
+ {0x15, 0x0221401f}, \
+ {0x1a, 0x411111f0}, \
+ {0x1b, 0x411111f0}, \
+ {0x1d, 0x40700001}, \
+ {0x1e, 0x411111f0}
+
static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
+ ALC255_STANDARD_PINS,
+ {0x12, 0x40300000},
+ {0x14, 0x90170110},
+ {0x17, 0x411111f0},
+ {0x1d, 0x40538029},
+ {0x21, 0x02211020}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC255_STANDARD_PINS,
{0x12, 0x90a60140},
{0x14, 0x90170110},
{0x17, 0x40000000},
- {0x18, 0x411111f0},
- {0x19, 0x411111f0},
- {0x1a, 0x411111f0},
- {0x1b, 0x411111f0},
{0x1d, 0x40700001},
- {0x1e, 0x411111f0},
{0x21, 0x02211020}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC255_STANDARD_PINS,
{0x12, 0x90a60160},
{0x14, 0x90170120},
{0x17, 0x40000000},
- {0x18, 0x411111f0},
- {0x19, 0x411111f0},
- {0x1a, 0x411111f0},
- {0x1b, 0x411111f0},
{0x1d, 0x40700001},
- {0x1e, 0x411111f0},
{0x21, 0x02211030}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
{0x12, 0x90a60160},
@@ -5208,70 +4953,101 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x1e, 0x411111f0},
{0x21, 0x0321102f}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC255_STANDARD_PINS,
{0x12, 0x90a60160},
{0x14, 0x90170130},
{0x17, 0x40000000},
- {0x18, 0x411111f0},
- {0x19, 0x411111f0},
- {0x1a, 0x411111f0},
- {0x1b, 0x411111f0},
{0x1d, 0x40700001},
- {0x1e, 0x411111f0},
{0x21, 0x02211040}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC255_STANDARD_PINS,
{0x12, 0x90a60160},
{0x14, 0x90170140},
{0x17, 0x40000000},
- {0x18, 0x411111f0},
- {0x19, 0x411111f0},
- {0x1a, 0x411111f0},
- {0x1b, 0x411111f0},
{0x1d, 0x40700001},
- {0x1e, 0x411111f0},
{0x21, 0x02211050}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC255_STANDARD_PINS,
{0x12, 0x90a60170},
{0x14, 0x90170120},
{0x17, 0x40000000},
- {0x18, 0x411111f0},
- {0x19, 0x411111f0},
- {0x1a, 0x411111f0},
- {0x1b, 0x411111f0},
{0x1d, 0x40700001},
- {0x1e, 0x411111f0},
{0x21, 0x02211030}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC255_STANDARD_PINS,
{0x12, 0x90a60170},
{0x14, 0x90170130},
{0x17, 0x40000000},
- {0x18, 0x411111f0},
- {0x19, 0x411111f0},
- {0x1a, 0x411111f0},
- {0x1b, 0x411111f0},
{0x1d, 0x40700001},
- {0x1e, 0x411111f0},
{0x21, 0x02211040}),
+ SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED,
+ {0x12, 0x90a60140},
+ {0x13, 0x40000000},
+ {0x14, 0x90170110},
+ {0x15, 0x0421101f},
+ {0x16, 0x411111f0},
+ {0x17, 0x411111f0},
+ {0x18, 0x02811030},
+ {0x19, 0x411111f0},
+ {0x1a, 0x04a1103f},
+ {0x1b, 0x02011020},
+ {0x1d, 0x40700001},
+ {0x1e, 0x411111f0}),
SND_HDA_PIN_QUIRK(0x10ec0282, 0x103c, "HP 15 Touchsmart", ALC269_FIXUP_HP_MUTE_LED_MIC1,
+ ALC282_STANDARD_PINS,
{0x12, 0x99a30130},
- {0x14, 0x90170110},
{0x17, 0x40000000},
- {0x18, 0x411111f0},
{0x19, 0x03a11020},
- {0x1a, 0x411111f0},
- {0x1b, 0x411111f0},
{0x1d, 0x40f41905},
- {0x1e, 0x411111f0},
{0x21, 0x0321101f}),
+ SND_HDA_PIN_QUIRK(0x10ec0282, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
+ ALC282_STANDARD_PINS,
+ {0x12, 0x99a30130},
+ {0x17, 0x40020008},
+ {0x19, 0x03a11020},
+ {0x1d, 0x40e00001},
+ {0x21, 0x03211040}),
+ SND_HDA_PIN_QUIRK(0x10ec0282, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
+ ALC282_STANDARD_PINS,
+ {0x12, 0x99a30130},
+ {0x17, 0x40000000},
+ {0x19, 0x03a11030},
+ {0x1d, 0x40e00001},
+ {0x21, 0x03211020}),
+ SND_HDA_PIN_QUIRK(0x10ec0282, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
+ ALC282_STANDARD_PINS,
+ {0x12, 0x99a30130},
+ {0x17, 0x40000000},
+ {0x19, 0x03a11030},
+ {0x1d, 0x40f00001},
+ {0x21, 0x03211020}),
+ SND_HDA_PIN_QUIRK(0x10ec0282, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
+ ALC282_STANDARD_PINS,
+ {0x12, 0x99a30130},
+ {0x17, 0x40000000},
+ {0x19, 0x04a11020},
+ {0x1d, 0x40f00001},
+ {0x21, 0x0421101f}),
+ SND_HDA_PIN_QUIRK(0x10ec0282, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
+ ALC282_STANDARD_PINS,
+ {0x12, 0x99a30130},
+ {0x17, 0x40000000},
+ {0x19, 0x03a11030},
+ {0x1d, 0x40f00001},
+ {0x21, 0x04211020}),
+ SND_HDA_PIN_QUIRK(0x10ec0282, 0x103c, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED,
+ ALC282_STANDARD_PINS,
+ {0x12, 0x90a60140},
+ {0x17, 0x40000000},
+ {0x19, 0x04a11030},
+ {0x1d, 0x40f00001},
+ {0x21, 0x04211020}),
SND_HDA_PIN_QUIRK(0x10ec0283, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC282_STANDARD_PINS,
{0x12, 0x90a60130},
- {0x14, 0x90170110},
{0x17, 0x40020008},
- {0x18, 0x411111f0},
{0x19, 0x411111f0},
- {0x1a, 0x411111f0},
- {0x1b, 0x411111f0},
{0x1d, 0x40e00001},
- {0x1e, 0x411111f0},
{0x21, 0x0321101f}),
SND_HDA_PIN_QUIRK(0x10ec0283, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
{0x12, 0x90a60160},
@@ -5284,42 +5060,97 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x1d, 0x40700001},
{0x1e, 0x411111f0},
{0x21, 0x02211030}),
+ SND_HDA_PIN_QUIRK(0x10ec0283, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC282_STANDARD_PINS,
+ {0x12, 0x90a60130},
+ {0x17, 0x40020008},
+ {0x19, 0x03a11020},
+ {0x1d, 0x40e00001},
+ {0x21, 0x0321101f}),
+ SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
+ ALC290_STANDARD_PINS,
+ {0x14, 0x411111f0},
+ {0x15, 0x04211040},
+ {0x18, 0x90170112},
+ {0x1a, 0x04a11020},
+ {0x1d, 0x4075812d}),
+ SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
+ ALC290_STANDARD_PINS,
+ {0x14, 0x411111f0},
+ {0x15, 0x04211040},
+ {0x18, 0x90170110},
+ {0x1a, 0x04a11020},
+ {0x1d, 0x4075812d}),
+ SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
+ ALC290_STANDARD_PINS,
+ {0x14, 0x411111f0},
+ {0x15, 0x0421101f},
+ {0x18, 0x411111f0},
+ {0x1a, 0x04a11020},
+ {0x1d, 0x4075812d}),
+ SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
+ ALC290_STANDARD_PINS,
+ {0x14, 0x411111f0},
+ {0x15, 0x04211020},
+ {0x18, 0x411111f0},
+ {0x1a, 0x04a11040},
+ {0x1d, 0x4076a12d}),
+ SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
+ ALC290_STANDARD_PINS,
+ {0x14, 0x90170110},
+ {0x15, 0x04211020},
+ {0x18, 0x411111f0},
+ {0x1a, 0x04a11040},
+ {0x1d, 0x4076a12d}),
+ SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
+ ALC290_STANDARD_PINS,
+ {0x14, 0x90170110},
+ {0x15, 0x04211020},
+ {0x18, 0x411111f0},
+ {0x1a, 0x04a11020},
+ {0x1d, 0x4076a12d}),
+ SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
+ ALC290_STANDARD_PINS,
+ {0x14, 0x90170110},
+ {0x15, 0x0421101f},
+ {0x18, 0x411111f0},
+ {0x1a, 0x04a11020},
+ {0x1d, 0x4075812d}),
+ SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE,
+ ALC292_STANDARD_PINS,
+ {0x12, 0x90a60140},
+ {0x13, 0x411111f0},
+ {0x16, 0x01014020},
+ {0x18, 0x411111f0},
+ {0x19, 0x01a19030}),
+ SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE,
+ ALC292_STANDARD_PINS,
+ {0x12, 0x90a60140},
+ {0x13, 0x411111f0},
+ {0x16, 0x01014020},
+ {0x18, 0x02a19031},
+ {0x19, 0x01a1903e}),
SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
+ ALC292_STANDARD_PINS,
{0x12, 0x90a60140},
{0x13, 0x411111f0},
- {0x14, 0x90170110},
- {0x15, 0x0221401f},
{0x16, 0x411111f0},
{0x18, 0x411111f0},
- {0x19, 0x411111f0},
- {0x1a, 0x411111f0},
- {0x1b, 0x411111f0},
- {0x1d, 0x40700001},
- {0x1e, 0x411111f0}),
+ {0x19, 0x411111f0}),
SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC292_STANDARD_PINS,
{0x12, 0x40000000},
{0x13, 0x90a60140},
- {0x14, 0x90170110},
- {0x15, 0x0221401f},
{0x16, 0x21014020},
{0x18, 0x411111f0},
- {0x19, 0x21a19030},
- {0x1a, 0x411111f0},
- {0x1b, 0x411111f0},
- {0x1d, 0x40700001},
- {0x1e, 0x411111f0}),
+ {0x19, 0x21a19030}),
SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC292_STANDARD_PINS,
{0x12, 0x40000000},
{0x13, 0x90a60140},
- {0x14, 0x90170110},
- {0x15, 0x0221401f},
{0x16, 0x411111f0},
{0x18, 0x411111f0},
- {0x19, 0x411111f0},
- {0x1a, 0x411111f0},
- {0x1b, 0x411111f0},
- {0x1d, 0x40700001},
- {0x1e, 0x411111f0}),
+ {0x19, 0x411111f0}),
{}
};
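[Note] The ALC255/ALC282/ALC290/ALC292_STANDARD_PINS macros introduced above factor the pin entries shared by many SND_HDA_PIN_QUIRK() rows into one place, so each row only lists the pins that differ. A small userspace sketch of the same macro-shared-initializer idea, with simplified stand-in structures rather than the real snd_hda_pin_quirk machinery:

#include <stdio.h>

struct pincfg {
    unsigned char nid;
    unsigned int cfg;
};

#define STD_PINS \
    { 0x18, 0x411111f0 }, \
    { 0x19, 0x411111f0 }, \
    { 0x1a, 0x411111f0 }

static const struct pincfg quirk_a[] = {
    STD_PINS,
    { 0x12, 0x90a60140 },   /* board-specific entries follow the macro */
    { 0x21, 0x02211020 },
    {}
};

int main(void)
{
    const struct pincfg *p;

    for (p = quirk_a; p->nid; p++)
        printf("nid 0x%02x -> 0x%08x\n", p->nid, p->cfg);
    return 0;
}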
@@ -5342,10 +5173,8 @@ static void alc269_fill_coef(struct hda_codec *codec)
}
if ((alc_get_coef0(codec) & 0x00ff) == 0x017) {
- val = alc_read_coef_idx(codec, 0x04);
/* Power up output pin */
- if (val != -1)
- alc_write_coef_idx(codec, 0x04, val | (1<<11));
+ alc_update_coef_idx(codec, 0x04, 0, 1<<11);
}
if ((alc_get_coef0(codec) & 0x00ff) == 0x018) {
@@ -5361,13 +5190,11 @@ static void alc269_fill_coef(struct hda_codec *codec)
}
}
- val = alc_read_coef_idx(codec, 0xd); /* Class D */
- if (val != -1)
- alc_write_coef_idx(codec, 0xd, val | (1<<14));
+ /* Class D */
+ alc_update_coef_idx(codec, 0xd, 0, 1<<14);
- val = alc_read_coef_idx(codec, 0x4); /* HP */
- if (val != -1)
- alc_write_coef_idx(codec, 0x4, val | (1<<11));
+ /* HP */
+ alc_update_coef_idx(codec, 0x4, 0, 1<<11);
}
/*
@@ -6012,7 +5839,7 @@ static const struct hda_fixup alc662_fixups[] = {
},
[ALC662_FIXUP_INV_DMIC] = {
.type = HDA_FIXUP_FUNC,
- .v.func = alc_fixup_inv_dmic_0x12,
+ .v.func = alc_fixup_inv_dmic,
},
[ALC668_FIXUP_DELL_XPS13] = {
.type = HDA_FIXUP_FUNC,
@@ -6247,16 +6074,14 @@ static const struct snd_hda_pin_quirk alc662_pin_fixup_tbl[] = {
static void alc662_fill_coef(struct hda_codec *codec)
{
- int val, coef;
+ int coef;
coef = alc_get_coef0(codec);
switch (codec->vendor_id) {
case 0x10ec0662:
- if ((coef & 0x00f0) == 0x0030) {
- val = alc_read_coef_idx(codec, 0x4); /* EAPD Ctrl */
- alc_write_coef_idx(codec, 0x4, val & ~(1<<10));
- }
+ if ((coef & 0x00f0) == 0x0030)
+ alc_update_coef_idx(codec, 0x4, 1<<10, 0); /* EAPD Ctrl */
break;
case 0x10ec0272:
case 0x10ec0273:
@@ -6265,8 +6090,7 @@ static void alc662_fill_coef(struct hda_codec *codec)
case 0x10ec0670:
case 0x10ec0671:
case 0x10ec0672:
- val = alc_read_coef_idx(codec, 0xd); /* EAPD Ctrl */
- alc_write_coef_idx(codec, 0xd, val | (1<<14));
+ alc_update_coef_idx(codec, 0xd, 0, 1<<14); /* EAPD Ctrl */
break;
}
}
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 98cd1908c039..4f6413e01c13 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -32,7 +32,6 @@
#include <linux/module.h>
#include <sound/core.h>
#include <sound/jack.h>
-#include <sound/tlv.h>
#include "hda_codec.h"
#include "hda_local.h"
#include "hda_auto_parser.h"
@@ -41,11 +40,6 @@
#include "hda_generic.h"
enum {
- STAC_VREF_EVENT = 8,
- STAC_PWR_EVENT,
-};
-
-enum {
STAC_REF,
STAC_9200_OQO,
STAC_9200_DELL_D21,
@@ -487,7 +481,7 @@ static void stac_toggle_power_map(struct hda_codec *codec, hda_nid_t nid,
/* update power bit per jack plug/unplug */
static void jack_update_power(struct hda_codec *codec,
- struct hda_jack_tbl *jack)
+ struct hda_jack_callback *jack)
{
struct sigmatel_spec *spec = codec->spec;
int i;
@@ -495,9 +489,9 @@ static void jack_update_power(struct hda_codec *codec,
if (!spec->num_pwrs)
return;
- if (jack && jack->nid) {
- stac_toggle_power_map(codec, jack->nid,
- snd_hda_jack_detect(codec, jack->nid),
+ if (jack && jack->tbl->nid) {
+ stac_toggle_power_map(codec, jack->tbl->nid,
+ snd_hda_jack_detect(codec, jack->tbl->nid),
true);
return;
}
@@ -505,42 +499,19 @@ static void jack_update_power(struct hda_codec *codec,
/* update all jacks */
for (i = 0; i < spec->num_pwrs; i++) {
hda_nid_t nid = spec->pwr_nids[i];
- jack = snd_hda_jack_tbl_get(codec, nid);
- if (!jack || !jack->action)
+ if (!snd_hda_jack_tbl_get(codec, nid))
continue;
- if (jack->action == STAC_PWR_EVENT ||
- jack->action <= HDA_GEN_LAST_EVENT)
- stac_toggle_power_map(codec, nid,
- snd_hda_jack_detect(codec, nid),
- false);
+ stac_toggle_power_map(codec, nid,
+ snd_hda_jack_detect(codec, nid),
+ false);
}
snd_hda_codec_write(codec, codec->afg, 0, AC_VERB_IDT_SET_POWER_MAP,
spec->power_map_bits);
}
-static void stac_hp_automute(struct hda_codec *codec,
- struct hda_jack_tbl *jack)
-{
- snd_hda_gen_hp_automute(codec, jack);
- jack_update_power(codec, jack);
-}
-
-static void stac_line_automute(struct hda_codec *codec,
- struct hda_jack_tbl *jack)
-{
- snd_hda_gen_line_automute(codec, jack);
- jack_update_power(codec, jack);
-}
-
-static void stac_mic_autoswitch(struct hda_codec *codec,
- struct hda_jack_tbl *jack)
-{
- snd_hda_gen_mic_autoswitch(codec, jack);
- jack_update_power(codec, jack);
-}
-
-static void stac_vref_event(struct hda_codec *codec, struct hda_jack_tbl *event)
+static void stac_vref_event(struct hda_codec *codec,
+ struct hda_jack_callback *event)
{
unsigned int data;
@@ -563,13 +534,10 @@ static void stac_init_power_map(struct hda_codec *codec)
hda_nid_t nid = spec->pwr_nids[i];
unsigned int def_conf = snd_hda_codec_get_pincfg(codec, nid);
def_conf = get_defcfg_connect(def_conf);
- if (snd_hda_jack_tbl_get(codec, nid))
- continue;
if (def_conf == AC_JACK_PORT_COMPLEX &&
spec->vref_mute_led_nid != nid &&
is_jack_detectable(codec, nid)) {
snd_hda_jack_detect_enable_callback(codec, nid,
- STAC_PWR_EVENT,
jack_update_power);
} else {
if (def_conf == AC_JACK_PORT_NONE)
@@ -3020,7 +2988,7 @@ static void stac92hd71bxx_fixup_hp_m4(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
struct sigmatel_spec *spec = codec->spec;
- struct hda_jack_tbl *jack;
+ struct hda_jack_callback *jack;
if (action != HDA_FIXUP_ACT_PRE_PROBE)
return;
@@ -3028,11 +2996,9 @@ static void stac92hd71bxx_fixup_hp_m4(struct hda_codec *codec,
/* Enable VREF power saving on GPIO1 detect */
snd_hda_codec_write_cache(codec, codec->afg, 0,
AC_VERB_SET_GPIO_UNSOLICITED_RSP_MASK, 0x02);
- snd_hda_jack_detect_enable_callback(codec, codec->afg,
- STAC_VREF_EVENT,
- stac_vref_event);
- jack = snd_hda_jack_tbl_get(codec, codec->afg);
- if (jack)
+ jack = snd_hda_jack_detect_enable_callback(codec, codec->afg,
+ stac_vref_event);
+ if (!IS_ERR(jack))
jack->private_data = 0x02;
spec->gpio_mask |= 0x02;
@@ -4044,7 +4010,7 @@ static void stac9205_fixup_dell_m43(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
struct sigmatel_spec *spec = codec->spec;
- struct hda_jack_tbl *jack;
+ struct hda_jack_callback *jack;
if (action == HDA_FIXUP_ACT_PRE_PROBE) {
snd_hda_apply_pincfgs(codec, dell_9205_m43_pin_configs);
@@ -4052,11 +4018,9 @@ static void stac9205_fixup_dell_m43(struct hda_codec *codec,
/* Enable unsol response for GPIO4/Dock HP connection */
snd_hda_codec_write_cache(codec, codec->afg, 0,
AC_VERB_SET_GPIO_UNSOLICITED_RSP_MASK, 0x10);
- snd_hda_jack_detect_enable_callback(codec, codec->afg,
- STAC_VREF_EVENT,
- stac_vref_event);
- jack = snd_hda_jack_tbl_get(codec, codec->afg);
- if (jack)
+ jack = snd_hda_jack_detect_enable_callback(codec, codec->afg,
+ stac_vref_event);
+ if (!IS_ERR(jack))
jack->private_data = 0x01;
spec->gpio_dir = 0x0b;
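[Note] In the sigmatel hunks above, snd_hda_jack_detect_enable_callback() now returns the callback object (or an ERR_PTR) directly, so the fixups check IS_ERR() and set private_data on the result instead of looking the jack table entry up afterwards. The compilable userspace sketch below shows that return-the-object-or-ERR_PTR flow; IS_ERR()/ERR_PTR() and the registration helper are reimplemented here purely so the example builds, and are not the kernel versions.

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct jack_callback {
    void (*func)(void *ctx, struct jack_callback *cb);
    unsigned long private_data;
};

#define MAX_ERRNO 4095
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline int IS_ERR(const void *ptr)
{
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static struct jack_callback *register_jack_callback(int nid,
        void (*func)(void *, struct jack_callback *))
{
    struct jack_callback *cb;

    if (nid < 0)                        /* pretend invalid NIDs fail */
        return ERR_PTR(-EINVAL);
    cb = calloc(1, sizeof(*cb));
    if (!cb)
        return ERR_PTR(-ENOMEM);
    cb->func = func;
    return cb;
}

static void vref_event(void *ctx, struct jack_callback *cb)
{
    (void)ctx;
    printf("vref event, private_data=0x%lx\n", cb->private_data);
}

int main(void)
{
    struct jack_callback *jack;

    jack = register_jack_callback(0x01, vref_event);
    if (!IS_ERR(jack))
        jack->private_data = 0x02;      /* as in the fixup above */

    jack->func(NULL, jack);
    free(jack);
    return 0;
}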
@@ -4219,17 +4183,11 @@ static int stac_parse_auto_config(struct hda_codec *codec)
spec->gen.pcm_capture_hook = stac_capture_pcm_hook;
spec->gen.automute_hook = stac_update_outputs;
- spec->gen.hp_automute_hook = stac_hp_automute;
- spec->gen.line_automute_hook = stac_line_automute;
- spec->gen.mic_autoswitch_hook = stac_mic_autoswitch;
err = snd_hda_gen_parse_auto_config(codec, &spec->gen.autocfg);
if (err < 0)
return err;
- /* minimum value is actually mute */
- spec->gen.vmaster_tlv[3] |= TLV_DB_SCALE_MUTE;
-
/* setup analog beep controls */
if (spec->anabeep_nid > 0) {
err = stac_auto_create_beep_ctls(codec,
@@ -4276,16 +4234,8 @@ static int stac_parse_auto_config(struct hda_codec *codec)
return err;
}
- return 0;
-}
-
-static int stac_build_controls(struct hda_codec *codec)
-{
- int err = snd_hda_gen_build_controls(codec);
-
- if (err < 0)
- return err;
stac_init_power_map(codec);
+
return 0;
}
@@ -4399,7 +4349,7 @@ static int stac_suspend(struct hda_codec *codec)
#endif /* CONFIG_PM */
static const struct hda_codec_ops stac_patch_ops = {
- .build_controls = stac_build_controls,
+ .build_controls = snd_hda_gen_build_controls,
.build_pcms = snd_hda_gen_build_pcms,
.init = stac_init,
.free = stac_free,
@@ -4420,6 +4370,7 @@ static int alloc_stac_spec(struct hda_codec *codec)
snd_hda_gen_spec_init(&spec->gen);
codec->spec = spec;
codec->no_trigger_sense = 1; /* seems common with STAC/IDT codecs */
+ spec->gen.dac_min_mute = true;
return 0;
}
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index 778166259b3e..6c206b6c8d65 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -118,7 +118,6 @@ static void via_playback_pcm_hook(struct hda_pcm_stream *hinfo,
struct hda_codec *codec,
struct snd_pcm_substream *substream,
int action);
-static void via_hp_automute(struct hda_codec *codec, struct hda_jack_tbl *tbl);
static struct via_spec *via_new_spec(struct hda_codec *codec)
{
@@ -575,25 +574,12 @@ static const struct snd_kcontrol_new vt1708_jack_detect_ctl[] = {
{} /* terminator */
};
-static void via_hp_automute(struct hda_codec *codec, struct hda_jack_tbl *tbl)
+static void via_jack_powerstate_event(struct hda_codec *codec,
+ struct hda_jack_callback *tbl)
{
set_widgets_power_state(codec);
- snd_hda_gen_hp_automute(codec, tbl);
}
-static void via_line_automute(struct hda_codec *codec, struct hda_jack_tbl *tbl)
-{
- set_widgets_power_state(codec);
- snd_hda_gen_line_automute(codec, tbl);
-}
-
-static void via_jack_powerstate_event(struct hda_codec *codec, struct hda_jack_tbl *tbl)
-{
- set_widgets_power_state(codec);
-}
-
-#define VIA_JACK_EVENT (HDA_GEN_LAST_EVENT + 1)
-
static void via_set_jack_unsol_events(struct hda_codec *codec)
{
struct via_spec *spec = codec->spec;
@@ -601,25 +587,17 @@ static void via_set_jack_unsol_events(struct hda_codec *codec)
hda_nid_t pin;
int i;
- spec->gen.hp_automute_hook = via_hp_automute;
- if (cfg->speaker_pins[0])
- spec->gen.line_automute_hook = via_line_automute;
-
for (i = 0; i < cfg->line_outs; i++) {
pin = cfg->line_out_pins[i];
- if (pin && !snd_hda_jack_tbl_get(codec, pin) &&
- is_jack_detectable(codec, pin))
+ if (pin && is_jack_detectable(codec, pin))
snd_hda_jack_detect_enable_callback(codec, pin,
- VIA_JACK_EVENT,
via_jack_powerstate_event);
}
for (i = 0; i < cfg->num_inputs; i++) {
pin = cfg->line_out_pins[i];
- if (pin && !snd_hda_jack_tbl_get(codec, pin) &&
- is_jack_detectable(codec, pin))
+ if (pin && is_jack_detectable(codec, pin))
snd_hda_jack_detect_enable_callback(codec, pin,
- VIA_JACK_EVENT,
via_jack_powerstate_event);
}
}
diff --git a/sound/pci/ice1712/ice1712.c b/sound/pci/ice1712/ice1712.c
index 87f7fc41d4f2..206ed2cbcef9 100644
--- a/sound/pci/ice1712/ice1712.c
+++ b/sound/pci/ice1712/ice1712.c
@@ -2528,7 +2528,7 @@ static int snd_ice1712_free(struct snd_ice1712 *ice)
if (!ice->port)
goto __hw_end;
/* mask all interrupts */
- outb(0xc0, ICEMT(ice, IRQ));
+ outb(ICE1712_MULTI_CAPTURE | ICE1712_MULTI_PLAYBACK, ICEMT(ice, IRQ));
outb(0xff, ICEREG(ice, IRQMASK));
/* --- */
__hw_end:
diff --git a/sound/pci/lx6464es/lx6464es.c b/sound/pci/lx6464es/lx6464es.c
index a671f0865f71..601315a1f58f 100644
--- a/sound/pci/lx6464es/lx6464es.c
+++ b/sound/pci/lx6464es/lx6464es.c
@@ -279,7 +279,6 @@ static snd_pcm_uframes_t lx_pcm_stream_pointer(struct snd_pcm_substream
{
struct lx6464es *chip = snd_pcm_substream_chip(substream);
snd_pcm_uframes_t pos;
- unsigned long flags;
int is_capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE);
struct lx_stream *lx_stream = is_capture ? &chip->capture_stream :
@@ -287,9 +286,9 @@ static snd_pcm_uframes_t lx_pcm_stream_pointer(struct snd_pcm_substream
dev_dbg(chip->card->dev, "->lx_pcm_stream_pointer\n");
- spin_lock_irqsave(&chip->lock, flags);
+ mutex_lock(&chip->lock);
pos = lx_stream->frame_pos * substream->runtime->period_size;
- spin_unlock_irqrestore(&chip->lock, flags);
+ mutex_unlock(&chip->lock);
dev_dbg(chip->card->dev, "stream_pointer at %ld\n", pos);
return pos;
@@ -485,8 +484,8 @@ static void lx_trigger_stop(struct lx6464es *chip, struct lx_stream *lx_stream)
}
-static void lx_trigger_tasklet_dispatch_stream(struct lx6464es *chip,
- struct lx_stream *lx_stream)
+static void lx_trigger_dispatch_stream(struct lx6464es *chip,
+ struct lx_stream *lx_stream)
{
switch (lx_stream->status) {
case LX_STREAM_STATUS_SCHEDULE_RUN:
@@ -502,24 +501,12 @@ static void lx_trigger_tasklet_dispatch_stream(struct lx6464es *chip,
}
}
-static void lx_trigger_tasklet(unsigned long data)
-{
- struct lx6464es *chip = (struct lx6464es *)data;
- unsigned long flags;
-
- dev_dbg(chip->card->dev, "->lx_trigger_tasklet\n");
-
- spin_lock_irqsave(&chip->lock, flags);
- lx_trigger_tasklet_dispatch_stream(chip, &chip->capture_stream);
- lx_trigger_tasklet_dispatch_stream(chip, &chip->playback_stream);
- spin_unlock_irqrestore(&chip->lock, flags);
-}
-
static int lx_pcm_trigger_dispatch(struct lx6464es *chip,
struct lx_stream *lx_stream, int cmd)
{
int err = 0;
+ mutex_lock(&chip->lock);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
lx_stream->status = LX_STREAM_STATUS_SCHEDULE_RUN;
@@ -533,9 +520,12 @@ static int lx_pcm_trigger_dispatch(struct lx6464es *chip,
err = -EINVAL;
goto exit;
}
- tasklet_schedule(&chip->trigger_tasklet);
+
+ lx_trigger_dispatch_stream(chip, &chip->capture_stream);
+ lx_trigger_dispatch_stream(chip, &chip->playback_stream);
exit:
+ mutex_unlock(&chip->lock);
return err;
}
@@ -861,6 +851,7 @@ static int lx_pcm_create(struct lx6464es *chip)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &lx_ops_capture);
pcm->info_flags = 0;
+ pcm->nonatomic = true;
strcpy(pcm->name, card_name);
err = snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
@@ -1009,15 +1000,9 @@ static int snd_lx6464es_create(struct snd_card *card,
chip->irq = -1;
/* initialize synchronization structs */
- spin_lock_init(&chip->lock);
- spin_lock_init(&chip->msg_lock);
+ mutex_init(&chip->lock);
+ mutex_init(&chip->msg_lock);
mutex_init(&chip->setup_mutex);
- tasklet_init(&chip->trigger_tasklet, lx_trigger_tasklet,
- (unsigned long)chip);
- tasklet_init(&chip->tasklet_capture, lx_tasklet_capture,
- (unsigned long)chip);
- tasklet_init(&chip->tasklet_playback, lx_tasklet_playback,
- (unsigned long)chip);
/* request resources */
err = pci_request_regions(pci, card_name);
@@ -1032,8 +1017,8 @@ static int snd_lx6464es_create(struct snd_card *card,
/* dsp port */
chip->port_dsp_bar = pci_ioremap_bar(pci, 2);
- err = request_irq(pci->irq, lx_interrupt, IRQF_SHARED,
- KBUILD_MODNAME, chip);
+ err = request_threaded_irq(pci->irq, lx_interrupt, lx_threaded_irq,
+ IRQF_SHARED, KBUILD_MODNAME, chip);
if (err) {
dev_err(card->dev, "unable to grab IRQ %d\n", pci->irq);
goto request_irq_failed;
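[Note] The lx6464es hunks convert the driver from tasklets plus spinlocks to a threaded IRQ plus mutexes (with pcm->nonatomic set so the PCM callbacks may sleep). The pthread sketch below illustrates the same split: a quick "hard" handler that only records the interrupt source and wakes a handler thread, and a thread that does the heavy work under a mutex. All names and the irqsrc flag here are stand-ins for illustration, not the driver's code.

/* build: cc -o threaded_irq_sketch threaded_irq_sketch.c -lpthread */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;
static unsigned int irqsrc;             /* what the "hard" handler saw */

/* quick part: runs in "interrupt" context, does no heavy work */
static void hard_irq_handler(unsigned int source)
{
    pthread_mutex_lock(&lock);
    irqsrc |= source;
    pthread_cond_signal(&wake);         /* equivalent of IRQ_WAKE_THREAD */
    pthread_mutex_unlock(&lock);
}

/* threaded part: sleeps, then does the buffer handling under the mutex */
static void *threaded_irq_handler(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    while (!irqsrc)
        pthread_cond_wait(&wake, &lock);
    printf("handling irqsrc=0x%x in thread context\n", irqsrc);
    irqsrc = 0;
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t thread;

    pthread_create(&thread, NULL, threaded_irq_handler, NULL);
    usleep(10000);
    hard_irq_handler(0x1);              /* pretend the device raised an IRQ */
    pthread_join(thread, NULL);
    return 0;
}

Because the heavy path now runs in a sleepable thread rather than a tasklet, the message and stream locks can become mutexes, which is what the remaining hunks in lx_core.c do wholesale.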
diff --git a/sound/pci/lx6464es/lx6464es.h b/sound/pci/lx6464es/lx6464es.h
index 6792eda9c9a5..1bec187d772f 100644
--- a/sound/pci/lx6464es/lx6464es.h
+++ b/sound/pci/lx6464es/lx6464es.h
@@ -71,14 +71,10 @@ struct lx6464es {
u8 mac_address[6];
- spinlock_t lock; /* interrupt spinlock */
+ struct mutex lock; /* interrupt lock */
struct mutex setup_mutex; /* mutex used in hw_params, open
* and close */
- struct tasklet_struct trigger_tasklet; /* trigger tasklet */
- struct tasklet_struct tasklet_capture;
- struct tasklet_struct tasklet_playback;
-
/* ports */
unsigned long port_plx; /* io port (size=256) */
void __iomem *port_plx_remapped; /* remapped plx port */
@@ -87,8 +83,9 @@ struct lx6464es {
* size=8K) */
/* messaging */
- spinlock_t msg_lock; /* message spinlock */
+ struct mutex msg_lock; /* message lock */
struct lx_rmh rmh;
+ u32 irqsrc;
/* configuration */
uint freq_ratio : 2;
diff --git a/sound/pci/lx6464es/lx_core.c b/sound/pci/lx6464es/lx_core.c
index e8f38e5df10a..f3d62020ef66 100644
--- a/sound/pci/lx6464es/lx_core.c
+++ b/sound/pci/lx6464es/lx_core.c
@@ -332,27 +332,25 @@ polling_successful:
int lx_dsp_get_version(struct lx6464es *chip, u32 *rdsp_version)
{
u16 ret;
- unsigned long flags;
- spin_lock_irqsave(&chip->msg_lock, flags);
+ mutex_lock(&chip->msg_lock);
lx_message_init(&chip->rmh, CMD_01_GET_SYS_CFG);
ret = lx_message_send_atomic(chip, &chip->rmh);
*rdsp_version = chip->rmh.stat[1];
- spin_unlock_irqrestore(&chip->msg_lock, flags);
+ mutex_unlock(&chip->msg_lock);
return ret;
}
int lx_dsp_get_clock_frequency(struct lx6464es *chip, u32 *rfreq)
{
u16 ret = 0;
- unsigned long flags;
u32 freq_raw = 0;
u32 freq = 0;
u32 frequency = 0;
- spin_lock_irqsave(&chip->msg_lock, flags);
+ mutex_lock(&chip->msg_lock);
lx_message_init(&chip->rmh, CMD_01_GET_SYS_CFG);
ret = lx_message_send_atomic(chip, &chip->rmh);
@@ -370,7 +368,7 @@ int lx_dsp_get_clock_frequency(struct lx6464es *chip, u32 *rfreq)
frequency = 48000;
}
- spin_unlock_irqrestore(&chip->msg_lock, flags);
+ mutex_unlock(&chip->msg_lock);
*rfreq = frequency * chip->freq_ratio;
@@ -398,25 +396,23 @@ int lx_dsp_get_mac(struct lx6464es *chip)
int lx_dsp_set_granularity(struct lx6464es *chip, u32 gran)
{
- unsigned long flags;
int ret;
- spin_lock_irqsave(&chip->msg_lock, flags);
+ mutex_lock(&chip->msg_lock);
lx_message_init(&chip->rmh, CMD_02_SET_GRANULARITY);
chip->rmh.cmd[0] |= gran;
ret = lx_message_send_atomic(chip, &chip->rmh);
- spin_unlock_irqrestore(&chip->msg_lock, flags);
+ mutex_unlock(&chip->msg_lock);
return ret;
}
int lx_dsp_read_async_events(struct lx6464es *chip, u32 *data)
{
- unsigned long flags;
int ret;
- spin_lock_irqsave(&chip->msg_lock, flags);
+ mutex_lock(&chip->msg_lock);
lx_message_init(&chip->rmh, CMD_04_GET_EVENT);
chip->rmh.stat_len = 9; /* we don't necessarily need the full length */
@@ -426,7 +422,7 @@ int lx_dsp_read_async_events(struct lx6464es *chip, u32 *data)
if (!ret)
memcpy(data, chip->rmh.stat, chip->rmh.stat_len * sizeof(u32));
- spin_unlock_irqrestore(&chip->msg_lock, flags);
+ mutex_unlock(&chip->msg_lock);
return ret;
}
@@ -440,18 +436,16 @@ int lx_pipe_allocate(struct lx6464es *chip, u32 pipe, int is_capture,
int channels)
{
int err;
- unsigned long flags;
-
u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
- spin_lock_irqsave(&chip->msg_lock, flags);
+ mutex_lock(&chip->msg_lock);
lx_message_init(&chip->rmh, CMD_06_ALLOCATE_PIPE);
chip->rmh.cmd[0] |= pipe_cmd;
chip->rmh.cmd[0] |= channels;
err = lx_message_send_atomic(chip, &chip->rmh);
- spin_unlock_irqrestore(&chip->msg_lock, flags);
+ mutex_unlock(&chip->msg_lock);
if (err != 0)
dev_err(chip->card->dev, "could not allocate pipe\n");
@@ -462,17 +456,15 @@ int lx_pipe_allocate(struct lx6464es *chip, u32 pipe, int is_capture,
int lx_pipe_release(struct lx6464es *chip, u32 pipe, int is_capture)
{
int err;
- unsigned long flags;
-
u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
- spin_lock_irqsave(&chip->msg_lock, flags);
+ mutex_lock(&chip->msg_lock);
lx_message_init(&chip->rmh, CMD_07_RELEASE_PIPE);
chip->rmh.cmd[0] |= pipe_cmd;
err = lx_message_send_atomic(chip, &chip->rmh);
- spin_unlock_irqrestore(&chip->msg_lock, flags);
+ mutex_unlock(&chip->msg_lock);
return err;
}
@@ -481,8 +473,6 @@ int lx_buffer_ask(struct lx6464es *chip, u32 pipe, int is_capture,
u32 *r_needed, u32 *r_freed, u32 *size_array)
{
int err;
- unsigned long flags;
-
u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
#ifdef CONFIG_SND_DEBUG
@@ -493,7 +483,7 @@ int lx_buffer_ask(struct lx6464es *chip, u32 pipe, int is_capture,
*r_needed = 0;
*r_freed = 0;
- spin_lock_irqsave(&chip->msg_lock, flags);
+ mutex_lock(&chip->msg_lock);
lx_message_init(&chip->rmh, CMD_08_ASK_BUFFERS);
chip->rmh.cmd[0] |= pipe_cmd;
@@ -527,7 +517,7 @@ int lx_buffer_ask(struct lx6464es *chip, u32 pipe, int is_capture,
}
}
- spin_unlock_irqrestore(&chip->msg_lock, flags);
+ mutex_unlock(&chip->msg_lock);
return err;
}
@@ -535,36 +525,32 @@ int lx_buffer_ask(struct lx6464es *chip, u32 pipe, int is_capture,
int lx_pipe_stop(struct lx6464es *chip, u32 pipe, int is_capture)
{
int err;
- unsigned long flags;
-
u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
- spin_lock_irqsave(&chip->msg_lock, flags);
+ mutex_lock(&chip->msg_lock);
lx_message_init(&chip->rmh, CMD_09_STOP_PIPE);
chip->rmh.cmd[0] |= pipe_cmd;
err = lx_message_send_atomic(chip, &chip->rmh);
- spin_unlock_irqrestore(&chip->msg_lock, flags);
+ mutex_unlock(&chip->msg_lock);
return err;
}
static int lx_pipe_toggle_state(struct lx6464es *chip, u32 pipe, int is_capture)
{
int err;
- unsigned long flags;
-
u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
- spin_lock_irqsave(&chip->msg_lock, flags);
+ mutex_lock(&chip->msg_lock);
lx_message_init(&chip->rmh, CMD_0B_TOGGLE_PIPE_STATE);
chip->rmh.cmd[0] |= pipe_cmd;
err = lx_message_send_atomic(chip, &chip->rmh);
- spin_unlock_irqrestore(&chip->msg_lock, flags);
+ mutex_unlock(&chip->msg_lock);
return err;
}
@@ -600,11 +586,9 @@ int lx_pipe_sample_count(struct lx6464es *chip, u32 pipe, int is_capture,
u64 *rsample_count)
{
int err;
- unsigned long flags;
-
u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
- spin_lock_irqsave(&chip->msg_lock, flags);
+ mutex_lock(&chip->msg_lock);
lx_message_init(&chip->rmh, CMD_0A_GET_PIPE_SPL_COUNT);
chip->rmh.cmd[0] |= pipe_cmd;
@@ -621,18 +605,16 @@ int lx_pipe_sample_count(struct lx6464es *chip, u32 pipe, int is_capture,
+ chip->rmh.stat[1]; /* lo part */
}
- spin_unlock_irqrestore(&chip->msg_lock, flags);
+ mutex_unlock(&chip->msg_lock);
return err;
}
int lx_pipe_state(struct lx6464es *chip, u32 pipe, int is_capture, u16 *rstate)
{
int err;
- unsigned long flags;
-
u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
- spin_lock_irqsave(&chip->msg_lock, flags);
+ mutex_lock(&chip->msg_lock);
lx_message_init(&chip->rmh, CMD_0A_GET_PIPE_SPL_COUNT);
chip->rmh.cmd[0] |= pipe_cmd;
@@ -644,7 +626,7 @@ int lx_pipe_state(struct lx6464es *chip, u32 pipe, int is_capture, u16 *rstate)
else
*rstate = (chip->rmh.stat[0] >> PSTATE_OFFSET) & 0x0F;
- spin_unlock_irqrestore(&chip->msg_lock, flags);
+ mutex_unlock(&chip->msg_lock);
return err;
}
@@ -686,18 +668,16 @@ int lx_stream_set_state(struct lx6464es *chip, u32 pipe,
int is_capture, enum stream_state_t state)
{
int err;
- unsigned long flags;
-
u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
- spin_lock_irqsave(&chip->msg_lock, flags);
+ mutex_lock(&chip->msg_lock);
lx_message_init(&chip->rmh, CMD_13_SET_STREAM_STATE);
chip->rmh.cmd[0] |= pipe_cmd;
chip->rmh.cmd[0] |= state;
err = lx_message_send_atomic(chip, &chip->rmh);
- spin_unlock_irqrestore(&chip->msg_lock, flags);
+ mutex_unlock(&chip->msg_lock);
return err;
}
@@ -706,17 +686,14 @@ int lx_stream_set_format(struct lx6464es *chip, struct snd_pcm_runtime *runtime,
u32 pipe, int is_capture)
{
int err;
- unsigned long flags;
-
u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
-
u32 channels = runtime->channels;
if (runtime->channels != channels)
dev_err(chip->card->dev, "channel count mismatch: %d vs %d",
runtime->channels, channels);
- spin_lock_irqsave(&chip->msg_lock, flags);
+ mutex_lock(&chip->msg_lock);
lx_message_init(&chip->rmh, CMD_0C_DEF_STREAM);
chip->rmh.cmd[0] |= pipe_cmd;
@@ -732,7 +709,7 @@ int lx_stream_set_format(struct lx6464es *chip, struct snd_pcm_runtime *runtime,
chip->rmh.cmd[0] |= channels-1;
err = lx_message_send_atomic(chip, &chip->rmh);
- spin_unlock_irqrestore(&chip->msg_lock, flags);
+ mutex_unlock(&chip->msg_lock);
return err;
}
@@ -741,11 +718,9 @@ int lx_stream_state(struct lx6464es *chip, u32 pipe, int is_capture,
int *rstate)
{
int err;
- unsigned long flags;
-
u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
- spin_lock_irqsave(&chip->msg_lock, flags);
+ mutex_lock(&chip->msg_lock);
lx_message_init(&chip->rmh, CMD_0E_GET_STREAM_SPL_COUNT);
chip->rmh.cmd[0] |= pipe_cmd;
@@ -754,7 +729,7 @@ int lx_stream_state(struct lx6464es *chip, u32 pipe, int is_capture,
*rstate = (chip->rmh.stat[0] & SF_START) ? START_STATE : PAUSE_STATE;
- spin_unlock_irqrestore(&chip->msg_lock, flags);
+ mutex_unlock(&chip->msg_lock);
return err;
}
@@ -762,11 +737,9 @@ int lx_stream_sample_position(struct lx6464es *chip, u32 pipe, int is_capture,
u64 *r_bytepos)
{
int err;
- unsigned long flags;
-
u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
- spin_lock_irqsave(&chip->msg_lock, flags);
+ mutex_lock(&chip->msg_lock);
lx_message_init(&chip->rmh, CMD_0E_GET_STREAM_SPL_COUNT);
chip->rmh.cmd[0] |= pipe_cmd;
@@ -777,7 +750,7 @@ int lx_stream_sample_position(struct lx6464es *chip, u32 pipe, int is_capture,
<< 32) /* hi part */
+ chip->rmh.stat[1]; /* lo part */
- spin_unlock_irqrestore(&chip->msg_lock, flags);
+ mutex_unlock(&chip->msg_lock);
return err;
}
@@ -787,11 +760,9 @@ int lx_buffer_give(struct lx6464es *chip, u32 pipe, int is_capture,
u32 *r_buffer_index)
{
int err;
- unsigned long flags;
-
u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
- spin_lock_irqsave(&chip->msg_lock, flags);
+ mutex_lock(&chip->msg_lock);
lx_message_init(&chip->rmh, CMD_0F_UPDATE_BUFFER);
chip->rmh.cmd[0] |= pipe_cmd;
@@ -828,7 +799,7 @@ int lx_buffer_give(struct lx6464es *chip, u32 pipe, int is_capture,
"lx_buffer_give EB_CMD_REFUSED\n");
done:
- spin_unlock_irqrestore(&chip->msg_lock, flags);
+ mutex_unlock(&chip->msg_lock);
return err;
}
@@ -836,11 +807,9 @@ int lx_buffer_free(struct lx6464es *chip, u32 pipe, int is_capture,
u32 *r_buffer_size)
{
int err;
- unsigned long flags;
-
u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
- spin_lock_irqsave(&chip->msg_lock, flags);
+ mutex_lock(&chip->msg_lock);
lx_message_init(&chip->rmh, CMD_11_CANCEL_BUFFER);
chip->rmh.cmd[0] |= pipe_cmd;
@@ -852,7 +821,7 @@ int lx_buffer_free(struct lx6464es *chip, u32 pipe, int is_capture,
if (err == 0)
*r_buffer_size = chip->rmh.stat[0] & MASK_DATA_SIZE;
- spin_unlock_irqrestore(&chip->msg_lock, flags);
+ mutex_unlock(&chip->msg_lock);
return err;
}
@@ -860,11 +829,9 @@ int lx_buffer_cancel(struct lx6464es *chip, u32 pipe, int is_capture,
u32 buffer_index)
{
int err;
- unsigned long flags;
-
u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
- spin_lock_irqsave(&chip->msg_lock, flags);
+ mutex_lock(&chip->msg_lock);
lx_message_init(&chip->rmh, CMD_11_CANCEL_BUFFER);
chip->rmh.cmd[0] |= pipe_cmd;
@@ -872,7 +839,7 @@ int lx_buffer_cancel(struct lx6464es *chip, u32 pipe, int is_capture,
err = lx_message_send_atomic(chip, &chip->rmh);
- spin_unlock_irqrestore(&chip->msg_lock, flags);
+ mutex_unlock(&chip->msg_lock);
return err;
}
@@ -885,12 +852,10 @@ int lx_buffer_cancel(struct lx6464es *chip, u32 pipe, int is_capture,
int lx_level_unmute(struct lx6464es *chip, int is_capture, int unmute)
{
int err;
- unsigned long flags;
-
/* bit set to 1: channel muted */
u64 mute_mask = unmute ? 0 : 0xFFFFFFFFFFFFFFFFLLU;
- spin_lock_irqsave(&chip->msg_lock, flags);
+ mutex_lock(&chip->msg_lock);
lx_message_init(&chip->rmh, CMD_0D_SET_MUTE);
chip->rmh.cmd[0] |= PIPE_INFO_TO_CMD(is_capture, 0);
@@ -904,7 +869,7 @@ int lx_level_unmute(struct lx6464es *chip, int is_capture, int unmute)
err = lx_message_send_atomic(chip, &chip->rmh);
- spin_unlock_irqrestore(&chip->msg_lock, flags);
+ mutex_unlock(&chip->msg_lock);
return err;
}
@@ -931,10 +896,9 @@ int lx_level_peaks(struct lx6464es *chip, int is_capture, int channels,
u32 *r_levels)
{
int err = 0;
- unsigned long flags;
int i;
- spin_lock_irqsave(&chip->msg_lock, flags);
+ mutex_lock(&chip->msg_lock);
for (i = 0; i < channels; i += 4) {
u32 s0, s1, s2, s3;
@@ -959,7 +923,7 @@ int lx_level_peaks(struct lx6464es *chip, int is_capture, int channels,
r_levels += 4;
}
- spin_unlock_irqrestore(&chip->msg_lock, flags);
+ mutex_unlock(&chip->msg_lock);
return err;
}
@@ -1075,7 +1039,6 @@ static int lx_interrupt_request_new_buffer(struct lx6464es *chip,
struct snd_pcm_substream *substream = lx_stream->stream;
const unsigned int is_capture = lx_stream->is_capture;
int err;
- unsigned long flags;
const u32 channels = substream->runtime->channels;
const u32 bytes_per_frame = channels * 3;
@@ -1095,7 +1058,7 @@ static int lx_interrupt_request_new_buffer(struct lx6464es *chip,
dev_dbg(chip->card->dev, "->lx_interrupt_request_new_buffer\n");
- spin_lock_irqsave(&chip->lock, flags);
+ mutex_lock(&chip->lock);
err = lx_buffer_ask(chip, 0, is_capture, &needed, &freed, size_array);
dev_dbg(chip->card->dev,
@@ -1109,85 +1072,28 @@ static int lx_interrupt_request_new_buffer(struct lx6464es *chip,
buffer_index, (unsigned long)buf, period_bytes);
lx_stream->frame_pos = next_pos;
- spin_unlock_irqrestore(&chip->lock, flags);
+ mutex_unlock(&chip->lock);
return err;
}
-void lx_tasklet_playback(unsigned long data)
-{
- struct lx6464es *chip = (struct lx6464es *)data;
- struct lx_stream *lx_stream = &chip->playback_stream;
- int err;
-
- dev_dbg(chip->card->dev, "->lx_tasklet_playback\n");
-
- err = lx_interrupt_request_new_buffer(chip, lx_stream);
- if (err < 0)
- dev_err(chip->card->dev,
- "cannot request new buffer for playback\n");
-
- snd_pcm_period_elapsed(lx_stream->stream);
-}
-
-void lx_tasklet_capture(unsigned long data)
-{
- struct lx6464es *chip = (struct lx6464es *)data;
- struct lx_stream *lx_stream = &chip->capture_stream;
- int err;
-
- dev_dbg(chip->card->dev, "->lx_tasklet_capture\n");
- err = lx_interrupt_request_new_buffer(chip, lx_stream);
- if (err < 0)
- dev_err(chip->card->dev,
- "cannot request new buffer for capture\n");
-
- snd_pcm_period_elapsed(lx_stream->stream);
-}
-
-
-
-static int lx_interrupt_handle_audio_transfer(struct lx6464es *chip,
- u64 notified_in_pipe_mask,
- u64 notified_out_pipe_mask)
-{
- int err = 0;
-
- if (notified_in_pipe_mask) {
- dev_dbg(chip->card->dev,
- "requesting audio transfer for capture\n");
- tasklet_hi_schedule(&chip->tasklet_capture);
- }
-
- if (notified_out_pipe_mask) {
- dev_dbg(chip->card->dev,
- "requesting audio transfer for playback\n");
- tasklet_hi_schedule(&chip->tasklet_playback);
- }
-
- return err;
-}
-
-
irqreturn_t lx_interrupt(int irq, void *dev_id)
{
struct lx6464es *chip = dev_id;
int async_pending, async_escmd;
u32 irqsrc;
-
- spin_lock(&chip->lock);
+ bool wake_thread = false;
dev_dbg(chip->card->dev,
"**************************************************\n");
if (!lx_interrupt_ack(chip, &irqsrc, &async_pending, &async_escmd)) {
- spin_unlock(&chip->lock);
dev_dbg(chip->card->dev, "IRQ_NONE\n");
return IRQ_NONE; /* this device did not cause the interrupt */
}
if (irqsrc & MASK_SYS_STATUS_CMD_DONE)
- goto exit;
+ return IRQ_HANDLED;
if (irqsrc & MASK_SYS_STATUS_EOBI)
dev_dbg(chip->card->dev, "interrupt: EOBI\n");
@@ -1202,27 +1108,8 @@ irqreturn_t lx_interrupt(int irq, void *dev_id)
dev_dbg(chip->card->dev, "interrupt: ORUN\n");
if (async_pending) {
- u64 notified_in_pipe_mask = 0;
- u64 notified_out_pipe_mask = 0;
- int freq_changed;
- int err;
-
- /* handle async events */
- err = lx_interrupt_handle_async_events(chip, irqsrc,
- &freq_changed,
- &notified_in_pipe_mask,
- &notified_out_pipe_mask);
- if (err)
- dev_err(chip->card->dev,
- "error handling async events\n");
-
- err = lx_interrupt_handle_audio_transfer(chip,
- notified_in_pipe_mask,
- notified_out_pipe_mask
- );
- if (err)
- dev_err(chip->card->dev,
- "error during audio transfer\n");
+ wake_thread = true;
+ chip->irqsrc = irqsrc;
}
if (async_escmd) {
@@ -1235,9 +1122,50 @@ irqreturn_t lx_interrupt(int irq, void *dev_id)
dev_dbg(chip->card->dev, "interrupt requests escmd handling\n");
}
-exit:
- spin_unlock(&chip->lock);
- return IRQ_HANDLED; /* this device caused the interrupt */
+ return wake_thread ? IRQ_WAKE_THREAD : IRQ_HANDLED;
+}
+
+irqreturn_t lx_threaded_irq(int irq, void *dev_id)
+{
+ struct lx6464es *chip = dev_id;
+ u64 notified_in_pipe_mask = 0;
+ u64 notified_out_pipe_mask = 0;
+ int freq_changed;
+ int err;
+
+ /* handle async events */
+ err = lx_interrupt_handle_async_events(chip, chip->irqsrc,
+ &freq_changed,
+ &notified_in_pipe_mask,
+ &notified_out_pipe_mask);
+ if (err)
+ dev_err(chip->card->dev, "error handling async events\n");
+
+ if (notified_in_pipe_mask) {
+ struct lx_stream *lx_stream = &chip->capture_stream;
+
+ dev_dbg(chip->card->dev,
+ "requesting audio transfer for capture\n");
+ err = lx_interrupt_request_new_buffer(chip, lx_stream);
+ if (err < 0)
+ dev_err(chip->card->dev,
+ "cannot request new buffer for capture\n");
+ snd_pcm_period_elapsed(lx_stream->stream);
+ }
+
+ if (notified_out_pipe_mask) {
+ struct lx_stream *lx_stream = &chip->playback_stream;
+
+ dev_dbg(chip->card->dev,
+ "requesting audio transfer for playback\n");
+ err = lx_interrupt_request_new_buffer(chip, lx_stream);
+ if (err < 0)
+ dev_err(chip->card->dev,
+ "cannot request new buffer for playback\n");
+ snd_pcm_period_elapsed(lx_stream->stream);
+ }
+
+ return IRQ_HANDLED;
}
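For reference, the lx6464es change above (and the miXart, pcxhr, vx and pdaudiocf changes that follow) all apply the same conversion: the primary handler only acknowledges the hardware and stashes the interrupt status, while the work that may sleep moves into a threaded handler registered with request_threaded_irq(). A minimal sketch of that split, assuming a made-up mydev structure and register layout (only request_threaded_irq(), the IRQ_* return codes and the readl()/writel() accessors are real kernel API):

#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/types.h>

struct mydev {
        void __iomem *regs;             /* hypothetical MMIO window */
        u32 irqsrc;                     /* status saved for the thread */
        struct mutex lock;              /* replaces the old spinlock */
};

#define MYDEV_STATUS            0x00    /* hypothetical registers */
#define MYDEV_ACK               0x04
#define MYDEV_IRQ_PENDING       0x01

/* primary handler: hard-irq context, must not sleep */
static irqreturn_t mydev_irq(int irq, void *dev_id)
{
        struct mydev *dev = dev_id;
        u32 status = readl(dev->regs + MYDEV_STATUS);

        if (!(status & MYDEV_IRQ_PENDING))
                return IRQ_NONE;        /* not ours (shared line) */

        writel(status, dev->regs + MYDEV_ACK);  /* ack in the hard handler */
        dev->irqsrc = status;           /* hand the status to the thread */
        return IRQ_WAKE_THREAD;
}

/* threaded handler: process context, may take mutexes and sleep */
static irqreturn_t mydev_irq_thread(int irq, void *dev_id)
{
        struct mydev *dev = dev_id;

        mutex_lock(&dev->lock);
        /* ... the heavy lifting that used to live in a tasklet ... */
        mutex_unlock(&dev->lock);
        return IRQ_HANDLED;
}

/* registration, e.g. from probe() */
static int mydev_setup_irq(struct mydev *dev, int irq)
{
        mutex_init(&dev->lock);
        return request_threaded_irq(irq, mydev_irq, mydev_irq_thread,
                                    IRQF_SHARED, "mydev", dev);
}

Returning IRQ_WAKE_THREAD from the primary handler is what schedules the threaded handler; returning IRQ_HANDLED skips it, which is how the drivers above avoid waking the thread for command-done interrupts that need no further work.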
diff --git a/sound/pci/lx6464es/lx_core.h b/sound/pci/lx6464es/lx_core.h
index 5ec5e04da1a5..0cc140ca98e3 100644
--- a/sound/pci/lx6464es/lx_core.h
+++ b/sound/pci/lx6464es/lx_core.h
@@ -181,12 +181,10 @@ int lx_level_peaks(struct lx6464es *chip, int is_capture, int channels,
/* interrupt handling */
irqreturn_t lx_interrupt(int irq, void *dev_id);
+irqreturn_t lx_threaded_irq(int irq, void *dev_id);
void lx_irq_enable(struct lx6464es *chip);
void lx_irq_disable(struct lx6464es *chip);
-void lx_tasklet_capture(unsigned long data);
-void lx_tasklet_playback(unsigned long data);
-
/* Stream Format Header Defines (for LIN and IEEE754) */
#define HEADER_FMT_BASE HEADER_FMT_BASE_LIN
diff --git a/sound/pci/mixart/mixart.c b/sound/pci/mixart/mixart.c
index 75fc342cff2a..1faf47e81570 100644
--- a/sound/pci/mixart/mixart.c
+++ b/sound/pci/mixart/mixart.c
@@ -986,6 +986,7 @@ static int snd_mixart_pcm_analog(struct snd_mixart *chip)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_mixart_capture_ops);
pcm->info_flags = 0;
+ pcm->nonatomic = true;
strcpy(pcm->name, name);
preallocate_buffers(chip, pcm);
@@ -1018,6 +1019,7 @@ static int snd_mixart_pcm_digital(struct snd_mixart *chip)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_mixart_capture_ops);
pcm->info_flags = 0;
+ pcm->nonatomic = true;
strcpy(pcm->name, name);
preallocate_buffers(chip, pcm);
@@ -1303,8 +1305,9 @@ static int snd_mixart_probe(struct pci_dev *pci,
}
}
- if (request_irq(pci->irq, snd_mixart_interrupt, IRQF_SHARED,
- KBUILD_MODNAME, mgr)) {
+ if (request_threaded_irq(pci->irq, snd_mixart_interrupt,
+ snd_mixart_threaded_irq, IRQF_SHARED,
+ KBUILD_MODNAME, mgr)) {
dev_err(&pci->dev, "unable to grab IRQ %d\n", pci->irq);
snd_mixart_free(mgr);
return -EBUSY;
@@ -1314,24 +1317,18 @@ static int snd_mixart_probe(struct pci_dev *pci,
sprintf(mgr->shortname, "Digigram miXart");
sprintf(mgr->longname, "%s at 0x%lx & 0x%lx, irq %i", mgr->shortname, mgr->mem[0].phys, mgr->mem[1].phys, mgr->irq);
- /* ISR spinlock */
- spin_lock_init(&mgr->lock);
-
/* init mailbox */
mgr->msg_fifo_readptr = 0;
mgr->msg_fifo_writeptr = 0;
- spin_lock_init(&mgr->msg_lock);
- mutex_init(&mgr->msg_mutex);
+ mutex_init(&mgr->lock);
+ mutex_init(&mgr->msg_lock);
init_waitqueue_head(&mgr->msg_sleep);
atomic_set(&mgr->msg_processed, 0);
/* init setup mutex*/
mutex_init(&mgr->setup_mutex);
- /* init message taslket */
- tasklet_init(&mgr->msg_taskq, snd_mixart_msg_tasklet, (unsigned long) mgr);
-
/* card assignment */
mgr->num_cards = MIXART_MAX_CARDS; /* 4 FIXME: configurable? */
for (i = 0; i < mgr->num_cards; i++) {
diff --git a/sound/pci/mixart/mixart.h b/sound/pci/mixart/mixart.h
index 561634d5c007..0cc17e0ea34a 100644
--- a/sound/pci/mixart/mixart.h
+++ b/sound/pci/mixart/mixart.h
@@ -78,22 +78,18 @@ struct mixart_mgr {
char shortname[32]; /* short name of this soundcard */
char longname[80]; /* name of this soundcard */
- /* message tasklet */
- struct tasklet_struct msg_taskq;
-
/* one and only blocking message or notification may be pending */
u32 pending_event;
wait_queue_head_t msg_sleep;
- /* messages stored for tasklet */
+ /* message fifo */
u32 msg_fifo[MSG_FIFO_SIZE];
int msg_fifo_readptr;
int msg_fifo_writeptr;
atomic_t msg_processed; /* number of messages to be processed in the irq thread */
- spinlock_t lock; /* interrupt spinlock */
- spinlock_t msg_lock; /* mailbox spinlock */
- struct mutex msg_mutex; /* mutex for blocking_requests */
+ struct mutex lock; /* interrupt lock */
+ struct mutex msg_lock; /* mailbox lock */
struct mutex setup_mutex; /* mutex used in hw_params, open and close */
diff --git a/sound/pci/mixart/mixart_core.c b/sound/pci/mixart/mixart_core.c
index 84f67450924e..fe80313674d9 100644
--- a/sound/pci/mixart/mixart_core.c
+++ b/sound/pci/mixart/mixart_core.c
@@ -76,7 +76,6 @@ static int retrieve_msg_frame(struct mixart_mgr *mgr, u32 *msg_frame)
static int get_msg(struct mixart_mgr *mgr, struct mixart_msg *resp,
u32 msg_frame_address )
{
- unsigned long flags;
u32 headptr;
u32 size;
int err;
@@ -84,7 +83,7 @@ static int get_msg(struct mixart_mgr *mgr, struct mixart_msg *resp,
unsigned int i;
#endif
- spin_lock_irqsave(&mgr->msg_lock, flags);
+ mutex_lock(&mgr->msg_lock);
err = 0;
/* copy message descriptor from miXart to driver */
@@ -133,7 +132,7 @@ static int get_msg(struct mixart_mgr *mgr, struct mixart_msg *resp,
writel_be(headptr, MIXART_MEM(mgr, MSG_OUTBOUND_FREE_HEAD));
_clean_exit:
- spin_unlock_irqrestore(&mgr->msg_lock, flags);
+ mutex_unlock(&mgr->msg_lock);
return err;
}
@@ -243,28 +242,24 @@ int snd_mixart_send_msg(struct mixart_mgr *mgr, struct mixart_msg *request, int
wait_queue_t wait;
long timeout;
- mutex_lock(&mgr->msg_mutex);
-
init_waitqueue_entry(&wait, current);
- spin_lock_irq(&mgr->msg_lock);
+ mutex_lock(&mgr->msg_lock);
/* send the message */
err = send_msg(mgr, request, max_resp_size, 1, &msg_frame); /* send and mark the answer pending */
if (err) {
- spin_unlock_irq(&mgr->msg_lock);
- mutex_unlock(&mgr->msg_mutex);
+ mutex_unlock(&mgr->msg_lock);
return err;
}
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&mgr->msg_sleep, &wait);
- spin_unlock_irq(&mgr->msg_lock);
+ mutex_unlock(&mgr->msg_lock);
timeout = schedule_timeout(MSG_TIMEOUT_JIFFIES);
remove_wait_queue(&mgr->msg_sleep, &wait);
if (! timeout) {
/* error - no ack */
- mutex_unlock(&mgr->msg_mutex);
dev_err(&mgr->pci->dev,
"error: no response on msg %x\n", msg_frame);
return -EIO;
@@ -281,7 +276,6 @@ int snd_mixart_send_msg(struct mixart_mgr *mgr, struct mixart_msg *request, int
if( request->message_id != resp.message_id )
dev_err(&mgr->pci->dev, "RESPONSE ERROR!\n");
- mutex_unlock(&mgr->msg_mutex);
return err;
}
@@ -300,34 +294,29 @@ int snd_mixart_send_msg_wait_notif(struct mixart_mgr *mgr,
if (snd_BUG_ON(notif_event & MSG_CANCEL_NOTIFY_MASK))
return -EINVAL;
- mutex_lock(&mgr->msg_mutex);
-
init_waitqueue_entry(&wait, current);
- spin_lock_irq(&mgr->msg_lock);
+ mutex_lock(&mgr->msg_lock);
/* send the message */
err = send_msg(mgr, request, MSG_DEFAULT_SIZE, 1, &notif_event); /* send and mark the notification event pending */
if(err) {
- spin_unlock_irq(&mgr->msg_lock);
- mutex_unlock(&mgr->msg_mutex);
+ mutex_unlock(&mgr->msg_lock);
return err;
}
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&mgr->msg_sleep, &wait);
- spin_unlock_irq(&mgr->msg_lock);
+ mutex_unlock(&mgr->msg_lock);
timeout = schedule_timeout(MSG_TIMEOUT_JIFFIES);
remove_wait_queue(&mgr->msg_sleep, &wait);
if (! timeout) {
/* error - no ack */
- mutex_unlock(&mgr->msg_mutex);
dev_err(&mgr->pci->dev,
"error: notification %x not received\n", notif_event);
return -EIO;
}
- mutex_unlock(&mgr->msg_mutex);
return 0;
}
@@ -335,13 +324,12 @@ int snd_mixart_send_msg_wait_notif(struct mixart_mgr *mgr,
int snd_mixart_send_msg_nonblock(struct mixart_mgr *mgr, struct mixart_msg *request)
{
u32 message_frame;
- unsigned long flags;
int err;
/* just send the message (do not mark it as a pending one) */
- spin_lock_irqsave(&mgr->msg_lock, flags);
+ mutex_lock(&mgr->msg_lock);
err = send_msg(mgr, request, MSG_DEFAULT_SIZE, 0, &message_frame);
- spin_unlock_irqrestore(&mgr->msg_lock, flags);
+ mutex_unlock(&mgr->msg_lock);
/* the answer will be handled by snd_mixart_process_msg() */
atomic_inc(&mgr->msg_processed);
@@ -350,19 +338,16 @@ int snd_mixart_send_msg_nonblock(struct mixart_mgr *mgr, struct mixart_msg *requ
}
-/* common buffer of tasklet and interrupt to send/receive messages */
+/* common buffer used by the irq thread to send/receive messages */
static u32 mixart_msg_data[MSG_DEFAULT_SIZE / 4];
-void snd_mixart_msg_tasklet(unsigned long arg)
+static void snd_mixart_process_msg(struct mixart_mgr *mgr)
{
- struct mixart_mgr *mgr = ( struct mixart_mgr*)(arg);
struct mixart_msg resp;
u32 msg, addr, type;
int err;
- spin_lock(&mgr->lock);
-
while (mgr->msg_fifo_readptr != mgr->msg_fifo_writeptr) {
msg = mgr->msg_fifo[mgr->msg_fifo_readptr];
mgr->msg_fifo_readptr++;
@@ -381,7 +366,7 @@ void snd_mixart_msg_tasklet(unsigned long arg)
err = get_msg(mgr, &resp, addr);
if( err < 0 ) {
dev_err(&mgr->pci->dev,
- "tasklet: error(%d) reading mf %x\n",
+ "error(%d) reading mf %x\n",
err, msg);
break;
}
@@ -393,12 +378,12 @@ void snd_mixart_msg_tasklet(unsigned long arg)
case MSG_STREAM_STOP_OUTPUT_STAGE_PACKET:
if(mixart_msg_data[0])
dev_err(&mgr->pci->dev,
- "tasklet : error MSG_STREAM_ST***_***PUT_STAGE_PACKET status=%x\n",
+ "error MSG_STREAM_ST***_***PUT_STAGE_PACKET status=%x\n",
mixart_msg_data[0]);
break;
default:
dev_dbg(&mgr->pci->dev,
- "tasklet received mf(%x) : msg_id(%x) uid(%x, %x) size(%zd)\n",
+ "received mf(%x) : msg_id(%x) uid(%x, %x) size(%zd)\n",
msg, resp.message_id, resp.uid.object_id, resp.uid.desc, resp.size);
break;
}
@@ -409,7 +394,7 @@ void snd_mixart_msg_tasklet(unsigned long arg)
/* get_msg() necessary */
default:
dev_err(&mgr->pci->dev,
- "tasklet doesn't know what to do with message %x\n",
+ "doesn't know what to do with message %x\n",
msg);
} /* switch type */
@@ -417,26 +402,17 @@ void snd_mixart_msg_tasklet(unsigned long arg)
atomic_dec(&mgr->msg_processed);
} /* while there is a msg in fifo */
-
- spin_unlock(&mgr->lock);
}
irqreturn_t snd_mixart_interrupt(int irq, void *dev_id)
{
struct mixart_mgr *mgr = dev_id;
- int err;
- struct mixart_msg resp;
-
- u32 msg;
u32 it_reg;
- spin_lock(&mgr->lock);
-
it_reg = readl_le(MIXART_REG(mgr, MIXART_PCI_OMISR_OFFSET));
if( !(it_reg & MIXART_OIDI) ) {
/* this device did not cause the interrupt */
- spin_unlock(&mgr->lock);
return IRQ_NONE;
}
@@ -450,6 +426,17 @@ irqreturn_t snd_mixart_interrupt(int irq, void *dev_id)
/* clear interrupt */
writel_le( MIXART_OIDI, MIXART_REG(mgr, MIXART_PCI_OMISR_OFFSET) );
+ return IRQ_WAKE_THREAD;
+}
+
+irqreturn_t snd_mixart_threaded_irq(int irq, void *dev_id)
+{
+ struct mixart_mgr *mgr = dev_id;
+ int err;
+ struct mixart_msg resp;
+ u32 msg;
+
+ mutex_lock(&mgr->lock);
/* process interrupt */
while (retrieve_msg_frame(mgr, &msg)) {
@@ -518,9 +505,9 @@ irqreturn_t snd_mixart_interrupt(int irq, void *dev_id)
stream->buf_period_frag = (u32)( sample_count - stream->abs_period_elapsed );
if(elapsed) {
- spin_unlock(&mgr->lock);
+ mutex_unlock(&mgr->lock);
snd_pcm_period_elapsed(stream->substream);
- spin_lock(&mgr->lock);
+ mutex_lock(&mgr->lock);
}
}
}
@@ -556,7 +543,7 @@ irqreturn_t snd_mixart_interrupt(int irq, void *dev_id)
/* no break, continue ! */
case MSG_TYPE_ANSWER:
/* answer or notification to a message we are waiting for*/
- spin_lock(&mgr->msg_lock);
+ mutex_lock(&mgr->msg_lock);
if( (msg & ~MSG_TYPE_MASK) == mgr->pending_event ) {
wake_up(&mgr->msg_sleep);
mgr->pending_event = 0;
@@ -566,9 +553,9 @@ irqreturn_t snd_mixart_interrupt(int irq, void *dev_id)
mgr->msg_fifo[mgr->msg_fifo_writeptr] = msg;
mgr->msg_fifo_writeptr++;
mgr->msg_fifo_writeptr %= MSG_FIFO_SIZE;
- tasklet_schedule(&mgr->msg_taskq);
+ snd_mixart_process_msg(mgr);
}
- spin_unlock(&mgr->msg_lock);
+ mutex_unlock(&mgr->msg_lock);
break;
case MSG_TYPE_REQUEST:
default:
@@ -582,7 +569,7 @@ irqreturn_t snd_mixart_interrupt(int irq, void *dev_id)
/* allow interrupt again */
writel_le( MIXART_ALLOW_OUTBOUND_DOORBELL, MIXART_REG( mgr, MIXART_PCI_OMIMR_OFFSET));
- spin_unlock(&mgr->lock);
+ mutex_unlock(&mgr->lock);
return IRQ_HANDLED;
}
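The miXart blocking send above keeps its wait-queue handshake, but now serializes the mailbox with a mutex instead of spin_lock_irq(), which works because the answer is delivered from the threaded handler rather than from hard-irq context. A hedged sketch of the pattern, with a made-up mymgr structure and send_request() helper standing in for the driver specifics (wait_queue_t and init_waitqueue_entry() match the wait API of this kernel generation):

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/wait.h>

struct mymgr {
        struct mutex msg_lock;          /* serializes the mailbox */
        wait_queue_head_t msg_sleep;    /* init_waitqueue_head() at probe time */
        u32 pending_event;              /* event id the sender waits for */
};

/* hypothetical helper, provided elsewhere: post a request, return its event id */
int send_request(struct mymgr *mgr, u32 *event);

#define MY_MSG_TIMEOUT  (HZ / 2)        /* assumed 500 ms timeout */

static int my_send_msg_and_wait(struct mymgr *mgr)
{
        wait_queue_t wait;
        long timeout;
        u32 event;
        int err;

        init_waitqueue_entry(&wait, current);

        mutex_lock(&mgr->msg_lock);
        err = send_request(mgr, &event);        /* marks the answer pending */
        if (err) {
                mutex_unlock(&mgr->msg_lock);
                return err;
        }
        mgr->pending_event = event;

        /* queue ourselves before dropping the lock so the wake-up can't be lost */
        set_current_state(TASK_UNINTERRUPTIBLE);
        add_wait_queue(&mgr->msg_sleep, &wait);
        mutex_unlock(&mgr->msg_lock);

        timeout = schedule_timeout(MY_MSG_TIMEOUT);
        remove_wait_queue(&mgr->msg_sleep, &wait);

        if (!timeout)
                return -EIO;    /* no answer: the irq thread never woke us */
        return 0;
}

Queueing the task on msg_sleep before dropping msg_lock is what prevents a lost wake-up if the answer arrives between the unlock and schedule_timeout().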
diff --git a/sound/pci/mixart/mixart_core.h b/sound/pci/mixart/mixart_core.h
index c919b734756f..d1722e575409 100644
--- a/sound/pci/mixart/mixart_core.h
+++ b/sound/pci/mixart/mixart_core.h
@@ -564,7 +564,7 @@ int snd_mixart_send_msg_wait_notif(struct mixart_mgr *mgr, struct mixart_msg *r
int snd_mixart_send_msg_nonblock(struct mixart_mgr *mgr, struct mixart_msg *request);
irqreturn_t snd_mixart_interrupt(int irq, void *dev_id);
-void snd_mixart_msg_tasklet(unsigned long arg);
+irqreturn_t snd_mixart_threaded_irq(int irq, void *dev_id);
void snd_mixart_reset_board(struct mixart_mgr *mgr);
diff --git a/sound/pci/oxygen/oxygen_pcm.c b/sound/pci/oxygen/oxygen_pcm.c
index cc0bcd9f3350..02828240ba15 100644
--- a/sound/pci/oxygen/oxygen_pcm.c
+++ b/sound/pci/oxygen/oxygen_pcm.c
@@ -29,6 +29,9 @@
/* the multichannel DMA channel has a 24-bit counter */
#define BUFFER_BYTES_MAX_MULTICH ((1 << 24) * 4)
+#define FIFO_BYTES 256
+#define FIFO_BYTES_MULTICH 1024
+
#define PERIOD_BYTES_MIN 64
#define DEFAULT_BUFFER_BYTES (BUFFER_BYTES_MAX / 2)
@@ -60,6 +63,7 @@ static const struct snd_pcm_hardware oxygen_stereo_hardware = {
.period_bytes_max = BUFFER_BYTES_MAX,
.periods_min = 1,
.periods_max = BUFFER_BYTES_MAX / PERIOD_BYTES_MIN,
+ .fifo_size = FIFO_BYTES,
};
static const struct snd_pcm_hardware oxygen_multichannel_hardware = {
.info = SNDRV_PCM_INFO_MMAP |
@@ -87,6 +91,7 @@ static const struct snd_pcm_hardware oxygen_multichannel_hardware = {
.period_bytes_max = BUFFER_BYTES_MAX_MULTICH,
.periods_min = 1,
.periods_max = BUFFER_BYTES_MAX_MULTICH / PERIOD_BYTES_MIN,
+ .fifo_size = FIFO_BYTES_MULTICH,
};
static const struct snd_pcm_hardware oxygen_ac97_hardware = {
.info = SNDRV_PCM_INFO_MMAP |
@@ -106,6 +111,7 @@ static const struct snd_pcm_hardware oxygen_ac97_hardware = {
.period_bytes_max = BUFFER_BYTES_MAX,
.periods_min = 1,
.periods_max = BUFFER_BYTES_MAX / PERIOD_BYTES_MIN,
+ .fifo_size = FIFO_BYTES,
};
static const struct snd_pcm_hardware *const oxygen_hardware[PCM_COUNT] = {
@@ -141,6 +147,10 @@ static int oxygen_open(struct snd_pcm_substream *substream,
runtime->hw.rates &= ~(SNDRV_PCM_RATE_32000 |
SNDRV_PCM_RATE_64000);
runtime->hw.rate_min = 44100;
+ /* fall through */
+ case PCM_A:
+ case PCM_B:
+ runtime->hw.fifo_size = 0;
break;
case PCM_MULTICH:
runtime->hw.channels_max = chip->model.dac_channels_pcm;
diff --git a/sound/pci/oxygen/virtuoso.c b/sound/pci/oxygen/virtuoso.c
index 7b317a28a19c..83de6fb01021 100644
--- a/sound/pci/oxygen/virtuoso.c
+++ b/sound/pci/oxygen/virtuoso.c
@@ -52,6 +52,7 @@ static const struct pci_device_id xonar_ids[] = {
{ OXYGEN_PCI_SUBID(0x1043, 0x835d) },
{ OXYGEN_PCI_SUBID(0x1043, 0x835e) },
{ OXYGEN_PCI_SUBID(0x1043, 0x838e) },
+ { OXYGEN_PCI_SUBID(0x1043, 0x8428) },
{ OXYGEN_PCI_SUBID(0x1043, 0x8522) },
{ OXYGEN_PCI_SUBID(0x1043, 0x85f4) },
{ OXYGEN_PCI_SUBID_BROKEN_EEPROM },
diff --git a/sound/pci/oxygen/xonar_pcm179x.c b/sound/pci/oxygen/xonar_pcm179x.c
index e02605931669..24109d37ca09 100644
--- a/sound/pci/oxygen/xonar_pcm179x.c
+++ b/sound/pci/oxygen/xonar_pcm179x.c
@@ -212,6 +212,9 @@
#define GPIO_ST_MAGIC 0x0040
#define GPIO_ST_HP 0x0080
+#define GPIO_XENSE_OUTPUT_ENABLE (0x0001 | 0x0010 | 0x0020)
+#define GPIO_XENSE_SPEAKERS 0x0080
+
#define I2C_DEVICE_PCM1796(i) (0x98 + ((i) << 1)) /* 10011, ii, /W=0 */
#define I2C_DEVICE_CS2000 0x9c /* 100111, 0, /W=0 */
@@ -419,6 +422,7 @@ static void xonar_st_init_common(struct oxygen *chip)
data->generic.output_enable_bit = GPIO_ST_OUTPUT_ENABLE;
data->dacs = chip->model.dac_channels_mixer / 2;
+ data->h6 = chip->model.dac_channels_mixer > 2;
data->hp_gain_offset = 2*-18;
pcm1796_init(chip);
@@ -499,6 +503,51 @@ static void xonar_stx_init(struct oxygen *chip)
xonar_st_init_common(chip);
}
+static void xonar_xense_init(struct oxygen *chip)
+{
+ struct xonar_pcm179x *data = chip->model_data;
+
+ data->generic.ext_power_reg = OXYGEN_GPI_DATA;
+ data->generic.ext_power_int_reg = OXYGEN_GPI_INTERRUPT_MASK;
+ data->generic.ext_power_bit = GPI_EXT_POWER;
+ xonar_init_ext_power(chip);
+
+ data->generic.anti_pop_delay = 100;
+ data->has_cs2000 = 1;
+ data->cs2000_regs[CS2000_FUN_CFG_1] = CS2000_REF_CLK_DIV_1;
+
+ oxygen_write16(chip, OXYGEN_I2S_A_FORMAT,
+ OXYGEN_RATE_48000 |
+ OXYGEN_I2S_FORMAT_I2S |
+ OXYGEN_I2S_MCLK(MCLK_512) |
+ OXYGEN_I2S_BITS_16 |
+ OXYGEN_I2S_MASTER |
+ OXYGEN_I2S_BCLK_64);
+
+ xonar_st_init_i2c(chip);
+ cs2000_registers_init(chip);
+
+ data->generic.output_enable_bit = GPIO_XENSE_OUTPUT_ENABLE;
+ data->dacs = 1;
+ data->hp_gain_offset = 2*-18;
+
+ pcm1796_init(chip);
+
+ oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL,
+ GPIO_INPUT_ROUTE | GPIO_ST_HP_REAR |
+ GPIO_ST_MAGIC | GPIO_XENSE_SPEAKERS);
+ oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA,
+ GPIO_INPUT_ROUTE | GPIO_ST_HP_REAR |
+ GPIO_XENSE_SPEAKERS);
+
+ xonar_init_cs53x1(chip);
+ xonar_enable_output(chip);
+
+ snd_component_add(chip->card, "PCM1796");
+ snd_component_add(chip->card, "CS5381");
+ snd_component_add(chip->card, "CS2000");
+}
+
static void xonar_d2_cleanup(struct oxygen *chip)
{
xonar_disable_output(chip);
@@ -795,11 +844,11 @@ static int st_output_switch_put(struct snd_kcontrol *ctl,
static int st_hp_volume_offset_info(struct snd_kcontrol *ctl,
struct snd_ctl_elem_info *info)
{
- static const char *const names[3] = {
- "< 64 ohms", "64-300 ohms", "300-600 ohms"
+ static const char *const names[4] = {
+ "< 32 ohms", "32-64 ohms", "64-300 ohms", "300-600 ohms"
};
- return snd_ctl_enum_info(info, 1, 3, names);
+ return snd_ctl_enum_info(info, 1, 4, names);
}
static int st_hp_volume_offset_get(struct snd_kcontrol *ctl,
@@ -809,12 +858,14 @@ static int st_hp_volume_offset_get(struct snd_kcontrol *ctl,
struct xonar_pcm179x *data = chip->model_data;
mutex_lock(&chip->mutex);
- if (data->hp_gain_offset < 2*-6)
+ if (data->hp_gain_offset < 2*-12)
value->value.enumerated.item[0] = 0;
- else if (data->hp_gain_offset < 0)
+ else if (data->hp_gain_offset < 2*-6)
value->value.enumerated.item[0] = 1;
- else
+ else if (data->hp_gain_offset < 0)
value->value.enumerated.item[0] = 2;
+ else
+ value->value.enumerated.item[0] = 3;
mutex_unlock(&chip->mutex);
return 0;
}
@@ -823,13 +874,13 @@ static int st_hp_volume_offset_get(struct snd_kcontrol *ctl,
static int st_hp_volume_offset_put(struct snd_kcontrol *ctl,
struct snd_ctl_elem_value *value)
{
- static const s8 offsets[] = { 2*-18, 2*-6, 0 };
+ static const s8 offsets[] = { 2*-18, 2*-12, 2*-6, 0 };
struct oxygen *chip = ctl->private_data;
struct xonar_pcm179x *data = chip->model_data;
s8 offset;
int changed;
- if (value->value.enumerated.item[0] > 2)
+ if (value->value.enumerated.item[0] > 3)
return -EINVAL;
offset = offsets[value->value.enumerated.item[0]];
mutex_lock(&chip->mutex);
@@ -859,6 +910,67 @@ static const struct snd_kcontrol_new st_controls[] = {
},
};
+static int xense_output_switch_get(struct snd_kcontrol *ctl,
+ struct snd_ctl_elem_value *value)
+{
+ struct oxygen *chip = ctl->private_data;
+ u16 gpio;
+
+ gpio = oxygen_read16(chip, OXYGEN_GPIO_DATA);
+ if (gpio & GPIO_XENSE_SPEAKERS)
+ value->value.enumerated.item[0] = 0;
+ else if (!(gpio & GPIO_XENSE_SPEAKERS) && (gpio & GPIO_ST_HP_REAR))
+ value->value.enumerated.item[0] = 1;
+ else
+ value->value.enumerated.item[0] = 2;
+ return 0;
+}
+
+static int xense_output_switch_put(struct snd_kcontrol *ctl,
+ struct snd_ctl_elem_value *value)
+{
+ struct oxygen *chip = ctl->private_data;
+ struct xonar_pcm179x *data = chip->model_data;
+ u16 gpio_old, gpio;
+
+ mutex_lock(&chip->mutex);
+ gpio_old = oxygen_read16(chip, OXYGEN_GPIO_DATA);
+ gpio = gpio_old;
+ switch (value->value.enumerated.item[0]) {
+ case 0:
+ gpio |= GPIO_XENSE_SPEAKERS | GPIO_ST_HP_REAR;
+ break;
+ case 1:
+ gpio = (gpio | GPIO_ST_HP_REAR) & ~GPIO_XENSE_SPEAKERS;
+ break;
+ case 2:
+ gpio &= ~(GPIO_XENSE_SPEAKERS | GPIO_ST_HP_REAR);
+ break;
+ }
+ oxygen_write16(chip, OXYGEN_GPIO_DATA, gpio);
+ data->hp_active = !(gpio & GPIO_XENSE_SPEAKERS);
+ update_pcm1796_volume(chip);
+ mutex_unlock(&chip->mutex);
+ return gpio != gpio_old;
+}
+
+static const struct snd_kcontrol_new xense_controls[] = {
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Analog Output",
+ .info = st_output_switch_info,
+ .get = xense_output_switch_get,
+ .put = xense_output_switch_put,
+ },
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Headphones Impedance Playback Enum",
+ .info = st_hp_volume_offset_info,
+ .get = st_hp_volume_offset_get,
+ .put = st_hp_volume_offset_put,
+ },
+};
+
static void xonar_line_mic_ac97_switch(struct oxygen *chip,
unsigned int reg, unsigned int mute)
{
@@ -946,6 +1058,23 @@ static int xonar_st_mixer_init(struct oxygen *chip)
return 0;
}
+static int xonar_xense_mixer_init(struct oxygen *chip)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < ARRAY_SIZE(xense_controls); ++i) {
+ err = snd_ctl_add(chip->card,
+ snd_ctl_new1(&xense_controls[i], chip));
+ if (err < 0)
+ return err;
+ }
+ err = add_pcm1796_controls(chip);
+ if (err < 0)
+ return err;
+ return 0;
+}
+
static void dump_pcm1796_registers(struct oxygen *chip,
struct snd_info_buffer *buffer)
{
@@ -1140,12 +1269,29 @@ int get_xonar_pcm179x_model(struct oxygen *chip,
break;
case 0x85f4:
chip->model = model_xonar_st;
- /* TODO: daughterboard support */
- chip->model.shortname = "Xonar STX II";
+ oxygen_clear_bits16(chip, OXYGEN_GPIO_CONTROL, GPIO_DB_MASK);
+ switch (oxygen_read16(chip, OXYGEN_GPIO_DATA) & GPIO_DB_MASK) {
+ default:
+ chip->model.shortname = "Xonar STX II";
+ break;
+ case GPIO_DB_H6:
+ chip->model.shortname = "Xonar STX II+H6";
+ chip->model.dac_channels_pcm = 8;
+ chip->model.dac_channels_mixer = 8;
+ chip->model.dac_mclks = OXYGEN_MCLKS(256, 128, 128);
+ break;
+ }
chip->model.init = xonar_stx_init;
chip->model.resume = xonar_stx_resume;
chip->model.set_dac_params = set_pcm1796_params;
break;
+ case 0x8428:
+ chip->model = model_xonar_st;
+ chip->model.shortname = "Xonar Xense";
+ chip->model.chip = "AV100";
+ chip->model.init = xonar_xense_init;
+ chip->model.mixer_init = xonar_xense_mixer_init;
+ break;
default:
return -EINVAL;
}
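The Xense controls added above follow the usual enumerated-kcontrol shape: an info callback built on snd_ctl_enum_info(), a get/put pair reading and writing driver state, and registration through snd_ctl_new1()/snd_ctl_add(). A minimal sketch under assumed names (my_chip, the control name and the item list are invented; the snd_* calls are the real ALSA control API):

#include <linux/errno.h>
#include <sound/core.h>
#include <sound/control.h>

struct my_chip {
        struct snd_card *card;
        unsigned int output_mode;       /* 0 = speakers, 1 = headphones */
};

static int my_output_info(struct snd_kcontrol *ctl,
                          struct snd_ctl_elem_info *info)
{
        static const char *const names[2] = { "Speakers", "Headphones" };

        return snd_ctl_enum_info(info, 1, 2, names);
}

static int my_output_get(struct snd_kcontrol *ctl,
                         struct snd_ctl_elem_value *value)
{
        struct my_chip *chip = ctl->private_data;

        value->value.enumerated.item[0] = chip->output_mode;
        return 0;
}

static int my_output_put(struct snd_kcontrol *ctl,
                         struct snd_ctl_elem_value *value)
{
        struct my_chip *chip = ctl->private_data;
        unsigned int mode = value->value.enumerated.item[0];

        if (mode > 1)
                return -EINVAL;
        if (mode == chip->output_mode)
                return 0;               /* no change */
        chip->output_mode = mode;
        /* ... flip the routing GPIO here ... */
        return 1;                       /* value changed */
}

static const struct snd_kcontrol_new my_output_control = {
        .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
        .name = "Analog Output",
        .info = my_output_info,
        .get = my_output_get,
        .put = my_output_put,
};

static int my_mixer_init(struct my_chip *chip)
{
        return snd_ctl_add(chip->card, snd_ctl_new1(&my_output_control, chip));
}

As in the xense put callback above, put returns 1 when the value actually changed and 0 when it did not, so the control core knows whether to send a notification.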
diff --git a/sound/pci/pcxhr/pcxhr.c b/sound/pci/pcxhr/pcxhr.c
index 68a37a7906c1..b854fc5e01f5 100644
--- a/sound/pci/pcxhr/pcxhr.c
+++ b/sound/pci/pcxhr/pcxhr.c
@@ -702,13 +702,11 @@ static inline int pcxhr_stream_scheduled_get_pipe(struct pcxhr_stream *stream,
return 0;
}
-static void pcxhr_trigger_tasklet(unsigned long arg)
+static void pcxhr_start_linked_stream(struct pcxhr_mgr *mgr)
{
- unsigned long flags;
int i, j, err;
struct pcxhr_pipe *pipe;
struct snd_pcxhr *chip;
- struct pcxhr_mgr *mgr = (struct pcxhr_mgr*)(arg);
int capture_mask = 0;
int playback_mask = 0;
@@ -736,11 +734,11 @@ static void pcxhr_trigger_tasklet(unsigned long arg)
}
if (capture_mask == 0 && playback_mask == 0) {
mutex_unlock(&mgr->setup_mutex);
- dev_err(&mgr->pci->dev, "pcxhr_trigger_tasklet : no pipes\n");
+ dev_err(&mgr->pci->dev, "pcxhr_start_linked_stream : no pipes\n");
return;
}
- dev_dbg(&mgr->pci->dev, "pcxhr_trigger_tasklet : "
+ dev_dbg(&mgr->pci->dev, "pcxhr_start_linked_stream : "
"playback_mask=%x capture_mask=%x\n",
playback_mask, capture_mask);
@@ -748,7 +746,7 @@ static void pcxhr_trigger_tasklet(unsigned long arg)
err = pcxhr_set_pipe_state(mgr, playback_mask, capture_mask, 0);
if (err) {
mutex_unlock(&mgr->setup_mutex);
- dev_err(&mgr->pci->dev, "pcxhr_trigger_tasklet : "
+ dev_err(&mgr->pci->dev, "pcxhr_start_linked_stream : "
"error stop pipes (P%x C%x)\n",
playback_mask, capture_mask);
return;
@@ -793,7 +791,7 @@ static void pcxhr_trigger_tasklet(unsigned long arg)
err = pcxhr_set_pipe_state(mgr, playback_mask, capture_mask, 1);
if (err) {
mutex_unlock(&mgr->setup_mutex);
- dev_err(&mgr->pci->dev, "pcxhr_trigger_tasklet : "
+ dev_err(&mgr->pci->dev, "pcxhr_start_linked_stream : "
"error start pipes (P%x C%x)\n",
playback_mask, capture_mask);
return;
@@ -802,7 +800,7 @@ static void pcxhr_trigger_tasklet(unsigned long arg)
/* put the streams into the running state now
* (increment pointer by interrupt)
*/
- spin_lock_irqsave(&mgr->lock, flags);
+ mutex_lock(&mgr->lock);
for ( i =0; i < mgr->num_cards; i++) {
struct pcxhr_stream *stream;
chip = mgr->chip[i];
@@ -820,13 +818,13 @@ static void pcxhr_trigger_tasklet(unsigned long arg)
}
}
}
- spin_unlock_irqrestore(&mgr->lock, flags);
+ mutex_unlock(&mgr->lock);
mutex_unlock(&mgr->setup_mutex);
#ifdef CONFIG_SND_DEBUG_VERBOSE
do_gettimeofday(&my_tv2);
- dev_dbg(&mgr->pci->dev, "***TRIGGER TASKLET*** TIME = %ld (err = %x)\n",
+ dev_dbg(&mgr->pci->dev, "***TRIGGER START*** TIME = %ld (err = %x)\n",
(long)(my_tv2.tv_usec - my_tv1.tv_usec), err);
#endif
}
@@ -853,7 +851,7 @@ static int pcxhr_trigger(struct snd_pcm_substream *subs, int cmd)
PCXHR_STREAM_STATUS_SCHEDULE_RUN;
snd_pcm_trigger_done(s, subs);
}
- tasklet_schedule(&chip->mgr->trigger_taskq);
+ pcxhr_start_linked_stream(chip->mgr);
} else {
stream = subs->runtime->private_data;
snd_printdd("Only one Substream %c %d\n",
@@ -1127,20 +1125,19 @@ static int pcxhr_close(struct snd_pcm_substream *subs)
static snd_pcm_uframes_t pcxhr_stream_pointer(struct snd_pcm_substream *subs)
{
- unsigned long flags;
u_int32_t timer_period_frag;
int timer_buf_periods;
struct snd_pcxhr *chip = snd_pcm_substream_chip(subs);
struct snd_pcm_runtime *runtime = subs->runtime;
struct pcxhr_stream *stream = runtime->private_data;
- spin_lock_irqsave(&chip->mgr->lock, flags);
+ mutex_lock(&chip->mgr->lock);
/* get the period fragment and the nb of periods in the buffer */
timer_period_frag = stream->timer_period_frag;
timer_buf_periods = stream->timer_buf_periods;
- spin_unlock_irqrestore(&chip->mgr->lock, flags);
+ mutex_unlock(&chip->mgr->lock);
return (snd_pcm_uframes_t)((timer_buf_periods * runtime->period_size) +
timer_period_frag);
@@ -1181,6 +1178,7 @@ int pcxhr_create_pcm(struct snd_pcxhr *chip)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &pcxhr_ops);
pcm->info_flags = 0;
+ pcm->nonatomic = true;
strcpy(pcm->name, name);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
@@ -1588,8 +1586,9 @@ static int pcxhr_probe(struct pci_dev *pci,
mgr->pci = pci;
mgr->irq = -1;
- if (request_irq(pci->irq, pcxhr_interrupt, IRQF_SHARED,
- KBUILD_MODNAME, mgr)) {
+ if (request_threaded_irq(pci->irq, pcxhr_interrupt,
+ pcxhr_threaded_irq, IRQF_SHARED,
+ KBUILD_MODNAME, mgr)) {
dev_err(&pci->dev, "unable to grab IRQ %d\n", pci->irq);
pcxhr_free(mgr);
return -EBUSY;
@@ -1601,19 +1600,13 @@ static int pcxhr_probe(struct pci_dev *pci,
mgr->shortname,
mgr->port[0], mgr->port[1], mgr->port[2], mgr->irq);
- /* ISR spinlock */
- spin_lock_init(&mgr->lock);
- spin_lock_init(&mgr->msg_lock);
+ /* ISR lock */
+ mutex_init(&mgr->lock);
+ mutex_init(&mgr->msg_lock);
/* init setup mutex*/
mutex_init(&mgr->setup_mutex);
- /* init taslket */
- tasklet_init(&mgr->msg_taskq, pcxhr_msg_tasklet,
- (unsigned long) mgr);
- tasklet_init(&mgr->trigger_taskq, pcxhr_trigger_tasklet,
- (unsigned long) mgr);
-
mgr->prmh = kmalloc(sizeof(*mgr->prmh) +
sizeof(u32) * (PCXHR_SIZE_MAX_LONG_STATUS -
PCXHR_SIZE_MAX_STATUS),
diff --git a/sound/pci/pcxhr/pcxhr.h b/sound/pci/pcxhr/pcxhr.h
index a4c602c45173..9e39e509a3ef 100644
--- a/sound/pci/pcxhr/pcxhr.h
+++ b/sound/pci/pcxhr/pcxhr.h
@@ -78,14 +78,10 @@ struct pcxhr_mgr {
char shortname[32]; /* short name of this soundcard */
char longname[96]; /* name of this soundcard */
- /* message tasklet */
- struct tasklet_struct msg_taskq;
struct pcxhr_rmh *prmh;
- /* trigger tasklet */
- struct tasklet_struct trigger_taskq;
- spinlock_t lock; /* interrupt spinlock */
- spinlock_t msg_lock; /* message spinlock */
+ struct mutex lock; /* interrupt lock */
+ struct mutex msg_lock; /* message lock */
struct mutex setup_mutex; /* mutex used in hw_params, open and close */
struct mutex mixer_mutex; /* mutex for mixer */
diff --git a/sound/pci/pcxhr/pcxhr_core.c b/sound/pci/pcxhr/pcxhr_core.c
index df9371918601..a584acb61c00 100644
--- a/sound/pci/pcxhr/pcxhr_core.c
+++ b/sound/pci/pcxhr/pcxhr_core.c
@@ -767,11 +767,11 @@ void pcxhr_set_pipe_cmd_params(struct pcxhr_rmh *rmh, int capture,
*/
int pcxhr_send_msg(struct pcxhr_mgr *mgr, struct pcxhr_rmh *rmh)
{
- unsigned long flags;
int err;
- spin_lock_irqsave(&mgr->msg_lock, flags);
+
+ mutex_lock(&mgr->msg_lock);
err = pcxhr_send_msg_nolock(mgr, rmh);
- spin_unlock_irqrestore(&mgr->msg_lock, flags);
+ mutex_unlock(&mgr->msg_lock);
return err;
}
@@ -971,17 +971,16 @@ int pcxhr_write_io_num_reg_cont(struct pcxhr_mgr *mgr, unsigned int mask,
unsigned int value, int *changed)
{
struct pcxhr_rmh rmh;
- unsigned long flags;
int err;
- spin_lock_irqsave(&mgr->msg_lock, flags);
+ mutex_lock(&mgr->msg_lock);
if ((mgr->io_num_reg_cont & mask) == value) {
dev_dbg(&mgr->pci->dev,
"IO_NUM_REG_CONT mask %x already is set to %x\n",
mask, value);
if (changed)
*changed = 0;
- spin_unlock_irqrestore(&mgr->msg_lock, flags);
+ mutex_unlock(&mgr->msg_lock);
return 0; /* already programmed */
}
pcxhr_init_rmh(&rmh, CMD_ACCESS_IO_WRITE);
@@ -996,7 +995,7 @@ int pcxhr_write_io_num_reg_cont(struct pcxhr_mgr *mgr, unsigned int mask,
if (changed)
*changed = 1;
}
- spin_unlock_irqrestore(&mgr->msg_lock, flags);
+ mutex_unlock(&mgr->msg_lock);
return err;
}
@@ -1043,22 +1042,21 @@ static int pcxhr_handle_async_err(struct pcxhr_mgr *mgr, u32 err,
}
-void pcxhr_msg_tasklet(unsigned long arg)
+static void pcxhr_msg_thread(struct pcxhr_mgr *mgr)
{
- struct pcxhr_mgr *mgr = (struct pcxhr_mgr *)(arg);
struct pcxhr_rmh *prmh = mgr->prmh;
int err;
int i, j;
if (mgr->src_it_dsp & PCXHR_IRQ_FREQ_CHANGE)
dev_dbg(&mgr->pci->dev,
- "TASKLET : PCXHR_IRQ_FREQ_CHANGE event occurred\n");
+ "PCXHR_IRQ_FREQ_CHANGE event occurred\n");
if (mgr->src_it_dsp & PCXHR_IRQ_TIME_CODE)
dev_dbg(&mgr->pci->dev,
- "TASKLET : PCXHR_IRQ_TIME_CODE event occurred\n");
+ "PCXHR_IRQ_TIME_CODE event occurred\n");
if (mgr->src_it_dsp & PCXHR_IRQ_NOTIFY)
dev_dbg(&mgr->pci->dev,
- "TASKLET : PCXHR_IRQ_NOTIFY event occurred\n");
+ "PCXHR_IRQ_NOTIFY event occurred\n");
if (mgr->src_it_dsp & (PCXHR_IRQ_FREQ_CHANGE | PCXHR_IRQ_TIME_CODE)) {
/* clear events FREQ_CHANGE and TIME_CODE */
pcxhr_init_rmh(prmh, CMD_TEST_IT);
@@ -1068,7 +1066,7 @@ void pcxhr_msg_tasklet(unsigned long arg)
}
if (mgr->src_it_dsp & PCXHR_IRQ_ASYNC) {
dev_dbg(&mgr->pci->dev,
- "TASKLET : PCXHR_IRQ_ASYNC event occurred\n");
+ "PCXHR_IRQ_ASYNC event occurred\n");
pcxhr_init_rmh(prmh, CMD_ASYNC);
prmh->cmd[0] |= 1; /* add SEL_ASYNC_EVENTS */
@@ -1076,7 +1074,7 @@ void pcxhr_msg_tasklet(unsigned long arg)
prmh->stat_len = PCXHR_SIZE_MAX_LONG_STATUS;
err = pcxhr_send_msg(mgr, prmh);
if (err)
- dev_err(&mgr->pci->dev, "ERROR pcxhr_msg_tasklet=%x;\n",
+ dev_err(&mgr->pci->dev, "ERROR pcxhr_msg_thread=%x;\n",
err);
i = 1;
while (i < prmh->stat_len) {
@@ -1220,9 +1218,9 @@ static void pcxhr_update_timer_pos(struct pcxhr_mgr *mgr,
}
if (elapsed) {
- spin_unlock(&mgr->lock);
+ mutex_unlock(&mgr->lock);
snd_pcm_period_elapsed(stream->substream);
- spin_lock(&mgr->lock);
+ mutex_lock(&mgr->lock);
}
}
}
@@ -1231,14 +1229,10 @@ irqreturn_t pcxhr_interrupt(int irq, void *dev_id)
{
struct pcxhr_mgr *mgr = dev_id;
unsigned int reg;
- int i, j;
- struct snd_pcxhr *chip;
-
- spin_lock(&mgr->lock);
+ bool wake_thread = false;
reg = PCXHR_INPL(mgr, PCXHR_PLX_IRQCS);
if (! (reg & PCXHR_IRQCS_ACTIVE_PCIDB)) {
- spin_unlock(&mgr->lock);
/* this device did not cause the interrupt */
return IRQ_NONE;
}
@@ -1250,6 +1244,44 @@ irqreturn_t pcxhr_interrupt(int irq, void *dev_id)
/* timer irq occurred */
if (reg & PCXHR_IRQ_TIMER) {
int timer_toggle = reg & PCXHR_IRQ_TIMER;
+ if (timer_toggle == mgr->timer_toggle) {
+ dev_dbg(&mgr->pci->dev, "ERROR TIMER TOGGLE\n");
+ mgr->dsp_time_err++;
+ }
+
+ mgr->timer_toggle = timer_toggle;
+ mgr->src_it_dsp = reg;
+ wake_thread = true;
+ }
+
+ /* other irq's handled in the thread */
+ if (reg & PCXHR_IRQ_MASK) {
+ if (reg & PCXHR_IRQ_ASYNC) {
+ /* as we didn't request any async notifications,
+ * some kind of xrun error has probably occurred
+ */
+ /* better resynchronize all streams next interrupt : */
+ mgr->dsp_time_last = PCXHR_DSP_TIME_INVALID;
+ }
+ mgr->src_it_dsp = reg;
+ wake_thread = true;
+ }
+#ifdef CONFIG_SND_DEBUG_VERBOSE
+ if (reg & PCXHR_FATAL_DSP_ERR)
+ dev_dbg(&mgr->pci->dev, "FATAL DSP ERROR : %x\n", reg);
+#endif
+
+ return wake_thread ? IRQ_WAKE_THREAD : IRQ_HANDLED;
+}
+
+irqreturn_t pcxhr_threaded_irq(int irq, void *dev_id)
+{
+ struct pcxhr_mgr *mgr = dev_id;
+ int i, j;
+ struct snd_pcxhr *chip;
+
+ mutex_lock(&mgr->lock);
+ if (mgr->src_it_dsp & PCXHR_IRQ_TIMER) {
/* is a 24 bit counter */
int dsp_time_new =
PCXHR_INPL(mgr, PCXHR_PLX_MBOX4) & PCXHR_DSP_TIME_MASK;
@@ -1290,13 +1322,6 @@ irqreturn_t pcxhr_interrupt(int irq, void *dev_id)
#endif
mgr->dsp_time_last = dsp_time_new;
- if (timer_toggle == mgr->timer_toggle) {
- dev_dbg(&mgr->pci->dev, "ERROR TIMER TOGGLE\n");
- mgr->dsp_time_err++;
- }
- mgr->timer_toggle = timer_toggle;
-
- reg &= ~PCXHR_IRQ_TIMER;
for (i = 0; i < mgr->num_cards; i++) {
chip = mgr->chip[i];
for (j = 0; j < chip->nb_streams_capt; j++)
@@ -1312,22 +1337,7 @@ irqreturn_t pcxhr_interrupt(int irq, void *dev_id)
dsp_time_diff);
}
}
- /* other irq's handled in the tasklet */
- if (reg & PCXHR_IRQ_MASK) {
- if (reg & PCXHR_IRQ_ASYNC) {
- /* as we didn't request any async notifications,
- * some kind of xrun error will probably occurred
- */
- /* better resynchronize all streams next interrupt : */
- mgr->dsp_time_last = PCXHR_DSP_TIME_INVALID;
- }
- mgr->src_it_dsp = reg;
- tasklet_schedule(&mgr->msg_taskq);
- }
-#ifdef CONFIG_SND_DEBUG_VERBOSE
- if (reg & PCXHR_FATAL_DSP_ERR)
- dev_dbg(&mgr->pci->dev, "FATAL DSP ERROR : %x\n", reg);
-#endif
- spin_unlock(&mgr->lock);
- return IRQ_HANDLED; /* this device caused the interrupt */
+
+ pcxhr_msg_thread(mgr);
+ return IRQ_HANDLED;
}
diff --git a/sound/pci/pcxhr/pcxhr_core.h b/sound/pci/pcxhr/pcxhr_core.h
index a81ab6b811e7..dc267e4c1074 100644
--- a/sound/pci/pcxhr/pcxhr_core.h
+++ b/sound/pci/pcxhr/pcxhr_core.h
@@ -200,6 +200,6 @@ int pcxhr_write_io_num_reg_cont(struct pcxhr_mgr *mgr, unsigned int mask,
/* interrupt handling */
irqreturn_t pcxhr_interrupt(int irq, void *dev_id);
-void pcxhr_msg_tasklet(unsigned long arg);
+irqreturn_t pcxhr_threaded_irq(int irq, void *dev_id);
#endif /* __SOUND_PCXHR_CORE_H */
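pcxhr, miXart and pdaudiocf all set pcm->nonatomic and call snd_pcm_period_elapsed() with their own lock dropped: with the PCM marked non-atomic the trigger and pointer callbacks run in process context, and period_elapsed() re-enters the driver through the pointer callback, which takes the same lock again. A sketch of that shape, assuming a made-up my_chip and status-parsing step (snd_pcm_new(), pcm->nonatomic and snd_pcm_period_elapsed() are the real API):

#include <linux/mutex.h>
#include <sound/core.h>
#include <sound/pcm.h>

struct my_chip {
        struct snd_card *card;
        struct mutex lock;                      /* taken by the pointer callback too */
        struct snd_pcm_substream *substream;
        bool period_elapsed;                    /* set while parsing hardware status */
};

/* creation: mark the PCM as non-atomic so its ops may run in process context */
static int my_pcm_new(struct my_chip *chip)
{
        struct snd_pcm *pcm;
        int err;

        err = snd_pcm_new(chip->card, "my-pcm", 0, 1, 1, &pcm);
        if (err < 0)
                return err;
        pcm->nonatomic = true;          /* trigger/pointer may now sleep */
        pcm->private_data = chip;
        return 0;
}

/* called from the threaded irq handler */
static void my_handle_period(struct my_chip *chip)
{
        mutex_lock(&chip->lock);
        /* ... parse hardware status, decide whether a period elapsed ... */
        if (chip->period_elapsed) {
                chip->period_elapsed = false;
                /*
                 * Drop the lock: snd_pcm_period_elapsed() re-enters the
                 * driver through the pointer callback, which takes it again.
                 */
                mutex_unlock(&chip->lock);
                snd_pcm_period_elapsed(chip->substream);
                mutex_lock(&chip->lock);
        }
        mutex_unlock(&chip->lock);
}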
diff --git a/sound/pci/vx222/vx222.c b/sound/pci/vx222/vx222.c
index 3dc4732142ee..c5a25e39e3a8 100644
--- a/sound/pci/vx222/vx222.c
+++ b/sound/pci/vx222/vx222.c
@@ -168,8 +168,9 @@ static int snd_vx222_create(struct snd_card *card, struct pci_dev *pci,
for (i = 0; i < 2; i++)
vx->port[i] = pci_resource_start(pci, i + 1);
- if (request_irq(pci->irq, snd_vx_irq_handler, IRQF_SHARED,
- KBUILD_MODNAME, chip)) {
+ if (request_threaded_irq(pci->irq, snd_vx_irq_handler,
+ snd_vx_threaded_irq_handler, IRQF_SHARED,
+ KBUILD_MODNAME, chip)) {
dev_err(card->dev, "unable to grab IRQ %d\n", pci->irq);
snd_vx222_free(chip);
return -EBUSY;
diff --git a/sound/pcmcia/pdaudiocf/pdaudiocf.c b/sound/pcmcia/pdaudiocf/pdaudiocf.c
index 56bda124cd4a..07f4b33db3af 100644
--- a/sound/pcmcia/pdaudiocf/pdaudiocf.c
+++ b/sound/pcmcia/pdaudiocf/pdaudiocf.c
@@ -61,6 +61,7 @@ static void snd_pdacf_detach(struct pcmcia_device *p_dev);
static void pdacf_release(struct pcmcia_device *link)
{
+ free_irq(link->irq, link->priv);
pcmcia_disable_device(link);
}
@@ -220,11 +221,13 @@ static int pdacf_config(struct pcmcia_device *link)
ret = pcmcia_request_io(link);
if (ret)
- goto failed;
+ goto failed_preirq;
- ret = pcmcia_request_irq(link, pdacf_interrupt);
+ ret = request_threaded_irq(link->irq, pdacf_interrupt,
+ pdacf_threaded_irq,
+ IRQF_SHARED, link->devname, link->priv);
if (ret)
- goto failed;
+ goto failed_preirq;
ret = pcmcia_enable_device(link);
if (ret)
@@ -236,7 +239,9 @@ static int pdacf_config(struct pcmcia_device *link)
return 0;
-failed:
+ failed:
+ free_irq(link->irq, link->priv);
+failed_preirq:
pcmcia_disable_device(link);
return -ENODEV;
}
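Because pcmcia_request_irq() is replaced by a direct request_threaded_irq() in both PCMCIA drivers above, the IRQ is no longer released by pcmcia_disable_device(); hence the new free_irq() in the release path and the split failed/failed_preirq error labels. A sketch of that unwind ordering with placeholder handler names (request_threaded_irq(), free_irq() and the pcmcia_* calls are real):

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>

/* hypothetical handlers defined elsewhere in the driver */
irqreturn_t my_irq(int irq, void *dev_id);
irqreturn_t my_threaded_irq(int irq, void *dev_id);

static int my_config(struct pcmcia_device *link)
{
        int ret;

        ret = pcmcia_request_io(link);
        if (ret)
                goto failed_preirq;             /* nothing to free yet */

        ret = request_threaded_irq(link->irq, my_irq, my_threaded_irq,
                                   IRQF_SHARED, link->devname, link->priv);
        if (ret)
                goto failed_preirq;

        ret = pcmcia_enable_device(link);
        if (ret)
                goto failed;                    /* the irq is ours now, free it */

        return 0;

 failed:
        free_irq(link->irq, link->priv);
failed_preirq:
        pcmcia_disable_device(link);
        return -ENODEV;
}

/* teardown mirrors the error path */
static void my_release(struct pcmcia_device *link)
{
        free_irq(link->irq, link->priv);
        pcmcia_disable_device(link);
}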
diff --git a/sound/pcmcia/pdaudiocf/pdaudiocf.h b/sound/pcmcia/pdaudiocf/pdaudiocf.h
index ea41e57d7179..e9a7d3a784f7 100644
--- a/sound/pcmcia/pdaudiocf/pdaudiocf.h
+++ b/sound/pcmcia/pdaudiocf/pdaudiocf.h
@@ -88,10 +88,9 @@ struct snd_pdacf {
unsigned long port;
int irq;
- spinlock_t reg_lock;
+ struct mutex reg_lock;
unsigned short regmap[8];
unsigned short suspend_reg_scr;
- struct tasklet_struct tq;
spinlock_t ak4117_lock;
struct ak4117 *ak4117;
@@ -136,7 +135,7 @@ int snd_pdacf_resume(struct snd_pdacf *chip);
#endif
int snd_pdacf_pcm_new(struct snd_pdacf *chip);
irqreturn_t pdacf_interrupt(int irq, void *dev);
-void pdacf_tasklet(unsigned long private_data);
+irqreturn_t pdacf_threaded_irq(int irq, void *dev);
void pdacf_reinit(struct snd_pdacf *chip, int resume);
#endif /* __PDAUDIOCF_H */
diff --git a/sound/pcmcia/pdaudiocf/pdaudiocf_core.c b/sound/pcmcia/pdaudiocf/pdaudiocf_core.c
index ea0adfb984ad..d724ab0653cf 100644
--- a/sound/pcmcia/pdaudiocf/pdaudiocf_core.c
+++ b/sound/pcmcia/pdaudiocf/pdaudiocf_core.c
@@ -162,9 +162,8 @@ struct snd_pdacf *snd_pdacf_create(struct snd_card *card)
if (chip == NULL)
return NULL;
chip->card = card;
- spin_lock_init(&chip->reg_lock);
+ mutex_init(&chip->reg_lock);
spin_lock_init(&chip->ak4117_lock);
- tasklet_init(&chip->tq, pdacf_tasklet, (unsigned long)chip);
card->private_data = chip;
pdacf_proc_init(chip);
@@ -174,19 +173,18 @@ struct snd_pdacf *snd_pdacf_create(struct snd_card *card)
static void snd_pdacf_ak4117_change(struct ak4117 *ak4117, unsigned char c0, unsigned char c1)
{
struct snd_pdacf *chip = ak4117->change_callback_private;
- unsigned long flags;
u16 val;
if (!(c0 & AK4117_UNLCK))
return;
- spin_lock_irqsave(&chip->reg_lock, flags);
+ mutex_lock(&chip->reg_lock);
val = chip->regmap[PDAUDIOCF_REG_SCR>>1];
if (ak4117->rcs0 & AK4117_UNLCK)
val |= PDAUDIOCF_BLUE_LED_OFF;
else
val &= ~PDAUDIOCF_BLUE_LED_OFF;
pdacf_reg_write(chip, PDAUDIOCF_REG_SCR, val);
- spin_unlock_irqrestore(&chip->reg_lock, flags);
+ mutex_unlock(&chip->reg_lock);
}
int snd_pdacf_ak4117_create(struct snd_pdacf *chip)
diff --git a/sound/pcmcia/pdaudiocf/pdaudiocf_irq.c b/sound/pcmcia/pdaudiocf/pdaudiocf_irq.c
index dcd32201bc8c..ecf0fbd91794 100644
--- a/sound/pcmcia/pdaudiocf/pdaudiocf_irq.c
+++ b/sound/pcmcia/pdaudiocf/pdaudiocf_irq.c
@@ -30,6 +30,7 @@ irqreturn_t pdacf_interrupt(int irq, void *dev)
{
struct snd_pdacf *chip = dev;
unsigned short stat;
+ bool wake_thread = false;
if ((chip->chip_status & (PDAUDIOCF_STAT_IS_STALE|
PDAUDIOCF_STAT_IS_CONFIGURED|
@@ -41,13 +42,13 @@ irqreturn_t pdacf_interrupt(int irq, void *dev)
if (stat & PDAUDIOCF_IRQOVR) /* should never happen */
snd_printk(KERN_ERR "PDAUDIOCF SRAM buffer overrun detected!\n");
if (chip->pcm_substream)
- tasklet_schedule(&chip->tq);
+ wake_thread = true;
if (!(stat & PDAUDIOCF_IRQAKM))
stat |= PDAUDIOCF_IRQAKM; /* check rate */
}
if (get_irq_regs() != NULL)
snd_ak4117_check_rate_and_errors(chip->ak4117, 0);
- return IRQ_HANDLED;
+ return wake_thread ? IRQ_WAKE_THREAD : IRQ_HANDLED;
}
static inline void pdacf_transfer_mono16(u16 *dst, u16 xor, unsigned int size, unsigned long rdp_port)
@@ -256,16 +257,16 @@ static void pdacf_transfer(struct snd_pdacf *chip, unsigned int size, unsigned i
}
}
-void pdacf_tasklet(unsigned long private_data)
+irqreturn_t pdacf_threaded_irq(int irq, void *dev)
{
- struct snd_pdacf *chip = (struct snd_pdacf *) private_data;
+ struct snd_pdacf *chip = dev;
int size, off, cont, rdp, wdp;
if ((chip->chip_status & (PDAUDIOCF_STAT_IS_STALE|PDAUDIOCF_STAT_IS_CONFIGURED)) != PDAUDIOCF_STAT_IS_CONFIGURED)
- return;
+ return IRQ_HANDLED;
if (chip->pcm_substream == NULL || chip->pcm_substream->runtime == NULL || !snd_pcm_running(chip->pcm_substream))
- return;
+ return IRQ_HANDLED;
rdp = inw(chip->port + PDAUDIOCF_REG_RDP);
wdp = inw(chip->port + PDAUDIOCF_REG_WDP);
@@ -311,15 +312,15 @@ void pdacf_tasklet(unsigned long private_data)
size -= cont;
}
#endif
- spin_lock(&chip->reg_lock);
+ mutex_lock(&chip->reg_lock);
while (chip->pcm_tdone >= chip->pcm_period) {
chip->pcm_hwptr += chip->pcm_period;
chip->pcm_hwptr %= chip->pcm_size;
chip->pcm_tdone -= chip->pcm_period;
- spin_unlock(&chip->reg_lock);
+ mutex_unlock(&chip->reg_lock);
snd_pcm_period_elapsed(chip->pcm_substream);
- spin_lock(&chip->reg_lock);
+ mutex_lock(&chip->reg_lock);
}
- spin_unlock(&chip->reg_lock);
- /* printk(KERN_DEBUG "TASKLET: end\n"); */
+ mutex_unlock(&chip->reg_lock);
+ return IRQ_HANDLED;
}
diff --git a/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c b/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c
index 43f995a3f960..b48aa0a78c19 100644
--- a/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c
+++ b/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c
@@ -77,7 +77,7 @@ static int pdacf_pcm_trigger(struct snd_pcm_substream *subs, int cmd)
default:
return -EINVAL;
}
- spin_lock(&chip->reg_lock);
+ mutex_lock(&chip->reg_lock);
chip->pcm_running += inc;
tmp = pdacf_reg_read(chip, PDAUDIOCF_REG_SCR);
if (chip->pcm_running) {
@@ -91,7 +91,7 @@ static int pdacf_pcm_trigger(struct snd_pcm_substream *subs, int cmd)
tmp |= val;
pdacf_reg_write(chip, PDAUDIOCF_REG_SCR, tmp);
__end:
- spin_unlock(&chip->reg_lock);
+ mutex_unlock(&chip->reg_lock);
snd_ak4117_check_rate_and_errors(chip->ak4117, AK4117_CHECK_NO_RATE);
return ret;
}
@@ -296,6 +296,7 @@ int snd_pdacf_pcm_new(struct snd_pdacf *chip)
pcm->private_data = chip;
pcm->info_flags = 0;
+ pcm->nonatomic = true;
strcpy(pcm->name, chip->card->shortname);
chip->pcm = pcm;
diff --git a/sound/pcmcia/vx/vxp_ops.c b/sound/pcmcia/vx/vxp_ops.c
index fe33e122e372..281972913c32 100644
--- a/sound/pcmcia/vx/vxp_ops.c
+++ b/sound/pcmcia/vx/vxp_ops.c
@@ -468,12 +468,11 @@ static void vxp_write_codec_reg(struct vx_core *chip, int codec, unsigned int da
void vx_set_mic_boost(struct vx_core *chip, int boost)
{
struct snd_vxpocket *pchip = (struct snd_vxpocket *)chip;
- unsigned long flags;
if (chip->chip_status & VX_STAT_IS_STALE)
return;
- spin_lock_irqsave(&chip->lock, flags);
+ mutex_lock(&chip->lock);
if (pchip->regCDSP & P24_CDSP_MICS_SEL_MASK) {
if (boost) {
/* boost: 38 dB */
@@ -486,7 +485,7 @@ void vx_set_mic_boost(struct vx_core *chip, int boost)
}
vx_outb(chip, CDSP, pchip->regCDSP);
}
- spin_unlock_irqrestore(&chip->lock, flags);
+ mutex_unlock(&chip->lock);
}
/*
@@ -511,17 +510,16 @@ static int vx_compute_mic_level(int level)
void vx_set_mic_level(struct vx_core *chip, int level)
{
struct snd_vxpocket *pchip = (struct snd_vxpocket *)chip;
- unsigned long flags;
if (chip->chip_status & VX_STAT_IS_STALE)
return;
- spin_lock_irqsave(&chip->lock, flags);
+ mutex_lock(&chip->lock);
if (pchip->regCDSP & VXP_CDSP_MIC_SEL_MASK) {
level = vx_compute_mic_level(level);
vx_outb(chip, MICRO, level);
}
- spin_unlock_irqrestore(&chip->lock, flags);
+ mutex_unlock(&chip->lock);
}
diff --git a/sound/pcmcia/vx/vxpocket.c b/sound/pcmcia/vx/vxpocket.c
index 786e7e139c9e..92ec11456e3a 100644
--- a/sound/pcmcia/vx/vxpocket.c
+++ b/sound/pcmcia/vx/vxpocket.c
@@ -62,6 +62,7 @@ static unsigned int card_alloc;
*/
static void vxpocket_release(struct pcmcia_device *link)
{
+ free_irq(link->irq, link->priv);
pcmcia_disable_device(link);
}
@@ -227,11 +228,13 @@ static int vxpocket_config(struct pcmcia_device *link)
ret = pcmcia_request_io(link);
if (ret)
- goto failed;
+ goto failed_preirq;
- ret = pcmcia_request_irq(link, snd_vx_irq_handler);
+ ret = request_threaded_irq(link->irq, snd_vx_irq_handler,
+ snd_vx_threaded_irq_handler,
+ IRQF_SHARED, link->devname, link->priv);
if (ret)
- goto failed;
+ goto failed_preirq;
ret = pcmcia_enable_device(link);
if (ret)
@@ -245,7 +248,9 @@ static int vxpocket_config(struct pcmcia_device *link)
return 0;
-failed:
+ failed:
+ free_irq(link->irq, link->priv);
+failed_preirq:
pcmcia_disable_device(link);
return -ENODEV;
}
diff --git a/sound/soc/codecs/88pm860x-codec.c b/sound/soc/codecs/88pm860x-codec.c
index 922006dd0583..e88a6b67f781 100644
--- a/sound/soc/codecs/88pm860x-codec.c
+++ b/sound/soc/codecs/88pm860x-codec.c
@@ -146,7 +146,7 @@ struct pm860x_priv {
struct pm860x_det det;
int irq[4];
- unsigned char name[4][MAX_NAME_LEN];
+ unsigned char name[4][MAX_NAME_LEN+1];
};
/* -9450dB to 0dB in 150dB steps ( mute instead of -9450dB) */
@@ -1337,8 +1337,6 @@ static int pm860x_probe(struct snd_soc_codec *codec)
}
}
- pm860x_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
return 0;
out:
@@ -1354,7 +1352,6 @@ static int pm860x_remove(struct snd_soc_codec *codec)
for (i = 3; i >= 0; i--)
free_irq(pm860x->irq[i], pm860x);
- pm860x_set_bias_level(codec, SND_SOC_BIAS_OFF);
return 0;
}
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 8838838e25ed..a68d1731a8fd 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -43,6 +43,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_ALC5623 if I2C
select SND_SOC_ALC5632 if I2C
select SND_SOC_CQ0093VC if MFD_DAVINCI_VOICECODEC
+ select SND_SOC_CS35L32 if I2C
select SND_SOC_CS42L51_I2C if I2C
select SND_SOC_CS42L52 if I2C && INPUT
select SND_SOC_CS42L56 if I2C && INPUT
@@ -56,7 +57,10 @@ config SND_SOC_ALL_CODECS
select SND_SOC_DA7213 if I2C
select SND_SOC_DA732X if I2C
select SND_SOC_DA9055 if I2C
+ select SND_SOC_DMIC
select SND_SOC_BT_SCO
+ select SND_SOC_ES8328_SPI if SPI_MASTER
+ select SND_SOC_ES8328_I2C if I2C
select SND_SOC_ISABELLE if I2C
select SND_SOC_JZ4740_CODEC
select SND_SOC_LM4857 if I2C
@@ -90,6 +94,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_SSM2518 if I2C
select SND_SOC_SSM2602_SPI if SPI_MASTER
select SND_SOC_SSM2602_I2C if I2C
+ select SND_SOC_SSM4567 if I2C
select SND_SOC_STA32X if I2C
select SND_SOC_STA350 if I2C
select SND_SOC_STA529 if I2C
@@ -323,6 +328,10 @@ config SND_SOC_ALC5632
config SND_SOC_CQ0093VC
tristate
+config SND_SOC_CS35L32
+ tristate "Cirrus Logic CS35L32 CODEC"
+ depends on I2C
+
config SND_SOC_CS42L51
tristate
@@ -405,6 +414,17 @@ config SND_SOC_DMIC
config SND_SOC_HDMI_CODEC
tristate "HDMI stub CODEC"
+config SND_SOC_ES8328
+ tristate "Everest Semi ES8328 CODEC"
+
+config SND_SOC_ES8328_I2C
+ tristate
+ select SND_SOC_ES8328
+
+config SND_SOC_ES8328_SPI
+ tristate
+ select SND_SOC_ES8328
+
config SND_SOC_ISABELLE
tristate
@@ -464,6 +484,7 @@ config SND_SOC_RL6231
config SND_SOC_RT286
tristate
+ depends on I2C
config SND_SOC_RT5631
tristate
@@ -520,12 +541,20 @@ config SND_SOC_SSM2602
tristate
config SND_SOC_SSM2602_SPI
+ tristate "Analog Devices SSM2602 CODEC - SPI"
+ depends on SPI_MASTER
select SND_SOC_SSM2602
- tristate
+ select REGMAP_SPI
config SND_SOC_SSM2602_I2C
+ tristate "Analog Devices SSM2602 CODEC - I2C"
+ depends on I2C
select SND_SOC_SSM2602
- tristate
+ select REGMAP_I2C
+
+config SND_SOC_SSM4567
+ tristate "Analog Devices ssm4567 amplifier driver support"
+ depends on I2C
config SND_SOC_STA32X
tristate
@@ -712,7 +741,8 @@ config SND_SOC_WM8974
tristate
config SND_SOC_WM8978
- tristate
+ tristate "Wolfson Microelectronics WM8978 codec"
+ depends on I2C
config SND_SOC_WM8983
tristate
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index 20afe0f0c5be..5dce451661e4 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -32,6 +32,7 @@ snd-soc-ak4671-objs := ak4671.o
snd-soc-ak5386-objs := ak5386.o
snd-soc-arizona-objs := arizona.o
snd-soc-cq93vc-objs := cq93vc.o
+snd-soc-cs35l32-objs := cs35l32.o
snd-soc-cs42l51-objs := cs42l51.o
snd-soc-cs42l51-i2c-objs := cs42l51-i2c.o
snd-soc-cs42l52-objs := cs42l52.o
@@ -49,6 +50,9 @@ snd-soc-da732x-objs := da732x.o
snd-soc-da9055-objs := da9055.o
snd-soc-bt-sco-objs := bt-sco.o
snd-soc-dmic-objs := dmic.o
+snd-soc-es8328-objs := es8328.o
+snd-soc-es8328-i2c-objs := es8328-i2c.o
+snd-soc-es8328-spi-objs := es8328-spi.o
snd-soc-isabelle-objs := isabelle.o
snd-soc-jz4740-codec-objs := jz4740.o
snd-soc-l3-objs := l3.o
@@ -91,6 +95,7 @@ snd-soc-ssm2518-objs := ssm2518.o
snd-soc-ssm2602-objs := ssm2602.o
snd-soc-ssm2602-spi-objs := ssm2602-spi.o
snd-soc-ssm2602-i2c-objs := ssm2602-i2c.o
+snd-soc-ssm4567-objs := ssm4567.o
snd-soc-sta32x-objs := sta32x.o
snd-soc-sta350-objs := sta350.o
snd-soc-sta529-objs := sta529.o
@@ -203,6 +208,7 @@ obj-$(CONFIG_SND_SOC_ALC5623) += snd-soc-alc5623.o
obj-$(CONFIG_SND_SOC_ALC5632) += snd-soc-alc5632.o
obj-$(CONFIG_SND_SOC_ARIZONA) += snd-soc-arizona.o
obj-$(CONFIG_SND_SOC_CQ0093VC) += snd-soc-cq93vc.o
+obj-$(CONFIG_SND_SOC_CS35L32) += snd-soc-cs35l32.o
obj-$(CONFIG_SND_SOC_CS42L51) += snd-soc-cs42l51.o
obj-$(CONFIG_SND_SOC_CS42L51_I2C) += snd-soc-cs42l51-i2c.o
obj-$(CONFIG_SND_SOC_CS42L52) += snd-soc-cs42l52.o
@@ -220,6 +226,9 @@ obj-$(CONFIG_SND_SOC_DA732X) += snd-soc-da732x.o
obj-$(CONFIG_SND_SOC_DA9055) += snd-soc-da9055.o
obj-$(CONFIG_SND_SOC_BT_SCO) += snd-soc-bt-sco.o
obj-$(CONFIG_SND_SOC_DMIC) += snd-soc-dmic.o
+obj-$(CONFIG_SND_SOC_ES8328) += snd-soc-es8328.o
+obj-$(CONFIG_SND_SOC_ES8328_I2C) += snd-soc-es8328-i2c.o
+obj-$(CONFIG_SND_SOC_ES8328_SPI) += snd-soc-es8328-spi.o
obj-$(CONFIG_SND_SOC_ISABELLE) += snd-soc-isabelle.o
obj-$(CONFIG_SND_SOC_JZ4740_CODEC) += snd-soc-jz4740-codec.o
obj-$(CONFIG_SND_SOC_L3) += snd-soc-l3.o
@@ -258,6 +267,7 @@ obj-$(CONFIG_SND_SOC_SSM2518) += snd-soc-ssm2518.o
obj-$(CONFIG_SND_SOC_SSM2602) += snd-soc-ssm2602.o
obj-$(CONFIG_SND_SOC_SSM2602_SPI) += snd-soc-ssm2602-spi.o
obj-$(CONFIG_SND_SOC_SSM2602_I2C) += snd-soc-ssm2602-i2c.o
+obj-$(CONFIG_SND_SOC_SSM4567) += snd-soc-ssm4567.o
obj-$(CONFIG_SND_SOC_STA32X) += snd-soc-sta32x.o
obj-$(CONFIG_SND_SOC_STA350) += snd-soc-sta350.o
obj-$(CONFIG_SND_SOC_STA529) += snd-soc-sta529.o
diff --git a/sound/soc/codecs/ab8500-codec.c b/sound/soc/codecs/ab8500-codec.c
index 1fb4402bf72d..fd43827bb856 100644
--- a/sound/soc/codecs/ab8500-codec.c
+++ b/sound/soc/codecs/ab8500-codec.c
@@ -56,8 +56,7 @@
#define GPIO31_DIR_OUTPUT 0x40
/* Macrocell register definitions */
-#define AB8500_CTRL3_REG 0x0200
-#define AB8500_GPIO_DIR4_REG 0x1013
+#define AB8500_GPIO_DIR4_REG 0x13 /* Bank AB8500_MISC */
/* Nr of FIR/IIR-coeff banks in ANC-block */
#define AB8500_NR_OF_ANC_COEFF_BANKS 2
@@ -126,6 +125,8 @@ struct ab8500_codec_drvdata_dbg {
/* Private data for AB8500 device-driver */
struct ab8500_codec_drvdata {
+ struct regmap *regmap;
+
/* Sidetone */
long *sid_fir_values;
enum sid_state sid_status;
@@ -166,49 +167,35 @@ static inline const char *amic_type_str(enum amic_type type)
*/
/* Read a register from the audio-bank of AB8500 */
-static unsigned int ab8500_codec_read_reg(struct snd_soc_codec *codec,
- unsigned int reg)
+static int ab8500_codec_read_reg(void *context, unsigned int reg,
+ unsigned int *value)
{
+ struct device *dev = context;
int status;
- unsigned int value = 0;
u8 value8;
- status = abx500_get_register_interruptible(codec->dev, AB8500_AUDIO,
- reg, &value8);
- if (status < 0) {
- dev_err(codec->dev,
- "%s: ERROR: Register (0x%02x:0x%02x) read failed (%d).\n",
- __func__, (u8)AB8500_AUDIO, (u8)reg, status);
- } else {
- dev_dbg(codec->dev,
- "%s: Read 0x%02x from register 0x%02x:0x%02x\n",
- __func__, value8, (u8)AB8500_AUDIO, (u8)reg);
- value = (unsigned int)value8;
- }
+ status = abx500_get_register_interruptible(dev, AB8500_AUDIO,
+ reg, &value8);
+ *value = (unsigned int)value8;
- return value;
+ return status;
}
/* Write to a register in the audio-bank of AB8500 */
-static int ab8500_codec_write_reg(struct snd_soc_codec *codec,
- unsigned int reg, unsigned int value)
+static int ab8500_codec_write_reg(void *context, unsigned int reg,
+ unsigned int value)
{
- int status;
-
- status = abx500_set_register_interruptible(codec->dev, AB8500_AUDIO,
- reg, value);
- if (status < 0)
- dev_err(codec->dev,
- "%s: ERROR: Register (%02x:%02x) write failed (%d).\n",
- __func__, (u8)AB8500_AUDIO, (u8)reg, status);
- else
- dev_dbg(codec->dev,
- "%s: Wrote 0x%02x into register %02x:%02x\n",
- __func__, (u8)value, (u8)AB8500_AUDIO, (u8)reg);
+ struct device *dev = context;
- return status;
+ return abx500_set_register_interruptible(dev, AB8500_AUDIO,
+ reg, value);
}
+static const struct regmap_config ab8500_codec_regmap = {
+ .reg_read = ab8500_codec_read_reg,
+ .reg_write = ab8500_codec_write_reg,
+};
+
/*
* Controls - DAPM
*/
@@ -1968,16 +1955,16 @@ static int ab8500_audio_setup_mics(struct snd_soc_codec *codec,
dev_dbg(codec->dev, "%s: Enter.\n", __func__);
/* Set DMic-clocks to outputs */
- status = abx500_get_register_interruptible(codec->dev, (u8)AB8500_MISC,
- (u8)AB8500_GPIO_DIR4_REG,
+ status = abx500_get_register_interruptible(codec->dev, AB8500_MISC,
+ AB8500_GPIO_DIR4_REG,
&value8);
if (status < 0)
return status;
value = value8 | GPIO27_DIR_OUTPUT | GPIO29_DIR_OUTPUT |
GPIO31_DIR_OUTPUT;
status = abx500_set_register_interruptible(codec->dev,
- (u8)AB8500_MISC,
- (u8)AB8500_GPIO_DIR4_REG,
+ AB8500_MISC,
+ AB8500_GPIO_DIR4_REG,
value);
if (status < 0)
return status;
@@ -2565,9 +2552,6 @@ static int ab8500_codec_probe(struct snd_soc_codec *codec)
static struct snd_soc_codec_driver ab8500_codec_driver = {
.probe = ab8500_codec_probe,
- .read = ab8500_codec_read_reg,
- .write = ab8500_codec_write_reg,
- .reg_word_size = sizeof(u8),
.controls = ab8500_ctrls,
.num_controls = ARRAY_SIZE(ab8500_ctrls),
.dapm_widgets = ab8500_dapm_widgets,
@@ -2592,6 +2576,15 @@ static int ab8500_codec_driver_probe(struct platform_device *pdev)
drvdata->anc_status = ANC_UNCONFIGURED;
dev_set_drvdata(&pdev->dev, drvdata);
+ drvdata->regmap = devm_regmap_init(&pdev->dev, NULL, &pdev->dev,
+ &ab8500_codec_regmap);
+ if (IS_ERR(drvdata->regmap)) {
+ status = PTR_ERR(drvdata->regmap);
+ dev_err(&pdev->dev, "%s: Failed to allocate regmap: %d\n",
+ __func__, status);
+ return status;
+ }
+
dev_dbg(&pdev->dev, "%s: Register codec.\n", __func__);
status = snd_soc_register_codec(&pdev->dev, &ab8500_codec_driver,
ab8500_codec_dai,
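
The ab8500 hunk above is a conversion from the old codec-level .read/.write callbacks to a regmap built on reg_read/reg_write bus callbacks, registered with devm_regmap_init() and a NULL bus. A minimal sketch of that pattern, assuming the abx500 accessors and the AB8500_AUDIO bank used in the diff (header paths approximate, error handling trimmed):

#include <linux/err.h>
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

/* Bus-less regmap: reads and writes go through the abx500 helpers,
 * with the struct device passed as the callback context. */
static int example_reg_read(void *context, unsigned int reg, unsigned int *val)
{
	struct device *dev = context;
	u8 v;
	int ret;

	ret = abx500_get_register_interruptible(dev, AB8500_AUDIO, reg, &v);
	*val = v;
	return ret;
}

static int example_reg_write(void *context, unsigned int reg, unsigned int val)
{
	struct device *dev = context;

	return abx500_set_register_interruptible(dev, AB8500_AUDIO, reg, val);
}

static const struct regmap_config example_regmap_cfg = {
	.reg_read = example_reg_read,
	.reg_write = example_reg_write,
};

static int example_platform_probe(struct platform_device *pdev)
{
	/* A NULL bus selects the reg_read/reg_write callbacks above */
	struct regmap *map = devm_regmap_init(&pdev->dev, NULL, &pdev->dev,
					      &example_regmap_cfg);

	return PTR_ERR_OR_ZERO(map);
}

With the regmap registered this way, the codec driver no longer needs .read, .write or .reg_word_size in struct snd_soc_codec_driver, which is exactly what the hunk removes.
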
diff --git a/sound/soc/codecs/ac97.c b/sound/soc/codecs/ac97.c
index e889e1b84192..bd9b1839c8b0 100644
--- a/sound/soc/codecs/ac97.c
+++ b/sound/soc/codecs/ac97.c
@@ -69,19 +69,6 @@ static struct snd_soc_dai_driver ac97_dai = {
.ops = &ac97_dai_ops,
};
-static unsigned int ac97_read(struct snd_soc_codec *codec,
- unsigned int reg)
-{
- return soc_ac97_ops->read(codec->ac97, reg);
-}
-
-static int ac97_write(struct snd_soc_codec *codec, unsigned int reg,
- unsigned int val)
-{
- soc_ac97_ops->write(codec->ac97, reg, val);
- return 0;
-}
-
static int ac97_soc_probe(struct snd_soc_codec *codec)
{
struct snd_ac97_bus *ac97_bus;
@@ -122,8 +109,6 @@ static int ac97_soc_resume(struct snd_soc_codec *codec)
#endif
static struct snd_soc_codec_driver soc_codec_dev_ac97 = {
- .write = ac97_write,
- .read = ac97_read,
.probe = ac97_soc_probe,
.suspend = ac97_soc_suspend,
.resume = ac97_soc_resume,
diff --git a/sound/soc/codecs/adau1373.c b/sound/soc/codecs/adau1373.c
index 1ff7d4d027e9..7c784ad3e8b2 100644
--- a/sound/soc/codecs/adau1373.c
+++ b/sound/soc/codecs/adau1373.c
@@ -1448,29 +1448,10 @@ static int adau1373_set_bias_level(struct snd_soc_codec *codec,
return 0;
}
-static int adau1373_remove(struct snd_soc_codec *codec)
-{
- adau1373_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
-static int adau1373_suspend(struct snd_soc_codec *codec)
-{
- struct adau1373 *adau1373 = snd_soc_codec_get_drvdata(codec);
- int ret;
-
- ret = adau1373_set_bias_level(codec, SND_SOC_BIAS_OFF);
- regcache_cache_only(adau1373->regmap, true);
-
- return ret;
-}
-
static int adau1373_resume(struct snd_soc_codec *codec)
{
struct adau1373 *adau1373 = snd_soc_codec_get_drvdata(codec);
- regcache_cache_only(adau1373->regmap, false);
- adau1373_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
regcache_sync(adau1373->regmap);
return 0;
@@ -1501,8 +1482,6 @@ static const struct regmap_config adau1373_regmap_config = {
static struct snd_soc_codec_driver adau1373_codec_driver = {
.probe = adau1373_probe,
- .remove = adau1373_remove,
- .suspend = adau1373_suspend,
.resume = adau1373_resume,
.set_bias_level = adau1373_set_bias_level,
.idle_bias_off = true,
diff --git a/sound/soc/codecs/adau1761.c b/sound/soc/codecs/adau1761.c
index 848cab839553..5518ebd6947c 100644
--- a/sound/soc/codecs/adau1761.c
+++ b/sound/soc/codecs/adau1761.c
@@ -714,9 +714,9 @@ static int adau1761_codec_probe(struct snd_soc_codec *codec)
static const struct snd_soc_codec_driver adau1761_codec_driver = {
.probe = adau1761_codec_probe,
- .suspend = adau17x1_suspend,
.resume = adau17x1_resume,
.set_bias_level = adau1761_set_bias_level,
+ .suspend_bias_off = true,
.controls = adau1761_controls,
.num_controls = ARRAY_SIZE(adau1761_controls),
diff --git a/sound/soc/codecs/adau1781.c b/sound/soc/codecs/adau1781.c
index 045a61413840..e9fc00fb13dd 100644
--- a/sound/soc/codecs/adau1781.c
+++ b/sound/soc/codecs/adau1781.c
@@ -446,9 +446,9 @@ static int adau1781_codec_probe(struct snd_soc_codec *codec)
static const struct snd_soc_codec_driver adau1781_codec_driver = {
.probe = adau1781_codec_probe,
- .suspend = adau17x1_suspend,
.resume = adau17x1_resume,
.set_bias_level = adau1781_set_bias_level,
+ .suspend_bias_off = true,
.controls = adau1781_controls,
.num_controls = ARRAY_SIZE(adau1781_controls),
diff --git a/sound/soc/codecs/adau17x1.c b/sound/soc/codecs/adau17x1.c
index 0b659704e60c..3e16c1c64115 100644
--- a/sound/soc/codecs/adau17x1.c
+++ b/sound/soc/codecs/adau17x1.c
@@ -815,13 +815,6 @@ int adau17x1_add_routes(struct snd_soc_codec *codec)
}
EXPORT_SYMBOL_GPL(adau17x1_add_routes);
-int adau17x1_suspend(struct snd_soc_codec *codec)
-{
- codec->driver->set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-EXPORT_SYMBOL_GPL(adau17x1_suspend);
-
int adau17x1_resume(struct snd_soc_codec *codec)
{
struct adau *adau = snd_soc_codec_get_drvdata(codec);
@@ -829,7 +822,6 @@ int adau17x1_resume(struct snd_soc_codec *codec)
if (adau->switch_mode)
adau->switch_mode(codec->dev);
- codec->driver->set_bias_level(codec, SND_SOC_BIAS_STANDBY);
regcache_sync(adau->regmap);
return 0;
diff --git a/sound/soc/codecs/adau17x1.h b/sound/soc/codecs/adau17x1.h
index 3ffabaf4c7a8..e4a557fd7155 100644
--- a/sound/soc/codecs/adau17x1.h
+++ b/sound/soc/codecs/adau17x1.h
@@ -52,7 +52,6 @@ int adau17x1_set_micbias_voltage(struct snd_soc_codec *codec,
enum adau17x1_micbias_voltage micbias);
bool adau17x1_readable_register(struct device *dev, unsigned int reg);
bool adau17x1_volatile_register(struct device *dev, unsigned int reg);
-int adau17x1_suspend(struct snd_soc_codec *codec);
int adau17x1_resume(struct snd_soc_codec *codec);
extern const struct snd_soc_dai_ops adau17x1_dai_ops;
diff --git a/sound/soc/codecs/adav80x.c b/sound/soc/codecs/adav80x.c
index c43b93fdf0df..ce3cdca9fc62 100644
--- a/sound/soc/codecs/adav80x.c
+++ b/sound/soc/codecs/adav80x.c
@@ -812,42 +812,23 @@ static int adav80x_probe(struct snd_soc_codec *codec)
/* Disable DAC zero flag */
regmap_write(adav80x->regmap, ADAV80X_DAC_CTRL3, 0x6);
- return adav80x_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-}
-
-static int adav80x_suspend(struct snd_soc_codec *codec)
-{
- struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec);
- int ret;
-
- ret = adav80x_set_bias_level(codec, SND_SOC_BIAS_OFF);
- regcache_cache_only(adav80x->regmap, true);
-
- return ret;
+ return 0;
}
static int adav80x_resume(struct snd_soc_codec *codec)
{
struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec);
- regcache_cache_only(adav80x->regmap, false);
- adav80x_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
regcache_sync(adav80x->regmap);
return 0;
}
-static int adav80x_remove(struct snd_soc_codec *codec)
-{
- return adav80x_set_bias_level(codec, SND_SOC_BIAS_OFF);
-}
-
static struct snd_soc_codec_driver adav80x_codec_driver = {
.probe = adav80x_probe,
- .remove = adav80x_remove,
- .suspend = adav80x_suspend,
.resume = adav80x_resume,
.set_bias_level = adav80x_set_bias_level,
+ .suspend_bias_off = true,
.set_pll = adav80x_set_pll,
.set_sysclk = adav80x_set_sysclk,
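
Several hunks in this series (adau1761/adau1781 and adav80x above, and the cs42l52/cs42l56/cs42l73 changes further down) make the same conversion: driver-local suspend/resume/remove callbacks that only toggled the bias level are deleted, and the core is asked to do it via the suspend_bias_off flag, leaving resume with just a register-cache resync. A minimal sketch of the resulting driver shape, with illustrative names rather than any one driver's:

#include <linux/regmap.h>
#include <sound/soc.h>

struct example_priv {
	struct regmap *regmap;
};

/* With suspend_bias_off set, the ASoC core drives the codec to
 * SND_SOC_BIAS_OFF on suspend, so resume only resyncs the cache. */
static int example_codec_resume(struct snd_soc_codec *codec)
{
	struct example_priv *priv = snd_soc_codec_get_drvdata(codec);

	regcache_sync(priv->regmap);
	return 0;
}

static int example_set_bias_level(struct snd_soc_codec *codec,
				  enum snd_soc_bias_level level)
{
	/* per-level power sequencing omitted in this sketch */
	codec->dapm.bias_level = level;
	return 0;
}

static struct snd_soc_codec_driver example_codec_driver = {
	.resume		= example_codec_resume,
	.set_bias_level	= example_set_bias_level,
	.suspend_bias_off = true,
};
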
diff --git a/sound/soc/codecs/cs35l32.c b/sound/soc/codecs/cs35l32.c
new file mode 100644
index 000000000000..c125925da92e
--- /dev/null
+++ b/sound/soc/codecs/cs35l32.c
@@ -0,0 +1,631 @@
+/*
+ * cs35l32.c -- CS35L32 ALSA SoC audio driver
+ *
+ * Copyright 2014 CirrusLogic, Inc.
+ *
+ * Author: Brian Austin <brian.austin@cirrus.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of_device.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+#include <dt-bindings/sound/cs35l32.h>
+
+#include "cs35l32.h"
+
+#define CS35L32_NUM_SUPPLIES 2
+static const char *const cs35l32_supply_names[CS35L32_NUM_SUPPLIES] = {
+ "VA",
+ "VP",
+};
+
+struct cs35l32_private {
+ struct regmap *regmap;
+ struct snd_soc_codec *codec;
+ struct regulator_bulk_data supplies[CS35L32_NUM_SUPPLIES];
+ struct cs35l32_platform_data pdata;
+ struct gpio_desc *reset_gpio;
+};
+
+static const struct reg_default cs35l32_reg_defaults[] = {
+
+ { 0x06, 0x04 }, /* Power Ctl 1 */
+ { 0x07, 0xE8 }, /* Power Ctl 2 */
+ { 0x08, 0x40 }, /* Clock Ctl */
+ { 0x09, 0x20 }, /* Low Battery Threshold */
+ { 0x0A, 0x00 }, /* Voltage Monitor [RO] */
+ { 0x0B, 0x40 }, /* Conv Peak Curr Protection CTL */
+ { 0x0C, 0x07 }, /* IMON Scaling */
+ { 0x0D, 0x03 }, /* Audio/LED Pwr Manager */
+ { 0x0F, 0x20 }, /* Serial Port Control */
+ { 0x10, 0x14 }, /* Class D Amp CTL */
+ { 0x11, 0x00 }, /* Protection Release CTL */
+ { 0x12, 0xFF }, /* Interrupt Mask 1 */
+ { 0x13, 0xFF }, /* Interrupt Mask 2 */
+ { 0x14, 0xFF }, /* Interrupt Mask 3 */
+ { 0x19, 0x00 }, /* LED Flash Mode Current */
+ { 0x1A, 0x00 }, /* LED Movie Mode Current */
+ { 0x1B, 0x20 }, /* LED Flash Timer */
+ { 0x1C, 0x00 }, /* LED Flash Inhibit Current */
+};
+
+static bool cs35l32_readable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case CS35L32_DEVID_AB:
+ case CS35L32_DEVID_CD:
+ case CS35L32_DEVID_E:
+ case CS35L32_FAB_ID:
+ case CS35L32_REV_ID:
+ case CS35L32_PWRCTL1:
+ case CS35L32_PWRCTL2:
+ case CS35L32_CLK_CTL:
+ case CS35L32_BATT_THRESHOLD:
+ case CS35L32_VMON:
+ case CS35L32_BST_CPCP_CTL:
+ case CS35L32_IMON_SCALING:
+ case CS35L32_AUDIO_LED_MNGR:
+ case CS35L32_ADSP_CTL:
+ case CS35L32_CLASSD_CTL:
+ case CS35L32_PROTECT_CTL:
+ case CS35L32_INT_MASK_1:
+ case CS35L32_INT_MASK_2:
+ case CS35L32_INT_MASK_3:
+ case CS35L32_INT_STATUS_1:
+ case CS35L32_INT_STATUS_2:
+ case CS35L32_INT_STATUS_3:
+ case CS35L32_LED_STATUS:
+ case CS35L32_FLASH_MODE:
+ case CS35L32_MOVIE_MODE:
+ case CS35L32_FLASH_TIMER:
+ case CS35L32_FLASH_INHIBIT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool cs35l32_volatile_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case CS35L32_DEVID_AB:
+ case CS35L32_DEVID_CD:
+ case CS35L32_DEVID_E:
+ case CS35L32_FAB_ID:
+ case CS35L32_REV_ID:
+ case CS35L32_INT_STATUS_1:
+ case CS35L32_INT_STATUS_2:
+ case CS35L32_INT_STATUS_3:
+ case CS35L32_LED_STATUS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool cs35l32_precious_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case CS35L32_INT_STATUS_1:
+ case CS35L32_INT_STATUS_2:
+ case CS35L32_INT_STATUS_3:
+ case CS35L32_LED_STATUS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static DECLARE_TLV_DB_SCALE(classd_ctl_tlv, 900, 300, 0);
+
+static const struct snd_kcontrol_new imon_ctl =
+ SOC_DAPM_SINGLE("Switch", CS35L32_PWRCTL2, 6, 1, 1);
+
+static const struct snd_kcontrol_new vmon_ctl =
+ SOC_DAPM_SINGLE("Switch", CS35L32_PWRCTL2, 7, 1, 1);
+
+static const struct snd_kcontrol_new vpmon_ctl =
+ SOC_DAPM_SINGLE("Switch", CS35L32_PWRCTL2, 5, 1, 1);
+
+static const struct snd_kcontrol_new cs35l32_snd_controls[] = {
+ SOC_SINGLE_TLV("Speaker Volume", CS35L32_CLASSD_CTL,
+ 3, 0x04, 1, classd_ctl_tlv),
+ SOC_SINGLE("Zero Cross Switch", CS35L32_CLASSD_CTL, 2, 1, 0),
+ SOC_SINGLE("Gain Manager Switch", CS35L32_AUDIO_LED_MNGR, 3, 1, 0),
+};
+
+static const struct snd_soc_dapm_widget cs35l32_dapm_widgets[] = {
+
+ SND_SOC_DAPM_SUPPLY("BOOST", CS35L32_PWRCTL1, 2, 1, NULL, 0),
+ SND_SOC_DAPM_OUT_DRV("Speaker", CS35L32_PWRCTL1, 7, 1, NULL, 0),
+
+ SND_SOC_DAPM_AIF_OUT("SDOUT", NULL, 0, CS35L32_PWRCTL2, 3, 1),
+
+ SND_SOC_DAPM_INPUT("VP"),
+ SND_SOC_DAPM_INPUT("ISENSE"),
+ SND_SOC_DAPM_INPUT("VSENSE"),
+
+ SND_SOC_DAPM_SWITCH("VMON ADC", CS35L32_PWRCTL2, 7, 1, &vmon_ctl),
+ SND_SOC_DAPM_SWITCH("IMON ADC", CS35L32_PWRCTL2, 6, 1, &imon_ctl),
+ SND_SOC_DAPM_SWITCH("VPMON ADC", CS35L32_PWRCTL2, 5, 1, &vpmon_ctl),
+};
+
+static const struct snd_soc_dapm_route cs35l32_audio_map[] = {
+
+ {"Speaker", NULL, "BOOST"},
+
+ {"VMON ADC", NULL, "VSENSE"},
+ {"IMON ADC", NULL, "ISENSE"},
+ {"VPMON ADC", NULL, "VP"},
+
+ {"SDOUT", "Switch", "VMON ADC"},
+ {"SDOUT", "Switch", "IMON ADC"},
+ {"SDOUT", "Switch", "VPMON ADC"},
+
+ {"Capture", NULL, "SDOUT"},
+};
+
+static int cs35l32_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ snd_soc_update_bits(codec, CS35L32_ADSP_CTL,
+ CS35L32_ADSP_MASTER_MASK,
+ CS35L32_ADSP_MASTER_MASK);
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ snd_soc_update_bits(codec, CS35L32_ADSP_CTL,
+ CS35L32_ADSP_MASTER_MASK, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cs35l32_set_tristate(struct snd_soc_dai *dai, int tristate)
+{
+ struct snd_soc_codec *codec = dai->codec;
+
+ return snd_soc_update_bits(codec, CS35L32_PWRCTL2,
+ CS35L32_SDOUT_3ST, tristate << 3);
+}
+
+static const struct snd_soc_dai_ops cs35l32_ops = {
+ .set_fmt = cs35l32_set_dai_fmt,
+ .set_tristate = cs35l32_set_tristate,
+};
+
+static struct snd_soc_dai_driver cs35l32_dai[] = {
+ {
+ .name = "cs35l32-monitor",
+ .id = 0,
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = CS35L32_RATES,
+ .formats = CS35L32_FORMATS,
+ },
+ .ops = &cs35l32_ops,
+ .symmetric_rates = 1,
+ }
+};
+
+static int cs35l32_codec_set_sysclk(struct snd_soc_codec *codec,
+ int clk_id, int source, unsigned int freq, int dir)
+{
+ unsigned int val;
+
+ switch (freq) {
+ case 6000000:
+ val = CS35L32_MCLK_RATIO;
+ break;
+ case 12000000:
+ val = CS35L32_MCLK_DIV2_MASK | CS35L32_MCLK_RATIO;
+ break;
+ case 6144000:
+ val = 0;
+ break;
+ case 12288000:
+ val = CS35L32_MCLK_DIV2_MASK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return snd_soc_update_bits(codec, CS35L32_CLK_CTL,
+ CS35L32_MCLK_DIV2_MASK | CS35L32_MCLK_RATIO_MASK, val);
+}
+
+static struct snd_soc_codec_driver soc_codec_dev_cs35l32 = {
+ .set_sysclk = cs35l32_codec_set_sysclk,
+
+ .dapm_widgets = cs35l32_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(cs35l32_dapm_widgets),
+ .dapm_routes = cs35l32_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(cs35l32_audio_map),
+
+ .controls = cs35l32_snd_controls,
+ .num_controls = ARRAY_SIZE(cs35l32_snd_controls),
+};
+
+/* Current and threshold power-up sequence, per page 37 of the datasheet */
+static const struct reg_default cs35l32_monitor_patch[] = {
+
+ { 0x00, 0x99 },
+ { 0x48, 0x17 },
+ { 0x49, 0x56 },
+ { 0x43, 0x01 },
+ { 0x3B, 0x62 },
+ { 0x3C, 0x80 },
+ { 0x00, 0x00 },
+};
+
+static struct regmap_config cs35l32_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = CS35L32_MAX_REGISTER,
+ .reg_defaults = cs35l32_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(cs35l32_reg_defaults),
+ .volatile_reg = cs35l32_volatile_register,
+ .readable_reg = cs35l32_readable_register,
+ .precious_reg = cs35l32_precious_register,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static int cs35l32_handle_of_data(struct i2c_client *i2c_client,
+ struct cs35l32_platform_data *pdata)
+{
+ struct device_node *np = i2c_client->dev.of_node;
+ unsigned int val;
+
+ if (of_property_read_u32(np, "cirrus,sdout-share", &val) >= 0)
+ pdata->sdout_share = val;
+
+ of_property_read_u32(np, "cirrus,boost-manager", &val);
+ switch (val) {
+ case CS35L32_BOOST_MGR_AUTO:
+ case CS35L32_BOOST_MGR_AUTO_AUDIO:
+ case CS35L32_BOOST_MGR_BYPASS:
+ case CS35L32_BOOST_MGR_FIXED:
+ pdata->boost_mng = val;
+ break;
+ default:
+ dev_err(&i2c_client->dev,
+ "Wrong cirrus,boost-manager DT value %d\n", val);
+ pdata->boost_mng = CS35L32_BOOST_MGR_BYPASS;
+ }
+
+ of_property_read_u32(np, "cirrus,sdout-datacfg", &val);
+ switch (val) {
+ case CS35L32_DATA_CFG_LR_VP:
+ case CS35L32_DATA_CFG_LR_STAT:
+ case CS35L32_DATA_CFG_LR:
+ case CS35L32_DATA_CFG_LR_VPSTAT:
+ pdata->sdout_datacfg = val;
+ break;
+ default:
+ dev_err(&i2c_client->dev,
+ "Wrong cirrus,sdout-datacfg DT value %d\n", val);
+ pdata->sdout_datacfg = CS35L32_DATA_CFG_LR;
+ }
+
+ of_property_read_u32(np, "cirrus,battery-threshold", &val);
+ switch (val) {
+ case CS35L32_BATT_THRESH_3_1V:
+ case CS35L32_BATT_THRESH_3_2V:
+ case CS35L32_BATT_THRESH_3_3V:
+ case CS35L32_BATT_THRESH_3_4V:
+ pdata->batt_thresh = val;
+ break;
+ default:
+ dev_err(&i2c_client->dev,
+ "Wrong cirrus,battery-threshold DT value %d\n", val);
+ pdata->batt_thresh = CS35L32_BATT_THRESH_3_3V;
+ }
+
+ of_property_read_u32(np, "cirrus,battery-recovery", &val);
+ switch (val) {
+ case CS35L32_BATT_RECOV_3_1V:
+ case CS35L32_BATT_RECOV_3_2V:
+ case CS35L32_BATT_RECOV_3_3V:
+ case CS35L32_BATT_RECOV_3_4V:
+ case CS35L32_BATT_RECOV_3_5V:
+ case CS35L32_BATT_RECOV_3_6V:
+ pdata->batt_recov = val;
+ break;
+ default:
+ dev_err(&i2c_client->dev,
+ "Wrong cirrus,battery-recovery DT value %d\n", val);
+ pdata->batt_recov = CS35L32_BATT_RECOV_3_4V;
+ }
+
+ return 0;
+}
+
+static int cs35l32_i2c_probe(struct i2c_client *i2c_client,
+ const struct i2c_device_id *id)
+{
+ struct cs35l32_private *cs35l32;
+ struct cs35l32_platform_data *pdata =
+ dev_get_platdata(&i2c_client->dev);
+ int ret, i;
+ unsigned int devid = 0;
+ unsigned int reg;
+
+
+ cs35l32 = devm_kzalloc(&i2c_client->dev, sizeof(struct cs35l32_private),
+ GFP_KERNEL);
+ if (!cs35l32) {
+ dev_err(&i2c_client->dev, "could not allocate codec\n");
+ return -ENOMEM;
+ }
+
+ i2c_set_clientdata(i2c_client, cs35l32);
+
+ cs35l32->regmap = devm_regmap_init_i2c(i2c_client, &cs35l32_regmap);
+ if (IS_ERR(cs35l32->regmap)) {
+ ret = PTR_ERR(cs35l32->regmap);
+ dev_err(&i2c_client->dev, "regmap_init() failed: %d\n", ret);
+ return ret;
+ }
+
+ if (pdata) {
+ cs35l32->pdata = *pdata;
+ } else {
+ pdata = devm_kzalloc(&i2c_client->dev,
+ sizeof(struct cs35l32_platform_data),
+ GFP_KERNEL);
+ if (!pdata) {
+ dev_err(&i2c_client->dev, "could not allocate pdata\n");
+ return -ENOMEM;
+ }
+ if (i2c_client->dev.of_node) {
+ ret = cs35l32_handle_of_data(i2c_client,
+ &cs35l32->pdata);
+ if (ret != 0)
+ return ret;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cs35l32->supplies); i++)
+ cs35l32->supplies[i].supply = cs35l32_supply_names[i];
+
+ ret = devm_regulator_bulk_get(&i2c_client->dev,
+ ARRAY_SIZE(cs35l32->supplies),
+ cs35l32->supplies);
+ if (ret != 0) {
+ dev_err(&i2c_client->dev,
+ "Failed to request supplies: %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(cs35l32->supplies),
+ cs35l32->supplies);
+ if (ret != 0) {
+ dev_err(&i2c_client->dev,
+ "Failed to enable supplies: %d\n", ret);
+ return ret;
+ }
+
+ /* Reset the Device */
+ cs35l32->reset_gpio = devm_gpiod_get(&i2c_client->dev,
+ "reset-gpios");
+ if (IS_ERR(cs35l32->reset_gpio)) {
+ ret = PTR_ERR(cs35l32->reset_gpio);
+ if (ret != -ENOENT && ret != -ENOSYS)
+ return ret;
+
+ cs35l32->reset_gpio = NULL;
+ } else {
+ ret = gpiod_direction_output(cs35l32->reset_gpio, 0);
+ if (ret)
+ return ret;
+ gpiod_set_value_cansleep(cs35l32->reset_gpio, 1);
+ }
+
+ /* initialize codec */
+ ret = regmap_read(cs35l32->regmap, CS35L32_DEVID_AB, &reg);
+ devid = (reg & 0xFF) << 12;
+
+ ret = regmap_read(cs35l32->regmap, CS35L32_DEVID_CD, &reg);
+ devid |= (reg & 0xFF) << 4;
+
+ ret = regmap_read(cs35l32->regmap, CS35L32_DEVID_E, &reg);
+ devid |= (reg & 0xF0) >> 4;
+
+ if (devid != CS35L32_CHIP_ID) {
+ ret = -ENODEV;
+ dev_err(&i2c_client->dev,
+ "CS35L32 Device ID (%X). Expected %X\n",
+ devid, CS35L32_CHIP_ID);
+ return ret;
+ }
+
+ ret = regmap_read(cs35l32->regmap, CS35L32_REV_ID, &reg);
+ if (ret < 0) {
+ dev_err(&i2c_client->dev, "Get Revision ID failed\n");
+ return ret;
+ }
+
+ ret = regmap_register_patch(cs35l32->regmap, cs35l32_monitor_patch,
+ ARRAY_SIZE(cs35l32_monitor_patch));
+ if (ret < 0) {
+ dev_err(&i2c_client->dev, "Failed to apply errata patch\n");
+ return ret;
+ }
+
+ dev_info(&i2c_client->dev,
+ "Cirrus Logic CS35L32, Revision: %02X\n", reg & 0xFF);
+
+ /* Setup VBOOST Management */
+ if (cs35l32->pdata.boost_mng)
+ regmap_update_bits(cs35l32->regmap, CS35L32_AUDIO_LED_MNGR,
+ CS35L32_BOOST_MASK,
+ cs35l32->pdata.boost_mng);
+
+ /* Setup ADSP Format Config */
+ if (cs35l32->pdata.sdout_share)
+ regmap_update_bits(cs35l32->regmap, CS35L32_ADSP_CTL,
+ CS35L32_ADSP_SHARE_MASK,
+ cs35l32->pdata.sdout_share << 3);
+
+ /* Setup ADSP Data Configuration */
+ if (cs35l32->pdata.sdout_datacfg)
+ regmap_update_bits(cs35l32->regmap, CS35L32_ADSP_CTL,
+ CS35L32_ADSP_DATACFG_MASK,
+ cs35l32->pdata.sdout_datacfg << 4);
+
+ /* Setup Low Battery Recovery */
+ if (cs35l32->pdata.batt_recov)
+ regmap_update_bits(cs35l32->regmap, CS35L32_BATT_THRESHOLD,
+ CS35L32_BATT_REC_MASK,
+ cs35l32->pdata.batt_recov << 1);
+
+ /* Setup Low Battery Threshold */
+ if (cs35l32->pdata.batt_thresh)
+ regmap_update_bits(cs35l32->regmap, CS35L32_BATT_THRESHOLD,
+ CS35L32_BATT_THRESH_MASK,
+ cs35l32->pdata.batt_thresh << 4);
+
+ /* Power down the AMP */
+ regmap_update_bits(cs35l32->regmap, CS35L32_PWRCTL1, CS35L32_PDN_AMP,
+ CS35L32_PDN_AMP);
+
+ /* Clear MCLK Error Bit since we don't have the clock yet */
+ ret = regmap_read(cs35l32->regmap, CS35L32_INT_STATUS_1, &reg);
+
+ ret = snd_soc_register_codec(&i2c_client->dev,
+ &soc_codec_dev_cs35l32, cs35l32_dai,
+ ARRAY_SIZE(cs35l32_dai));
+ if (ret < 0)
+ goto err_disable;
+
+ return 0;
+
+err_disable:
+ regulator_bulk_disable(ARRAY_SIZE(cs35l32->supplies),
+ cs35l32->supplies);
+ return ret;
+}
+
+static int cs35l32_i2c_remove(struct i2c_client *i2c_client)
+{
+ struct cs35l32_private *cs35l32 = i2c_get_clientdata(i2c_client);
+
+ snd_soc_unregister_codec(&i2c_client->dev);
+
+ /* Hold down reset */
+ if (cs35l32->reset_gpio)
+ gpiod_set_value_cansleep(cs35l32->reset_gpio, 0);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int cs35l32_runtime_suspend(struct device *dev)
+{
+ struct cs35l32_private *cs35l32 = dev_get_drvdata(dev);
+
+ regcache_cache_only(cs35l32->regmap, true);
+ regcache_mark_dirty(cs35l32->regmap);
+
+ /* Hold down reset */
+ if (cs35l32->reset_gpio)
+ gpiod_set_value_cansleep(cs35l32->reset_gpio, 0);
+
+ /* remove power */
+ regulator_bulk_disable(ARRAY_SIZE(cs35l32->supplies),
+ cs35l32->supplies);
+
+ return 0;
+}
+
+static int cs35l32_runtime_resume(struct device *dev)
+{
+ struct cs35l32_private *cs35l32 = dev_get_drvdata(dev);
+ int ret;
+
+ /* Enable power */
+ ret = regulator_bulk_enable(ARRAY_SIZE(cs35l32->supplies),
+ cs35l32->supplies);
+ if (ret != 0) {
+ dev_err(dev, "Failed to enable supplies: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (cs35l32->reset_gpio)
+ gpiod_set_value_cansleep(cs35l32->reset_gpio, 1);
+
+ regcache_cache_only(cs35l32->regmap, false);
+ regcache_sync(cs35l32->regmap);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops cs35l32_runtime_pm = {
+ SET_RUNTIME_PM_OPS(cs35l32_runtime_suspend, cs35l32_runtime_resume,
+ NULL)
+};
+
+static const struct of_device_id cs35l32_of_match[] = {
+ { .compatible = "cirrus,cs35l32", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, cs35l32_of_match);
+
+
+static const struct i2c_device_id cs35l32_id[] = {
+ {"cs35l32", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, cs35l32_id);
+
+static struct i2c_driver cs35l32_i2c_driver = {
+ .driver = {
+ .name = "cs35l32",
+ .owner = THIS_MODULE,
+ .pm = &cs35l32_runtime_pm,
+ .of_match_table = cs35l32_of_match,
+ },
+ .id_table = cs35l32_id,
+ .probe = cs35l32_i2c_probe,
+ .remove = cs35l32_i2c_remove,
+};
+
+module_i2c_driver(cs35l32_i2c_driver);
+
+MODULE_DESCRIPTION("ASoC CS35L32 driver");
+MODULE_AUTHOR("Brian Austin, Cirrus Logic Inc, <brian.austin@cirrus.com>");
+MODULE_LICENSE("GPL");
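
cs35l32_handle_of_data() above checks each cirrus,* property against the dt-bindings constants and falls back to a safe default when the value is out of range. A condensed sketch of that pattern for one property, additionally checking the of_property_read_u32() return value so an absent property keeps the default instead of reusing whatever happened to be in val (property name and constants are the ones used above; the helper itself is hypothetical):

#include <linux/kernel.h>
#include <linux/of.h>
#include <dt-bindings/sound/cs35l32.h>

static unsigned int example_boost_manager(struct device_node *np)
{
	u32 val;
	unsigned int boost = CS35L32_BOOST_MGR_BYPASS;	/* safe default */

	/* of_property_read_u32() returns non-zero if the property is missing */
	if (of_property_read_u32(np, "cirrus,boost-manager", &val))
		return boost;

	switch (val) {
	case CS35L32_BOOST_MGR_AUTO:
	case CS35L32_BOOST_MGR_AUTO_AUDIO:
	case CS35L32_BOOST_MGR_BYPASS:
	case CS35L32_BOOST_MGR_FIXED:
		boost = val;
		break;
	default:
		pr_warn("cs35l32: invalid cirrus,boost-manager value %u\n", val);
		break;
	}

	return boost;
}
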
diff --git a/sound/soc/codecs/cs35l32.h b/sound/soc/codecs/cs35l32.h
new file mode 100644
index 000000000000..31ab804a22bc
--- /dev/null
+++ b/sound/soc/codecs/cs35l32.h
@@ -0,0 +1,93 @@
+/*
+ * cs35l32.h -- CS35L32 ALSA SoC audio driver
+ *
+ * Copyright 2014 CirrusLogic, Inc.
+ *
+ * Author: Brian Austin <brian.austin@cirrus.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __CS35L32_H__
+#define __CS35L32_H__
+
+struct cs35l32_platform_data {
+ /* Low Battery Threshold */
+ unsigned int batt_thresh;
+ /* Low Battery Recovery */
+ unsigned int batt_recov;
+	/* LED Current Management */
+ unsigned int led_mng;
+ /* Audio Gain w/ LED */
+ unsigned int audiogain_mng;
+ /* Boost Management */
+ unsigned int boost_mng;
+ /* Data CFG for DUAL device */
+ unsigned int sdout_datacfg;
+ /* SDOUT Sharing */
+ unsigned int sdout_share;
+};
+
+#define CS35L32_CHIP_ID 0x00035A32
+#define CS35L32_DEVID_AB 0x01 /* Device ID A & B [RO] */
+#define CS35L32_DEVID_CD 0x02 /* Device ID C & D [RO] */
+#define CS35L32_DEVID_E 0x03 /* Device ID E [RO] */
+#define CS35L32_FAB_ID 0x04 /* Fab ID [RO] */
+#define CS35L32_REV_ID 0x05 /* Revision ID [RO] */
+#define CS35L32_PWRCTL1 0x06 /* Power Ctl 1 */
+#define CS35L32_PWRCTL2 0x07 /* Power Ctl 2 */
+#define CS35L32_CLK_CTL 0x08 /* Clock Ctl */
+#define CS35L32_BATT_THRESHOLD 0x09 /* Low Battery Threshold */
+#define CS35L32_VMON 0x0A /* Voltage Monitor [RO] */
+#define CS35L32_BST_CPCP_CTL 0x0B /* Conv Peak Curr Protection CTL */
+#define CS35L32_IMON_SCALING 0x0C /* IMON Scaling */
+#define CS35L32_AUDIO_LED_MNGR 0x0D /* Audio/LED Pwr Manager */
+#define CS35L32_ADSP_CTL 0x0F /* Serial Port Control */
+#define CS35L32_CLASSD_CTL 0x10 /* Class D Amp CTL */
+#define CS35L32_PROTECT_CTL 0x11 /* Protection Release CTL */
+#define CS35L32_INT_MASK_1 0x12 /* Interrupt Mask 1 */
+#define CS35L32_INT_MASK_2 0x13 /* Interrupt Mask 2 */
+#define CS35L32_INT_MASK_3 0x14 /* Interrupt Mask 3 */
+#define CS35L32_INT_STATUS_1 0x15 /* Interrupt Status 1 [RO] */
+#define CS35L32_INT_STATUS_2 0x16 /* Interrupt Status 2 [RO] */
+#define CS35L32_INT_STATUS_3 0x17 /* Interrupt Status 3 [RO] */
+#define CS35L32_LED_STATUS 0x18 /* LED Lighting Status [RO] */
+#define CS35L32_FLASH_MODE 0x19 /* LED Flash Mode Current */
+#define CS35L32_MOVIE_MODE 0x1A /* LED Movie Mode Current */
+#define CS35L32_FLASH_TIMER 0x1B /* LED Flash Timer */
+#define CS35L32_FLASH_INHIBIT 0x1C /* LED Flash Inhibit Current */
+#define CS35L32_MAX_REGISTER 0x1C
+
+#define CS35L32_MCLK_DIV2 0x01
+#define CS35L32_MCLK_RATIO 0x01
+#define CS35L32_MCLKDIS 0x80
+#define CS35L32_PDN_ALL 0x01
+#define CS35L32_PDN_AMP 0x80
+#define CS35L32_PDN_BOOST 0x04
+#define CS35L32_PDN_IMON 0x40
+#define CS35L32_PDN_VMON 0x80
+#define CS35L32_PDN_VPMON 0x20
+#define CS35L32_PDN_ADSP 0x08
+
+#define CS35L32_MCLK_DIV2_MASK 0x40
+#define CS35L32_MCLK_RATIO_MASK 0x01
+#define CS35L32_MCLK_MASK 0x41
+#define CS35L32_ADSP_MASTER_MASK 0x40
+#define CS35L32_BOOST_MASK 0x03
+#define CS35L32_GAIN_MGR_MASK 0x08
+#define CS35L32_ADSP_SHARE_MASK 0x08
+#define CS35L32_ADSP_DATACFG_MASK 0x30
+#define CS35L32_SDOUT_3ST 0x80
+#define CS35L32_BATT_REC_MASK 0x0E
+#define CS35L32_BATT_THRESH_MASK 0x30
+
+#define CS35L32_RATES (SNDRV_PCM_RATE_48000)
+#define CS35L32_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE)
+
+
+#endif
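
The masks above are consumed through read-modify-write calls such as regmap_update_bits() and snd_soc_update_bits() in the driver, shifting each value into the field its mask describes. A tiny sketch of that idiom with two of the fields defined here (helper name is illustrative):

#include <linux/regmap.h>
#include "cs35l32.h"

static int example_apply_boost_and_threshold(struct regmap *regmap,
					     unsigned int boost_mng,
					     unsigned int batt_thresh)
{
	int ret;

	/* CS35L32_BOOST_MASK covers bits [1:0], so no shift is needed */
	ret = regmap_update_bits(regmap, CS35L32_AUDIO_LED_MNGR,
				 CS35L32_BOOST_MASK, boost_mng);
	if (ret)
		return ret;

	/* CS35L32_BATT_THRESH_MASK is 0x30, i.e. bits [5:4], hence << 4 */
	return regmap_update_bits(regmap, CS35L32_BATT_THRESHOLD,
				  CS35L32_BATT_THRESH_MASK, batt_thresh << 4);
}
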
diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c
index 69a85164357c..4fdd47d700e3 100644
--- a/sound/soc/codecs/cs4265.c
+++ b/sound/soc/codecs/cs4265.c
@@ -77,6 +77,7 @@ static bool cs4265_readable_register(struct device *dev, unsigned int reg)
case CS4265_INT_MASK:
case CS4265_STATUS_MODE_MSB:
case CS4265_STATUS_MODE_LSB:
+ case CS4265_CHIP_ID:
return true;
default:
return false;
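
The cs4265 change adds CS4265_CHIP_ID to the readable-register list; regmap uses the readable_reg callback to decide which registers may be read at all, so the ID register has to be whitelisted for a probe-time identity check to work. A minimal sketch of such a check (hypothetical helper around the real regmap_read() API):

#include <linux/errno.h>
#include <linux/regmap.h>

static int example_check_chip_id(struct regmap *regmap, unsigned int id_reg,
				 unsigned int mask, unsigned int expected)
{
	unsigned int id;
	int ret;

	/* Fails if the register is not readable or the bus access errors out */
	ret = regmap_read(regmap, id_reg, &id);
	if (ret)
		return ret;

	return (id & mask) == expected ? 0 : -ENODEV;
}
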
diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c
index 969167d8b71e..35fbef743fbe 100644
--- a/sound/soc/codecs/cs42l52.c
+++ b/sound/soc/codecs/cs42l52.c
@@ -176,9 +176,9 @@ static bool cs42l52_volatile_register(struct device *dev, unsigned int reg)
case CS42L52_BATT_LEVEL:
case CS42L52_SPK_STATUS:
case CS42L52_CHARGE_PUMP:
- return 1;
+ return true;
default:
- return 0;
+ return false;
}
}
@@ -946,20 +946,6 @@ static struct snd_soc_dai_driver cs42l52_dai = {
.ops = &cs42l52_ops,
};
-static int cs42l52_suspend(struct snd_soc_codec *codec)
-{
- cs42l52_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
- return 0;
-}
-
-static int cs42l52_resume(struct snd_soc_codec *codec)
-{
- cs42l52_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
- return 0;
-}
-
static int beep_rates[] = {
261, 522, 585, 667, 706, 774, 889, 1000,
1043, 1200, 1333, 1412, 1600, 1714, 2000, 2182
@@ -1104,8 +1090,6 @@ static int cs42l52_probe(struct snd_soc_codec *codec)
cs42l52_init_beep(codec);
- cs42l52_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
cs42l52->sysclk = CS42L52_DEFAULT_CLK;
cs42l52->config.format = CS42L52_DEFAULT_FORMAT;
@@ -1115,7 +1099,6 @@ static int cs42l52_probe(struct snd_soc_codec *codec)
static int cs42l52_remove(struct snd_soc_codec *codec)
{
cs42l52_free_beep(codec);
- cs42l52_set_bias_level(codec, SND_SOC_BIAS_OFF);
return 0;
}
@@ -1123,9 +1106,8 @@ static int cs42l52_remove(struct snd_soc_codec *codec)
static struct snd_soc_codec_driver soc_codec_dev_cs42l52 = {
.probe = cs42l52_probe,
.remove = cs42l52_remove,
- .suspend = cs42l52_suspend,
- .resume = cs42l52_resume,
.set_bias_level = cs42l52_set_bias_level,
+ .suspend_bias_off = true,
.dapm_widgets = cs42l52_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(cs42l52_dapm_widgets),
diff --git a/sound/soc/codecs/cs42l56.c b/sound/soc/codecs/cs42l56.c
index c766a5a9ce80..2ddc7ac10ad7 100644
--- a/sound/soc/codecs/cs42l56.c
+++ b/sound/soc/codecs/cs42l56.c
@@ -171,9 +171,9 @@ static bool cs42l56_volatile_register(struct device *dev, unsigned int reg)
{
switch (reg) {
case CS42L56_INT_STATUS:
- return 1;
+ return true;
default:
- return 0;
+ return false;
}
}
@@ -1016,20 +1016,6 @@ static struct snd_soc_dai_driver cs42l56_dai = {
.ops = &cs42l56_ops,
};
-static int cs42l56_suspend(struct snd_soc_codec *codec)
-{
- cs42l56_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
- return 0;
-}
-
-static int cs42l56_resume(struct snd_soc_codec *codec)
-{
- cs42l56_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
- return 0;
-}
-
static int beep_freq[] = {
261, 522, 585, 667, 706, 774, 889, 1000,
1043, 1200, 1333, 1412, 1600, 1714, 2000, 2182
@@ -1168,18 +1154,12 @@ static int cs42l56_probe(struct snd_soc_codec *codec)
{
cs42l56_init_beep(codec);
- cs42l56_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
return 0;
}
static int cs42l56_remove(struct snd_soc_codec *codec)
{
- struct cs42l56_private *cs42l56 = snd_soc_codec_get_drvdata(codec);
-
cs42l56_free_beep(codec);
- cs42l56_set_bias_level(codec, SND_SOC_BIAS_OFF);
- regulator_bulk_free(ARRAY_SIZE(cs42l56->supplies), cs42l56->supplies);
return 0;
}
@@ -1187,9 +1167,8 @@ static int cs42l56_remove(struct snd_soc_codec *codec)
static struct snd_soc_codec_driver soc_codec_dev_cs42l56 = {
.probe = cs42l56_probe,
.remove = cs42l56_remove,
- .suspend = cs42l56_suspend,
- .resume = cs42l56_resume,
.set_bias_level = cs42l56_set_bias_level,
+ .suspend_bias_off = true,
.dapm_widgets = cs42l56_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(cs42l56_dapm_widgets),
diff --git a/sound/soc/codecs/cs42l73.c b/sound/soc/codecs/cs42l73.c
index 0e7b9eb2ba61..2f8b94683e83 100644
--- a/sound/soc/codecs/cs42l73.c
+++ b/sound/soc/codecs/cs42l73.c
@@ -1330,25 +1330,10 @@ static struct snd_soc_dai_driver cs42l73_dai[] = {
}
};
-static int cs42l73_suspend(struct snd_soc_codec *codec)
-{
- cs42l73_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
- return 0;
-}
-
-static int cs42l73_resume(struct snd_soc_codec *codec)
-{
- cs42l73_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- return 0;
-}
-
static int cs42l73_probe(struct snd_soc_codec *codec)
{
struct cs42l73_private *cs42l73 = snd_soc_codec_get_drvdata(codec);
- cs42l73_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
/* Set Charge Pump Frequency */
if (cs42l73->pdata.chgfreq)
snd_soc_update_bits(codec, CS42L73_CPFCHC,
@@ -1362,18 +1347,10 @@ static int cs42l73_probe(struct snd_soc_codec *codec)
return 0;
}
-static int cs42l73_remove(struct snd_soc_codec *codec)
-{
- cs42l73_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
static struct snd_soc_codec_driver soc_codec_dev_cs42l73 = {
.probe = cs42l73_probe,
- .remove = cs42l73_remove,
- .suspend = cs42l73_suspend,
- .resume = cs42l73_resume,
.set_bias_level = cs42l73_set_bias_level,
+ .suspend_bias_off = true,
.dapm_widgets = cs42l73_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(cs42l73_dapm_widgets),
diff --git a/sound/soc/codecs/da732x.c b/sound/soc/codecs/da732x.c
index 2fae31cb0067..61b2f9a2eef1 100644
--- a/sound/soc/codecs/da732x.c
+++ b/sound/soc/codecs/da732x.c
@@ -35,7 +35,6 @@
struct da732x_priv {
struct regmap *regmap;
- struct snd_soc_codec *codec;
unsigned int sysclk;
bool pll_en;
@@ -217,7 +216,7 @@ static void da732x_set_charge_pump(struct snd_soc_codec *codec, int state)
snd_soc_write(codec, DA732X_REG_CP_CTRL1, DA723X_CP_DIS);
break;
default:
- pr_err(KERN_ERR "Wrong charge pump state\n");
+ pr_err("Wrong charge pump state\n");
break;
}
}
@@ -1508,31 +1507,7 @@ static int da732x_set_bias_level(struct snd_soc_codec *codec,
return 0;
}
-static int da732x_probe(struct snd_soc_codec *codec)
-{
- struct da732x_priv *da732x = snd_soc_codec_get_drvdata(codec);
- struct snd_soc_dapm_context *dapm = &codec->dapm;
-
- da732x->codec = codec;
-
- dapm->idle_bias_off = false;
-
- da732x_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
- return 0;
-}
-
-static int da732x_remove(struct snd_soc_codec *codec)
-{
-
- da732x_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
- return 0;
-}
-
static struct snd_soc_codec_driver soc_codec_dev_da732x = {
- .probe = da732x_probe,
- .remove = da732x_remove,
.set_bias_level = da732x_set_bias_level,
.controls = da732x_snd_controls,
.num_controls = ARRAY_SIZE(da732x_snd_controls),
diff --git a/sound/soc/codecs/es8328-i2c.c b/sound/soc/codecs/es8328-i2c.c
new file mode 100644
index 000000000000..aae410d122ee
--- /dev/null
+++ b/sound/soc/codecs/es8328-i2c.c
@@ -0,0 +1,60 @@
+/*
+ * es8328-i2c.c -- ES8328 ALSA SoC I2C Audio driver
+ *
+ * Copyright 2014 Sutajio Ko-Usagi PTE LTD
+ *
+ * Author: Sean Cross <xobs@kosagi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+
+#include <sound/soc.h>
+
+#include "es8328.h"
+
+static const struct i2c_device_id es8328_id[] = {
+ { "everest,es8328", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, es8328_id);
+
+static const struct of_device_id es8328_of_match[] = {
+ { .compatible = "everest,es8328", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, es8328_of_match);
+
+static int es8328_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ return es8328_probe(&i2c->dev,
+ devm_regmap_init_i2c(i2c, &es8328_regmap_config));
+}
+
+static int es8328_i2c_remove(struct i2c_client *i2c)
+{
+ snd_soc_unregister_codec(&i2c->dev);
+ return 0;
+}
+
+static struct i2c_driver es8328_i2c_driver = {
+ .driver = {
+ .name = "es8328",
+ .of_match_table = es8328_of_match,
+ },
+ .probe = es8328_i2c_probe,
+ .remove = es8328_i2c_remove,
+ .id_table = es8328_id,
+};
+
+module_i2c_driver(es8328_i2c_driver);
+
+MODULE_DESCRIPTION("ASoC ES8328 audio CODEC I2C driver");
+MODULE_AUTHOR("Sean Cross <xobs@kosagi.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/es8328-spi.c b/sound/soc/codecs/es8328-spi.c
new file mode 100644
index 000000000000..8fbd935e1c76
--- /dev/null
+++ b/sound/soc/codecs/es8328-spi.c
@@ -0,0 +1,49 @@
+/*
+ * es8328-spi.c -- ES8328 ALSA SoC SPI Audio driver
+ *
+ * Copyright 2014 Sutajio Ko-Usagi PTE LTD
+ *
+ * Author: Sean Cross <xobs@kosagi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+#include <sound/soc.h>
+#include "es8328.h"
+
+static const struct of_device_id es8328_of_match[] = {
+ { .compatible = "everest,es8328", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, es8328_of_match);
+
+static int es8328_spi_probe(struct spi_device *spi)
+{
+ return es8328_probe(&spi->dev,
+ devm_regmap_init_spi(spi, &es8328_regmap_config));
+}
+
+static int es8328_spi_remove(struct spi_device *spi)
+{
+ snd_soc_unregister_codec(&spi->dev);
+ return 0;
+}
+
+static struct spi_driver es8328_spi_driver = {
+ .driver = {
+ .name = "es8328",
+ .of_match_table = es8328_of_match,
+ },
+ .probe = es8328_spi_probe,
+ .remove = es8328_spi_remove,
+};
+
+module_spi_driver(es8328_spi_driver);
+MODULE_DESCRIPTION("ASoC ES8328 audio CODEC SPI driver");
+MODULE_AUTHOR("Sean Cross <xobs@kosagi.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/es8328.c b/sound/soc/codecs/es8328.c
new file mode 100644
index 000000000000..f27325155ace
--- /dev/null
+++ b/sound/soc/codecs/es8328.c
@@ -0,0 +1,756 @@
+/*
+ * es8328.c -- ES8328 ALSA SoC Audio driver
+ *
+ * Copyright 2014 Sutajio Ko-Usagi PTE LTD
+ *
+ * Author: Sean Cross <xobs@kosagi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/of_device.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+#include <sound/core.h>
+#include <sound/initval.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/tlv.h>
+#include "es8328.h"
+
+#define ES8328_SYSCLK_RATE_1X 11289600
+#define ES8328_SYSCLK_RATE_2X 22579200
+
+/* Run the codec at 22.5792 or 11.2896 MHz to support these rates */
+static struct {
+ int rate;
+ u8 ratio;
+} mclk_ratios[] = {
+ { 8000, 9 },
+ {11025, 7 },
+ {22050, 4 },
+ {44100, 2 },
+};
+
+/* regulator supplies for the ES8328 */
+enum es8328_regulator_supplies {
+ DVDD,
+ AVDD,
+ PVDD,
+ HPVDD,
+ ES8328_SUPPLY_NUM
+};
+
+/* regulator names, in the order of the enum above */
+static const char * const supply_names[ES8328_SUPPLY_NUM] = {
+ "DVDD",
+ "AVDD",
+ "PVDD",
+ "HPVDD",
+};
+
+#define ES8328_RATES (SNDRV_PCM_RATE_44100 | \
+ SNDRV_PCM_RATE_22050 | \
+ SNDRV_PCM_RATE_11025)
+#define ES8328_FORMATS (SNDRV_PCM_FMTBIT_S16_LE)
+
+struct es8328_priv {
+ struct regmap *regmap;
+ struct clk *clk;
+ int playback_fs;
+ bool deemph;
+ struct regulator_bulk_data supplies[ES8328_SUPPLY_NUM];
+};
+
+/*
+ * ES8328 Controls
+ */
+
+static const char * const adcpol_txt[] = {"Normal", "L Invert", "R Invert",
+ "L + R Invert"};
+static SOC_ENUM_SINGLE_DECL(adcpol,
+ ES8328_ADCCONTROL6, 6, adcpol_txt);
+
+static const DECLARE_TLV_DB_SCALE(play_tlv, -3000, 100, 0);
+static const DECLARE_TLV_DB_SCALE(dac_adc_tlv, -9600, 50, 0);
+static const DECLARE_TLV_DB_SCALE(pga_tlv, 0, 300, 0);
+static const DECLARE_TLV_DB_SCALE(bypass_tlv, -1500, 300, 0);
+static const DECLARE_TLV_DB_SCALE(mic_tlv, 0, 300, 0);
+
+static const int deemph_settings[] = { 0, 32000, 44100, 48000 };
+
+static int es8328_set_deemph(struct snd_soc_codec *codec)
+{
+ struct es8328_priv *es8328 = snd_soc_codec_get_drvdata(codec);
+ int val, i, best;
+
+ /*
+ * If we're using deemphasis select the nearest available sample
+ * rate.
+ */
+ if (es8328->deemph) {
+ best = 1;
+ for (i = 2; i < ARRAY_SIZE(deemph_settings); i++) {
+ if (abs(deemph_settings[i] - es8328->playback_fs) <
+ abs(deemph_settings[best] - es8328->playback_fs))
+ best = i;
+ }
+
+ val = best << 1;
+ } else {
+ val = 0;
+ }
+
+ dev_dbg(codec->dev, "Set deemphasis %d\n", val);
+
+ return snd_soc_update_bits(codec, ES8328_DACCONTROL6, 0x6, val);
+}
+
+static int es8328_get_deemph(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct es8328_priv *es8328 = snd_soc_codec_get_drvdata(codec);
+
+ ucontrol->value.enumerated.item[0] = es8328->deemph;
+ return 0;
+}
+
+static int es8328_put_deemph(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct es8328_priv *es8328 = snd_soc_codec_get_drvdata(codec);
+ int deemph = ucontrol->value.enumerated.item[0];
+ int ret;
+
+ if (deemph > 1)
+ return -EINVAL;
+
+	es8328->deemph = deemph;
+
+	ret = es8328_set_deemph(codec);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static const struct snd_kcontrol_new es8328_snd_controls[] = {
+ SOC_DOUBLE_R_TLV("Capture Digital Volume",
+ ES8328_ADCCONTROL8, ES8328_ADCCONTROL9,
+ 0, 0xc0, 1, dac_adc_tlv),
+ SOC_SINGLE("Capture ZC Switch", ES8328_ADCCONTROL7, 6, 1, 0),
+
+ SOC_SINGLE_BOOL_EXT("DAC Deemphasis Switch", 0,
+ es8328_get_deemph, es8328_put_deemph),
+
+ SOC_ENUM("Capture Polarity", adcpol),
+
+ SOC_SINGLE_TLV("Left Mixer Left Bypass Volume",
+ ES8328_DACCONTROL17, 3, 7, 1, bypass_tlv),
+ SOC_SINGLE_TLV("Left Mixer Right Bypass Volume",
+ ES8328_DACCONTROL19, 3, 7, 1, bypass_tlv),
+ SOC_SINGLE_TLV("Right Mixer Left Bypass Volume",
+ ES8328_DACCONTROL18, 3, 7, 1, bypass_tlv),
+ SOC_SINGLE_TLV("Right Mixer Right Bypass Volume",
+ ES8328_DACCONTROL20, 3, 7, 1, bypass_tlv),
+
+ SOC_DOUBLE_R_TLV("PCM Volume",
+ ES8328_LDACVOL, ES8328_RDACVOL,
+ 0, ES8328_DACVOL_MAX, 1, dac_adc_tlv),
+
+ SOC_DOUBLE_R_TLV("Output 1 Playback Volume",
+ ES8328_LOUT1VOL, ES8328_ROUT1VOL,
+ 0, ES8328_OUT1VOL_MAX, 0, play_tlv),
+
+ SOC_DOUBLE_R_TLV("Output 2 Playback Volume",
+ ES8328_LOUT2VOL, ES8328_ROUT2VOL,
+ 0, ES8328_OUT2VOL_MAX, 0, play_tlv),
+
+ SOC_DOUBLE_TLV("Mic PGA Volume", ES8328_ADCCONTROL1,
+ 4, 0, 8, 0, mic_tlv),
+};
+
+/*
+ * DAPM Controls
+ */
+
+static const char * const es8328_line_texts[] = {
+ "Line 1", "Line 2", "PGA", "Differential"};
+
+static const struct soc_enum es8328_lline_enum =
+ SOC_ENUM_SINGLE(ES8328_DACCONTROL16, 3,
+ ARRAY_SIZE(es8328_line_texts),
+ es8328_line_texts);
+static const struct snd_kcontrol_new es8328_left_line_controls =
+ SOC_DAPM_ENUM("Route", es8328_lline_enum);
+
+static const struct soc_enum es8328_rline_enum =
+ SOC_ENUM_SINGLE(ES8328_DACCONTROL16, 0,
+ ARRAY_SIZE(es8328_line_texts),
+ es8328_line_texts);
+static const struct snd_kcontrol_new es8328_right_line_controls =
+ SOC_DAPM_ENUM("Route", es8328_lline_enum);
+
+/* Left Mixer */
+static const struct snd_kcontrol_new es8328_left_mixer_controls[] = {
+ SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL17, 8, 1, 0),
+ SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL17, 7, 1, 0),
+ SOC_DAPM_SINGLE("Right Playback Switch", ES8328_DACCONTROL18, 8, 1, 0),
+ SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL18, 7, 1, 0),
+};
+
+/* Right Mixer */
+static const struct snd_kcontrol_new es8328_right_mixer_controls[] = {
+ SOC_DAPM_SINGLE("Left Playback Switch", ES8328_DACCONTROL19, 8, 1, 0),
+ SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL19, 7, 1, 0),
+ SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL20, 8, 1, 0),
+ SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL20, 7, 1, 0),
+};
+
+static const char * const es8328_pga_sel[] = {
+ "Line 1", "Line 2", "Line 3", "Differential"};
+
+/* Left PGA Mux */
+static const struct soc_enum es8328_lpga_enum =
+ SOC_ENUM_SINGLE(ES8328_ADCCONTROL2, 6,
+ ARRAY_SIZE(es8328_pga_sel),
+ es8328_pga_sel);
+static const struct snd_kcontrol_new es8328_left_pga_controls =
+ SOC_DAPM_ENUM("Route", es8328_lpga_enum);
+
+/* Right PGA Mux */
+static const struct soc_enum es8328_rpga_enum =
+ SOC_ENUM_SINGLE(ES8328_ADCCONTROL2, 4,
+ ARRAY_SIZE(es8328_pga_sel),
+ es8328_pga_sel);
+static const struct snd_kcontrol_new es8328_right_pga_controls =
+ SOC_DAPM_ENUM("Route", es8328_rpga_enum);
+
+/* Differential Mux */
+static const char * const es8328_diff_sel[] = {"Line 1", "Line 2"};
+static SOC_ENUM_SINGLE_DECL(diffmux,
+ ES8328_ADCCONTROL3, 7, es8328_diff_sel);
+static const struct snd_kcontrol_new es8328_diffmux_controls =
+ SOC_DAPM_ENUM("Route", diffmux);
+
+/* Mono ADC Mux */
+static const char * const es8328_mono_mux[] = {"Stereo", "Mono (Left)",
+ "Mono (Right)", "Digital Mono"};
+static SOC_ENUM_SINGLE_DECL(monomux,
+ ES8328_ADCCONTROL3, 3, es8328_mono_mux);
+static const struct snd_kcontrol_new es8328_monomux_controls =
+ SOC_DAPM_ENUM("Route", monomux);
+
+static const struct snd_soc_dapm_widget es8328_dapm_widgets[] = {
+ SND_SOC_DAPM_MUX("Differential Mux", SND_SOC_NOPM, 0, 0,
+ &es8328_diffmux_controls),
+ SND_SOC_DAPM_MUX("Left ADC Mux", SND_SOC_NOPM, 0, 0,
+ &es8328_monomux_controls),
+ SND_SOC_DAPM_MUX("Right ADC Mux", SND_SOC_NOPM, 0, 0,
+ &es8328_monomux_controls),
+
+ SND_SOC_DAPM_MUX("Left PGA Mux", ES8328_ADCPOWER,
+ ES8328_ADCPOWER_AINL_OFF, 1,
+ &es8328_left_pga_controls),
+ SND_SOC_DAPM_MUX("Right PGA Mux", ES8328_ADCPOWER,
+ ES8328_ADCPOWER_AINR_OFF, 1,
+ &es8328_right_pga_controls),
+
+ SND_SOC_DAPM_MUX("Left Line Mux", SND_SOC_NOPM, 0, 0,
+ &es8328_left_line_controls),
+ SND_SOC_DAPM_MUX("Right Line Mux", SND_SOC_NOPM, 0, 0,
+ &es8328_right_line_controls),
+
+ SND_SOC_DAPM_ADC("Right ADC", "Right Capture", ES8328_ADCPOWER,
+ ES8328_ADCPOWER_ADCR_OFF, 1),
+ SND_SOC_DAPM_ADC("Left ADC", "Left Capture", ES8328_ADCPOWER,
+ ES8328_ADCPOWER_ADCL_OFF, 1),
+
+ SND_SOC_DAPM_SUPPLY("Mic Bias", ES8328_ADCPOWER,
+ ES8328_ADCPOWER_MIC_BIAS_OFF, 1, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("Mic Bias Gen", ES8328_ADCPOWER,
+ ES8328_ADCPOWER_ADC_BIAS_GEN_OFF, 1, NULL, 0),
+
+ SND_SOC_DAPM_SUPPLY("DAC STM", ES8328_CHIPPOWER,
+ ES8328_CHIPPOWER_DACSTM_RESET, 1, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("ADC STM", ES8328_CHIPPOWER,
+ ES8328_CHIPPOWER_ADCSTM_RESET, 1, NULL, 0),
+
+ SND_SOC_DAPM_SUPPLY("DAC DIG", ES8328_CHIPPOWER,
+ ES8328_CHIPPOWER_DACDIG_OFF, 1, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("ADC DIG", ES8328_CHIPPOWER,
+ ES8328_CHIPPOWER_ADCDIG_OFF, 1, NULL, 0),
+
+ SND_SOC_DAPM_SUPPLY("DAC DLL", ES8328_CHIPPOWER,
+ ES8328_CHIPPOWER_DACDLL_OFF, 1, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("ADC DLL", ES8328_CHIPPOWER,
+ ES8328_CHIPPOWER_ADCDLL_OFF, 1, NULL, 0),
+
+ SND_SOC_DAPM_SUPPLY("ADC Vref", ES8328_CHIPPOWER,
+ ES8328_CHIPPOWER_ADCVREF_OFF, 1, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("DAC Vref", ES8328_CHIPPOWER,
+ ES8328_CHIPPOWER_DACVREF_OFF, 1, NULL, 0),
+
+ SND_SOC_DAPM_DAC("Right DAC", "Right Playback", ES8328_DACPOWER,
+ ES8328_DACPOWER_RDAC_OFF, 1),
+ SND_SOC_DAPM_DAC("Left DAC", "Left Playback", ES8328_DACPOWER,
+ ES8328_DACPOWER_LDAC_OFF, 1),
+
+ SND_SOC_DAPM_MIXER("Left Mixer", SND_SOC_NOPM, 0, 0,
+ &es8328_left_mixer_controls[0],
+ ARRAY_SIZE(es8328_left_mixer_controls)),
+ SND_SOC_DAPM_MIXER("Right Mixer", SND_SOC_NOPM, 0, 0,
+ &es8328_right_mixer_controls[0],
+ ARRAY_SIZE(es8328_right_mixer_controls)),
+
+ SND_SOC_DAPM_PGA("Right Out 2", ES8328_DACPOWER,
+ ES8328_DACPOWER_ROUT2_ON, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("Left Out 2", ES8328_DACPOWER,
+ ES8328_DACPOWER_LOUT2_ON, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("Right Out 1", ES8328_DACPOWER,
+ ES8328_DACPOWER_ROUT1_ON, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("Left Out 1", ES8328_DACPOWER,
+ ES8328_DACPOWER_LOUT1_ON, 0, NULL, 0),
+
+ SND_SOC_DAPM_OUTPUT("LOUT1"),
+ SND_SOC_DAPM_OUTPUT("ROUT1"),
+ SND_SOC_DAPM_OUTPUT("LOUT2"),
+ SND_SOC_DAPM_OUTPUT("ROUT2"),
+
+ SND_SOC_DAPM_INPUT("LINPUT1"),
+ SND_SOC_DAPM_INPUT("LINPUT2"),
+ SND_SOC_DAPM_INPUT("RINPUT1"),
+ SND_SOC_DAPM_INPUT("RINPUT2"),
+};
+
+static const struct snd_soc_dapm_route es8328_dapm_routes[] = {
+
+ { "Left Line Mux", "Line 1", "LINPUT1" },
+ { "Left Line Mux", "Line 2", "LINPUT2" },
+ { "Left Line Mux", "PGA", "Left PGA Mux" },
+ { "Left Line Mux", "Differential", "Differential Mux" },
+
+ { "Right Line Mux", "Line 1", "RINPUT1" },
+ { "Right Line Mux", "Line 2", "RINPUT2" },
+ { "Right Line Mux", "PGA", "Right PGA Mux" },
+ { "Right Line Mux", "Differential", "Differential Mux" },
+
+ { "Left PGA Mux", "Line 1", "LINPUT1" },
+ { "Left PGA Mux", "Line 2", "LINPUT2" },
+ { "Left PGA Mux", "Differential", "Differential Mux" },
+
+ { "Right PGA Mux", "Line 1", "RINPUT1" },
+ { "Right PGA Mux", "Line 2", "RINPUT2" },
+ { "Right PGA Mux", "Differential", "Differential Mux" },
+
+ { "Differential Mux", "Line 1", "LINPUT1" },
+ { "Differential Mux", "Line 1", "RINPUT1" },
+ { "Differential Mux", "Line 2", "LINPUT2" },
+ { "Differential Mux", "Line 2", "RINPUT2" },
+
+ { "Left ADC Mux", "Stereo", "Left PGA Mux" },
+ { "Left ADC Mux", "Mono (Left)", "Left PGA Mux" },
+ { "Left ADC Mux", "Digital Mono", "Left PGA Mux" },
+
+ { "Right ADC Mux", "Stereo", "Right PGA Mux" },
+ { "Right ADC Mux", "Mono (Right)", "Right PGA Mux" },
+ { "Right ADC Mux", "Digital Mono", "Right PGA Mux" },
+
+ { "Left ADC", NULL, "Left ADC Mux" },
+ { "Right ADC", NULL, "Right ADC Mux" },
+
+ { "ADC DIG", NULL, "ADC STM" },
+ { "ADC DIG", NULL, "ADC Vref" },
+ { "ADC DIG", NULL, "ADC DLL" },
+
+ { "Left ADC", NULL, "ADC DIG" },
+ { "Right ADC", NULL, "ADC DIG" },
+
+ { "Mic Bias", NULL, "Mic Bias Gen" },
+
+ { "Left Line Mux", "Line 1", "LINPUT1" },
+ { "Left Line Mux", "Line 2", "LINPUT2" },
+ { "Left Line Mux", "PGA", "Left PGA Mux" },
+ { "Left Line Mux", "Differential", "Differential Mux" },
+
+ { "Right Line Mux", "Line 1", "RINPUT1" },
+ { "Right Line Mux", "Line 2", "RINPUT2" },
+ { "Right Line Mux", "PGA", "Right PGA Mux" },
+ { "Right Line Mux", "Differential", "Differential Mux" },
+
+ { "Left Out 1", NULL, "Left DAC" },
+ { "Right Out 1", NULL, "Right DAC" },
+ { "Left Out 2", NULL, "Left DAC" },
+ { "Right Out 2", NULL, "Right DAC" },
+
+ { "Left Mixer", "Playback Switch", "Left DAC" },
+ { "Left Mixer", "Left Bypass Switch", "Left Line Mux" },
+ { "Left Mixer", "Right Playback Switch", "Right DAC" },
+ { "Left Mixer", "Right Bypass Switch", "Right Line Mux" },
+
+ { "Right Mixer", "Left Playback Switch", "Left DAC" },
+ { "Right Mixer", "Left Bypass Switch", "Left Line Mux" },
+ { "Right Mixer", "Playback Switch", "Right DAC" },
+ { "Right Mixer", "Right Bypass Switch", "Right Line Mux" },
+
+ { "DAC DIG", NULL, "DAC STM" },
+ { "DAC DIG", NULL, "DAC Vref" },
+ { "DAC DIG", NULL, "DAC DLL" },
+
+ { "Left DAC", NULL, "DAC DIG" },
+ { "Right DAC", NULL, "DAC DIG" },
+
+ { "Left Out 1", NULL, "Left Mixer" },
+ { "LOUT1", NULL, "Left Out 1" },
+ { "Right Out 1", NULL, "Right Mixer" },
+ { "ROUT1", NULL, "Right Out 1" },
+
+ { "Left Out 2", NULL, "Left Mixer" },
+ { "LOUT2", NULL, "Left Out 2" },
+ { "Right Out 2", NULL, "Right Mixer" },
+ { "ROUT2", NULL, "Right Out 2" },
+};
+
+static int es8328_mute(struct snd_soc_dai *dai, int mute)
+{
+ return snd_soc_update_bits(dai->codec, ES8328_DACCONTROL3,
+ ES8328_DACCONTROL3_DACMUTE,
+ mute ? ES8328_DACCONTROL3_DACMUTE : 0);
+}
+
+static int es8328_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct es8328_priv *es8328 = snd_soc_codec_get_drvdata(codec);
+ int clk_rate;
+ int i;
+ int reg;
+ u8 ratio;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ reg = ES8328_DACCONTROL2;
+ else
+ reg = ES8328_ADCCONTROL5;
+
+ clk_rate = clk_get_rate(es8328->clk);
+
+ if ((clk_rate != ES8328_SYSCLK_RATE_1X) &&
+ (clk_rate != ES8328_SYSCLK_RATE_2X)) {
+ dev_err(codec->dev,
+ "%s: clock is running at %d Hz, not %d or %d Hz\n",
+ __func__, clk_rate,
+ ES8328_SYSCLK_RATE_1X, ES8328_SYSCLK_RATE_2X);
+ return -EINVAL;
+ }
+
+ /* find master mode MCLK to sampling frequency ratio */
+	ratio = mclk_ratios[0].ratio;
+	for (i = 1; i < ARRAY_SIZE(mclk_ratios); i++)
+		if (params_rate(params) == mclk_ratios[i].rate)
+			ratio = mclk_ratios[i].ratio;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ es8328->playback_fs = params_rate(params);
+ es8328_set_deemph(codec);
+ }
+
+ return snd_soc_update_bits(codec, reg, ES8328_RATEMASK, ratio);
+}
+
+static int es8328_set_dai_fmt(struct snd_soc_dai *codec_dai,
+ unsigned int fmt)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct es8328_priv *es8328 = snd_soc_codec_get_drvdata(codec);
+ int clk_rate;
+ u8 mode = ES8328_DACCONTROL1_DACWL_16;
+
+ /* set master/slave audio interface */
+ if ((fmt & SND_SOC_DAIFMT_MASTER_MASK) != SND_SOC_DAIFMT_CBM_CFM)
+ return -EINVAL;
+
+ /* interface format */
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ mode |= ES8328_DACCONTROL1_DACFORMAT_I2S;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ mode |= ES8328_DACCONTROL1_DACFORMAT_RJUST;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ mode |= ES8328_DACCONTROL1_DACFORMAT_LJUST;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* clock inversion */
+ if ((fmt & SND_SOC_DAIFMT_INV_MASK) != SND_SOC_DAIFMT_NB_NF)
+ return -EINVAL;
+
+ snd_soc_write(codec, ES8328_DACCONTROL1, mode);
+ snd_soc_write(codec, ES8328_ADCCONTROL4, mode);
+
+ /* Master serial port mode, with BCLK generated automatically */
+ clk_rate = clk_get_rate(es8328->clk);
+ if (clk_rate == ES8328_SYSCLK_RATE_1X)
+ snd_soc_write(codec, ES8328_MASTERMODE,
+ ES8328_MASTERMODE_MSC);
+ else
+ snd_soc_write(codec, ES8328_MASTERMODE,
+ ES8328_MASTERMODE_MCLKDIV2 |
+ ES8328_MASTERMODE_MSC);
+
+ return 0;
+}
+
+static int es8328_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ break;
+
+ case SND_SOC_BIAS_PREPARE:
+ /* VREF, VMID=2x50k, digital enabled */
+ snd_soc_write(codec, ES8328_CHIPPOWER, 0);
+ snd_soc_update_bits(codec, ES8328_CONTROL1,
+ ES8328_CONTROL1_VMIDSEL_MASK |
+ ES8328_CONTROL1_ENREF,
+ ES8328_CONTROL1_VMIDSEL_50k |
+ ES8328_CONTROL1_ENREF);
+ break;
+
+ case SND_SOC_BIAS_STANDBY:
+ if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
+ snd_soc_update_bits(codec, ES8328_CONTROL1,
+ ES8328_CONTROL1_VMIDSEL_MASK |
+ ES8328_CONTROL1_ENREF,
+ ES8328_CONTROL1_VMIDSEL_5k |
+ ES8328_CONTROL1_ENREF);
+
+ /* Charge caps */
+ msleep(100);
+ }
+
+ snd_soc_write(codec, ES8328_CONTROL2,
+ ES8328_CONTROL2_OVERCURRENT_ON |
+ ES8328_CONTROL2_THERMAL_SHUTDOWN_ON);
+
+ /* VREF, VMID=2*500k, digital stopped */
+ snd_soc_update_bits(codec, ES8328_CONTROL1,
+ ES8328_CONTROL1_VMIDSEL_MASK |
+ ES8328_CONTROL1_ENREF,
+ ES8328_CONTROL1_VMIDSEL_500k |
+ ES8328_CONTROL1_ENREF);
+ break;
+
+ case SND_SOC_BIAS_OFF:
+ snd_soc_update_bits(codec, ES8328_CONTROL1,
+ ES8328_CONTROL1_VMIDSEL_MASK |
+ ES8328_CONTROL1_ENREF,
+ 0);
+ break;
+ }
+ codec->dapm.bias_level = level;
+ return 0;
+}
+
+static const struct snd_soc_dai_ops es8328_dai_ops = {
+ .hw_params = es8328_hw_params,
+ .digital_mute = es8328_mute,
+ .set_fmt = es8328_set_dai_fmt,
+};
+
+static struct snd_soc_dai_driver es8328_dai = {
+ .name = "es8328-hifi-analog",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = ES8328_RATES,
+ .formats = ES8328_FORMATS,
+ },
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = ES8328_RATES,
+ .formats = ES8328_FORMATS,
+ },
+ .ops = &es8328_dai_ops,
+};
+
+static int es8328_suspend(struct snd_soc_codec *codec)
+{
+ struct es8328_priv *es8328;
+ int ret;
+
+ es8328 = snd_soc_codec_get_drvdata(codec);
+
+ clk_disable_unprepare(es8328->clk);
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(es8328->supplies),
+ es8328->supplies);
+ if (ret) {
+ dev_err(codec->dev, "unable to disable regulators\n");
+ return ret;
+ }
+ return 0;
+}
+
+static int es8328_resume(struct snd_soc_codec *codec)
+{
+ struct regmap *regmap = dev_get_regmap(codec->dev, NULL);
+ struct es8328_priv *es8328;
+ int ret;
+
+ es8328 = snd_soc_codec_get_drvdata(codec);
+
+ ret = clk_prepare_enable(es8328->clk);
+ if (ret) {
+ dev_err(codec->dev, "unable to enable clock\n");
+ return ret;
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(es8328->supplies),
+ es8328->supplies);
+ if (ret) {
+ dev_err(codec->dev, "unable to enable regulators\n");
+ return ret;
+ }
+
+ regcache_mark_dirty(regmap);
+ ret = regcache_sync(regmap);
+ if (ret) {
+ dev_err(codec->dev, "unable to sync regcache\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int es8328_codec_probe(struct snd_soc_codec *codec)
+{
+ struct es8328_priv *es8328;
+ int ret;
+
+ es8328 = snd_soc_codec_get_drvdata(codec);
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(es8328->supplies),
+ es8328->supplies);
+ if (ret) {
+ dev_err(codec->dev, "unable to enable regulators\n");
+ return ret;
+ }
+
+ /* Setup clocks */
+ es8328->clk = devm_clk_get(codec->dev, NULL);
+ if (IS_ERR(es8328->clk)) {
+ dev_err(codec->dev, "codec clock missing or invalid\n");
+ ret = PTR_ERR(es8328->clk);
+ goto clk_fail;
+ }
+
+ ret = clk_prepare_enable(es8328->clk);
+ if (ret) {
+ dev_err(codec->dev, "unable to prepare codec clk\n");
+ goto clk_fail;
+ }
+
+ return 0;
+
+clk_fail:
+ regulator_bulk_disable(ARRAY_SIZE(es8328->supplies),
+ es8328->supplies);
+ return ret;
+}
+
+static int es8328_remove(struct snd_soc_codec *codec)
+{
+ struct es8328_priv *es8328;
+
+ es8328 = snd_soc_codec_get_drvdata(codec);
+
+ if (es8328->clk)
+ clk_disable_unprepare(es8328->clk);
+
+ regulator_bulk_disable(ARRAY_SIZE(es8328->supplies),
+ es8328->supplies);
+
+ return 0;
+}
+
+const struct regmap_config es8328_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = ES8328_REG_MAX,
+ .cache_type = REGCACHE_RBTREE,
+};
+EXPORT_SYMBOL_GPL(es8328_regmap_config);
+
+static struct snd_soc_codec_driver es8328_codec_driver = {
+ .probe = es8328_codec_probe,
+ .suspend = es8328_suspend,
+ .resume = es8328_resume,
+ .remove = es8328_remove,
+ .set_bias_level = es8328_set_bias_level,
+ .suspend_bias_off = true,
+
+ .controls = es8328_snd_controls,
+ .num_controls = ARRAY_SIZE(es8328_snd_controls),
+ .dapm_widgets = es8328_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(es8328_dapm_widgets),
+ .dapm_routes = es8328_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(es8328_dapm_routes),
+};
+
+int es8328_probe(struct device *dev, struct regmap *regmap)
+{
+ struct es8328_priv *es8328;
+ int ret;
+ int i;
+
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ es8328 = devm_kzalloc(dev, sizeof(*es8328), GFP_KERNEL);
+ if (es8328 == NULL)
+ return -ENOMEM;
+
+ es8328->regmap = regmap;
+
+ for (i = 0; i < ARRAY_SIZE(es8328->supplies); i++)
+ es8328->supplies[i].supply = supply_names[i];
+
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(es8328->supplies),
+ es8328->supplies);
+ if (ret) {
+ dev_err(dev, "unable to get regulators\n");
+ return ret;
+ }
+
+ dev_set_drvdata(dev, es8328);
+
+ return snd_soc_register_codec(dev,
+ &es8328_codec_driver, &es8328_dai, 1);
+}
+EXPORT_SYMBOL_GPL(es8328_probe);
+
+MODULE_DESCRIPTION("ASoC ES8328 driver");
+MODULE_AUTHOR("Sean Cross <xobs@kosagi.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/es8328.h b/sound/soc/codecs/es8328.h
new file mode 100644
index 000000000000..cb36afe10c0e
--- /dev/null
+++ b/sound/soc/codecs/es8328.h
@@ -0,0 +1,314 @@
+/*
+ * es8328.h -- ES8328 ALSA SoC Audio driver
+ */
+
+#ifndef _ES8328_H
+#define _ES8328_H
+
+#include <linux/regmap.h>
+
+struct device;
+
+extern const struct regmap_config es8328_regmap_config;
+int es8328_probe(struct device *dev, struct regmap *regmap);
+
+#define ES8328_DACLVOL 46
+#define ES8328_DACRVOL 47
+#define ES8328_DACCTL 28
+#define ES8328_RATEMASK (0x1f << 0)
+
+#define ES8328_CONTROL1 0x00
+#define ES8328_CONTROL1_VMIDSEL_OFF (0 << 0)
+#define ES8328_CONTROL1_VMIDSEL_50k (1 << 0)
+#define ES8328_CONTROL1_VMIDSEL_500k (2 << 0)
+#define ES8328_CONTROL1_VMIDSEL_5k (3 << 0)
+#define ES8328_CONTROL1_VMIDSEL_MASK (7 << 0)
+#define ES8328_CONTROL1_ENREF (1 << 2)
+#define ES8328_CONTROL1_SEQEN (1 << 3)
+#define ES8328_CONTROL1_SAMEFS (1 << 4)
+#define ES8328_CONTROL1_DACMCLK_ADC (0 << 5)
+#define ES8328_CONTROL1_DACMCLK_DAC (1 << 5)
+#define ES8328_CONTROL1_LRCM (1 << 6)
+#define ES8328_CONTROL1_SCP_RESET (1 << 7)
+
+#define ES8328_CONTROL2 0x01
+#define ES8328_CONTROL2_VREF_BUF_OFF (1 << 0)
+#define ES8328_CONTROL2_VREF_LOWPOWER (1 << 1)
+#define ES8328_CONTROL2_IBIASGEN_OFF (1 << 2)
+#define ES8328_CONTROL2_ANALOG_OFF (1 << 3)
+#define ES8328_CONTROL2_VREF_BUF_LOWPOWER (1 << 4)
+#define ES8328_CONTROL2_VCM_MOD_LOWPOWER (1 << 5)
+#define ES8328_CONTROL2_OVERCURRENT_ON (1 << 6)
+#define ES8328_CONTROL2_THERMAL_SHUTDOWN_ON (1 << 7)
+
+#define ES8328_CHIPPOWER 0x02
+#define ES8328_CHIPPOWER_DACVREF_OFF 0
+#define ES8328_CHIPPOWER_ADCVREF_OFF 1
+#define ES8328_CHIPPOWER_DACDLL_OFF 2
+#define ES8328_CHIPPOWER_ADCDLL_OFF 3
+#define ES8328_CHIPPOWER_DACSTM_RESET 4
+#define ES8328_CHIPPOWER_ADCSTM_RESET 5
+#define ES8328_CHIPPOWER_DACDIG_OFF 6
+#define ES8328_CHIPPOWER_ADCDIG_OFF 7
+
+#define ES8328_ADCPOWER 0x03
+#define ES8328_ADCPOWER_INT1_LOWPOWER 0
+#define ES8328_ADCPOWER_FLASH_ADC_LOWPOWER 1
+#define ES8328_ADCPOWER_ADC_BIAS_GEN_OFF 2
+#define ES8328_ADCPOWER_MIC_BIAS_OFF 3
+#define ES8328_ADCPOWER_ADCR_OFF 4
+#define ES8328_ADCPOWER_ADCL_OFF 5
+#define ES8328_ADCPOWER_AINR_OFF 6
+#define ES8328_ADCPOWER_AINL_OFF 7
+
+#define ES8328_DACPOWER 0x04
+#define ES8328_DACPOWER_OUT3_ON 0
+#define ES8328_DACPOWER_MONO_ON 1
+#define ES8328_DACPOWER_ROUT2_ON 2
+#define ES8328_DACPOWER_LOUT2_ON 3
+#define ES8328_DACPOWER_ROUT1_ON 4
+#define ES8328_DACPOWER_LOUT1_ON 5
+#define ES8328_DACPOWER_RDAC_OFF 6
+#define ES8328_DACPOWER_LDAC_OFF 7
+
+#define ES8328_CHIPLOPOW1 0x05
+#define ES8328_CHIPLOPOW2 0x06
+#define ES8328_ANAVOLMANAG 0x07
+
+#define ES8328_MASTERMODE 0x08
+#define ES8328_MASTERMODE_BCLKDIV (0 << 0)
+#define ES8328_MASTERMODE_BCLK_INV (1 << 5)
+#define ES8328_MASTERMODE_MCLKDIV2 (1 << 6)
+#define ES8328_MASTERMODE_MSC (1 << 7)
+
+#define ES8328_ADCCONTROL1 0x09
+#define ES8328_ADCCONTROL2 0x0a
+#define ES8328_ADCCONTROL3 0x0b
+#define ES8328_ADCCONTROL4 0x0c
+#define ES8328_ADCCONTROL5 0x0d
+#define ES8328_ADCCONTROL5_RATEMASK (0x1f << 0)
+
+#define ES8328_ADCCONTROL6 0x0e
+
+#define ES8328_ADCCONTROL7 0x0f
+#define ES8328_ADCCONTROL7_ADC_MUTE (1 << 2)
+#define ES8328_ADCCONTROL7_ADC_LER (1 << 3)
+#define ES8328_ADCCONTROL7_ADC_ZERO_CROSS (1 << 4)
+#define ES8328_ADCCONTROL7_ADC_SOFT_RAMP (1 << 5)
+#define ES8328_ADCCONTROL7_ADC_RAMP_RATE_4 (0 << 6)
+#define ES8328_ADCCONTROL7_ADC_RAMP_RATE_8 (1 << 6)
+#define ES8328_ADCCONTROL7_ADC_RAMP_RATE_16 (2 << 6)
+#define ES8328_ADCCONTROL7_ADC_RAMP_RATE_32 (3 << 6)
+
+#define ES8328_ADCCONTROL8 0x10
+#define ES8328_ADCCONTROL9 0x11
+#define ES8328_ADCCONTROL10 0x12
+#define ES8328_ADCCONTROL11 0x13
+#define ES8328_ADCCONTROL12 0x14
+#define ES8328_ADCCONTROL13 0x15
+#define ES8328_ADCCONTROL14 0x16
+
+#define ES8328_DACCONTROL1 0x17
+#define ES8328_DACCONTROL1_DACFORMAT_I2S (0 << 1)
+#define ES8328_DACCONTROL1_DACFORMAT_LJUST (1 << 1)
+#define ES8328_DACCONTROL1_DACFORMAT_RJUST (2 << 1)
+#define ES8328_DACCONTROL1_DACFORMAT_PCM (3 << 1)
+#define ES8328_DACCONTROL1_DACWL_24 (0 << 3)
+#define ES8328_DACCONTROL1_DACWL_20 (1 << 3)
+#define ES8328_DACCONTROL1_DACWL_18 (2 << 3)
+#define ES8328_DACCONTROL1_DACWL_16 (3 << 3)
+#define ES8328_DACCONTROL1_DACWL_32 (4 << 3)
+#define ES8328_DACCONTROL1_DACLRP_I2S_POL_NORMAL (0 << 6)
+#define ES8328_DACCONTROL1_DACLRP_I2S_POL_INV (1 << 6)
+#define ES8328_DACCONTROL1_DACLRP_PCM_MSB_CLK2 (0 << 6)
+#define ES8328_DACCONTROL1_DACLRP_PCM_MSB_CLK1 (1 << 6)
+#define ES8328_DACCONTROL1_LRSWAP (1 << 7)
+
+#define ES8328_DACCONTROL2 0x18
+#define ES8328_DACCONTROL2_RATEMASK (0x1f << 0)
+#define ES8328_DACCONTROL2_DOUBLESPEED (1 << 5)
+
+#define ES8328_DACCONTROL3 0x19
+#define ES8328_DACCONTROL3_AUTOMUTE (1 << 2)
+#define ES8328_DACCONTROL3_DACMUTE (1 << 2)
+#define ES8328_DACCONTROL3_LEFTGAINVOL (1 << 3)
+#define ES8328_DACCONTROL3_DACZEROCROSS (1 << 4)
+#define ES8328_DACCONTROL3_DACSOFTRAMP (1 << 5)
+#define ES8328_DACCONTROL3_DACRAMPRATE (3 << 6)
+
+#define ES8328_LDACVOL 0x1a
+#define ES8328_LDACVOL_MASK (0 << 0)
+#define ES8328_LDACVOL_MAX (0xc0)
+
+#define ES8328_RDACVOL 0x1b
+#define ES8328_RDACVOL_MASK (0 << 0)
+#define ES8328_RDACVOL_MAX (0xc0)
+
+#define ES8328_DACVOL_MAX (0xc0)
+
+#define ES8328_DACCONTROL4 0x1a
+#define ES8328_DACCONTROL5 0x1b
+
+#define ES8328_DACCONTROL6 0x1c
+#define ES8328_DACCONTROL6_CLICKFREE (1 << 3)
+#define ES8328_DACCONTROL6_DAC_INVR (1 << 4)
+#define ES8328_DACCONTROL6_DAC_INVL (1 << 5)
+#define ES8328_DACCONTROL6_DEEMPH_OFF (0 << 6)
+#define ES8328_DACCONTROL6_DEEMPH_32k (1 << 6)
+#define ES8328_DACCONTROL6_DEEMPH_44_1k (2 << 6)
+#define ES8328_DACCONTROL6_DEEMPH_48k (3 << 6)
+
+#define ES8328_DACCONTROL7 0x1d
+#define ES8328_DACCONTROL7_VPP_SCALE_3p5 (0 << 0)
+#define ES8328_DACCONTROL7_VPP_SCALE_4p0 (1 << 0)
+#define ES8328_DACCONTROL7_VPP_SCALE_3p0 (2 << 0)
+#define ES8328_DACCONTROL7_VPP_SCALE_2p5 (3 << 0)
+#define ES8328_DACCONTROL7_SHELVING_STRENGTH (1 << 2) /* In eights */
+#define ES8328_DACCONTROL7_MONO (1 << 5)
+#define ES8328_DACCONTROL7_ZEROR (1 << 6)
+#define ES8328_DACCONTROL7_ZEROL (1 << 7)
+
+/* Shelving filter */
+#define ES8328_DACCONTROL8 0x1e
+#define ES8328_DACCONTROL9 0x1f
+#define ES8328_DACCONTROL10 0x20
+#define ES8328_DACCONTROL11 0x21
+#define ES8328_DACCONTROL12 0x22
+#define ES8328_DACCONTROL13 0x23
+#define ES8328_DACCONTROL14 0x24
+#define ES8328_DACCONTROL15 0x25
+
+#define ES8328_DACCONTROL16 0x26
+#define ES8328_DACCONTROL16_RMIXSEL_RIN1 (0 << 0)
+#define ES8328_DACCONTROL16_RMIXSEL_RIN2 (1 << 0)
+#define ES8328_DACCONTROL16_RMIXSEL_RIN3 (2 << 0)
+#define ES8328_DACCONTROL16_RMIXSEL_RADC (3 << 0)
+#define ES8328_DACCONTROL16_LMIXSEL_LIN1 (0 << 3)
+#define ES8328_DACCONTROL16_LMIXSEL_LIN2 (1 << 3)
+#define ES8328_DACCONTROL16_LMIXSEL_LIN3 (2 << 3)
+#define ES8328_DACCONTROL16_LMIXSEL_LADC (3 << 3)
+
+#define ES8328_DACCONTROL17 0x27
+#define ES8328_DACCONTROL17_LI2LOVOL (7 << 3)
+#define ES8328_DACCONTROL17_LI2LO (1 << 6)
+#define ES8328_DACCONTROL17_LD2LO (1 << 7)
+
+#define ES8328_DACCONTROL18 0x28
+#define ES8328_DACCONTROL18_RI2LOVOL (7 << 3)
+#define ES8328_DACCONTROL18_RI2LO (1 << 6)
+#define ES8328_DACCONTROL18_RD2LO (1 << 7)
+
+#define ES8328_DACCONTROL19 0x29
+#define ES8328_DACCONTROL19_LI2ROVOL (7 << 3)
+#define ES8328_DACCONTROL19_LI2RO (1 << 6)
+#define ES8328_DACCONTROL19_LD2RO (1 << 7)
+
+#define ES8328_DACCONTROL20 0x2a
+#define ES8328_DACCONTROL20_RI2ROVOL (7 << 3)
+#define ES8328_DACCONTROL20_RI2RO (1 << 6)
+#define ES8328_DACCONTROL20_RD2RO (1 << 7)
+
+#define ES8328_DACCONTROL21 0x2b
+#define ES8328_DACCONTROL21_LI2MOVOL (7 << 3)
+#define ES8328_DACCONTROL21_LI2MO (1 << 6)
+#define ES8328_DACCONTROL21_LD2MO (1 << 7)
+
+#define ES8328_DACCONTROL22 0x2c
+#define ES8328_DACCONTROL22_RI2MOVOL (7 << 3)
+#define ES8328_DACCONTROL22_RI2MO (1 << 6)
+#define ES8328_DACCONTROL22_RD2MO (1 << 7)
+
+#define ES8328_DACCONTROL23 0x2d
+#define ES8328_DACCONTROL23_MOUTINV (1 << 1)
+#define ES8328_DACCONTROL23_HPSWPOL (1 << 2)
+#define ES8328_DACCONTROL23_HPSWEN (1 << 3)
+#define ES8328_DACCONTROL23_VROI_1p5k (0 << 4)
+#define ES8328_DACCONTROL23_VROI_40k (1 << 4)
+#define ES8328_DACCONTROL23_OUT3_VREF (0 << 5)
+#define ES8328_DACCONTROL23_OUT3_ROUT1 (1 << 5)
+#define ES8328_DACCONTROL23_OUT3_MONOOUT (2 << 5)
+#define ES8328_DACCONTROL23_OUT3_RIGHT_MIXER (3 << 5)
+#define ES8328_DACCONTROL23_ROUT2INV (1 << 7)
+
+/* LOUT1 Amplifier */
+#define ES8328_LOUT1VOL 0x2e
+#define ES8328_LOUT1VOL_MASK (0 << 5)
+#define ES8328_LOUT1VOL_MAX (0x24)
+
+/* ROUT1 Amplifier */
+#define ES8328_ROUT1VOL 0x2f
+#define ES8328_ROUT1VOL_MASK (0 << 5)
+#define ES8328_ROUT1VOL_MAX (0x24)
+
+#define ES8328_OUT1VOL_MAX (0x24)
+
+/* LOUT2 Amplifier */
+#define ES8328_LOUT2VOL 0x30
+#define ES8328_LOUT2VOL_MASK (0 << 5)
+#define ES8328_LOUT2VOL_MAX (0x24)
+
+/* ROUT2 Amplifier */
+#define ES8328_ROUT2VOL 0x31
+#define ES8328_ROUT2VOL_MASK (0 << 5)
+#define ES8328_ROUT2VOL_MAX (0x24)
+
+#define ES8328_OUT2VOL_MAX (0x24)
+
+/* Mono Out Amplifier */
+#define ES8328_MONOOUTVOL 0x32
+#define ES8328_MONOOUTVOL_MASK (0 << 5)
+#define ES8328_MONOOUTVOL_MAX (0x24)
+
+#define ES8328_DACCONTROL29 0x33
+#define ES8328_DACCONTROL30 0x34
+
+#define ES8328_SYSCLK 0
+
+#define ES8328_REG_MAX 0x35
+
+#define ES8328_PLL1 0
+#define ES8328_PLL2 1
+
+/* clock inputs */
+#define ES8328_MCLK 0
+#define ES8328_PCMCLK 1
+
+/* clock divider id's */
+#define ES8328_PCMDIV 0
+#define ES8328_BCLKDIV 1
+#define ES8328_VXCLKDIV 2
+
+/* PCM clock dividers */
+#define ES8328_PCM_DIV_1 (0 << 6)
+#define ES8328_PCM_DIV_3 (2 << 6)
+#define ES8328_PCM_DIV_5_5 (3 << 6)
+#define ES8328_PCM_DIV_2 (4 << 6)
+#define ES8328_PCM_DIV_4 (5 << 6)
+#define ES8328_PCM_DIV_6 (6 << 6)
+#define ES8328_PCM_DIV_8 (7 << 6)
+
+/* BCLK clock dividers */
+#define ES8328_BCLK_DIV_1 (0 << 7)
+#define ES8328_BCLK_DIV_2 (1 << 7)
+#define ES8328_BCLK_DIV_4 (2 << 7)
+#define ES8328_BCLK_DIV_8 (3 << 7)
+
+/* VXCLK clock dividers */
+#define ES8328_VXCLK_DIV_1 (0 << 6)
+#define ES8328_VXCLK_DIV_2 (1 << 6)
+#define ES8328_VXCLK_DIV_4 (2 << 6)
+#define ES8328_VXCLK_DIV_8 (3 << 6)
+#define ES8328_VXCLK_DIV_16 (4 << 6)
+
+#define ES8328_DAI_HIFI 0
+#define ES8328_DAI_VOICE 1
+
+#define ES8328_1536FS 1536
+#define ES8328_1024FS 1024
+#define ES8328_768FS 768
+#define ES8328_512FS 512
+#define ES8328_384FS 384
+#define ES8328_256FS 256
+#define ES8328_128FS 128
+
+#endif
diff --git a/sound/soc/codecs/jz4740.c b/sound/soc/codecs/jz4740.c
index bcebd1a9ce31..df7c01cf7072 100644
--- a/sound/soc/codecs/jz4740.c
+++ b/sound/soc/codecs/jz4740.c
@@ -293,41 +293,13 @@ static int jz4740_codec_dev_probe(struct snd_soc_codec *codec)
regmap_update_bits(jz4740_codec->regmap, JZ4740_REG_CODEC_1,
JZ4740_CODEC_1_SW2_ENABLE, JZ4740_CODEC_1_SW2_ENABLE);
- jz4740_codec_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
return 0;
}
-static int jz4740_codec_dev_remove(struct snd_soc_codec *codec)
-{
- jz4740_codec_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-
-static int jz4740_codec_suspend(struct snd_soc_codec *codec)
-{
- return jz4740_codec_set_bias_level(codec, SND_SOC_BIAS_OFF);
-}
-
-static int jz4740_codec_resume(struct snd_soc_codec *codec)
-{
- return jz4740_codec_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-}
-
-#else
-#define jz4740_codec_suspend NULL
-#define jz4740_codec_resume NULL
-#endif
-
static struct snd_soc_codec_driver soc_codec_dev_jz4740_codec = {
.probe = jz4740_codec_dev_probe,
- .remove = jz4740_codec_dev_remove,
- .suspend = jz4740_codec_suspend,
- .resume = jz4740_codec_resume,
.set_bias_level = jz4740_codec_set_bias_level,
+ .suspend_bias_off = true,
.controls = jz4740_codec_controls,
.num_controls = ARRAY_SIZE(jz4740_codec_controls),
diff --git a/sound/soc/codecs/lm49453.c b/sound/soc/codecs/lm49453.c
index 275b3f72f3f4..c1ae5764983f 100644
--- a/sound/soc/codecs/lm49453.c
+++ b/sound/soc/codecs/lm49453.c
@@ -1395,18 +1395,6 @@ static struct snd_soc_dai_driver lm49453_dai[] = {
},
};
-static int lm49453_suspend(struct snd_soc_codec *codec)
-{
- lm49453_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
-static int lm49453_resume(struct snd_soc_codec *codec)
-{
- lm49453_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- return 0;
-}
-
/* power down chip */
static int lm49453_remove(struct snd_soc_codec *codec)
{
@@ -1416,8 +1404,6 @@ static int lm49453_remove(struct snd_soc_codec *codec)
static struct snd_soc_codec_driver soc_codec_dev_lm49453 = {
.remove = lm49453_remove,
- .suspend = lm49453_suspend,
- .resume = lm49453_resume,
.set_bias_level = lm49453_set_bias_level,
.controls = lm49453_snd_controls,
.num_controls = ARRAY_SIZE(lm49453_snd_controls),
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
index 4a063fa88526..d519294f57c7 100644
--- a/sound/soc/codecs/max98090.c
+++ b/sound/soc/codecs/max98090.c
@@ -1311,8 +1311,6 @@ static const struct snd_soc_dapm_route max98090_dapm_routes[] = {
{"MIC1 Input", NULL, "MIC1"},
{"MIC2 Input", NULL, "MIC2"},
- {"DMICL", NULL, "DMICL_ENA"},
- {"DMICR", NULL, "DMICR_ENA"},
{"DMICL", NULL, "AHPF"},
{"DMICR", NULL, "AHPF"},
@@ -1370,6 +1368,8 @@ static const struct snd_soc_dapm_route max98090_dapm_routes[] = {
{"DMIC Mux", "ADC", "ADCR"},
{"DMIC Mux", "DMIC", "DMICL"},
{"DMIC Mux", "DMIC", "DMICR"},
+ {"DMIC Mux", "DMIC", "DMICL_ENA"},
+ {"DMIC Mux", "DMIC", "DMICR_ENA"},
{"LBENL Mux", "Normal", "DMIC Mux"},
{"LBENL Mux", "Loopback", "LTENL Mux"},
@@ -1972,6 +1972,102 @@ static int max98090_dai_digital_mute(struct snd_soc_dai *codec_dai, int mute)
return 0;
}
+static int max98090_dai_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct max98090_priv *max98090 = snd_soc_codec_get_drvdata(codec);
+
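+ /*
+ * In slave mode, PLL unlock detection is enabled shortly after the
+ * first stream starts and disabled again when the last stream stops.
+ */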
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ if (!max98090->master && dai->active == 1)
+ queue_delayed_work(system_power_efficient_wq,
+ &max98090->pll_det_enable_work,
+ msecs_to_jiffies(10));
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ if (!max98090->master && dai->active == 1)
+ schedule_work(&max98090->pll_det_disable_work);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static void max98090_pll_det_enable_work(struct work_struct *work)
+{
+ struct max98090_priv *max98090 =
+ container_of(work, struct max98090_priv,
+ pll_det_enable_work.work);
+ struct snd_soc_codec *codec = max98090->codec;
+ unsigned int status, mask;
+
+ /*
+ * Clear the status register in order to clear a PLL unlock that may
+ * already have occurred. If the PLL still hasn't locked, the status
+ * will be set again and a PLL unlock interrupt will occur.
+ * Note that this clears all status bits.
+ */
+ regmap_read(max98090->regmap, M98090_REG_DEVICE_STATUS, &status);
+
+ /*
+ * Queue jack work in case jack state has just changed but handler
+ * hasn't run yet
+ */
+ regmap_read(max98090->regmap, M98090_REG_INTERRUPT_S, &mask);
+ status &= mask;
+ if (status & M98090_JDET_MASK)
+ queue_delayed_work(system_power_efficient_wq,
+ &max98090->jack_work,
+ msecs_to_jiffies(100));
+
+ /* Enable PLL unlock interrupt */
+ snd_soc_update_bits(codec, M98090_REG_INTERRUPT_S,
+ M98090_IULK_MASK,
+ 1 << M98090_IULK_SHIFT);
+}
+
+static void max98090_pll_det_disable_work(struct work_struct *work)
+{
+ struct max98090_priv *max98090 =
+ container_of(work, struct max98090_priv, pll_det_disable_work);
+ struct snd_soc_codec *codec = max98090->codec;
+
+ cancel_delayed_work_sync(&max98090->pll_det_enable_work);
+
+ /* Disable PLL unlock interrupt */
+ snd_soc_update_bits(codec, M98090_REG_INTERRUPT_S,
+ M98090_IULK_MASK, 0);
+}
+
+static void max98090_pll_work(struct work_struct *work)
+{
+ struct max98090_priv *max98090 =
+ container_of(work, struct max98090_priv, pll_work);
+ struct snd_soc_codec *codec = max98090->codec;
+
+ if (!snd_soc_codec_is_active(codec))
+ return;
+
+ dev_info(codec->dev, "PLL unlocked\n");
+
+ /* Toggle shutdown OFF then ON */
+ snd_soc_update_bits(codec, M98090_REG_DEVICE_SHUTDOWN,
+ M98090_SHDNN_MASK, 0);
+ msleep(10);
+ snd_soc_update_bits(codec, M98090_REG_DEVICE_SHUTDOWN,
+ M98090_SHDNN_MASK, M98090_SHDNN_MASK);
+
+ /* Give PLL time to lock */
+ msleep(10);
+}
+
static void max98090_jack_work(struct work_struct *work)
{
struct max98090_priv *max98090 = container_of(work,
@@ -2063,12 +2159,16 @@ static void max98090_jack_work(struct work_struct *work)
static irqreturn_t max98090_interrupt(int irq, void *data)
{
- struct snd_soc_codec *codec = data;
- struct max98090_priv *max98090 = snd_soc_codec_get_drvdata(codec);
+ struct max98090_priv *max98090 = data;
+ struct snd_soc_codec *codec = max98090->codec;
int ret;
unsigned int mask;
unsigned int active;
+ /* Treat interrupt before codec is initialized as spurious */
+ if (codec == NULL)
+ return IRQ_NONE;
+
dev_dbg(codec->dev, "***** max98090_interrupt *****\n");
ret = regmap_read(max98090->regmap, M98090_REG_INTERRUPT_S, &mask);
@@ -2103,8 +2203,10 @@ static irqreturn_t max98090_interrupt(int irq, void *data)
if (active & M98090_SLD_MASK)
dev_dbg(codec->dev, "M98090_SLD_MASK\n");
- if (active & M98090_ULK_MASK)
- dev_err(codec->dev, "M98090_ULK_MASK\n");
+ if (active & M98090_ULK_MASK) {
+ dev_dbg(codec->dev, "M98090_ULK_MASK\n");
+ schedule_work(&max98090->pll_work);
+ }
if (active & M98090_JDET_MASK) {
dev_dbg(codec->dev, "M98090_JDET_MASK\n");
@@ -2177,6 +2279,7 @@ static struct snd_soc_dai_ops max98090_dai_ops = {
.set_tdm_slot = max98090_set_tdm_slot,
.hw_params = max98090_dai_hw_params,
.digital_mute = max98090_dai_digital_mute,
+ .trigger = max98090_dai_trigger,
};
static struct snd_soc_dai_driver max98090_dai[] = {
@@ -2230,7 +2333,6 @@ static int max98090_probe(struct snd_soc_codec *codec)
max98090->lin_state = 0;
max98090->pa1en = 0;
max98090->pa2en = 0;
- max98090->extmic_mux = 0;
ret = snd_soc_read(codec, M98090_REG_REVISION_ID);
if (ret < 0) {
@@ -2258,22 +2360,16 @@ static int max98090_probe(struct snd_soc_codec *codec)
max98090->jack_state = M98090_JACK_STATE_NO_HEADSET;
INIT_DELAYED_WORK(&max98090->jack_work, max98090_jack_work);
+ INIT_DELAYED_WORK(&max98090->pll_det_enable_work,
+ max98090_pll_det_enable_work);
+ INIT_WORK(&max98090->pll_det_disable_work,
+ max98090_pll_det_disable_work);
+ INIT_WORK(&max98090->pll_work, max98090_pll_work);
/* Enable jack detection */
snd_soc_write(codec, M98090_REG_JACK_DETECT,
M98090_JDETEN_MASK | M98090_JDEB_25MS);
- /* Register for interrupts */
- dev_dbg(codec->dev, "irq = %d\n", max98090->irq);
-
- ret = devm_request_threaded_irq(codec->dev, max98090->irq, NULL,
- max98090_interrupt, IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- "max98090_interrupt", codec);
- if (ret < 0) {
- dev_err(codec->dev, "request_irq failed: %d\n",
- ret);
- }
-
/*
* Clear any old interrupts.
* An old interrupt occurring prior to installing the ISR
@@ -2310,6 +2406,10 @@ static int max98090_remove(struct snd_soc_codec *codec)
struct max98090_priv *max98090 = snd_soc_codec_get_drvdata(codec);
cancel_delayed_work_sync(&max98090->jack_work);
+ cancel_delayed_work_sync(&max98090->pll_det_enable_work);
+ cancel_work_sync(&max98090->pll_det_disable_work);
+ cancel_work_sync(&max98090->pll_work);
+ max98090->codec = NULL;
return 0;
}
@@ -2362,7 +2462,6 @@ static int max98090_i2c_probe(struct i2c_client *i2c,
max98090->devtype = driver_data;
i2c_set_clientdata(i2c, max98090);
max98090->pdata = i2c->dev.platform_data;
- max98090->irq = i2c->irq;
max98090->regmap = devm_regmap_init_i2c(i2c, &max98090_regmap);
if (IS_ERR(max98090->regmap)) {
@@ -2371,6 +2470,15 @@ static int max98090_i2c_probe(struct i2c_client *i2c,
goto err_enable;
}
+ ret = devm_request_threaded_irq(&i2c->dev, i2c->irq, NULL,
+ max98090_interrupt, IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "max98090_interrupt", max98090);
+ if (ret < 0) {
+ dev_err(&i2c->dev, "request_irq failed: %d\n",
+ ret);
+ return ret;
+ }
+
ret = snd_soc_register_codec(&i2c->dev,
&soc_codec_dev_max98090, max98090_dai,
ARRAY_SIZE(max98090_dai));
diff --git a/sound/soc/codecs/max98090.h b/sound/soc/codecs/max98090.h
index cf1b6062ba8c..a5f6bada06da 100644
--- a/sound/soc/codecs/max98090.h
+++ b/sound/soc/codecs/max98090.h
@@ -11,11 +11,6 @@
#ifndef _MAX98090_H
#define _MAX98090_H
-#include <linux/version.h>
-
-/* One can override the Linux version here with an explicit version number */
-#define M98090_LINUX_VERSION LINUX_VERSION_CODE
-
/*
* MAX98090 Register Definitions
*/
@@ -1502,9 +1497,6 @@
#define M98090_REVID_WIDTH 8
#define M98090_REVID_NUM (1<<M98090_REVID_WIDTH)
-#define M98090_BYTE1(w) ((w >> 8) & 0xff)
-#define M98090_BYTE0(w) (w & 0xff)
-
/* Silicon revision number */
#define M98090_REVA 0x40
#define M98091_REVA 0x50
@@ -1529,9 +1521,11 @@ struct max98090_priv {
unsigned int bclk;
unsigned int lrclk;
struct max98090_cdata dai[1];
- int irq;
int jack_state;
struct delayed_work jack_work;
+ struct delayed_work pll_det_enable_work;
+ struct work_struct pll_det_disable_work;
+ struct work_struct pll_work;
struct snd_soc_jack *jack;
unsigned int dai_fmt;
int tdm_slots;
@@ -1539,7 +1533,6 @@ struct max98090_priv {
u8 lin_state;
unsigned int pa1en;
unsigned int pa2en;
- unsigned int extmic_mux;
unsigned int sidetone;
bool master;
};
diff --git a/sound/soc/codecs/mc13783.c b/sound/soc/codecs/mc13783.c
index 388f90a597fa..71f775aad7c7 100644
--- a/sound/soc/codecs/mc13783.c
+++ b/sound/soc/codecs/mc13783.c
@@ -765,12 +765,18 @@ static int __init mc13783_codec_probe(struct platform_device *pdev)
return -ENOSYS;
ret = of_property_read_u32(np, "adc-port", &priv->adc_ssi_port);
- if (ret)
- goto out;
+ if (ret) {
+ of_node_put(np);
+ return ret;
+ }
ret = of_property_read_u32(np, "dac-port", &priv->dac_ssi_port);
- if (ret)
- goto out;
+ if (ret) {
+ of_node_put(np);
+ return ret;
+ }
+
+ of_node_put(np);
}
dev_set_drvdata(&pdev->dev, priv);
@@ -783,8 +789,6 @@ static int __init mc13783_codec_probe(struct platform_device *pdev)
ret = snd_soc_register_codec(&pdev->dev, &soc_codec_dev_mc13783,
mc13783_dai_async, ARRAY_SIZE(mc13783_dai_async));
-out:
- of_node_put(np);
return ret;
}
diff --git a/sound/soc/codecs/ml26124.c b/sound/soc/codecs/ml26124.c
index e661e8420e3d..711f55039522 100644
--- a/sound/soc/codecs/ml26124.c
+++ b/sound/soc/codecs/ml26124.c
@@ -565,41 +565,19 @@ static struct snd_soc_dai_driver ml26124_dai = {
.symmetric_rates = 1,
};
-#ifdef CONFIG_PM
-static int ml26124_suspend(struct snd_soc_codec *codec)
-{
- ml26124_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
- return 0;
-}
-
-static int ml26124_resume(struct snd_soc_codec *codec)
-{
- ml26124_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
- return 0;
-}
-#else
-#define ml26124_suspend NULL
-#define ml26124_resume NULL
-#endif
-
static int ml26124_probe(struct snd_soc_codec *codec)
{
/* Software Reset */
snd_soc_update_bits(codec, ML26124_SW_RST, 0x01, 1);
snd_soc_update_bits(codec, ML26124_SW_RST, 0x01, 0);
- ml26124_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
return 0;
}
static struct snd_soc_codec_driver soc_codec_dev_ml26124 = {
.probe = ml26124_probe,
- .suspend = ml26124_suspend,
- .resume = ml26124_resume,
.set_bias_level = ml26124_set_bias_level,
+ .suspend_bias_off = true,
.dapm_widgets = ml26124_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(ml26124_dapm_widgets),
.dapm_routes = ml26124_intercon,
diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c
index b86b426f159d..4aa555cbcca8 100644
--- a/sound/soc/codecs/rt286.c
+++ b/sound/soc/codecs/rt286.c
@@ -269,6 +269,7 @@ static int rt286_hw_read(void *context, unsigned int reg, unsigned int *value)
return 0;
}
+#ifdef CONFIG_PM
static void rt286_index_sync(struct snd_soc_codec *codec)
{
struct rt286_priv *rt286 = snd_soc_codec_get_drvdata(codec);
@@ -279,6 +280,7 @@ static void rt286_index_sync(struct snd_soc_codec *codec)
rt286->index_cache[i].def);
}
}
+#endif
static int rt286_support_power_controls[] = {
RT286_DAC_OUT1,
diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
index f1ec6e6bd08a..c3f2decd643c 100644
--- a/sound/soc/codecs/rt5640.c
+++ b/sound/soc/codecs/rt5640.c
@@ -1906,6 +1906,32 @@ static int rt5640_set_bias_level(struct snd_soc_codec *codec,
return 0;
}
+int rt5640_dmic_enable(struct snd_soc_codec *codec,
+ bool dmic1_data_pin, bool dmic2_data_pin)
+{
+ struct rt5640_priv *rt5640 = snd_soc_codec_get_drvdata(codec);
+
+ regmap_update_bits(rt5640->regmap, RT5640_GPIO_CTRL1,
+ RT5640_GP2_PIN_MASK, RT5640_GP2_PIN_DMIC1_SCL);
+
+ if (dmic1_data_pin) {
+ regmap_update_bits(rt5640->regmap, RT5640_DMIC,
+ RT5640_DMIC_1_DP_MASK, RT5640_DMIC_1_DP_GPIO3);
+ regmap_update_bits(rt5640->regmap, RT5640_GPIO_CTRL1,
+ RT5640_GP3_PIN_MASK, RT5640_GP3_PIN_DMIC1_SDA);
+ }
+
+ if (dmic2_data_pin) {
+ regmap_update_bits(rt5640->regmap, RT5640_DMIC,
+ RT5640_DMIC_2_DP_MASK, RT5640_DMIC_2_DP_GPIO4);
+ regmap_update_bits(rt5640->regmap, RT5640_GPIO_CTRL1,
+ RT5640_GP4_PIN_MASK, RT5640_GP4_PIN_DMIC2_SDA);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rt5640_dmic_enable);
+
static int rt5640_probe(struct snd_soc_codec *codec)
{
struct rt5640_priv *rt5640 = snd_soc_codec_get_drvdata(codec);
@@ -1945,6 +1971,10 @@ static int rt5640_probe(struct snd_soc_codec *codec)
return -ENODEV;
}
+ if (rt5640->pdata.dmic_en)
+ rt5640_dmic_enable(codec, rt5640->pdata.dmic1_data_pin,
+ rt5640->pdata.dmic2_data_pin);
+
return 0;
}
@@ -2195,25 +2225,6 @@ static int rt5640_i2c_probe(struct i2c_client *i2c,
regmap_update_bits(rt5640->regmap, RT5640_IN3_IN4,
RT5640_IN_DF2, RT5640_IN_DF2);
- if (rt5640->pdata.dmic_en) {
- regmap_update_bits(rt5640->regmap, RT5640_GPIO_CTRL1,
- RT5640_GP2_PIN_MASK, RT5640_GP2_PIN_DMIC1_SCL);
-
- if (rt5640->pdata.dmic1_data_pin) {
- regmap_update_bits(rt5640->regmap, RT5640_DMIC,
- RT5640_DMIC_1_DP_MASK, RT5640_DMIC_1_DP_GPIO3);
- regmap_update_bits(rt5640->regmap, RT5640_GPIO_CTRL1,
- RT5640_GP3_PIN_MASK, RT5640_GP3_PIN_DMIC1_SDA);
- }
-
- if (rt5640->pdata.dmic2_data_pin) {
- regmap_update_bits(rt5640->regmap, RT5640_DMIC,
- RT5640_DMIC_2_DP_MASK, RT5640_DMIC_2_DP_GPIO4);
- regmap_update_bits(rt5640->regmap, RT5640_GPIO_CTRL1,
- RT5640_GP4_PIN_MASK, RT5640_GP4_PIN_DMIC2_SDA);
- }
- }
-
rt5640->hp_mute = 1;
return snd_soc_register_codec(&i2c->dev, &soc_codec_dev_rt5640,
diff --git a/sound/soc/codecs/rt5640.h b/sound/soc/codecs/rt5640.h
index 58ebe96b86da..3deb8babeabb 100644
--- a/sound/soc/codecs/rt5640.h
+++ b/sound/soc/codecs/rt5640.h
@@ -2097,4 +2097,7 @@ struct rt5640_priv {
bool hp_mute;
};
+int rt5640_dmic_enable(struct snd_soc_codec *codec,
+ bool dmic1_data_pin, bool dmic2_data_pin);
+
#endif
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index a7762d0a623e..3fb83bf09768 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -17,6 +17,7 @@
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
+#include <linux/gpio.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -2103,6 +2104,77 @@ static int rt5645_set_bias_level(struct snd_soc_codec *codec,
return 0;
}
+static int rt5645_jack_detect(struct snd_soc_codec *codec,
+ struct snd_soc_jack *jack)
+{
+ struct rt5645_priv *rt5645 = snd_soc_codec_get_drvdata(codec);
+ int gpio_state, jack_type = 0;
+ unsigned int val;
+
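+ /*
+ * Read the headphone-detect GPIO; when a jack is present, briefly
+ * power the mic bias path and read the combo-jack status to tell a
+ * headset (mic present) from plain headphones.
+ */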
+ gpio_state = gpio_get_value(rt5645->pdata.hp_det_gpio);
+
+ dev_dbg(codec->dev, "gpio = %d(%d)\n", rt5645->pdata.hp_det_gpio,
+ gpio_state);
+
+ if ((rt5645->pdata.gpio_hp_det_active_high && gpio_state) ||
+ (!rt5645->pdata.gpio_hp_det_active_high && !gpio_state)) {
+ snd_soc_dapm_force_enable_pin(&codec->dapm, "micbias1");
+ snd_soc_dapm_force_enable_pin(&codec->dapm, "micbias2");
+ snd_soc_dapm_force_enable_pin(&codec->dapm, "LDO2");
+ snd_soc_dapm_force_enable_pin(&codec->dapm, "Mic Det Power");
+ snd_soc_dapm_sync(&codec->dapm);
+
+ snd_soc_write(codec, RT5645_IN1_CTRL1, 0x0006);
+ snd_soc_write(codec, RT5645_JD_CTRL3, 0x00b0);
+
+ snd_soc_update_bits(codec, RT5645_IN1_CTRL2,
+ RT5645_CBJ_MN_JD, 0);
+ snd_soc_update_bits(codec, RT5645_IN1_CTRL2,
+ RT5645_CBJ_MN_JD, RT5645_CBJ_MN_JD);
+
+ msleep(400);
+ val = snd_soc_read(codec, RT5645_IN1_CTRL3) & 0x7;
+ dev_dbg(codec->dev, "val = %d\n", val);
+
+ if (val == 1 || val == 2)
+ jack_type = SND_JACK_HEADSET;
+ else
+ jack_type = SND_JACK_HEADPHONE;
+
+ snd_soc_dapm_disable_pin(&codec->dapm, "micbias1");
+ snd_soc_dapm_disable_pin(&codec->dapm, "micbias2");
+ snd_soc_dapm_disable_pin(&codec->dapm, "LDO2");
+ snd_soc_dapm_disable_pin(&codec->dapm, "Mic Det Power");
+ snd_soc_dapm_sync(&codec->dapm);
+ }
+
+ snd_soc_jack_report(rt5645->jack, jack_type, SND_JACK_HEADSET);
+
+ return 0;
+}
+
+int rt5645_set_jack_detect(struct snd_soc_codec *codec,
+ struct snd_soc_jack *jack)
+{
+ struct rt5645_priv *rt5645 = snd_soc_codec_get_drvdata(codec);
+
+ rt5645->jack = jack;
+
+ rt5645_jack_detect(codec, rt5645->jack);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rt5645_set_jack_detect);
+
+static irqreturn_t rt5645_irq(int irq, void *data)
+{
+ struct rt5645_priv *rt5645 = data;
+
+ rt5645_jack_detect(rt5645->codec, rt5645->jack);
+
+ return IRQ_HANDLED;
+}
+
static int rt5645_probe(struct snd_soc_codec *codec)
{
struct rt5645_priv *rt5645 = snd_soc_codec_get_drvdata(codec);
@@ -2250,6 +2322,7 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
if (rt5645 == NULL)
return -ENOMEM;
+ rt5645->i2c = i2c;
i2c_set_clientdata(i2c, rt5645);
if (pdata)
@@ -2345,12 +2418,38 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
}
+ if (rt5645->i2c->irq) {
+ ret = request_threaded_irq(rt5645->i2c->irq, NULL, rt5645_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
+ | IRQF_ONESHOT, "rt5645", rt5645);
+ if (ret)
+ dev_err(&i2c->dev, "Failed to request IRQ: %d\n", ret);
+ }
+
+ if (gpio_is_valid(rt5645->pdata.hp_det_gpio)) {
+ ret = gpio_request(rt5645->pdata.hp_det_gpio, "rt5645");
+ if (ret)
+ dev_err(&i2c->dev, "Failed to request hp_det_gpio\n");
+
+ ret = gpio_direction_input(rt5645->pdata.hp_det_gpio);
+ if (ret)
+ dev_err(&i2c->dev, "Failed to set hp_det_gpio direction\n");
+ }
+
return snd_soc_register_codec(&i2c->dev, &soc_codec_dev_rt5645,
rt5645_dai, ARRAY_SIZE(rt5645_dai));
}
static int rt5645_i2c_remove(struct i2c_client *i2c)
{
+ struct rt5645_priv *rt5645 = i2c_get_clientdata(i2c);
+
+ if (i2c->irq)
+ free_irq(i2c->irq, rt5645);
+
+ if (gpio_is_valid(rt5645->pdata.hp_det_gpio))
+ gpio_free(rt5645->pdata.hp_det_gpio);
+
snd_soc_unregister_codec(&i2c->dev);
return 0;
diff --git a/sound/soc/codecs/rt5645.h b/sound/soc/codecs/rt5645.h
index 355b7e9eefab..50c62c5668ea 100644
--- a/sound/soc/codecs/rt5645.h
+++ b/sound/soc/codecs/rt5645.h
@@ -2166,6 +2166,8 @@ struct rt5645_priv {
struct snd_soc_codec *codec;
struct rt5645_platform_data pdata;
struct regmap *regmap;
+ struct i2c_client *i2c;
+ struct snd_soc_jack *jack;
int sysclk;
int sysclk_src;
@@ -2178,4 +2180,7 @@ struct rt5645_priv {
int pll_out;
};
+int rt5645_set_jack_detect(struct snd_soc_codec *codec,
+ struct snd_soc_jack *jack);
+
#endif /* __RT5645_H__ */
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
index 5337c448b5e3..16aa4d99a713 100644
--- a/sound/soc/codecs/rt5677.c
+++ b/sound/soc/codecs/rt5677.c
@@ -15,10 +15,12 @@
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pm.h>
+#include <linux/of_gpio.h>
#include <linux/regmap.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
+#include <linux/gpio.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -540,6 +542,7 @@ static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -65625, 375, 0);
static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0);
static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0);
static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0);
+static const DECLARE_TLV_DB_SCALE(st_vol_tlv, -4650, 150, 0);
/* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
static unsigned int bst_tlv[] = {
@@ -604,6 +607,10 @@ static const struct snd_kcontrol_new rt5677_snd_controls[] = {
RT5677_MONO_ADC_L_VOL_SFT, RT5677_MONO_ADC_R_VOL_SFT, 127, 0,
adc_vol_tlv),
+ /* Sidetone Control */
+ SOC_SINGLE_TLV("Sidetone Volume", RT5677_SIDETONE_CTRL,
+ RT5677_ST_VOL_SFT, 31, 0, st_vol_tlv),
+
/* ADC Boost Volume Control */
SOC_DOUBLE_TLV("STO1 ADC Boost Volume", RT5677_STO1_2_ADC_BST,
RT5677_STO1_ADC_L_BST_SFT, RT5677_STO1_ADC_R_BST_SFT, 3, 0,
@@ -1700,14 +1707,19 @@ static const struct snd_soc_dapm_widget rt5677_dapm_widgets[] = {
SND_SOC_DAPM_INPUT("Haptic Generator"),
- SND_SOC_DAPM_PGA("DMIC1", RT5677_DMIC_CTRL1, RT5677_DMIC_1_EN_SFT, 0,
- NULL, 0),
- SND_SOC_DAPM_PGA("DMIC2", RT5677_DMIC_CTRL1, RT5677_DMIC_2_EN_SFT, 0,
- NULL, 0),
- SND_SOC_DAPM_PGA("DMIC3", RT5677_DMIC_CTRL1, RT5677_DMIC_3_EN_SFT, 0,
- NULL, 0),
- SND_SOC_DAPM_PGA("DMIC4", RT5677_DMIC_CTRL2, RT5677_DMIC_4_EN_SFT, 0,
- NULL, 0),
+ SND_SOC_DAPM_PGA("DMIC1", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("DMIC2", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("DMIC3", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("DMIC4", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ SND_SOC_DAPM_SUPPLY("DMIC1 power", RT5677_DMIC_CTRL1,
+ RT5677_DMIC_1_EN_SFT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("DMIC2 power", RT5677_DMIC_CTRL1,
+ RT5677_DMIC_2_EN_SFT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("DMIC3 power", RT5677_DMIC_CTRL1,
+ RT5677_DMIC_3_EN_SFT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("DMIC4 power", RT5677_DMIC_CTRL2,
+ RT5677_DMIC_4_EN_SFT, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("DMIC CLK", SND_SOC_NOPM, 0, 0,
set_dmic_clk, SND_SOC_DAPM_PRE_PMU),
@@ -1987,6 +1999,9 @@ static const struct snd_soc_dapm_widget rt5677_dapm_widgets[] = {
/* Sidetone Mux */
SND_SOC_DAPM_MUX("Sidetone Mux", SND_SOC_NOPM, 0, 0,
&rt5677_sidetone_mux),
+ SND_SOC_DAPM_SUPPLY("Sidetone Power", RT5677_SIDETONE_CTRL,
+ RT5677_ST_EN_SFT, 0, NULL, 0),
+
/* VAD Mux*/
SND_SOC_DAPM_MUX("VAD ADC Mux", SND_SOC_NOPM, 0, 0,
&rt5677_vad_src_mux),
@@ -2130,6 +2145,13 @@ static const struct snd_soc_dapm_route rt5677_dapm_routes[] = {
{ "DMIC L4", NULL, "DMIC CLK" },
{ "DMIC R4", NULL, "DMIC CLK" },
+ { "DMIC L1", NULL, "DMIC1 power" },
+ { "DMIC R1", NULL, "DMIC1 power" },
+ { "DMIC L3", NULL, "DMIC3 power" },
+ { "DMIC R3", NULL, "DMIC3 power" },
+ { "DMIC L4", NULL, "DMIC4 power" },
+ { "DMIC R4", NULL, "DMIC4 power" },
+
{ "BST1", NULL, "IN1P" },
{ "BST1", NULL, "IN1N" },
{ "BST2", NULL, "IN2P" },
@@ -2691,6 +2713,7 @@ static const struct snd_soc_dapm_route rt5677_dapm_routes[] = {
{ "Sidetone Mux", "DMIC4 L", "DMIC L4" },
{ "Sidetone Mux", "ADC1", "ADC 1" },
{ "Sidetone Mux", "ADC2", "ADC 2" },
+ { "Sidetone Mux", NULL, "Sidetone Power" },
{ "Stereo DAC MIXL", "ST L Switch", "Sidetone Mux" },
{ "Stereo DAC MIXL", "DAC1 L Switch", "DAC1 MIXL" },
@@ -2793,6 +2816,16 @@ static const struct snd_soc_dapm_route rt5677_dapm_routes[] = {
{ "PDM2R", NULL, "PDM2 R Mux" },
};
+static const struct snd_soc_dapm_route rt5677_dmic2_clk_1[] = {
+ { "DMIC L2", NULL, "DMIC1 power" },
+ { "DMIC R2", NULL, "DMIC1 power" },
+};
+
+static const struct snd_soc_dapm_route rt5677_dmic2_clk_2[] = {
+ { "DMIC L2", NULL, "DMIC2 power" },
+ { "DMIC R2", NULL, "DMIC2 power" },
+};
+
static int rt5677_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
{
@@ -3084,6 +3117,59 @@ static int rt5677_set_dai_pll(struct snd_soc_dai *dai, int pll_id, int source,
return 0;
}
+static int rt5677_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
+ unsigned int rx_mask, int slots, int slot_width)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ unsigned int val = 0;
+
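+ /*
+ * Bits 12:8 of TDMx_CTRL1: bit 12 is set when any TX/RX slot mask is
+ * given, bits 11:10 encode the slot count and bits 9:8 the slot width.
+ */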
+ if (rx_mask || tx_mask)
+ val |= (1 << 12);
+
+ switch (slots) {
+ case 4:
+ val |= (1 << 10);
+ break;
+ case 6:
+ val |= (2 << 10);
+ break;
+ case 8:
+ val |= (3 << 10);
+ break;
+ case 2:
+ default:
+ break;
+ }
+
+ switch (slot_width) {
+ case 20:
+ val |= (1 << 8);
+ break;
+ case 24:
+ val |= (2 << 8);
+ break;
+ case 32:
+ val |= (3 << 8);
+ break;
+ case 16:
+ default:
+ break;
+ }
+
+ switch (dai->id) {
+ case RT5677_AIF1:
+ snd_soc_update_bits(codec, RT5677_TDM1_CTRL1, 0x1f00, val);
+ break;
+ case RT5677_AIF2:
+ snd_soc_update_bits(codec, RT5677_TDM2_CTRL1, 0x1f00, val);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static int rt5677_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
@@ -3138,12 +3224,148 @@ static int rt5677_set_bias_level(struct snd_soc_codec *codec,
return 0;
}
+#ifdef CONFIG_GPIOLIB
+static inline struct rt5677_priv *gpio_to_rt5677(struct gpio_chip *chip)
+{
+ return container_of(chip, struct rt5677_priv, gpio_chip);
+}
+
+static void rt5677_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct rt5677_priv *rt5677 = gpio_to_rt5677(chip);
+
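+ /*
+ * GPIO1-GPIO5 each have three control bits in GPIO_CTRL2: bit
+ * (offset * 3 + 1) sets the output level and bit (offset * 3 + 2)
+ * the direction; GPIO6 lives in GPIO_CTRL3.
+ */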
+ switch (offset) {
+ case RT5677_GPIO1 ... RT5677_GPIO5:
+ regmap_update_bits(rt5677->regmap, RT5677_GPIO_CTRL2,
+ 0x1 << (offset * 3 + 1), !!value << (offset * 3 + 1));
+ break;
+
+ case RT5677_GPIO6:
+ regmap_update_bits(rt5677->regmap, RT5677_GPIO_CTRL3,
+ RT5677_GPIO6_OUT_MASK, !!value << RT5677_GPIO6_OUT_SFT);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static int rt5677_gpio_direction_out(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ struct rt5677_priv *rt5677 = gpio_to_rt5677(chip);
+
+ switch (offset) {
+ case RT5677_GPIO1 ... RT5677_GPIO5:
+ regmap_update_bits(rt5677->regmap, RT5677_GPIO_CTRL2,
+ 0x3 << (offset * 3 + 1),
+ (0x2 | !!value) << (offset * 3 + 1));
+ break;
+
+ case RT5677_GPIO6:
+ regmap_update_bits(rt5677->regmap, RT5677_GPIO_CTRL3,
+ RT5677_GPIO6_DIR_MASK | RT5677_GPIO6_OUT_MASK,
+ RT5677_GPIO6_DIR_OUT | !!value << RT5677_GPIO6_OUT_SFT);
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int rt5677_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct rt5677_priv *rt5677 = gpio_to_rt5677(chip);
+ int value, ret;
+
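+ /* GPIO_ST holds one status bit per pin, GPIO1 at bit 0 up to GPIO6 at bit 5 */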
+ ret = regmap_read(rt5677->regmap, RT5677_GPIO_ST, &value);
+ if (ret < 0)
+ return ret;
+
+ return (value & (0x1 << offset)) >> offset;
+}
+
+static int rt5677_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
+{
+ struct rt5677_priv *rt5677 = gpio_to_rt5677(chip);
+
+ switch (offset) {
+ case RT5677_GPIO1 ... RT5677_GPIO5:
+ regmap_update_bits(rt5677->regmap, RT5677_GPIO_CTRL2,
+ 0x1 << (offset * 3 + 2), 0x0);
+ break;
+
+ case RT5677_GPIO6:
+ regmap_update_bits(rt5677->regmap, RT5677_GPIO_CTRL3,
+ RT5677_GPIO6_DIR_MASK, RT5677_GPIO6_DIR_IN);
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static struct gpio_chip rt5677_template_chip = {
+ .label = "rt5677",
+ .owner = THIS_MODULE,
+ .direction_output = rt5677_gpio_direction_out,
+ .set = rt5677_gpio_set,
+ .direction_input = rt5677_gpio_direction_in,
+ .get = rt5677_gpio_get,
+ .can_sleep = 1,
+};
+
+static void rt5677_init_gpio(struct i2c_client *i2c)
+{
+ struct rt5677_priv *rt5677 = i2c_get_clientdata(i2c);
+ int ret;
+
+ rt5677->gpio_chip = rt5677_template_chip;
+ rt5677->gpio_chip.ngpio = RT5677_GPIO_NUM;
+ rt5677->gpio_chip.dev = &i2c->dev;
+ rt5677->gpio_chip.base = -1;
+
+ ret = gpiochip_add(&rt5677->gpio_chip);
+ if (ret != 0)
+ dev_err(&i2c->dev, "Failed to add GPIOs: %d\n", ret);
+}
+
+static void rt5677_free_gpio(struct i2c_client *i2c)
+{
+ struct rt5677_priv *rt5677 = i2c_get_clientdata(i2c);
+
+ gpiochip_remove(&rt5677->gpio_chip);
+}
+#else
+static void rt5677_init_gpio(struct i2c_client *i2c)
+{
+}
+
+static void rt5677_free_gpio(struct i2c_client *i2c)
+{
+}
+#endif
+
static int rt5677_probe(struct snd_soc_codec *codec)
{
struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
rt5677->codec = codec;
+ if (rt5677->pdata.dmic2_clk_pin == RT5677_DMIC_CLK2) {
+ snd_soc_dapm_add_routes(&codec->dapm,
+ rt5677_dmic2_clk_2,
+ ARRAY_SIZE(rt5677_dmic2_clk_2));
+ } else { /* use dmic1 clock by default */
+ snd_soc_dapm_add_routes(&codec->dapm,
+ rt5677_dmic2_clk_1,
+ ARRAY_SIZE(rt5677_dmic2_clk_1));
+ }
+
rt5677_set_bias_level(codec, SND_SOC_BIAS_OFF);
regmap_write(rt5677->regmap, RT5677_DIG_MISC, 0x0020);
@@ -3157,6 +3379,8 @@ static int rt5677_remove(struct snd_soc_codec *codec)
struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
regmap_write(rt5677->regmap, RT5677_RESET, 0x10ec);
+ if (gpio_is_valid(rt5677->pow_ldo2))
+ gpio_set_value_cansleep(rt5677->pow_ldo2, 0);
return 0;
}
@@ -3168,6 +3392,8 @@ static int rt5677_suspend(struct snd_soc_codec *codec)
regcache_cache_only(rt5677->regmap, true);
regcache_mark_dirty(rt5677->regmap);
+ if (gpio_is_valid(rt5677->pow_ldo2))
+ gpio_set_value_cansleep(rt5677->pow_ldo2, 0);
return 0;
}
@@ -3176,6 +3402,10 @@ static int rt5677_resume(struct snd_soc_codec *codec)
{
struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
+ if (gpio_is_valid(rt5677->pow_ldo2)) {
+ gpio_set_value_cansleep(rt5677->pow_ldo2, 1);
+ msleep(10);
+ }
regcache_cache_only(rt5677->regmap, false);
regcache_sync(rt5677->regmap);
@@ -3195,6 +3425,7 @@ static struct snd_soc_dai_ops rt5677_aif_dai_ops = {
.set_fmt = rt5677_set_dai_fmt,
.set_sysclk = rt5677_set_dai_sysclk,
.set_pll = rt5677_set_dai_pll,
+ .set_tdm_slot = rt5677_set_tdm_slot,
};
static struct snd_soc_dai_driver rt5677_dai[] = {
@@ -3333,6 +3564,35 @@ static const struct i2c_device_id rt5677_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, rt5677_i2c_id);
+static int rt5677_parse_dt(struct rt5677_priv *rt5677, struct device_node *np)
+{
+ rt5677->pdata.in1_diff = of_property_read_bool(np,
+ "realtek,in1-differential");
+ rt5677->pdata.in2_diff = of_property_read_bool(np,
+ "realtek,in2-differential");
+ rt5677->pdata.lout1_diff = of_property_read_bool(np,
+ "realtek,lout1-differential");
+ rt5677->pdata.lout2_diff = of_property_read_bool(np,
+ "realtek,lout2-differential");
+ rt5677->pdata.lout3_diff = of_property_read_bool(np,
+ "realtek,lout3-differential");
+
+ rt5677->pow_ldo2 = of_get_named_gpio(np,
+ "realtek,pow-ldo2-gpio", 0);
+
+ /*
+ * POW_LDO2 is optional (it may be statically tied on the board).
+ * -ENOENT means that the property doesn't exist, i.e. there is no
+ * GPIO, so it is not an error. Any other error code means the property
+ * exists, but could not be parsed.
+ */
+ if (!gpio_is_valid(rt5677->pow_ldo2) &&
+ (rt5677->pow_ldo2 != -ENOENT))
+ return rt5677->pow_ldo2;
+
+ return 0;
+}
+
static int rt5677_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
@@ -3351,6 +3611,33 @@ static int rt5677_i2c_probe(struct i2c_client *i2c,
if (pdata)
rt5677->pdata = *pdata;
+ if (i2c->dev.of_node) {
+ ret = rt5677_parse_dt(rt5677, i2c->dev.of_node);
+ if (ret) {
+ dev_err(&i2c->dev, "Failed to parse device tree: %d\n",
+ ret);
+ return ret;
+ }
+ } else {
+ rt5677->pow_ldo2 = -EINVAL;
+ }
+
+ if (gpio_is_valid(rt5677->pow_ldo2)) {
+ ret = devm_gpio_request_one(&i2c->dev, rt5677->pow_ldo2,
+ GPIOF_OUT_INIT_HIGH,
+ "RT5677 POW_LDO2");
+ if (ret < 0) {
+ dev_err(&i2c->dev, "Failed to request POW_LDO2 %d: %d\n",
+ rt5677->pow_ldo2, ret);
+ return ret;
+ }
+ /* Wait a while until the I2C bus becomes available. The datasheet
+ * does not specify exactly how long we should wait, but the startup
+ * sequence mentions at least a few milliseconds.
+ */
+ msleep(10);
+ }
+
rt5677->regmap = devm_regmap_init_i2c(i2c, &rt5677_regmap);
if (IS_ERR(rt5677->regmap)) {
ret = PTR_ERR(rt5677->regmap);
@@ -3381,6 +3668,29 @@ static int rt5677_i2c_probe(struct i2c_client *i2c,
regmap_update_bits(rt5677->regmap, RT5677_IN1,
RT5677_IN_DF2, RT5677_IN_DF2);
+ if (rt5677->pdata.lout1_diff)
+ regmap_update_bits(rt5677->regmap, RT5677_LOUT1,
+ RT5677_LOUT1_L_DF, RT5677_LOUT1_L_DF);
+
+ if (rt5677->pdata.lout2_diff)
+ regmap_update_bits(rt5677->regmap, RT5677_LOUT1,
+ RT5677_LOUT2_L_DF, RT5677_LOUT2_L_DF);
+
+ if (rt5677->pdata.lout3_diff)
+ regmap_update_bits(rt5677->regmap, RT5677_LOUT1,
+ RT5677_LOUT3_L_DF, RT5677_LOUT3_L_DF);
+
+ if (rt5677->pdata.dmic2_clk_pin == RT5677_DMIC_CLK2) {
+ regmap_update_bits(rt5677->regmap, RT5677_GEN_CTRL2,
+ RT5677_GPIO5_FUNC_MASK,
+ RT5677_GPIO5_FUNC_DMIC);
+ regmap_update_bits(rt5677->regmap, RT5677_GPIO_CTRL2,
+ RT5677_GPIO5_DIR_MASK,
+ RT5677_GPIO5_DIR_OUT);
+ }
+
+ rt5677_init_gpio(i2c);
+
return snd_soc_register_codec(&i2c->dev, &soc_codec_dev_rt5677,
rt5677_dai, ARRAY_SIZE(rt5677_dai));
}
@@ -3388,6 +3698,7 @@ static int rt5677_i2c_probe(struct i2c_client *i2c,
static int rt5677_i2c_remove(struct i2c_client *i2c)
{
snd_soc_unregister_codec(&i2c->dev);
+ rt5677_free_gpio(i2c);
return 0;
}
diff --git a/sound/soc/codecs/rt5677.h b/sound/soc/codecs/rt5677.h
index 863393e62096..d4eb6d5e6746 100644
--- a/sound/soc/codecs/rt5677.h
+++ b/sound/soc/codecs/rt5677.h
@@ -382,6 +382,10 @@
#define RT5677_ST_SEL_SFT 9
#define RT5677_ST_EN (0x1 << 6)
#define RT5677_ST_EN_SFT 6
+#define RT5677_ST_GAIN (0x1 << 5)
+#define RT5677_ST_GAIN_SFT 5
+#define RT5677_ST_VOL_MASK (0x1f << 0)
+#define RT5677_ST_VOL_SFT 0
/* Analog DAC1/2/3 Source Control (0x15) */
#define RT5677_ANA_DAC3_SRC_SEL_MASK (0x3 << 4)
@@ -1287,16 +1291,16 @@
#define RT5677_PLL1_PD_SFT 8
#define RT5677_PLL1_PD_1 (0x0 << 8)
#define RT5677_PLL1_PD_2 (0x1 << 8)
-#define RT5671_DAC_OSR_MASK (0x3 << 6)
-#define RT5671_DAC_OSR_SFT 6
-#define RT5671_DAC_OSR_128 (0x0 << 6)
-#define RT5671_DAC_OSR_64 (0x1 << 6)
-#define RT5671_DAC_OSR_32 (0x2 << 6)
-#define RT5671_ADC_OSR_MASK (0x3 << 4)
-#define RT5671_ADC_OSR_SFT 4
-#define RT5671_ADC_OSR_128 (0x0 << 4)
-#define RT5671_ADC_OSR_64 (0x1 << 4)
-#define RT5671_ADC_OSR_32 (0x2 << 4)
+#define RT5677_DAC_OSR_MASK (0x3 << 6)
+#define RT5677_DAC_OSR_SFT 6
+#define RT5677_DAC_OSR_128 (0x0 << 6)
+#define RT5677_DAC_OSR_64 (0x1 << 6)
+#define RT5677_DAC_OSR_32 (0x2 << 6)
+#define RT5677_ADC_OSR_MASK (0x3 << 4)
+#define RT5677_ADC_OSR_SFT 4
+#define RT5677_ADC_OSR_128 (0x0 << 4)
+#define RT5677_ADC_OSR_64 (0x1 << 4)
+#define RT5677_ADC_OSR_32 (0x2 << 4)
/* Global Clock Control 2 (0x81) */
#define RT5677_PLL2_PR_SRC_MASK (0x1 << 15)
@@ -1312,18 +1316,18 @@
#define RT5677_PLL2_SRC_BCLK4 (0x4 << 12)
#define RT5677_PLL2_SRC_RCCLK (0x5 << 12)
#define RT5677_PLL2_SRC_SLIM (0x6 << 12)
-#define RT5671_DSP_ASRC_O_SRC (0x3 << 10)
-#define RT5671_DSP_ASRC_O_SRC_SFT 10
-#define RT5671_DSP_ASRC_O_MCLK (0x0 << 10)
-#define RT5671_DSP_ASRC_O_PLL1 (0x1 << 10)
-#define RT5671_DSP_ASRC_O_SLIM (0x2 << 10)
-#define RT5671_DSP_ASRC_O_RCCLK (0x3 << 10)
-#define RT5671_DSP_ASRC_I_SRC (0x3 << 8)
-#define RT5671_DSP_ASRC_I_SRC_SFT 8
-#define RT5671_DSP_ASRC_I_MCLK (0x0 << 8)
-#define RT5671_DSP_ASRC_I_PLL1 (0x1 << 8)
-#define RT5671_DSP_ASRC_I_SLIM (0x2 << 8)
-#define RT5671_DSP_ASRC_I_RCCLK (0x3 << 8)
+#define RT5677_DSP_ASRC_O_SRC (0x3 << 10)
+#define RT5677_DSP_ASRC_O_SRC_SFT 10
+#define RT5677_DSP_ASRC_O_MCLK (0x0 << 10)
+#define RT5677_DSP_ASRC_O_PLL1 (0x1 << 10)
+#define RT5677_DSP_ASRC_O_SLIM (0x2 << 10)
+#define RT5677_DSP_ASRC_O_RCCLK (0x3 << 10)
+#define RT5677_DSP_ASRC_I_SRC (0x3 << 8)
+#define RT5677_DSP_ASRC_I_SRC_SFT 8
+#define RT5677_DSP_ASRC_I_MCLK (0x0 << 8)
+#define RT5677_DSP_ASRC_I_PLL1 (0x1 << 8)
+#define RT5677_DSP_ASRC_I_SLIM (0x2 << 8)
+#define RT5677_DSP_ASRC_I_RCCLK (0x3 << 8)
#define RT5677_DSP_CLK_SRC_MASK (0x1 << 7)
#define RT5677_DSP_CLK_SRC_SFT 7
#define RT5677_DSP_CLK_SRC_PLL2 (0x0 << 7)
@@ -1363,6 +1367,110 @@
#define RT5677_SEL_SRC_IB01 (0x1 << 0)
#define RT5677_SEL_SRC_IB01_SFT 0
+/* GPIO status (0xbf) */
+#define RT5677_GPIO6_STATUS_MASK (0x1 << 5)
+#define RT5677_GPIO6_STATUS_SFT 5
+#define RT5677_GPIO5_STATUS_MASK (0x1 << 4)
+#define RT5677_GPIO5_STATUS_SFT 4
+#define RT5677_GPIO4_STATUS_MASK (0x1 << 3)
+#define RT5677_GPIO4_STATUS_SFT 3
+#define RT5677_GPIO3_STATUS_MASK (0x1 << 2)
+#define RT5677_GPIO3_STATUS_SFT 2
+#define RT5677_GPIO2_STATUS_MASK (0x1 << 1)
+#define RT5677_GPIO2_STATUS_SFT 1
+#define RT5677_GPIO1_STATUS_MASK (0x1 << 0)
+#define RT5677_GPIO1_STATUS_SFT 0
+
+/* GPIO Control 1 (0xc0) */
+#define RT5677_GPIO1_PIN_MASK (0x1 << 15)
+#define RT5677_GPIO1_PIN_SFT 15
+#define RT5677_GPIO1_PIN_GPIO1 (0x0 << 15)
+#define RT5677_GPIO1_PIN_IRQ (0x1 << 15)
+#define RT5677_IPTV_MODE_MASK (0x1 << 14)
+#define RT5677_IPTV_MODE_SFT 14
+#define RT5677_IPTV_MODE_GPIO (0x0 << 14)
+#define RT5677_IPTV_MODE_IPTV (0x1 << 14)
+#define RT5677_FUNC_MODE_MASK (0x1 << 13)
+#define RT5677_FUNC_MODE_SFT 13
+#define RT5677_FUNC_MODE_DMIC_GPIO (0x0 << 13)
+#define RT5677_FUNC_MODE_JTAG (0x1 << 13)
+
+/* GPIO Control 2 (0xc1) */
+#define RT5677_GPIO5_DIR_MASK (0x1 << 14)
+#define RT5677_GPIO5_DIR_SFT 14
+#define RT5677_GPIO5_DIR_IN (0x0 << 14)
+#define RT5677_GPIO5_DIR_OUT (0x1 << 14)
+#define RT5677_GPIO5_OUT_MASK (0x1 << 13)
+#define RT5677_GPIO5_OUT_SFT 13
+#define RT5677_GPIO5_OUT_LO (0x0 << 13)
+#define RT5677_GPIO5_OUT_HI (0x1 << 13)
+#define RT5677_GPIO5_P_MASK (0x1 << 12)
+#define RT5677_GPIO5_P_SFT 12
+#define RT5677_GPIO5_P_NOR (0x0 << 12)
+#define RT5677_GPIO5_P_INV (0x1 << 12)
+#define RT5677_GPIO4_DIR_MASK (0x1 << 11)
+#define RT5677_GPIO4_DIR_SFT 11
+#define RT5677_GPIO4_DIR_IN (0x0 << 11)
+#define RT5677_GPIO4_DIR_OUT (0x1 << 11)
+#define RT5677_GPIO4_OUT_MASK (0x1 << 10)
+#define RT5677_GPIO4_OUT_SFT 10
+#define RT5677_GPIO4_OUT_LO (0x0 << 10)
+#define RT5677_GPIO4_OUT_HI (0x1 << 10)
+#define RT5677_GPIO4_P_MASK (0x1 << 9)
+#define RT5677_GPIO4_P_SFT 9
+#define RT5677_GPIO4_P_NOR (0x0 << 9)
+#define RT5677_GPIO4_P_INV (0x1 << 9)
+#define RT5677_GPIO3_DIR_MASK (0x1 << 8)
+#define RT5677_GPIO3_DIR_SFT 8
+#define RT5677_GPIO3_DIR_IN (0x0 << 8)
+#define RT5677_GPIO3_DIR_OUT (0x1 << 8)
+#define RT5677_GPIO3_OUT_MASK (0x1 << 7)
+#define RT5677_GPIO3_OUT_SFT 7
+#define RT5677_GPIO3_OUT_LO (0x0 << 7)
+#define RT5677_GPIO3_OUT_HI (0x1 << 7)
+#define RT5677_GPIO3_P_MASK (0x1 << 6)
+#define RT5677_GPIO3_P_SFT 6
+#define RT5677_GPIO3_P_NOR (0x0 << 6)
+#define RT5677_GPIO3_P_INV (0x1 << 6)
+#define RT5677_GPIO2_DIR_MASK (0x1 << 5)
+#define RT5677_GPIO2_DIR_SFT 5
+#define RT5677_GPIO2_DIR_IN (0x0 << 5)
+#define RT5677_GPIO2_DIR_OUT (0x1 << 5)
+#define RT5677_GPIO2_OUT_MASK (0x1 << 4)
+#define RT5677_GPIO2_OUT_SFT 4
+#define RT5677_GPIO2_OUT_LO (0x0 << 4)
+#define RT5677_GPIO2_OUT_HI (0x1 << 4)
+#define RT5677_GPIO2_P_MASK (0x1 << 3)
+#define RT5677_GPIO2_P_SFT 3
+#define RT5677_GPIO2_P_NOR (0x0 << 3)
+#define RT5677_GPIO2_P_INV (0x1 << 3)
+#define RT5677_GPIO1_DIR_MASK (0x1 << 2)
+#define RT5677_GPIO1_DIR_SFT 2
+#define RT5677_GPIO1_DIR_IN (0x0 << 2)
+#define RT5677_GPIO1_DIR_OUT (0x1 << 2)
+#define RT5677_GPIO1_OUT_MASK (0x1 << 1)
+#define RT5677_GPIO1_OUT_SFT 1
+#define RT5677_GPIO1_OUT_LO (0x0 << 1)
+#define RT5677_GPIO1_OUT_HI (0x1 << 1)
+#define RT5677_GPIO1_P_MASK (0x1 << 0)
+#define RT5677_GPIO1_P_SFT 0
+#define RT5677_GPIO1_P_NOR (0x0 << 0)
+#define RT5677_GPIO1_P_INV (0x1 << 0)
+
+/* GPIO Control 3 (0xc2) */
+#define RT5677_GPIO6_DIR_MASK (0x1 << 2)
+#define RT5677_GPIO6_DIR_SFT 2
+#define RT5677_GPIO6_DIR_IN (0x0 << 2)
+#define RT5677_GPIO6_DIR_OUT (0x1 << 2)
+#define RT5677_GPIO6_OUT_MASK (0x1 << 1)
+#define RT5677_GPIO6_OUT_SFT 1
+#define RT5677_GPIO6_OUT_LO (0x0 << 1)
+#define RT5677_GPIO6_OUT_HI (0x1 << 1)
+#define RT5677_GPIO6_P_MASK (0x1 << 0)
+#define RT5677_GPIO6_P_SFT 0
+#define RT5677_GPIO6_P_NOR (0x0 << 0)
+#define RT5677_GPIO6_P_INV (0x1 << 0)
+
/* Virtual DSP Mixer Control (0xf7 0xf8 0xf9) */
#define RT5677_DSP_IB_01_H (0x1 << 15)
#define RT5677_DSP_IB_01_H_SFT 15
@@ -1393,6 +1501,11 @@
#define RT5677_DSP_IB_9_L (0x1 << 1)
#define RT5677_DSP_IB_9_L_SFT 1
+/* General Control2 (0xfc)*/
+#define RT5677_GPIO5_FUNC_MASK (0x1 << 9)
+#define RT5677_GPIO5_FUNC_GPIO (0x0 << 9)
+#define RT5677_GPIO5_FUNC_DMIC (0x1 << 9)
+
/* System Clock Source */
enum {
RT5677_SCLK_S_MCLK,
@@ -1418,6 +1531,16 @@ enum {
RT5677_AIFS,
};
+enum {
+ RT5677_GPIO1,
+ RT5677_GPIO2,
+ RT5677_GPIO3,
+ RT5677_GPIO4,
+ RT5677_GPIO5,
+ RT5677_GPIO6,
+ RT5677_GPIO_NUM,
+};
+
struct rt5677_priv {
struct snd_soc_codec *codec;
struct rt5677_platform_data pdata;
@@ -1431,6 +1554,10 @@ struct rt5677_priv {
int pll_src;
int pll_in;
int pll_out;
+ int pow_ldo2; /* POW_LDO2 pin */
+#ifdef CONFIG_GPIOLIB
+ struct gpio_chip gpio_chip;
+#endif
};
#endif /* __RT5677_H__ */
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index e997d271728d..6bb77d76561b 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -626,6 +626,9 @@ static int sgtl5000_set_clock(struct snd_soc_codec *codec, int frame_rate)
} else {
dev_err(codec->dev,
"PLL not supported in slave mode\n");
+ dev_err(codec->dev, "ratio %d is not supported. "
+ "SYS_MCLK needs to be 256, 384 or 512 * fs\n",
+ sgtl5000->sysclk / sys_fs);
return -EINVAL;
}
}
@@ -1073,26 +1076,6 @@ static bool sgtl5000_readable(struct device *dev, unsigned int reg)
}
}
-#ifdef CONFIG_SUSPEND
-static int sgtl5000_suspend(struct snd_soc_codec *codec)
-{
- sgtl5000_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
- return 0;
-}
-
-static int sgtl5000_resume(struct snd_soc_codec *codec)
-{
- /* Bring the codec back up to standby to enable regulators */
- sgtl5000_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
- return 0;
-}
-#else
-#define sgtl5000_suspend NULL
-#define sgtl5000_resume NULL
-#endif /* CONFIG_SUSPEND */
-
/*
* sgtl5000 has 3 internal power supplies:
* 1. VAG, normally set to vdda/2
@@ -1352,11 +1335,6 @@ static int sgtl5000_probe(struct snd_soc_codec *codec)
*/
snd_soc_write(codec, SGTL5000_DAP_CTRL, 0);
- /* leading to standby state */
- ret = sgtl5000_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- if (ret)
- goto err;
-
return 0;
err:
@@ -1373,8 +1351,6 @@ static int sgtl5000_remove(struct snd_soc_codec *codec)
{
struct sgtl5000_priv *sgtl5000 = snd_soc_codec_get_drvdata(codec);
- sgtl5000_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
regulator_bulk_disable(ARRAY_SIZE(sgtl5000->supplies),
sgtl5000->supplies);
regulator_bulk_free(ARRAY_SIZE(sgtl5000->supplies),
@@ -1387,9 +1363,8 @@ static int sgtl5000_remove(struct snd_soc_codec *codec)
static struct snd_soc_codec_driver sgtl5000_driver = {
.probe = sgtl5000_probe,
.remove = sgtl5000_remove,
- .suspend = sgtl5000_suspend,
- .resume = sgtl5000_resume,
.set_bias_level = sgtl5000_set_bias_level,
+ .suspend_bias_off = true,
.controls = sgtl5000_snd_controls,
.num_controls = ARRAY_SIZE(sgtl5000_snd_controls),
.dapm_widgets = sgtl5000_dapm_widgets,
@@ -1442,6 +1417,7 @@ static int sgtl5000_i2c_probe(struct i2c_client *client,
{
struct sgtl5000_priv *sgtl5000;
int ret, reg, rev;
+ unsigned int mclk;
sgtl5000 = devm_kzalloc(&client->dev, sizeof(struct sgtl5000_priv),
GFP_KERNEL);
@@ -1465,6 +1441,14 @@ static int sgtl5000_i2c_probe(struct i2c_client *client,
return ret;
}
+ /* SGTL5000 SYS_MCLK should be between 8 and 27 MHz */
+ mclk = clk_get_rate(sgtl5000->mclk);
+ if (mclk < 8000000 || mclk > 27000000) {
+ dev_err(&client->dev, "Invalid SYS_CLK frequency: %u.%03uMHz\n",
+ mclk / 1000000, mclk / 1000 % 1000);
+ return -EINVAL;
+ }
+
ret = clk_prepare_enable(sgtl5000->mclk);
if (ret)
return ret;
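
The sgtl5000 change above (and the ssm2602 change further down) drops the hand-written suspend/resume callbacks in favour of the core's suspend_bias_off flag, which moves the bias level to OFF on suspend and back to STANDBY on resume. A minimal sketch of the resulting driver shape, using a hypothetical "foo" codec rather than any driver in this series:

#include <sound/soc.h>

static int foo_set_bias_level(struct snd_soc_codec *codec,
			      enum snd_soc_bias_level level)
{
	/* program the codec's power registers for the requested level */
	return 0;
}

static struct snd_soc_codec_driver foo_codec_driver = {
	/* No .suspend/.resume needed: the ASoC core now drives the bias
	 * level to OFF on suspend and back to STANDBY on resume.
	 */
	.set_bias_level   = foo_set_bias_level,
	.suspend_bias_off = true,
};
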
diff --git a/sound/soc/codecs/ssm2518.c b/sound/soc/codecs/ssm2518.c
index e8680bea5f86..67ea55adb307 100644
--- a/sound/soc/codecs/ssm2518.c
+++ b/sound/soc/codecs/ssm2518.c
@@ -646,17 +646,6 @@ static struct snd_soc_dai_driver ssm2518_dai = {
.ops = &ssm2518_dai_ops,
};
-static int ssm2518_probe(struct snd_soc_codec *codec)
-{
- return ssm2518_set_bias_level(codec, SND_SOC_BIAS_OFF);
-}
-
-static int ssm2518_remove(struct snd_soc_codec *codec)
-{
- ssm2518_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
static int ssm2518_set_sysclk(struct snd_soc_codec *codec, int clk_id,
int source, unsigned int freq, int dir)
{
@@ -727,8 +716,6 @@ static int ssm2518_set_sysclk(struct snd_soc_codec *codec, int clk_id,
}
static struct snd_soc_codec_driver ssm2518_codec_driver = {
- .probe = ssm2518_probe,
- .remove = ssm2518_remove,
.set_bias_level = ssm2518_set_bias_level,
.set_sysclk = ssm2518_set_sysclk,
.idle_bias_off = true,
diff --git a/sound/soc/codecs/ssm2602-i2c.c b/sound/soc/codecs/ssm2602-i2c.c
index abd63d537173..0d9779d6bfda 100644
--- a/sound/soc/codecs/ssm2602-i2c.c
+++ b/sound/soc/codecs/ssm2602-i2c.c
@@ -41,10 +41,19 @@ static const struct i2c_device_id ssm2602_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ssm2602_i2c_id);
+static const struct of_device_id ssm2602_of_match[] = {
+ { .compatible = "adi,ssm2602", },
+ { .compatible = "adi,ssm2603", },
+ { .compatible = "adi,ssm2604", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ssm2602_of_match);
+
static struct i2c_driver ssm2602_i2c_driver = {
.driver = {
.name = "ssm2602",
.owner = THIS_MODULE,
+ .of_match_table = ssm2602_of_match,
},
.probe = ssm2602_i2c_probe,
.remove = ssm2602_i2c_remove,
diff --git a/sound/soc/codecs/ssm2602-spi.c b/sound/soc/codecs/ssm2602-spi.c
index 2bf55e24a7bb..b5df14fbe3ad 100644
--- a/sound/soc/codecs/ssm2602-spi.c
+++ b/sound/soc/codecs/ssm2602-spi.c
@@ -26,10 +26,17 @@ static int ssm2602_spi_remove(struct spi_device *spi)
return 0;
}
+static const struct of_device_id ssm2602_of_match[] = {
+ { .compatible = "adi,ssm2602", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ssm2602_of_match);
+
static struct spi_driver ssm2602_spi_driver = {
.driver = {
.name = "ssm2602",
.owner = THIS_MODULE,
+ .of_match_table = ssm2602_of_match,
},
.probe = ssm2602_spi_probe,
.remove = ssm2602_spi_remove,
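
The ssm2602 I2C and SPI glue hunks above register OF match tables so the parts can be probed from Device Tree and the modules auto-loaded from the compatible string. A minimal sketch of that pattern for a hypothetical "foo" I2C codec (the vendor prefix is illustrative only):

#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/of.h>

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo" },		/* hypothetical compatible */
	{ }
};
/* Generates the alias used for DT-based module autoloading */
MODULE_DEVICE_TABLE(of, foo_of_match);

static struct i2c_driver foo_i2c_driver = {
	.driver = {
		.name		= "foo",
		.of_match_table	= foo_of_match,
	},
	/* .probe/.remove/.id_table as in the real drivers above */
};
module_i2c_driver(foo_i2c_driver);

MODULE_LICENSE("GPL");
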
diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c
index 4021cd435740..314eaece1b7d 100644
--- a/sound/soc/codecs/ssm2602.c
+++ b/sound/soc/codecs/ssm2602.c
@@ -192,7 +192,7 @@ static const struct snd_pcm_hw_constraint_list ssm2602_constraints_12288000 = {
};
static const unsigned int ssm2602_rates_11289600[] = {
- 8000, 44100, 88200,
+ 8000, 11025, 22050, 44100, 88200,
};
static const struct snd_pcm_hw_constraint_list ssm2602_constraints_11289600 = {
@@ -237,6 +237,16 @@ static const struct ssm2602_coeff ssm2602_coeff_table[] = {
{18432000, 96000, SSM2602_COEFF_SRATE(0x7, 0x1, 0x0)},
{12000000, 96000, SSM2602_COEFF_SRATE(0x7, 0x0, 0x1)},
+ /* 11.025k */
+ {11289600, 11025, SSM2602_COEFF_SRATE(0xc, 0x0, 0x0)},
+ {16934400, 11025, SSM2602_COEFF_SRATE(0xc, 0x1, 0x0)},
+ {12000000, 11025, SSM2602_COEFF_SRATE(0xc, 0x1, 0x1)},
+
+ /* 22.05k */
+ {11289600, 22050, SSM2602_COEFF_SRATE(0xd, 0x0, 0x0)},
+ {16934400, 22050, SSM2602_COEFF_SRATE(0xd, 0x1, 0x0)},
+ {12000000, 22050, SSM2602_COEFF_SRATE(0xd, 0x1, 0x1)},
+
/* 44.1k */
{11289600, 44100, SSM2602_COEFF_SRATE(0x8, 0x0, 0x0)},
{16934400, 44100, SSM2602_COEFF_SRATE(0x8, 0x1, 0x0)},
@@ -467,7 +477,8 @@ static int ssm2602_set_bias_level(struct snd_soc_codec *codec,
return 0;
}
-#define SSM2602_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\
+#define SSM2602_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |\
+ SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 |\
SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |\
SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 |\
SNDRV_PCM_RATE_96000)
@@ -502,18 +513,11 @@ static struct snd_soc_dai_driver ssm2602_dai = {
.symmetric_samplebits = 1,
};
-static int ssm2602_suspend(struct snd_soc_codec *codec)
-{
- ssm2602_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
static int ssm2602_resume(struct snd_soc_codec *codec)
{
struct ssm2602_priv *ssm2602 = snd_soc_codec_get_drvdata(codec);
regcache_sync(ssm2602->regmap);
- ssm2602_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
return 0;
}
@@ -586,27 +590,14 @@ static int ssm260x_codec_probe(struct snd_soc_codec *codec)
break;
}
- if (ret)
- return ret;
-
- ssm2602_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
- return 0;
-}
-
-/* remove everything here */
-static int ssm2602_remove(struct snd_soc_codec *codec)
-{
- ssm2602_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
+ return ret;
}
static struct snd_soc_codec_driver soc_codec_dev_ssm2602 = {
.probe = ssm260x_codec_probe,
- .remove = ssm2602_remove,
- .suspend = ssm2602_suspend,
.resume = ssm2602_resume,
.set_bias_level = ssm2602_set_bias_level,
+ .suspend_bias_off = true,
.controls = ssm260x_snd_controls,
.num_controls = ARRAY_SIZE(ssm260x_snd_controls),
diff --git a/sound/soc/codecs/ssm4567.c b/sound/soc/codecs/ssm4567.c
new file mode 100644
index 000000000000..4b5c17f8507e
--- /dev/null
+++ b/sound/soc/codecs/ssm4567.c
@@ -0,0 +1,343 @@
+/*
+ * SSM4567 amplifier audio driver
+ *
+ * Copyright 2014 Google Chromium project.
+ * Author: Anatol Pomozov <anatol@chromium.org>
+ *
+ * Based on code copyright/by:
+ * Copyright 2013 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#define SSM4567_REG_POWER_CTRL 0x00
+#define SSM4567_REG_AMP_SNS_CTRL 0x01
+#define SSM4567_REG_DAC_CTRL 0x02
+#define SSM4567_REG_DAC_VOLUME 0x03
+#define SSM4567_REG_SAI_CTRL_1 0x04
+#define SSM4567_REG_SAI_CTRL_2 0x05
+#define SSM4567_REG_SAI_PLACEMENT_1 0x06
+#define SSM4567_REG_SAI_PLACEMENT_2 0x07
+#define SSM4567_REG_SAI_PLACEMENT_3 0x08
+#define SSM4567_REG_SAI_PLACEMENT_4 0x09
+#define SSM4567_REG_SAI_PLACEMENT_5 0x0a
+#define SSM4567_REG_SAI_PLACEMENT_6 0x0b
+#define SSM4567_REG_BATTERY_V_OUT 0x0c
+#define SSM4567_REG_LIMITER_CTRL_1 0x0d
+#define SSM4567_REG_LIMITER_CTRL_2 0x0e
+#define SSM4567_REG_LIMITER_CTRL_3 0x0f
+#define SSM4567_REG_STATUS_1 0x10
+#define SSM4567_REG_STATUS_2 0x11
+#define SSM4567_REG_FAULT_CTRL 0x12
+#define SSM4567_REG_PDM_CTRL 0x13
+#define SSM4567_REG_MCLK_RATIO 0x14
+#define SSM4567_REG_BOOST_CTRL_1 0x15
+#define SSM4567_REG_BOOST_CTRL_2 0x16
+#define SSM4567_REG_SOFT_RESET 0xff
+
+/* POWER_CTRL */
+#define SSM4567_POWER_APWDN_EN BIT(7)
+#define SSM4567_POWER_BSNS_PWDN BIT(6)
+#define SSM4567_POWER_VSNS_PWDN BIT(5)
+#define SSM4567_POWER_ISNS_PWDN BIT(4)
+#define SSM4567_POWER_BOOST_PWDN BIT(3)
+#define SSM4567_POWER_AMP_PWDN BIT(2)
+#define SSM4567_POWER_VBAT_ONLY BIT(1)
+#define SSM4567_POWER_SPWDN BIT(0)
+
+/* DAC_CTRL */
+#define SSM4567_DAC_HV BIT(7)
+#define SSM4567_DAC_MUTE BIT(6)
+#define SSM4567_DAC_HPF BIT(5)
+#define SSM4567_DAC_LPM BIT(4)
+#define SSM4567_DAC_FS_MASK 0x7
+#define SSM4567_DAC_FS_8000_12000 0x0
+#define SSM4567_DAC_FS_16000_24000 0x1
+#define SSM4567_DAC_FS_32000_48000 0x2
+#define SSM4567_DAC_FS_64000_96000 0x3
+#define SSM4567_DAC_FS_128000_192000 0x4
+
+struct ssm4567 {
+ struct regmap *regmap;
+};
+
+static const struct reg_default ssm4567_reg_defaults[] = {
+ { SSM4567_REG_POWER_CTRL, 0x81 },
+ { SSM4567_REG_AMP_SNS_CTRL, 0x09 },
+ { SSM4567_REG_DAC_CTRL, 0x32 },
+ { SSM4567_REG_DAC_VOLUME, 0x40 },
+ { SSM4567_REG_SAI_CTRL_1, 0x00 },
+ { SSM4567_REG_SAI_CTRL_2, 0x08 },
+ { SSM4567_REG_SAI_PLACEMENT_1, 0x01 },
+ { SSM4567_REG_SAI_PLACEMENT_2, 0x20 },
+ { SSM4567_REG_SAI_PLACEMENT_3, 0x32 },
+ { SSM4567_REG_SAI_PLACEMENT_4, 0x07 },
+ { SSM4567_REG_SAI_PLACEMENT_5, 0x07 },
+ { SSM4567_REG_SAI_PLACEMENT_6, 0x07 },
+ { SSM4567_REG_BATTERY_V_OUT, 0x00 },
+ { SSM4567_REG_LIMITER_CTRL_1, 0xa4 },
+ { SSM4567_REG_LIMITER_CTRL_2, 0x73 },
+ { SSM4567_REG_LIMITER_CTRL_3, 0x00 },
+ { SSM4567_REG_STATUS_1, 0x00 },
+ { SSM4567_REG_STATUS_2, 0x00 },
+ { SSM4567_REG_FAULT_CTRL, 0x30 },
+ { SSM4567_REG_PDM_CTRL, 0x40 },
+ { SSM4567_REG_MCLK_RATIO, 0x11 },
+ { SSM4567_REG_BOOST_CTRL_1, 0x03 },
+ { SSM4567_REG_BOOST_CTRL_2, 0x00 },
+ { SSM4567_REG_SOFT_RESET, 0x00 },
+};
+
+
+static bool ssm4567_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case SSM4567_REG_POWER_CTRL ... SSM4567_REG_BOOST_CTRL_2:
+ return true;
+ default:
+ return false;
+ }
+
+}
+
+static bool ssm4567_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case SSM4567_REG_POWER_CTRL ... SSM4567_REG_SAI_PLACEMENT_6:
+ case SSM4567_REG_LIMITER_CTRL_1 ... SSM4567_REG_LIMITER_CTRL_3:
+ case SSM4567_REG_FAULT_CTRL ... SSM4567_REG_BOOST_CTRL_2:
+ /* The datasheet states that the soft reset register is read-only,
+ * but logically it is write-only. */
+ case SSM4567_REG_SOFT_RESET:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool ssm4567_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case SSM4567_REG_BATTERY_V_OUT:
+ case SSM4567_REG_STATUS_1 ... SSM4567_REG_STATUS_2:
+ case SSM4567_REG_SOFT_RESET:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const DECLARE_TLV_DB_MINMAX_MUTE(ssm4567_vol_tlv, -7125, 2400);
+
+static const struct snd_kcontrol_new ssm4567_snd_controls[] = {
+ SOC_SINGLE_TLV("Master Playback Volume", SSM4567_REG_DAC_VOLUME, 0,
+ 0xff, 1, ssm4567_vol_tlv),
+ SOC_SINGLE("DAC Low Power Mode Switch", SSM4567_REG_DAC_CTRL, 4, 1, 0),
+};
+
+static const struct snd_soc_dapm_widget ssm4567_dapm_widgets[] = {
+ SND_SOC_DAPM_DAC("DAC", "HiFi Playback", SSM4567_REG_POWER_CTRL, 2, 1),
+
+ SND_SOC_DAPM_OUTPUT("OUT"),
+};
+
+static const struct snd_soc_dapm_route ssm4567_routes[] = {
+ { "OUT", NULL, "DAC" },
+};
+
+static int ssm4567_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct ssm4567 *ssm4567 = snd_soc_codec_get_drvdata(codec);
+ unsigned int rate = params_rate(params);
+ unsigned int dacfs;
+
+ if (rate >= 8000 && rate <= 12000)
+ dacfs = SSM4567_DAC_FS_8000_12000;
+ else if (rate >= 16000 && rate <= 24000)
+ dacfs = SSM4567_DAC_FS_16000_24000;
+ else if (rate >= 32000 && rate <= 48000)
+ dacfs = SSM4567_DAC_FS_32000_48000;
+ else if (rate >= 64000 && rate <= 96000)
+ dacfs = SSM4567_DAC_FS_64000_96000;
+ else if (rate >= 128000 && rate <= 192000)
+ dacfs = SSM4567_DAC_FS_128000_192000;
+ else
+ return -EINVAL;
+
+ return regmap_update_bits(ssm4567->regmap, SSM4567_REG_DAC_CTRL,
+ SSM4567_DAC_FS_MASK, dacfs);
+}
+
+static int ssm4567_mute(struct snd_soc_dai *dai, int mute)
+{
+ struct ssm4567 *ssm4567 = snd_soc_codec_get_drvdata(dai->codec);
+ unsigned int val;
+
+ val = mute ? SSM4567_DAC_MUTE : 0;
+ return regmap_update_bits(ssm4567->regmap, SSM4567_REG_DAC_CTRL,
+ SSM4567_DAC_MUTE, val);
+}
+
+static int ssm4567_set_power(struct ssm4567 *ssm4567, bool enable)
+{
+ int ret = 0;
+
+ if (!enable) {
+ ret = regmap_update_bits(ssm4567->regmap,
+ SSM4567_REG_POWER_CTRL,
+ SSM4567_POWER_SPWDN, SSM4567_POWER_SPWDN);
+ regcache_mark_dirty(ssm4567->regmap);
+ }
+
+ regcache_cache_only(ssm4567->regmap, !enable);
+
+ if (enable) {
+ ret = regmap_update_bits(ssm4567->regmap,
+ SSM4567_REG_POWER_CTRL,
+ SSM4567_POWER_SPWDN, 0x00);
+ regcache_sync(ssm4567->regmap);
+ }
+
+ return ret;
+}
+
+static int ssm4567_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ struct ssm4567 *ssm4567 = snd_soc_codec_get_drvdata(codec);
+ int ret = 0;
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ break;
+ case SND_SOC_BIAS_PREPARE:
+ break;
+ case SND_SOC_BIAS_STANDBY:
+ if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
+ ret = ssm4567_set_power(ssm4567, true);
+ break;
+ case SND_SOC_BIAS_OFF:
+ ret = ssm4567_set_power(ssm4567, false);
+ break;
+ }
+
+ if (ret)
+ return ret;
+
+ codec->dapm.bias_level = level;
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops ssm4567_dai_ops = {
+ .hw_params = ssm4567_hw_params,
+ .digital_mute = ssm4567_mute,
+};
+
+static struct snd_soc_dai_driver ssm4567_dai = {
+ .name = "ssm4567-hifi",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 1,
+ .channels_max = 1,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32,
+ },
+ .ops = &ssm4567_dai_ops,
+};
+
+static struct snd_soc_codec_driver ssm4567_codec_driver = {
+ .set_bias_level = ssm4567_set_bias_level,
+ .idle_bias_off = true,
+
+ .controls = ssm4567_snd_controls,
+ .num_controls = ARRAY_SIZE(ssm4567_snd_controls),
+ .dapm_widgets = ssm4567_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(ssm4567_dapm_widgets),
+ .dapm_routes = ssm4567_routes,
+ .num_dapm_routes = ARRAY_SIZE(ssm4567_routes),
+};
+
+static const struct regmap_config ssm4567_regmap_config = {
+ .val_bits = 8,
+ .reg_bits = 8,
+
+ .max_register = SSM4567_REG_SOFT_RESET,
+ .readable_reg = ssm4567_readable_reg,
+ .writeable_reg = ssm4567_writeable_reg,
+ .volatile_reg = ssm4567_volatile_reg,
+
+ .cache_type = REGCACHE_RBTREE,
+ .reg_defaults = ssm4567_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(ssm4567_reg_defaults),
+};
+
+static int ssm4567_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct ssm4567 *ssm4567;
+ int ret;
+
+ ssm4567 = devm_kzalloc(&i2c->dev, sizeof(*ssm4567), GFP_KERNEL);
+ if (ssm4567 == NULL)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, ssm4567);
+
+ ssm4567->regmap = devm_regmap_init_i2c(i2c, &ssm4567_regmap_config);
+ if (IS_ERR(ssm4567->regmap))
+ return PTR_ERR(ssm4567->regmap);
+
+ ret = regmap_write(ssm4567->regmap, SSM4567_REG_SOFT_RESET, 0x00);
+ if (ret)
+ return ret;
+
+ ret = ssm4567_set_power(ssm4567, false);
+ if (ret)
+ return ret;
+
+ return snd_soc_register_codec(&i2c->dev, &ssm4567_codec_driver,
+ &ssm4567_dai, 1);
+}
+
+static int ssm4567_i2c_remove(struct i2c_client *client)
+{
+ snd_soc_unregister_codec(&client->dev);
+ return 0;
+}
+
+static const struct i2c_device_id ssm4567_i2c_ids[] = {
+ { "ssm4567", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ssm4567_i2c_ids);
+
+static struct i2c_driver ssm4567_driver = {
+ .driver = {
+ .name = "ssm4567",
+ .owner = THIS_MODULE,
+ },
+ .probe = ssm4567_i2c_probe,
+ .remove = ssm4567_i2c_remove,
+ .id_table = ssm4567_i2c_ids,
+};
+module_i2c_driver(ssm4567_driver);
+
+MODULE_DESCRIPTION("ASoC SSM4567 driver");
+MODULE_AUTHOR("Anatol Pomozov <anatol@chromium.org>");
+MODULE_LICENSE("GPL");
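
ssm4567_set_power() above follows the usual regmap-cache idiom for a part whose registers cannot be reached (or keep their contents) while it is in software power-down: mark the cache dirty and switch to cache-only mode before cutting power, then leave cache-only mode and replay the cache when power returns. A condensed sketch of that sequence with a hypothetical CHIP_PWR register, not tied to any driver in this series:

#include <linux/bitops.h>
#include <linux/regmap.h>

#define CHIP_PWR	0x00		/* hypothetical power register */
#define CHIP_PWR_DOWN	BIT(0)

static int chip_set_power(struct regmap *regmap, bool enable)
{
	int ret = 0;

	if (!enable) {
		/* Enter power-down, then note that the cache no longer
		 * matches the (soon unreachable) hardware.
		 */
		ret = regmap_update_bits(regmap, CHIP_PWR,
					 CHIP_PWR_DOWN, CHIP_PWR_DOWN);
		regcache_mark_dirty(regmap);
	}

	/* While powered down, serve all register accesses from the cache */
	regcache_cache_only(regmap, !enable);

	if (enable) {
		/* Hardware reachable again: wake it up and replay the
		 * cached register values onto it.
		 */
		ret = regmap_update_bits(regmap, CHIP_PWR,
					 CHIP_PWR_DOWN, 0);
		regcache_sync(regmap);
	}

	return ret;
}
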
diff --git a/sound/soc/codecs/tas2552.c b/sound/soc/codecs/tas2552.c
index 23b32960ff1d..f039dc825971 100644
--- a/sound/soc/codecs/tas2552.c
+++ b/sound/soc/codecs/tas2552.c
@@ -78,6 +78,44 @@ struct tas2552_data {
unsigned int mclk;
};
+/* Input mux controls */
+static const char *tas2552_input_texts[] = {
+ "Digital", "Analog"
+};
+
+static SOC_ENUM_SINGLE_DECL(tas2552_input_mux_enum, TAS2552_CFG_3, 7,
+ tas2552_input_texts);
+
+static const struct snd_kcontrol_new tas2552_input_mux_control[] = {
+ SOC_DAPM_ENUM("Input selection", tas2552_input_mux_enum)
+};
+
+static const struct snd_soc_dapm_widget tas2552_dapm_widgets[] =
+{
+ SND_SOC_DAPM_INPUT("IN"),
+
+ /* MUX Controls */
+ SND_SOC_DAPM_MUX("Input selection", SND_SOC_NOPM, 0, 0,
+ tas2552_input_mux_control),
+
+ SND_SOC_DAPM_AIF_IN("DAC IN", "DAC Playback", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_DAC("DAC", NULL, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_OUT_DRV("ClassD", TAS2552_CFG_2, 7, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("PLL", TAS2552_CFG_2, 3, 0, NULL, 0),
+
+ SND_SOC_DAPM_OUTPUT("OUT")
+};
+
+static const struct snd_soc_dapm_route tas2552_audio_map[] = {
+ {"DAC", NULL, "DAC IN"},
+ {"Input selection", "Digital", "DAC"},
+ {"Input selection", "Analog", "IN"},
+ {"ClassD", NULL, "Input selection"},
+ {"OUT", NULL, "ClassD"},
+ {"ClassD", NULL, "PLL"},
+};
+
+#ifdef CONFIG_PM_RUNTIME
static void tas2552_sw_shutdown(struct tas2552_data *tas_data, int sw_shutdown)
{
u8 cfg1_reg;
@@ -90,6 +128,7 @@ static void tas2552_sw_shutdown(struct tas2552_data *tas_data, int sw_shutdown)
snd_soc_update_bits(tas_data->codec, TAS2552_CFG_1,
TAS2552_SWS_MASK, cfg1_reg);
}
+#endif
static int tas2552_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
@@ -101,10 +140,6 @@ static int tas2552_hw_params(struct snd_pcm_substream *substream,
int d;
u8 p, j;
- /* Turn on Class D amplifier */
- snd_soc_update_bits(codec, TAS2552_CFG_2, TAS2552_CLASSD_EN_MASK,
- TAS2552_CLASSD_EN);
-
if (!tas2552->mclk)
return -EINVAL;
@@ -147,9 +182,6 @@ static int tas2552_hw_params(struct snd_pcm_substream *substream,
}
- snd_soc_update_bits(codec, TAS2552_CFG_2, TAS2552_PLL_ENABLE,
- TAS2552_PLL_ENABLE);
-
return 0;
}
@@ -269,19 +301,10 @@ static const struct dev_pm_ops tas2552_pm = {
NULL)
};
-static void tas2552_shutdown(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct snd_soc_codec *codec = dai->codec;
-
- snd_soc_update_bits(codec, TAS2552_CFG_2, TAS2552_PLL_ENABLE, 0);
-}
-
static struct snd_soc_dai_ops tas2552_speaker_dai_ops = {
.hw_params = tas2552_hw_params,
.set_sysclk = tas2552_set_dai_sysclk,
.set_fmt = tas2552_set_dai_fmt,
- .shutdown = tas2552_shutdown,
.digital_mute = tas2552_mute,
};
@@ -294,7 +317,7 @@ static struct snd_soc_dai_driver tas2552_dai[] = {
{
.name = "tas2552-amplifier",
.playback = {
- .stream_name = "Speaker",
+ .stream_name = "Playback",
.channels_min = 2,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_8000_192000,
@@ -312,6 +335,7 @@ static DECLARE_TLV_DB_SCALE(dac_tlv, -7, 100, 24);
static const struct snd_kcontrol_new tas2552_snd_controls[] = {
SOC_SINGLE_TLV("Speaker Driver Playback Volume",
TAS2552_PGA_GAIN, 0, 0x1f, 1, dac_tlv),
+ SOC_DAPM_SINGLE("Playback AMP", SND_SOC_NOPM, 0, 1, 0),
};
static const struct reg_default tas2552_init_regs[] = {
@@ -321,6 +345,7 @@ static const struct reg_default tas2552_init_regs[] = {
static int tas2552_codec_probe(struct snd_soc_codec *codec)
{
struct tas2552_data *tas2552 = snd_soc_codec_get_drvdata(codec);
+ struct snd_soc_dapm_context *dapm = &codec->dapm;
int ret;
tas2552->codec = codec;
@@ -362,9 +387,14 @@ static int tas2552_codec_probe(struct snd_soc_codec *codec)
goto patch_fail;
}
- snd_soc_write(codec, TAS2552_CFG_2, TAS2552_CLASSD_EN |
- TAS2552_BOOST_EN | TAS2552_APT_EN |
- TAS2552_LIM_EN);
+ snd_soc_write(codec, TAS2552_CFG_2, TAS2552_BOOST_EN |
+ TAS2552_APT_EN | TAS2552_LIM_EN);
+
+ snd_soc_dapm_new_controls(dapm, tas2552_dapm_widgets,
+ ARRAY_SIZE(tas2552_dapm_widgets));
+ snd_soc_dapm_add_routes(dapm, tas2552_audio_map,
+ ARRAY_SIZE(tas2552_audio_map));
+
return 0;
patch_fail:
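
The tas2552 change above stops forcing the Class-D output and PLL on from hw_params()/shutdown() and instead models them as DAPM widgets, so the core only powers them while the playback path is actually in use. A bare-bones sketch of such a widget/route set, with a hypothetical AMP_CFG register standing in for the real configuration registers:

#include <sound/soc.h>
#include <sound/soc-dapm.h>

#define AMP_CFG	0x02			/* hypothetical register */

static const struct snd_soc_dapm_widget amp_widgets[] = {
	SND_SOC_DAPM_AIF_IN("DAC IN", "Playback", 0, SND_SOC_NOPM, 0, 0),
	SND_SOC_DAPM_DAC("DAC", NULL, SND_SOC_NOPM, 0, 0),
	/* Output stage and PLL bits are now toggled by DAPM on demand */
	SND_SOC_DAPM_OUT_DRV("ClassD", AMP_CFG, 7, 0, NULL, 0),
	SND_SOC_DAPM_SUPPLY("PLL", AMP_CFG, 3, 0, NULL, 0),
	SND_SOC_DAPM_OUTPUT("OUT"),
};

static const struct snd_soc_dapm_route amp_routes[] = {
	{ "DAC",    NULL, "DAC IN" },
	{ "ClassD", NULL, "DAC" },
	{ "ClassD", NULL, "PLL" },
	{ "OUT",    NULL, "ClassD" },
};
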
diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c
index aea9e1ff9126..145fe5b253d4 100644
--- a/sound/soc/codecs/tlv320aic31xx.c
+++ b/sound/soc/codecs/tlv320aic31xx.c
@@ -167,13 +167,13 @@ struct aic31xx_priv {
struct regulator_bulk_data supplies[AIC31XX_NUM_SUPPLIES];
struct aic31xx_disable_nb disable_nb[AIC31XX_NUM_SUPPLIES];
unsigned int sysclk;
+ u8 p_div;
int rate_div_line;
};
struct aic31xx_rate_divs {
- u32 mclk;
+ u32 mclk_p;
u32 rate;
- u8 p_val;
u8 pll_j;
u16 pll_d;
u16 dosr;
@@ -186,62 +186,51 @@ struct aic31xx_rate_divs {
/* ADC dividers can be disabled by configuring them to 0 */
static const struct aic31xx_rate_divs aic31xx_divs[] = {
- /* mclk rate pll: p j d dosr ndac mdac aors nadc madc */
+ /* mclk/p rate pll: j d dosr ndac mdac aors nadc madc */
/* 8k rate */
- {12000000, 8000, 1, 8, 1920, 128, 48, 2, 128, 48, 2},
- {12000000, 8000, 1, 8, 1920, 128, 32, 3, 128, 32, 3},
- {24000000, 8000, 2, 8, 1920, 128, 48, 2, 128, 48, 2},
- {25000000, 8000, 2, 7, 8643, 128, 48, 2, 128, 48, 2},
+ {12000000, 8000, 8, 1920, 128, 48, 2, 128, 48, 2},
+ {12000000, 8000, 8, 1920, 128, 32, 3, 128, 32, 3},
+ {12500000, 8000, 7, 8643, 128, 48, 2, 128, 48, 2},
/* 11.025k rate */
- {12000000, 11025, 1, 7, 5264, 128, 32, 2, 128, 32, 2},
- {12000000, 11025, 1, 8, 4672, 128, 24, 3, 128, 24, 3},
- {24000000, 11025, 2, 7, 5264, 128, 32, 2, 128, 32, 2},
- {25000000, 11025, 2, 7, 2253, 128, 32, 2, 128, 32, 2},
+ {12000000, 11025, 7, 5264, 128, 32, 2, 128, 32, 2},
+ {12000000, 11025, 8, 4672, 128, 24, 3, 128, 24, 3},
+ {12500000, 11025, 7, 2253, 128, 32, 2, 128, 32, 2},
/* 16k rate */
- {12000000, 16000, 1, 8, 1920, 128, 24, 2, 128, 24, 2},
- {12000000, 16000, 1, 8, 1920, 128, 16, 3, 128, 16, 3},
- {24000000, 16000, 2, 8, 1920, 128, 24, 2, 128, 24, 2},
- {25000000, 16000, 2, 7, 8643, 128, 24, 2, 128, 24, 2},
+ {12000000, 16000, 8, 1920, 128, 24, 2, 128, 24, 2},
+ {12000000, 16000, 8, 1920, 128, 16, 3, 128, 16, 3},
+ {12500000, 16000, 7, 8643, 128, 24, 2, 128, 24, 2},
/* 22.05k rate */
- {12000000, 22050, 1, 7, 5264, 128, 16, 2, 128, 16, 2},
- {12000000, 22050, 1, 8, 4672, 128, 12, 3, 128, 12, 3},
- {24000000, 22050, 2, 7, 5264, 128, 16, 2, 128, 16, 2},
- {25000000, 22050, 2, 7, 2253, 128, 16, 2, 128, 16, 2},
+ {12000000, 22050, 7, 5264, 128, 16, 2, 128, 16, 2},
+ {12000000, 22050, 8, 4672, 128, 12, 3, 128, 12, 3},
+ {12500000, 22050, 7, 2253, 128, 16, 2, 128, 16, 2},
/* 32k rate */
- {12000000, 32000, 1, 8, 1920, 128, 12, 2, 128, 12, 2},
- {12000000, 32000, 1, 8, 1920, 128, 8, 3, 128, 8, 3},
- {24000000, 32000, 2, 8, 1920, 128, 12, 2, 128, 12, 2},
- {25000000, 32000, 2, 7, 8643, 128, 12, 2, 128, 12, 2},
+ {12000000, 32000, 8, 1920, 128, 12, 2, 128, 12, 2},
+ {12000000, 32000, 8, 1920, 128, 8, 3, 128, 8, 3},
+ {12500000, 32000, 7, 8643, 128, 12, 2, 128, 12, 2},
/* 44.1k rate */
- {12000000, 44100, 1, 7, 5264, 128, 8, 2, 128, 8, 2},
- {12000000, 44100, 1, 8, 4672, 128, 6, 3, 128, 6, 3},
- {24000000, 44100, 2, 7, 5264, 128, 8, 2, 128, 8, 2},
- {25000000, 44100, 2, 7, 2253, 128, 8, 2, 128, 8, 2},
+ {12000000, 44100, 7, 5264, 128, 8, 2, 128, 8, 2},
+ {12000000, 44100, 8, 4672, 128, 6, 3, 128, 6, 3},
+ {12500000, 44100, 7, 2253, 128, 8, 2, 128, 8, 2},
/* 48k rate */
- {12000000, 48000, 1, 8, 1920, 128, 8, 2, 128, 8, 2},
- {12000000, 48000, 1, 7, 6800, 96, 5, 4, 96, 5, 4},
- {24000000, 48000, 2, 8, 1920, 128, 8, 2, 128, 8, 2},
- {25000000, 48000, 2, 7, 8643, 128, 8, 2, 128, 8, 2},
+ {12000000, 48000, 8, 1920, 128, 8, 2, 128, 8, 2},
+ {12000000, 48000, 7, 6800, 96, 5, 4, 96, 5, 4},
+ {12500000, 48000, 7, 8643, 128, 8, 2, 128, 8, 2},
/* 88.2k rate */
- {12000000, 88200, 1, 7, 5264, 64, 8, 2, 64, 8, 2},
- {12000000, 88200, 1, 8, 4672, 64, 6, 3, 64, 6, 3},
- {24000000, 88200, 2, 7, 5264, 64, 8, 2, 64, 8, 2},
- {25000000, 88200, 2, 7, 2253, 64, 8, 2, 64, 8, 2},
+ {12000000, 88200, 7, 5264, 64, 8, 2, 64, 8, 2},
+ {12000000, 88200, 8, 4672, 64, 6, 3, 64, 6, 3},
+ {12500000, 88200, 7, 2253, 64, 8, 2, 64, 8, 2},
/* 96k rate */
- {12000000, 96000, 1, 8, 1920, 64, 8, 2, 64, 8, 2},
- {12000000, 96000, 1, 7, 6800, 48, 5, 4, 48, 5, 4},
- {24000000, 96000, 2, 8, 1920, 64, 8, 2, 64, 8, 2},
- {25000000, 96000, 2, 7, 8643, 64, 8, 2, 64, 8, 2},
+ {12000000, 96000, 8, 1920, 64, 8, 2, 64, 8, 2},
+ {12000000, 96000, 7, 6800, 48, 5, 4, 48, 5, 4},
+ {12500000, 96000, 7, 8643, 64, 8, 2, 64, 8, 2},
/* 176.4k rate */
- {12000000, 176400, 1, 7, 5264, 32, 8, 2, 32, 8, 2},
- {12000000, 176400, 1, 8, 4672, 32, 6, 3, 32, 6, 3},
- {24000000, 176400, 2, 7, 5264, 32, 8, 2, 32, 8, 2},
- {25000000, 176400, 2, 7, 2253, 32, 8, 2, 32, 8, 2},
+ {12000000, 176400, 7, 5264, 32, 8, 2, 32, 8, 2},
+ {12000000, 176400, 8, 4672, 32, 6, 3, 32, 6, 3},
+ {12500000, 176400, 7, 2253, 32, 8, 2, 32, 8, 2},
/* 192k rate */
- {12000000, 192000, 1, 8, 1920, 32, 8, 2, 32, 8, 2},
- {12000000, 192000, 1, 7, 6800, 24, 5, 4, 24, 5, 4},
- {24000000, 192000, 2, 8, 1920, 32, 8, 2, 32, 8, 2},
- {25000000, 192000, 2, 7, 8643, 32, 8, 2, 32, 8, 2},
+ {12000000, 192000, 8, 1920, 32, 8, 2, 32, 8, 2},
+ {12000000, 192000, 7, 6800, 24, 5, 4, 24, 5, 4},
+ {12500000, 192000, 7, 8643, 32, 8, 2, 32, 8, 2},
};
static const char * const ldac_in_text[] = {
@@ -692,6 +681,7 @@ static int aic31xx_setup_pll(struct snd_soc_codec *codec,
{
struct aic31xx_priv *aic31xx = snd_soc_codec_get_drvdata(codec);
int bclk_score = snd_soc_params_to_frame_size(params);
+ int mclk_p = aic31xx->sysclk / aic31xx->p_div;
int bclk_n = 0;
int match = -1;
int i;
@@ -704,7 +694,7 @@ static int aic31xx_setup_pll(struct snd_soc_codec *codec,
for (i = 0; i < ARRAY_SIZE(aic31xx_divs); i++) {
if (aic31xx_divs[i].rate == params_rate(params) &&
- aic31xx_divs[i].mclk == aic31xx->sysclk) {
+ aic31xx_divs[i].mclk_p == mclk_p) {
int s = (aic31xx_divs[i].dosr * aic31xx_divs[i].mdac) %
snd_soc_params_to_frame_size(params);
int bn = (aic31xx_divs[i].dosr * aic31xx_divs[i].mdac) /
@@ -738,7 +728,7 @@ static int aic31xx_setup_pll(struct snd_soc_codec *codec,
/* PLL configuration */
snd_soc_update_bits(codec, AIC31XX_PLLPR, AIC31XX_PLL_MASK,
- (aic31xx_divs[i].p_val << 4) | 0x01);
+ (aic31xx->p_div << 4) | 0x01);
snd_soc_write(codec, AIC31XX_PLLJ, aic31xx_divs[i].pll_j);
snd_soc_write(codec, AIC31XX_PLLDMSB,
@@ -772,7 +762,7 @@ static int aic31xx_setup_pll(struct snd_soc_codec *codec,
dev_dbg(codec->dev,
"pll %d.%04d/%d dosr %d n %d m %d aosr %d n %d m %d bclk_n %d\n",
aic31xx_divs[i].pll_j, aic31xx_divs[i].pll_d,
- aic31xx_divs[i].p_val, aic31xx_divs[i].dosr,
+ aic31xx->p_div, aic31xx_divs[i].dosr,
aic31xx_divs[i].ndac, aic31xx_divs[i].mdac,
aic31xx_divs[i].aosr, aic31xx_divs[i].nadc,
aic31xx_divs[i].madc, bclk_n);
@@ -840,7 +830,7 @@ static int aic31xx_set_dai_fmt(struct snd_soc_dai *codec_dai,
{
struct snd_soc_codec *codec = codec_dai->codec;
u8 iface_reg1 = 0;
- u8 iface_reg3 = 0;
+ u8 iface_reg2 = 0;
u8 dsp_a_val = 0;
dev_dbg(codec->dev, "## %s: fmt = 0x%x\n", __func__, fmt);
@@ -865,7 +855,7 @@ static int aic31xx_set_dai_fmt(struct snd_soc_dai *codec_dai,
/* NOTE: BCLKINV bit value 1 equals NB and 0 equals IB */
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_NB_NF:
- iface_reg3 |= AIC31XX_BCLKINV_MASK;
+ iface_reg2 |= AIC31XX_BCLKINV_MASK;
break;
case SND_SOC_DAIFMT_IB_NF:
break;
@@ -897,7 +887,7 @@ static int aic31xx_set_dai_fmt(struct snd_soc_dai *codec_dai,
dsp_a_val);
snd_soc_update_bits(codec, AIC31XX_IFACE2,
AIC31XX_BCLKINV_MASK,
- iface_reg3);
+ iface_reg2);
return 0;
}
@@ -912,7 +902,16 @@ static int aic31xx_set_dai_sysclk(struct snd_soc_dai *codec_dai,
dev_dbg(codec->dev, "## %s: clk_id = %d, freq = %d, dir = %d\n",
__func__, clk_id, freq, dir);
- for (i = 0; aic31xx_divs[i].mclk != freq; i++) {
+ for (i = 1; freq/i > 20000000 && i < 8; i++)
+ ;
+ if (freq/i > 20000000) {
+ dev_err(aic31xx->dev, "%s: Too high mclk frequency %u\n",
+ __func__, freq);
+ return -EINVAL;
+ }
+ aic31xx->p_div = i;
+
+ for (i = 0; aic31xx_divs[i].mclk_p != freq/aic31xx->p_div; i++) {
if (i == ARRAY_SIZE(aic31xx_divs)) {
dev_err(aic31xx->dev, "%s: Unsupported frequency %d\n",
__func__, freq);
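
The aic31xx hunk above replaces the per-MCLK rows of the divider table with rows keyed on MCLK/P, and set_dai_sysclk() now picks the smallest P in 1..8 that brings MCLK/P down to 20 MHz or less. A stand-alone sketch of that selection and the resulting table keys (plain C, not kernel code):

#include <stdio.h>

static unsigned int pick_p_div(unsigned int freq)
{
	unsigned int p;

	for (p = 1; freq / p > 20000000 && p < 8; p++)
		;
	return p;	/* the caller still rejects freq/p > 20 MHz */
}

int main(void)
{
	/* 12 MHz -> P=1 (12 MHz rows); 24 MHz -> P=2 (12 MHz rows);
	 * 25 MHz -> P=2 -> 12.5 MHz, matching the new 12500000 rows.
	 */
	unsigned int clks[] = { 12000000, 24000000, 25000000 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		unsigned int p = pick_p_div(clks[i]);
		printf("mclk %u -> p %u -> mclk_p %u\n",
		       clks[i], p, clks[i] / p);
	}
	return 0;
}
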
diff --git a/sound/soc/codecs/tlv320aic31xx.h b/sound/soc/codecs/tlv320aic31xx.h
index 52ed57c69dfa..fe16c34607bb 100644
--- a/sound/soc/codecs/tlv320aic31xx.h
+++ b/sound/soc/codecs/tlv320aic31xx.h
@@ -18,7 +18,8 @@
#define AIC31XX_RATES SNDRV_PCM_RATE_8000_192000
#define AIC31XX_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE \
- | SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S32_LE)
+ | SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S24_LE \
+ | SNDRV_PCM_FMTBIT_S32_LE)
#define AIC31XX_STEREO_CLASS_D_BIT 0x1
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index 64f179ee9834..f7c2a575a892 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -1121,6 +1121,7 @@ static int aic3x_regulator_event(struct notifier_block *nb,
static int aic3x_set_power(struct snd_soc_codec *codec, int power)
{
struct aic3x_priv *aic3x = snd_soc_codec_get_drvdata(codec);
+ unsigned int pll_c, pll_d;
int ret;
if (power) {
@@ -1138,6 +1139,18 @@ static int aic3x_set_power(struct snd_soc_codec *codec, int power)
/* Sync reg_cache with the hardware */
regcache_cache_only(aic3x->regmap, false);
regcache_sync(aic3x->regmap);
+
+ /* Rewrite the paired PLL D registers in case the cache sync
+ * skipped writing one of them and thereby left the other one
+ * unwritten as well
+ */
+ pll_c = snd_soc_read(codec, AIC3X_PLL_PROGC_REG);
+ pll_d = snd_soc_read(codec, AIC3X_PLL_PROGD_REG);
+ if (pll_c == aic3x_reg[AIC3X_PLL_PROGC_REG].def ||
+ pll_d == aic3x_reg[AIC3X_PLL_PROGD_REG].def) {
+ snd_soc_write(codec, AIC3X_PLL_PROGC_REG, pll_c);
+ snd_soc_write(codec, AIC3X_PLL_PROGD_REG, pll_d);
+ }
} else {
/*
* Do soft reset to this codec instance in order to clear
@@ -1222,20 +1235,6 @@ static struct snd_soc_dai_driver aic3x_dai = {
.symmetric_rates = 1,
};
-static int aic3x_suspend(struct snd_soc_codec *codec)
-{
- aic3x_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
- return 0;
-}
-
-static int aic3x_resume(struct snd_soc_codec *codec)
-{
- aic3x_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
- return 0;
-}
-
static void aic3x_mono_init(struct snd_soc_codec *codec)
{
/* DAC to Mono Line Out default volume and route to Output mixer */
@@ -1429,8 +1428,6 @@ static struct snd_soc_codec_driver soc_codec_dev_aic3x = {
.idle_bias_off = true,
.probe = aic3x_probe,
.remove = aic3x_remove,
- .suspend = aic3x_suspend,
- .resume = aic3x_resume,
.controls = aic3x_snd_controls,
.num_controls = ARRAY_SIZE(aic3x_snd_controls),
.dapm_widgets = aic3x_dapm_widgets,
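
The aic3x hunk above works around a regcache property: regcache_sync() only writes registers whose cached value differs from the hardware default, while the PLL programming registers holding the D value must be written as a pair for the write to take effect. A rough sketch of that workaround with hypothetical register names, using plain regmap calls rather than snd_soc_read()/snd_soc_write():

#include <linux/regmap.h>

#define PLL_C_REG	0x07	/* hypothetical pair of registers that the */
#define PLL_D_REG	0x08	/* hardware requires to be written together */

static void resync_pll_pair(struct regmap *regmap,
			    unsigned int def_c, unsigned int def_d)
{
	unsigned int c, d;

	regcache_sync(regmap);	/* may skip a register still at its default */

	regmap_read(regmap, PLL_C_REG, &c);
	regmap_read(regmap, PLL_D_REG, &d);

	/* If either half still holds its default, the sync may have pushed
	 * out only one of them; write both again so the pair stays coherent.
	 */
	if (c == def_c || d == def_d) {
		regmap_write(regmap, PLL_C_REG, c);
		regmap_write(regmap, PLL_D_REG, d);
	}
}
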
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
index 3dfdcc4197fa..628ec774cf22 100644
--- a/sound/soc/codecs/wm8350.c
+++ b/sound/soc/codecs/wm8350.c
@@ -212,7 +212,7 @@ static void wm8350_pga_work(struct work_struct *work)
{
struct snd_soc_dapm_context *dapm =
container_of(work, struct snd_soc_dapm_context, delayed_work.work);
- struct snd_soc_codec *codec = dapm->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm);
struct wm8350_data *wm8350_data = snd_soc_codec_get_drvdata(codec);
struct wm8350_output *out1 = &wm8350_data->out1,
*out2 = &wm8350_data->out2;
diff --git a/sound/soc/codecs/wm8741.c b/sound/soc/codecs/wm8741.c
index a237f1627f61..31bb4801a005 100644
--- a/sound/soc/codecs/wm8741.c
+++ b/sound/soc/codecs/wm8741.c
@@ -413,7 +413,6 @@ static int wm8741_resume(struct snd_soc_codec *codec)
return 0;
}
#else
-#define wm8741_suspend NULL
#define wm8741_resume NULL
#endif
diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c
index e54e097f4fcb..21ca3a94fc96 100644
--- a/sound/soc/codecs/wm8753.c
+++ b/sound/soc/codecs/wm8753.c
@@ -1433,7 +1433,7 @@ static void wm8753_work(struct work_struct *work)
struct snd_soc_dapm_context *dapm =
container_of(work, struct snd_soc_dapm_context,
delayed_work.work);
- struct snd_soc_codec *codec = dapm->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm);
wm8753_set_bias_level(codec, dapm->bias_level);
}
diff --git a/sound/soc/codecs/wm8804.c b/sound/soc/codecs/wm8804.c
index 0ea01dfcb6e1..3addc5fe5cb2 100644
--- a/sound/soc/codecs/wm8804.c
+++ b/sound/soc/codecs/wm8804.c
@@ -518,23 +518,6 @@ static int wm8804_set_bias_level(struct snd_soc_codec *codec,
return 0;
}
-#ifdef CONFIG_PM
-static int wm8804_suspend(struct snd_soc_codec *codec)
-{
- wm8804_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
-static int wm8804_resume(struct snd_soc_codec *codec)
-{
- wm8804_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- return 0;
-}
-#else
-#define wm8804_suspend NULL
-#define wm8804_resume NULL
-#endif
-
static int wm8804_remove(struct snd_soc_codec *codec)
{
struct wm8804_priv *wm8804;
@@ -671,8 +654,6 @@ static struct snd_soc_dai_driver wm8804_dai = {
static struct snd_soc_codec_driver soc_codec_dev_wm8804 = {
.probe = wm8804_probe,
.remove = wm8804_remove,
- .suspend = wm8804_suspend,
- .resume = wm8804_resume,
.set_bias_level = wm8804_set_bias_level,
.idle_bias_off = true,
diff --git a/sound/soc/codecs/wm8971.c b/sound/soc/codecs/wm8971.c
index 0499cd4cfb71..39ddb9b8834c 100644
--- a/sound/soc/codecs/wm8971.c
+++ b/sound/soc/codecs/wm8971.c
@@ -615,7 +615,7 @@ static void wm8971_work(struct work_struct *work)
struct snd_soc_dapm_context *dapm =
container_of(work, struct snd_soc_dapm_context,
delayed_work.work);
- struct snd_soc_codec *codec = dapm->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm);
wm8971_set_bias_level(codec, codec->dapm.bias_level);
}
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 6cc0566dc29a..1fcb9f3f3097 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -4082,17 +4082,23 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
switch (control->type) {
case WM8994:
- if (wm8994->micdet_irq) {
+ if (wm8994->micdet_irq)
ret = request_threaded_irq(wm8994->micdet_irq, NULL,
wm8994_mic_irq,
IRQF_TRIGGER_RISING,
"Mic1 detect",
wm8994);
- if (ret != 0)
- dev_warn(codec->dev,
- "Failed to request Mic1 detect IRQ: %d\n",
- ret);
- }
+ else
+ ret = wm8994_request_irq(wm8994->wm8994,
+ WM8994_IRQ_MIC1_DET,
+ wm8994_mic_irq, "Mic 1 detect",
+ wm8994);
+
+ if (ret != 0)
+ dev_warn(codec->dev,
+ "Failed to request Mic1 detect IRQ: %d\n",
+ ret);
+
ret = wm8994_request_irq(wm8994->wm8994,
WM8994_IRQ_MIC1_SHRT,
diff --git a/sound/soc/codecs/wm8995.c b/sound/soc/codecs/wm8995.c
index cae4ac5a5730..1288edeb8c7d 100644
--- a/sound/soc/codecs/wm8995.c
+++ b/sound/soc/codecs/wm8995.c
@@ -1998,23 +1998,6 @@ static int wm8995_set_bias_level(struct snd_soc_codec *codec,
return 0;
}
-#ifdef CONFIG_PM
-static int wm8995_suspend(struct snd_soc_codec *codec)
-{
- wm8995_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
-static int wm8995_resume(struct snd_soc_codec *codec)
-{
- wm8995_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- return 0;
-}
-#else
-#define wm8995_suspend NULL
-#define wm8995_resume NULL
-#endif
-
static int wm8995_remove(struct snd_soc_codec *codec)
{
struct wm8995_priv *wm8995;
@@ -2220,8 +2203,6 @@ static struct snd_soc_dai_driver wm8995_dai[] = {
static struct snd_soc_codec_driver soc_codec_dev_wm8995 = {
.probe = wm8995_probe,
.remove = wm8995_remove,
- .suspend = wm8995_suspend,
- .resume = wm8995_resume,
.set_bias_level = wm8995_set_bias_level,
.idle_bias_off = true,
};
diff --git a/sound/soc/davinci/Kconfig b/sound/soc/davinci/Kconfig
index d69510c53239..8e948c63f3d9 100644
--- a/sound/soc/davinci/Kconfig
+++ b/sound/soc/davinci/Kconfig
@@ -63,7 +63,8 @@ config SND_DM365_AIC3X_CODEC
Say Y if you want to add support for AIC3101 audio codec
config SND_DM365_VOICE_CODEC
- bool "Voice Codec - CQ93VC"
+ tristate "Voice Codec - CQ93VC"
+ depends on SND_DAVINCI_SOC
select MFD_DAVINCI_VOICECODEC
select SND_DAVINCI_SOC_VCIF
select SND_SOC_CQ0093VC
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index 68347b55f6e1..0eed9b1b24e1 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -42,14 +42,26 @@
#define MCASP_MAX_AFIFO_DEPTH 64
+static u32 context_regs[] = {
+ DAVINCI_MCASP_TXFMCTL_REG,
+ DAVINCI_MCASP_RXFMCTL_REG,
+ DAVINCI_MCASP_TXFMT_REG,
+ DAVINCI_MCASP_RXFMT_REG,
+ DAVINCI_MCASP_ACLKXCTL_REG,
+ DAVINCI_MCASP_ACLKRCTL_REG,
+ DAVINCI_MCASP_AHCLKXCTL_REG,
+ DAVINCI_MCASP_AHCLKRCTL_REG,
+ DAVINCI_MCASP_PDIR_REG,
+ DAVINCI_MCASP_RXMASK_REG,
+ DAVINCI_MCASP_TXMASK_REG,
+ DAVINCI_MCASP_RXTDM_REG,
+ DAVINCI_MCASP_TXTDM_REG,
+};
+
struct davinci_mcasp_context {
- u32 txfmtctl;
- u32 rxfmtctl;
- u32 txfmt;
- u32 rxfmt;
- u32 aclkxctl;
- u32 aclkrctl;
- u32 pdir;
+ u32 config_regs[ARRAY_SIZE(context_regs)];
+ u32 afifo_regs[2]; /* for read/write fifo control registers */
+ u32 *xrsr_regs; /* for serializer configuration */
};
struct davinci_mcasp {
@@ -874,14 +886,24 @@ static int davinci_mcasp_suspend(struct snd_soc_dai *dai)
{
struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(dai);
struct davinci_mcasp_context *context = &mcasp->context;
+ u32 reg;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(context_regs); i++)
+ context->config_regs[i] = mcasp_get_reg(mcasp, context_regs[i]);
+
+ if (mcasp->txnumevt) {
+ reg = mcasp->fifo_base + MCASP_WFIFOCTL_OFFSET;
+ context->afifo_regs[0] = mcasp_get_reg(mcasp, reg);
+ }
+ if (mcasp->rxnumevt) {
+ reg = mcasp->fifo_base + MCASP_RFIFOCTL_OFFSET;
+ context->afifo_regs[1] = mcasp_get_reg(mcasp, reg);
+ }
- context->txfmtctl = mcasp_get_reg(mcasp, DAVINCI_MCASP_TXFMCTL_REG);
- context->rxfmtctl = mcasp_get_reg(mcasp, DAVINCI_MCASP_RXFMCTL_REG);
- context->txfmt = mcasp_get_reg(mcasp, DAVINCI_MCASP_TXFMT_REG);
- context->rxfmt = mcasp_get_reg(mcasp, DAVINCI_MCASP_RXFMT_REG);
- context->aclkxctl = mcasp_get_reg(mcasp, DAVINCI_MCASP_ACLKXCTL_REG);
- context->aclkrctl = mcasp_get_reg(mcasp, DAVINCI_MCASP_ACLKRCTL_REG);
- context->pdir = mcasp_get_reg(mcasp, DAVINCI_MCASP_PDIR_REG);
+ for (i = 0; i < mcasp->num_serializer; i++)
+ context->xrsr_regs[i] = mcasp_get_reg(mcasp,
+ DAVINCI_MCASP_XRSRCTL_REG(i));
return 0;
}
@@ -890,14 +912,24 @@ static int davinci_mcasp_resume(struct snd_soc_dai *dai)
{
struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(dai);
struct davinci_mcasp_context *context = &mcasp->context;
+ u32 reg;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(context_regs); i++)
+ mcasp_set_reg(mcasp, context_regs[i], context->config_regs[i]);
+
+ if (mcasp->txnumevt) {
+ reg = mcasp->fifo_base + MCASP_WFIFOCTL_OFFSET;
+ mcasp_set_reg(mcasp, reg, context->afifo_regs[0]);
+ }
+ if (mcasp->rxnumevt) {
+ reg = mcasp->fifo_base + MCASP_RFIFOCTL_OFFSET;
+ mcasp_set_reg(mcasp, reg, context->afifo_regs[1]);
+ }
- mcasp_set_reg(mcasp, DAVINCI_MCASP_TXFMCTL_REG, context->txfmtctl);
- mcasp_set_reg(mcasp, DAVINCI_MCASP_RXFMCTL_REG, context->rxfmtctl);
- mcasp_set_reg(mcasp, DAVINCI_MCASP_TXFMT_REG, context->txfmt);
- mcasp_set_reg(mcasp, DAVINCI_MCASP_RXFMT_REG, context->rxfmt);
- mcasp_set_reg(mcasp, DAVINCI_MCASP_ACLKXCTL_REG, context->aclkxctl);
- mcasp_set_reg(mcasp, DAVINCI_MCASP_ACLKRCTL_REG, context->aclkrctl);
- mcasp_set_reg(mcasp, DAVINCI_MCASP_PDIR_REG, context->pdir);
+ for (i = 0; i < mcasp->num_serializer; i++)
+ mcasp_set_reg(mcasp, DAVINCI_MCASP_XRSRCTL_REG(i),
+ context->xrsr_regs[i]);
return 0;
}
@@ -1216,6 +1248,11 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
mcasp->op_mode = pdata->op_mode;
mcasp->tdm_slots = pdata->tdm_slots;
mcasp->num_serializer = pdata->num_serializer;
+#ifdef CONFIG_PM_SLEEP
+ mcasp->context.xrsr_regs = devm_kzalloc(&pdev->dev,
+ sizeof(u32) * mcasp->num_serializer,
+ GFP_KERNEL);
+#endif
mcasp->serial_dir = pdata->serial_dir;
mcasp->version = pdata->version;
mcasp->txnumevt = pdata->txnumevt;
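
The McASP change above turns suspend/resume from one hand-written line per register into a table of offsets that both paths simply iterate over, which also keeps the save and restore lists from drifting apart. The general shape of that pattern, sketched for a hypothetical memory-mapped device where readl()/writel() stand in for mcasp_get_reg()/mcasp_set_reg():

#include <linux/io.h>
#include <linux/kernel.h>

/* Hypothetical register offsets, for illustration only */
#define DEV_CTL_A	0x10
#define DEV_CTL_B	0x14
#define DEV_CLK		0x44

static const u32 ctx_regs[] = { DEV_CTL_A, DEV_CTL_B, DEV_CLK };

struct dev_context {
	u32 saved[ARRAY_SIZE(ctx_regs)];
};

static void dev_suspend(void __iomem *base, struct dev_context *ctx)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ctx_regs); i++)
		ctx->saved[i] = readl(base + ctx_regs[i]);
}

static void dev_resume(void __iomem *base, struct dev_context *ctx)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ctx_regs); i++)
		writel(ctx->saved[i], base + ctx_regs[i]);
}
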
diff --git a/sound/soc/davinci/edma-pcm.c b/sound/soc/davinci/edma-pcm.c
index 605e643133db..59e588abe54b 100644
--- a/sound/soc/davinci/edma-pcm.c
+++ b/sound/soc/davinci/edma-pcm.c
@@ -25,6 +25,8 @@
#include <sound/dmaengine_pcm.h>
#include <linux/edma.h>
+#include "edma-pcm.h"
+
static const struct snd_pcm_hardware edma_pcm_hardware = {
.info = SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID |
diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig
index f3012b645b51..081e406b3713 100644
--- a/sound/soc/fsl/Kconfig
+++ b/sound/soc/fsl/Kconfig
@@ -240,6 +240,18 @@ config SND_SOC_IMX_WM8962
Say Y if you want to add support for SoC audio on an i.MX board with
a wm8962 codec.
+config SND_SOC_IMX_ES8328
+ tristate "SoC Audio support for i.MX boards with the ES8328 codec"
+ depends on OF && (I2C || SPI)
+ select SND_SOC_ES8328_I2C if I2C
+ select SND_SOC_ES8328_SPI if SPI_MASTER
+ select SND_SOC_IMX_PCM_DMA
+ select SND_SOC_IMX_AUDMUX
+ select SND_SOC_FSL_SSI
+ help
+ Say Y if you want to add support for the ES8328 audio codec connected
+ via SSI/I2S over either SPI or I2C.
+
config SND_SOC_IMX_SGTL5000
tristate "SoC Audio support for i.MX boards with sgtl5000"
depends on OF && I2C
@@ -268,6 +280,20 @@ config SND_SOC_IMX_MC13783
select SND_SOC_MC13783
select SND_SOC_IMX_PCM_DMA
+config SND_SOC_FSL_ASOC_CARD
+ tristate "Generic ASoC Sound Card with ASRC support"
+ depends on OF && I2C
+ select SND_SOC_IMX_AUDMUX
+ select SND_SOC_IMX_PCM_DMA
+ select SND_SOC_FSL_ESAI
+ select SND_SOC_FSL_SAI
+ select SND_SOC_FSL_SSI
+ help
+ ALSA SoC audio support with ASRC for Freescale SoCs whose ESAI/SAI/SSI
+ interface is connected to an external CODEC such as WM8962, CS42888
+ or SGTL5000.
+ Say Y if you want to add support for Freescale Generic ASoC Sound Card.
+
endif # SND_IMX_SOC
endmenu
diff --git a/sound/soc/fsl/Makefile b/sound/soc/fsl/Makefile
index 9ff59267eac9..d28dc25c9375 100644
--- a/sound/soc/fsl/Makefile
+++ b/sound/soc/fsl/Makefile
@@ -11,6 +11,7 @@ snd-soc-p1022-rdk-objs := p1022_rdk.o
obj-$(CONFIG_SND_SOC_P1022_RDK) += snd-soc-p1022-rdk.o
# Freescale SSI/DMA/SAI/SPDIF Support
+snd-soc-fsl-asoc-card-objs := fsl-asoc-card.o
snd-soc-fsl-asrc-objs := fsl_asrc.o fsl_asrc_dma.o
snd-soc-fsl-sai-objs := fsl_sai.o
snd-soc-fsl-ssi-y := fsl_ssi.o
@@ -19,6 +20,7 @@ snd-soc-fsl-spdif-objs := fsl_spdif.o
snd-soc-fsl-esai-objs := fsl_esai.o
snd-soc-fsl-utils-objs := fsl_utils.o
snd-soc-fsl-dma-objs := fsl_dma.o
+obj-$(CONFIG_SND_SOC_FSL_ASOC_CARD) += snd-soc-fsl-asoc-card.o
obj-$(CONFIG_SND_SOC_FSL_ASRC) += snd-soc-fsl-asrc.o
obj-$(CONFIG_SND_SOC_FSL_SAI) += snd-soc-fsl-sai.o
obj-$(CONFIG_SND_SOC_FSL_SSI) += snd-soc-fsl-ssi.o
@@ -50,6 +52,7 @@ snd-soc-eukrea-tlv320-objs := eukrea-tlv320.o
snd-soc-phycore-ac97-objs := phycore-ac97.o
snd-soc-mx27vis-aic32x4-objs := mx27vis-aic32x4.o
snd-soc-wm1133-ev1-objs := wm1133-ev1.o
+snd-soc-imx-es8328-objs := imx-es8328.o
snd-soc-imx-sgtl5000-objs := imx-sgtl5000.o
snd-soc-imx-wm8962-objs := imx-wm8962.o
snd-soc-imx-spdif-objs := imx-spdif.o
@@ -59,6 +62,7 @@ obj-$(CONFIG_SND_SOC_EUKREA_TLV320) += snd-soc-eukrea-tlv320.o
obj-$(CONFIG_SND_SOC_PHYCORE_AC97) += snd-soc-phycore-ac97.o
obj-$(CONFIG_SND_SOC_MX27VIS_AIC32X4) += snd-soc-mx27vis-aic32x4.o
obj-$(CONFIG_SND_MXC_SOC_WM1133_EV1) += snd-soc-wm1133-ev1.o
+obj-$(CONFIG_SND_SOC_IMX_ES8328) += snd-soc-imx-es8328.o
obj-$(CONFIG_SND_SOC_IMX_SGTL5000) += snd-soc-imx-sgtl5000.o
obj-$(CONFIG_SND_SOC_IMX_WM8962) += snd-soc-imx-wm8962.o
obj-$(CONFIG_SND_SOC_IMX_SPDIF) += snd-soc-imx-spdif.o
diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
new file mode 100644
index 000000000000..007c772f3cef
--- /dev/null
+++ b/sound/soc/fsl/fsl-asoc-card.c
@@ -0,0 +1,574 @@
+/*
+ * Freescale Generic ASoC Sound Card driver with ASRC
+ *
+ * Copyright (C) 2014 Freescale Semiconductor, Inc.
+ *
+ * Author: Nicolin Chen <nicoleotsuka@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include "fsl_esai.h"
+#include "fsl_sai.h"
+#include "imx-audmux.h"
+
+#include "../codecs/sgtl5000.h"
+#include "../codecs/wm8962.h"
+
+#define RX 0
+#define TX 1
+
+/* Default DAI format without master/slave flags */
+#define DAI_FMT_BASE (SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF)
+
+/**
+ * CODEC private data
+ *
+ * @mclk_freq: Clock rate of MCLK
+ * @mclk_id: MCLK (or main clock) id for set_sysclk()
+ * @fll_id: FLL (or secondary clock) id for set_sysclk()
+ * @pll_id: PLL id for set_pll()
+ */
+struct codec_priv {
+ unsigned long mclk_freq;
+ u32 mclk_id;
+ u32 fll_id;
+ u32 pll_id;
+};
+
+/**
+ * CPU private data
+ *
+ * @sysclk_freq[2]: SYSCLK rates for set_sysclk()
+ * @sysclk_dir[2]: SYSCLK directions for set_sysclk()
+ * @sysclk_id[2]: SYSCLK ids for set_sysclk()
+ *
+ * Note: [1] for tx and [0] for rx
+ */
+struct cpu_priv {
+ unsigned long sysclk_freq[2];
+ u32 sysclk_dir[2];
+ u32 sysclk_id[2];
+};
+
+/**
+ * Freescale Generic ASOC card private data
+ *
+ * @dai_link[3]: DAI link structures: one normal link plus two optional DPCM links
+ * @pdev: platform device pointer
+ * @codec_priv: CODEC private data
+ * @cpu_priv: CPU private data
+ * @card: ASoC card structure
+ * @sample_rate: Current sample rate
+ * @sample_format: Current sample format
+ * @asrc_rate: ASRC sample rate used by Back-Ends
+ * @asrc_format: ASRC sample format used by Back-Ends
+ * @dai_fmt: DAI format between CPU and CODEC
+ * @name: Card name
+ */
+
+struct fsl_asoc_card_priv {
+ struct snd_soc_dai_link dai_link[3];
+ struct platform_device *pdev;
+ struct codec_priv codec_priv;
+ struct cpu_priv cpu_priv;
+ struct snd_soc_card card;
+ u32 sample_rate;
+ u32 sample_format;
+ u32 asrc_rate;
+ u32 asrc_format;
+ u32 dai_fmt;
+ char name[32];
+};
+
+/**
+ * This DAPM route map exists for the DPCM link only.
+ * The other routes shall go through Device Tree.
+ */
+static const struct snd_soc_dapm_route audio_map[] = {
+ {"CPU-Playback", NULL, "ASRC-Playback"},
+ {"Playback", NULL, "CPU-Playback"},
+ {"ASRC-Capture", NULL, "CPU-Capture"},
+ {"CPU-Capture", NULL, "Capture"},
+};
+
+/* Add every widget a supported card might need, without duplicates */
+static const struct snd_soc_dapm_widget fsl_asoc_card_dapm_widgets[] = {
+ SND_SOC_DAPM_LINE("Line Out Jack", NULL),
+ SND_SOC_DAPM_LINE("Line In Jack", NULL),
+ SND_SOC_DAPM_HP("Headphone Jack", NULL),
+ SND_SOC_DAPM_SPK("Ext Spk", NULL),
+ SND_SOC_DAPM_MIC("Mic Jack", NULL),
+ SND_SOC_DAPM_MIC("AMIC", NULL),
+ SND_SOC_DAPM_MIC("DMIC", NULL),
+};
+
+static int fsl_asoc_card_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct fsl_asoc_card_priv *priv = snd_soc_card_get_drvdata(rtd->card);
+ bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+ struct cpu_priv *cpu_priv = &priv->cpu_priv;
+ struct device *dev = rtd->card->dev;
+ int ret;
+
+ priv->sample_rate = params_rate(params);
+ priv->sample_format = params_format(params);
+
+ if (priv->card.set_bias_level)
+ return 0;
+
+ /* Specific DAI configurations start here */
+ ret = snd_soc_dai_set_sysclk(rtd->cpu_dai, cpu_priv->sysclk_id[tx],
+ cpu_priv->sysclk_freq[tx],
+ cpu_priv->sysclk_dir[tx]);
+ if (ret) {
+ dev_err(dev, "failed to set sysclk for cpu dai\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct snd_soc_ops fsl_asoc_card_ops = {
+ .hw_params = fsl_asoc_card_hw_params,
+};
+
+static int be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct fsl_asoc_card_priv *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_interval *rate;
+ struct snd_mask *mask;
+
+ rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+ rate->max = rate->min = priv->asrc_rate;
+
+ mask = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+ snd_mask_none(mask);
+ snd_mask_set(mask, priv->asrc_format);
+
+ return 0;
+}
+
+static struct snd_soc_dai_link fsl_asoc_card_dai[] = {
+ /* Default ASoC DAI Link*/
+ {
+ .name = "HiFi",
+ .stream_name = "HiFi",
+ .ops = &fsl_asoc_card_ops,
+ },
+ /* DPCM Link between Front-End and Back-End (Optional) */
+ {
+ .name = "HiFi-ASRC-FE",
+ .stream_name = "HiFi-ASRC-FE",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .dynamic = 1,
+ },
+ {
+ .name = "HiFi-ASRC-BE",
+ .stream_name = "HiFi-ASRC-BE",
+ .platform_name = "snd-soc-dummy",
+ .be_hw_params_fixup = be_hw_params_fixup,
+ .ops = &fsl_asoc_card_ops,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .no_pcm = 1,
+ },
+};
+
+static int fsl_asoc_card_set_bias_level(struct snd_soc_card *card,
+ struct snd_soc_dapm_context *dapm,
+ enum snd_soc_bias_level level)
+{
+ struct fsl_asoc_card_priv *priv = snd_soc_card_get_drvdata(card);
+ struct snd_soc_dai *codec_dai = card->rtd[0].codec_dai;
+ struct codec_priv *codec_priv = &priv->codec_priv;
+ struct device *dev = card->dev;
+ unsigned int pll_out;
+ int ret;
+
+ if (dapm->dev != codec_dai->dev)
+ return 0;
+
+ switch (level) {
+ case SND_SOC_BIAS_PREPARE:
+ if (dapm->bias_level != SND_SOC_BIAS_STANDBY)
+ break;
+
+ if (priv->sample_format == SNDRV_PCM_FORMAT_S24_LE)
+ pll_out = priv->sample_rate * 384;
+ else
+ pll_out = priv->sample_rate * 256;
+
+ ret = snd_soc_dai_set_pll(codec_dai, codec_priv->pll_id,
+ codec_priv->mclk_id,
+ codec_priv->mclk_freq, pll_out);
+ if (ret) {
+ dev_err(dev, "failed to start FLL: %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_dai_set_sysclk(codec_dai, codec_priv->fll_id,
+ pll_out, SND_SOC_CLOCK_IN);
+ if (ret) {
+ dev_err(dev, "failed to set SYSCLK: %d\n", ret);
+ return ret;
+ }
+ break;
+
+ case SND_SOC_BIAS_STANDBY:
+ if (dapm->bias_level != SND_SOC_BIAS_PREPARE)
+ break;
+
+ ret = snd_soc_dai_set_sysclk(codec_dai, codec_priv->mclk_id,
+ codec_priv->mclk_freq,
+ SND_SOC_CLOCK_IN);
+ if (ret) {
+ dev_err(dev, "failed to switch away from FLL: %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_dai_set_pll(codec_dai, codec_priv->pll_id, 0, 0, 0);
+ if (ret) {
+ dev_err(dev, "failed to stop FLL: %d\n", ret);
+ return ret;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int fsl_asoc_card_audmux_init(struct device_node *np,
+ struct fsl_asoc_card_priv *priv)
+{
+ struct device *dev = &priv->pdev->dev;
+ u32 int_ptcr = 0, ext_ptcr = 0;
+ int int_port, ext_port;
+ int ret;
+
+ ret = of_property_read_u32(np, "mux-int-port", &int_port);
+ if (ret) {
+ dev_err(dev, "mux-int-port missing or invalid\n");
+ return ret;
+ }
+ ret = of_property_read_u32(np, "mux-ext-port", &ext_port);
+ if (ret) {
+ dev_err(dev, "mux-ext-port missing or invalid\n");
+ return ret;
+ }
+
+ /*
+ * The port numbering in the hardware manual starts at 1, while
+ * the AUDMUX API expects it to start at 0.
+ */
+ int_port--;
+ ext_port--;
+
+ /*
+ * Use asynchronous mode (6 wires) for all cases.
+ * If only 4 wires are needed, just set SSI into
+ * synchronous mode and enable 4 PADs in IOMUX.
+ */
+ switch (priv->dai_fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ int_ptcr = IMX_AUDMUX_V2_PTCR_RFSEL(8 | ext_port) |
+ IMX_AUDMUX_V2_PTCR_RCSEL(8 | ext_port) |
+ IMX_AUDMUX_V2_PTCR_TFSEL(ext_port) |
+ IMX_AUDMUX_V2_PTCR_TCSEL(ext_port) |
+ IMX_AUDMUX_V2_PTCR_RFSDIR |
+ IMX_AUDMUX_V2_PTCR_RCLKDIR |
+ IMX_AUDMUX_V2_PTCR_TFSDIR |
+ IMX_AUDMUX_V2_PTCR_TCLKDIR;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFS:
+ int_ptcr = IMX_AUDMUX_V2_PTCR_RCSEL(8 | ext_port) |
+ IMX_AUDMUX_V2_PTCR_TCSEL(ext_port) |
+ IMX_AUDMUX_V2_PTCR_RCLKDIR |
+ IMX_AUDMUX_V2_PTCR_TCLKDIR;
+ ext_ptcr = IMX_AUDMUX_V2_PTCR_RFSEL(8 | int_port) |
+ IMX_AUDMUX_V2_PTCR_TFSEL(int_port) |
+ IMX_AUDMUX_V2_PTCR_RFSDIR |
+ IMX_AUDMUX_V2_PTCR_TFSDIR;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFM:
+ int_ptcr = IMX_AUDMUX_V2_PTCR_RFSEL(8 | ext_port) |
+ IMX_AUDMUX_V2_PTCR_TFSEL(ext_port) |
+ IMX_AUDMUX_V2_PTCR_RFSDIR |
+ IMX_AUDMUX_V2_PTCR_TFSDIR;
+ ext_ptcr = IMX_AUDMUX_V2_PTCR_RCSEL(8 | int_port) |
+ IMX_AUDMUX_V2_PTCR_TCSEL(int_port) |
+ IMX_AUDMUX_V2_PTCR_RCLKDIR |
+ IMX_AUDMUX_V2_PTCR_TCLKDIR;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ ext_ptcr = IMX_AUDMUX_V2_PTCR_RFSEL(8 | int_port) |
+ IMX_AUDMUX_V2_PTCR_RCSEL(8 | int_port) |
+ IMX_AUDMUX_V2_PTCR_TFSEL(int_port) |
+ IMX_AUDMUX_V2_PTCR_TCSEL(int_port) |
+ IMX_AUDMUX_V2_PTCR_RFSDIR |
+ IMX_AUDMUX_V2_PTCR_RCLKDIR |
+ IMX_AUDMUX_V2_PTCR_TFSDIR |
+ IMX_AUDMUX_V2_PTCR_TCLKDIR;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Asynchronous mode cannot be set along with RCLKDIR */
+ ret = imx_audmux_v2_configure_port(int_port, 0,
+ IMX_AUDMUX_V2_PDCR_RXDSEL(ext_port));
+ if (ret) {
+ dev_err(dev, "audmux internal port setup failed\n");
+ return ret;
+ }
+
+ ret = imx_audmux_v2_configure_port(int_port, int_ptcr,
+ IMX_AUDMUX_V2_PDCR_RXDSEL(ext_port));
+ if (ret) {
+ dev_err(dev, "audmux internal port setup failed\n");
+ return ret;
+ }
+
+ ret = imx_audmux_v2_configure_port(ext_port, 0,
+ IMX_AUDMUX_V2_PDCR_RXDSEL(int_port));
+ if (ret) {
+ dev_err(dev, "audmux external port setup failed\n");
+ return ret;
+ }
+
+ ret = imx_audmux_v2_configure_port(ext_port, ext_ptcr,
+ IMX_AUDMUX_V2_PDCR_RXDSEL(int_port));
+ if (ret) {
+ dev_err(dev, "audmux external port setup failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int fsl_asoc_card_late_probe(struct snd_soc_card *card)
+{
+ struct fsl_asoc_card_priv *priv = snd_soc_card_get_drvdata(card);
+ struct snd_soc_dai *codec_dai = card->rtd[0].codec_dai;
+ struct codec_priv *codec_priv = &priv->codec_priv;
+ struct device *dev = card->dev;
+ int ret;
+
+ ret = snd_soc_dai_set_sysclk(codec_dai, codec_priv->mclk_id,
+ codec_priv->mclk_freq, SND_SOC_CLOCK_IN);
+ if (ret) {
+ dev_err(dev, "failed to set sysclk in %s\n", __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int fsl_asoc_card_probe(struct platform_device *pdev)
+{
+ struct device_node *cpu_np, *codec_np, *asrc_np;
+ struct device_node *np = pdev->dev.of_node;
+ struct platform_device *asrc_pdev = NULL;
+ struct platform_device *cpu_pdev;
+ struct fsl_asoc_card_priv *priv;
+ struct i2c_client *codec_dev;
+ struct clk *codec_clk;
+ u32 width;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ cpu_np = of_parse_phandle(np, "audio-cpu", 0);
+ /* Fall back to the old DT binding */
+ if (!cpu_np)
+ cpu_np = of_parse_phandle(np, "ssi-controller", 0);
+ codec_np = of_parse_phandle(np, "audio-codec", 0);
+ if (!cpu_np || !codec_np) {
+ dev_err(&pdev->dev, "phandle missing or invalid\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ cpu_pdev = of_find_device_by_node(cpu_np);
+ if (!cpu_pdev) {
+ dev_err(&pdev->dev, "failed to find CPU DAI device\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ codec_dev = of_find_i2c_device_by_node(codec_np);
+ if (!codec_dev) {
+ dev_err(&pdev->dev, "failed to find codec platform device\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ asrc_np = of_parse_phandle(np, "audio-asrc", 0);
+ if (asrc_np)
+ asrc_pdev = of_find_device_by_node(asrc_np);
+
+ /* Get the MCLK rate only, and leave it controlled by CODEC drivers */
+ codec_clk = clk_get(&codec_dev->dev, NULL);
+ if (!IS_ERR(codec_clk)) {
+ priv->codec_priv.mclk_freq = clk_get_rate(codec_clk);
+ clk_put(codec_clk);
+ }
+
+ /* Default sample rate and format, will be updated in hw_params() */
+ priv->sample_rate = 44100;
+ priv->sample_format = SNDRV_PCM_FORMAT_S16_LE;
+
+ /* Assign a default DAI format, and allow each card to overwrite it */
+ priv->dai_fmt = DAI_FMT_BASE;
+
+ /* Diversify the card configurations */
+ if (of_device_is_compatible(np, "fsl,imx-audio-cs42888")) {
+ priv->card.set_bias_level = NULL;
+ priv->cpu_priv.sysclk_freq[TX] = priv->codec_priv.mclk_freq;
+ priv->cpu_priv.sysclk_freq[RX] = priv->codec_priv.mclk_freq;
+ priv->cpu_priv.sysclk_dir[TX] = SND_SOC_CLOCK_OUT;
+ priv->cpu_priv.sysclk_dir[RX] = SND_SOC_CLOCK_OUT;
+ priv->dai_fmt |= SND_SOC_DAIFMT_CBS_CFS;
+ } else if (of_device_is_compatible(np, "fsl,imx-audio-sgtl5000")) {
+ priv->codec_priv.mclk_id = SGTL5000_SYSCLK;
+ priv->dai_fmt |= SND_SOC_DAIFMT_CBM_CFM;
+ } else if (of_device_is_compatible(np, "fsl,imx-audio-wm8962")) {
+ priv->card.set_bias_level = fsl_asoc_card_set_bias_level;
+ priv->codec_priv.mclk_id = WM8962_SYSCLK_MCLK;
+ priv->codec_priv.fll_id = WM8962_SYSCLK_FLL;
+ priv->codec_priv.pll_id = WM8962_FLL;
+ priv->dai_fmt |= SND_SOC_DAIFMT_CBM_CFM;
+ } else {
+ dev_err(&pdev->dev, "unknown Device Tree compatible\n");
+ return -EINVAL;
+ }
+
+ /* Common settings for corresponding Freescale CPU DAI driver */
+ if (strstr(cpu_np->name, "ssi")) {
+ /* Only SSI needs to configure AUDMUX */
+ ret = fsl_asoc_card_audmux_init(np, priv);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init audmux\n");
+ goto asrc_fail;
+ }
+ } else if (strstr(cpu_np->name, "esai")) {
+ priv->cpu_priv.sysclk_id[1] = ESAI_HCKT_EXTAL;
+ priv->cpu_priv.sysclk_id[0] = ESAI_HCKR_EXTAL;
+ } else if (strstr(cpu_np->name, "sai")) {
+ priv->cpu_priv.sysclk_id[1] = FSL_SAI_CLK_MAST1;
+ priv->cpu_priv.sysclk_id[0] = FSL_SAI_CLK_MAST1;
+ }
+
+ sprintf(priv->name, "%s-audio", codec_dev->name);
+
+ /* Initialize sound card */
+ priv->pdev = pdev;
+ priv->card.dev = &pdev->dev;
+ priv->card.name = priv->name;
+ priv->card.dai_link = priv->dai_link;
+ priv->card.dapm_routes = audio_map;
+ priv->card.late_probe = fsl_asoc_card_late_probe;
+ priv->card.num_dapm_routes = ARRAY_SIZE(audio_map);
+ priv->card.dapm_widgets = fsl_asoc_card_dapm_widgets;
+ priv->card.num_dapm_widgets = ARRAY_SIZE(fsl_asoc_card_dapm_widgets);
+
+ memcpy(priv->dai_link, fsl_asoc_card_dai,
+ sizeof(struct snd_soc_dai_link) * ARRAY_SIZE(priv->dai_link));
+
+ /* Normal DAI Link */
+ priv->dai_link[0].cpu_of_node = cpu_np;
+ priv->dai_link[0].codec_of_node = codec_np;
+ priv->dai_link[0].codec_dai_name = codec_dev->name;
+ priv->dai_link[0].platform_of_node = cpu_np;
+ priv->dai_link[0].dai_fmt = priv->dai_fmt;
+ priv->card.num_links = 1;
+
+ if (asrc_pdev) {
+ /* DPCM DAI Links only if ASRC exists */
+ priv->dai_link[1].cpu_of_node = asrc_np;
+ priv->dai_link[1].platform_of_node = asrc_np;
+ priv->dai_link[2].codec_dai_name = codec_dev->name;
+ priv->dai_link[2].codec_of_node = codec_np;
+ priv->dai_link[2].cpu_of_node = cpu_np;
+ priv->dai_link[2].dai_fmt = priv->dai_fmt;
+ priv->card.num_links = 3;
+
+ ret = of_property_read_u32(asrc_np, "fsl,asrc-rate",
+ &priv->asrc_rate);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get output rate\n");
+ ret = -EINVAL;
+ goto asrc_fail;
+ }
+
+ ret = of_property_read_u32(asrc_np, "fsl,asrc-width", &width);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get output rate\n");
+ ret = -EINVAL;
+ goto asrc_fail;
+ }
+
+ if (width == 24)
+ priv->asrc_format = SNDRV_PCM_FORMAT_S24_LE;
+ else
+ priv->asrc_format = SNDRV_PCM_FORMAT_S16_LE;
+ }
+
+ /* Finish card registering */
+ platform_set_drvdata(pdev, priv);
+ snd_soc_card_set_drvdata(&priv->card, priv);
+
+ ret = devm_snd_soc_register_card(&pdev->dev, &priv->card);
+ if (ret)
+ dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret);
+
+asrc_fail:
+ of_node_put(asrc_np);
+fail:
+ of_node_put(codec_np);
+ of_node_put(cpu_np);
+
+ return ret;
+}
+
+static const struct of_device_id fsl_asoc_card_dt_ids[] = {
+ { .compatible = "fsl,imx-audio-cs42888", },
+ { .compatible = "fsl,imx-audio-sgtl5000", },
+ { .compatible = "fsl,imx-audio-wm8962", },
+ {}
+};
+
+static struct platform_driver fsl_asoc_card_driver = {
+ .probe = fsl_asoc_card_probe,
+ .driver = {
+ .name = "fsl-asoc-card",
+ .pm = &snd_soc_pm_ops,
+ .of_match_table = fsl_asoc_card_dt_ids,
+ },
+};
+module_platform_driver(fsl_asoc_card_driver);
+
+MODULE_DESCRIPTION("Freescale Generic ASoC Sound Card driver with ASRC");
+MODULE_AUTHOR("Nicolin Chen <nicoleotsuka@gmail.com>");
+MODULE_ALIAS("platform:fsl-asoc-card");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/fsl/fsl_asrc.c b/sound/soc/fsl/fsl_asrc.c
index 822110420b71..3b145313f93e 100644
--- a/sound/soc/fsl/fsl_asrc.c
+++ b/sound/soc/fsl/fsl_asrc.c
@@ -684,7 +684,7 @@ static bool fsl_asrc_writeable_reg(struct device *dev, unsigned int reg)
}
}
-static struct regmap_config fsl_asrc_regmap_config = {
+static const struct regmap_config fsl_asrc_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
@@ -802,10 +802,6 @@ static int fsl_asrc_probe(struct platform_device *pdev)
asrc_priv->paddr = res->start;
- /* Register regmap and let it prepare core clock */
- if (of_property_read_bool(np, "big-endian"))
- fsl_asrc_regmap_config.val_format_endian = REGMAP_ENDIAN_BIG;
-
asrc_priv->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "mem", regs,
&fsl_asrc_regmap_config);
if (IS_ERR(asrc_priv->regmap)) {
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index a3b29ed84963..8bcdfda09d7a 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -37,6 +37,7 @@
* @fsysclk: system clock source to derive HCK, SCK and FS
* @fifo_depth: depth of tx/rx FIFO
* @slot_width: width of each DAI slot
+ * @slots: number of slots
* @hck_rate: clock rate of desired HCKx clock
* @sck_rate: clock rate of desired SCKx clock
* @hck_dir: the direction of HCKx pads
@@ -55,6 +56,7 @@ struct fsl_esai {
struct clk *fsysclk;
u32 fifo_depth;
u32 slot_width;
+ u32 slots;
u32 hck_rate[2];
u32 sck_rate[2];
bool hck_dir[2];
@@ -362,6 +364,7 @@ static int fsl_esai_set_dai_tdm_slot(struct snd_soc_dai *dai, u32 tx_mask,
ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(rx_mask));
esai_priv->slot_width = slot_width;
+ esai_priv->slots = slots;
return 0;
}
@@ -509,10 +512,11 @@ static int fsl_esai_hw_params(struct snd_pcm_substream *substream,
bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
u32 width = snd_pcm_format_width(params_format(params));
u32 channels = params_channels(params);
+ u32 pins = DIV_ROUND_UP(channels, esai_priv->slots);
u32 bclk, mask, val;
int ret;
- bclk = params_rate(params) * esai_priv->slot_width * 2;
+ bclk = params_rate(params) * esai_priv->slot_width * esai_priv->slots;
ret = fsl_esai_set_bclk(dai, tx, bclk);
if (ret)
@@ -529,7 +533,7 @@ static int fsl_esai_hw_params(struct snd_pcm_substream *substream,
mask = ESAI_xFCR_xFR_MASK | ESAI_xFCR_xWA_MASK | ESAI_xFCR_xFWM_MASK |
(tx ? ESAI_xFCR_TE_MASK | ESAI_xFCR_TIEN : ESAI_xFCR_RE_MASK);
val = ESAI_xFCR_xWA(width) | ESAI_xFCR_xFWM(esai_priv->fifo_depth) |
- (tx ? ESAI_xFCR_TE(channels) | ESAI_xFCR_TIEN : ESAI_xFCR_RE(channels));
+ (tx ? ESAI_xFCR_TE(pins) | ESAI_xFCR_TIEN : ESAI_xFCR_RE(pins));
regmap_update_bits(esai_priv->regmap, REG_ESAI_xFCR(tx), mask, val);
@@ -564,6 +568,7 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd,
struct fsl_esai *esai_priv = snd_soc_dai_get_drvdata(dai);
bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
u8 i, channels = substream->runtime->channels;
+ u32 pins = DIV_ROUND_UP(channels, esai_priv->slots);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
@@ -578,7 +583,7 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd,
regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx),
tx ? ESAI_xCR_TE_MASK : ESAI_xCR_RE_MASK,
- tx ? ESAI_xCR_TE(channels) : ESAI_xCR_RE(channels));
+ tx ? ESAI_xCR_TE(pins) : ESAI_xCR_RE(pins));
break;
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_STOP:
@@ -705,7 +710,7 @@ static bool fsl_esai_writeable_reg(struct device *dev, unsigned int reg)
}
}
-static struct regmap_config fsl_esai_regmap_config = {
+static const struct regmap_config fsl_esai_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
@@ -731,9 +736,6 @@ static int fsl_esai_probe(struct platform_device *pdev)
esai_priv->pdev = pdev;
strcpy(esai_priv->name, np->name);
- if (of_property_read_bool(np, "big-endian"))
- fsl_esai_regmap_config.val_format_endian = REGMAP_ENDIAN_BIG;
-
/* Get the addresses and IRQ */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
regs = devm_ioremap_resource(&pdev->dev, res);
@@ -781,6 +783,9 @@ static int fsl_esai_probe(struct platform_device *pdev)
/* Set a default slot size */
esai_priv->slot_width = 32;
+ /* Set a default slot number */
+ esai_priv->slots = 2;
+
/* Set a default master/slave state */
esai_priv->slave_mode = true;
diff --git a/sound/soc/fsl/fsl_esai.h b/sound/soc/fsl/fsl_esai.h
index 75e14033e8d8..91a550f4a10d 100644
--- a/sound/soc/fsl/fsl_esai.h
+++ b/sound/soc/fsl/fsl_esai.h
@@ -130,8 +130,8 @@
#define ESAI_xFCR_RE_WIDTH 4
#define ESAI_xFCR_TE_MASK (((1 << ESAI_xFCR_TE_WIDTH) - 1) << ESAI_xFCR_xE_SHIFT)
#define ESAI_xFCR_RE_MASK (((1 << ESAI_xFCR_RE_WIDTH) - 1) << ESAI_xFCR_xE_SHIFT)
-#define ESAI_xFCR_TE(x) ((ESAI_xFCR_TE_MASK >> (ESAI_xFCR_TE_WIDTH - ((x + 1) >> 1))) & ESAI_xFCR_TE_MASK)
-#define ESAI_xFCR_RE(x) ((ESAI_xFCR_RE_MASK >> (ESAI_xFCR_RE_WIDTH - ((x + 1) >> 1))) & ESAI_xFCR_RE_MASK)
+#define ESAI_xFCR_TE(x) ((ESAI_xFCR_TE_MASK >> (ESAI_xFCR_TE_WIDTH - x)) & ESAI_xFCR_TE_MASK)
+#define ESAI_xFCR_RE(x) ((ESAI_xFCR_RE_MASK >> (ESAI_xFCR_RE_WIDTH - x)) & ESAI_xFCR_RE_MASK)
#define ESAI_xFCR_xFR_SHIFT 1
#define ESAI_xFCR_xFR_MASK (1 << ESAI_xFCR_xFR_SHIFT)
#define ESAI_xFCR_xFR (1 << ESAI_xFCR_xFR_SHIFT)
@@ -272,8 +272,8 @@
#define ESAI_xCR_RE_WIDTH 4
#define ESAI_xCR_TE_MASK (((1 << ESAI_xCR_TE_WIDTH) - 1) << ESAI_xCR_xE_SHIFT)
#define ESAI_xCR_RE_MASK (((1 << ESAI_xCR_RE_WIDTH) - 1) << ESAI_xCR_xE_SHIFT)
-#define ESAI_xCR_TE(x) ((ESAI_xCR_TE_MASK >> (ESAI_xCR_TE_WIDTH - ((x + 1) >> 1))) & ESAI_xCR_TE_MASK)
-#define ESAI_xCR_RE(x) ((ESAI_xCR_RE_MASK >> (ESAI_xCR_RE_WIDTH - ((x + 1) >> 1))) & ESAI_xCR_RE_MASK)
+#define ESAI_xCR_TE(x) ((ESAI_xCR_TE_MASK >> (ESAI_xCR_TE_WIDTH - x)) & ESAI_xCR_TE_MASK)
+#define ESAI_xCR_RE(x) ((ESAI_xCR_RE_MASK >> (ESAI_xCR_RE_WIDTH - x)) & ESAI_xCR_RE_MASK)
/*
* Transmit Clock Control Register -- REG_ESAI_TCCR 0xD8
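
The macro change above goes hand in hand with the fsl_esai.c changes earlier in this patch: instead of passing a channel count that the macro halves internally, callers now pass the number of data pins, pins = DIV_ROUND_UP(channels, slots). Below is a standalone sketch of what the new TE()/RE() expression computes; the 4-bit field width matches this header, while the shift value and the local DIV_ROUND_UP definition are stand-ins for illustration only.

    /* Standalone sketch, not part of the patch. */
    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))   /* mirrors the kernel macro */

    #define XE_SHIFT    0                                 /* assumed shift, for illustration */
    #define TE_WIDTH    4                                 /* 4-bit enable field, as in the header */
    #define TE_MASK     (((1u << TE_WIDTH) - 1) << XE_SHIFT)
    #define TE(pins)    ((TE_MASK >> (TE_WIDTH - (pins))) & TE_MASK)

    int main(void)
    {
        unsigned int channels = 6, slots = 2;
        unsigned int pins = DIV_ROUND_UP(channels, slots);

        /* 6 channels over 2-slot TDM need 3 data pins -> lowest 3 TE bits set */
        printf("pins=%u TE=0x%x\n", pins, TE(pins));
        return 0;
    }
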
diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
index faa049797897..7eeb1dd8ce27 100644
--- a/sound/soc/fsl/fsl_sai.c
+++ b/sound/soc/fsl/fsl_sai.c
@@ -175,7 +175,7 @@ static int fsl_sai_set_dai_fmt_tr(struct snd_soc_dai *cpu_dai,
bool tx = fsl_dir == FSL_FMT_TRANSMITTER;
u32 val_cr2 = 0, val_cr4 = 0;
- if (!sai->big_endian_data)
+ if (!sai->is_lsb_first)
val_cr4 |= FSL_SAI_CR4_MF;
/* DAI mode */
@@ -304,7 +304,7 @@ static int fsl_sai_hw_params(struct snd_pcm_substream *substream,
val_cr5 |= FSL_SAI_CR5_WNW(word_width);
val_cr5 |= FSL_SAI_CR5_W0W(word_width);
- if (sai->big_endian_data)
+ if (sai->is_lsb_first)
val_cr5 |= FSL_SAI_CR5_FBT(0);
else
val_cr5 |= FSL_SAI_CR5_FBT(word_width - 1);
@@ -330,13 +330,13 @@ static int fsl_sai_trigger(struct snd_pcm_substream *substream, int cmd,
u32 xcsr, count = 100;
/*
- * The transmitter bit clock and frame sync are to be
- * used by both the transmitter and receiver.
+ * Asynchronous mode: Clear SYNC for both Tx and Rx.
+ * Rx sync with Tx clocks: Clear SYNC for Tx, set it for Rx.
+ * Tx sync with Rx clocks: Clear SYNC for Rx, set it for Tx.
*/
- regmap_update_bits(sai->regmap, FSL_SAI_TCR2, FSL_SAI_CR2_SYNC,
- ~FSL_SAI_CR2_SYNC);
+ regmap_update_bits(sai->regmap, FSL_SAI_TCR2, FSL_SAI_CR2_SYNC,
+ sai->synchronous[TX] ? FSL_SAI_CR2_SYNC : 0);
regmap_update_bits(sai->regmap, FSL_SAI_RCR2, FSL_SAI_CR2_SYNC,
- FSL_SAI_CR2_SYNC);
+ sai->synchronous[RX] ? FSL_SAI_CR2_SYNC : 0);
/*
* It is recommended that the transmitter is the last enabled
@@ -437,8 +437,13 @@ static int fsl_sai_dai_probe(struct snd_soc_dai *cpu_dai)
{
struct fsl_sai *sai = dev_get_drvdata(cpu_dai->dev);
- regmap_update_bits(sai->regmap, FSL_SAI_TCSR, 0xffffffff, 0x0);
- regmap_update_bits(sai->regmap, FSL_SAI_RCSR, 0xffffffff, 0x0);
+ /* Software Reset for both Tx and Rx */
+ regmap_write(sai->regmap, FSL_SAI_TCSR, FSL_SAI_CSR_SR);
+ regmap_write(sai->regmap, FSL_SAI_RCSR, FSL_SAI_CSR_SR);
+ /* Clear SR bit to finish the reset */
+ regmap_write(sai->regmap, FSL_SAI_TCSR, 0);
+ regmap_write(sai->regmap, FSL_SAI_RCSR, 0);
+
regmap_update_bits(sai->regmap, FSL_SAI_TCR1, FSL_SAI_CR1_RFW_MASK,
FSL_SAI_MAXBURST_TX * 2);
regmap_update_bits(sai->regmap, FSL_SAI_RCR1, FSL_SAI_CR1_RFW_MASK,
@@ -539,7 +544,7 @@ static bool fsl_sai_writeable_reg(struct device *dev, unsigned int reg)
}
}
-static struct regmap_config fsl_sai_regmap_config = {
+static const struct regmap_config fsl_sai_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
@@ -568,11 +573,7 @@ static int fsl_sai_probe(struct platform_device *pdev)
if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx6sx-sai"))
sai->sai_on_imx = true;
- sai->big_endian_regs = of_property_read_bool(np, "big-endian-regs");
- if (sai->big_endian_regs)
- fsl_sai_regmap_config.val_format_endian = REGMAP_ENDIAN_BIG;
-
- sai->big_endian_data = of_property_read_bool(np, "big-endian-data");
+ sai->is_lsb_first = of_property_read_bool(np, "lsb-first");
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
@@ -621,6 +622,33 @@ static int fsl_sai_probe(struct platform_device *pdev)
return ret;
}
+ /* Sync Rx with Tx clocks as default by following the old DT binding */
+ sai->synchronous[RX] = true;
+ sai->synchronous[TX] = false;
+ fsl_sai_dai.symmetric_rates = 1;
+ fsl_sai_dai.symmetric_channels = 1;
+ fsl_sai_dai.symmetric_samplebits = 1;
+
+ if (of_find_property(np, "fsl,sai-synchronous-rx", NULL) &&
+ of_find_property(np, "fsl,sai-asynchronous", NULL)) {
+ /* error out if both synchronous and asynchronous are present */
+ dev_err(&pdev->dev, "invalid binding for synchronous mode\n");
+ return -EINVAL;
+ }
+
+ if (of_find_property(np, "fsl,sai-synchronous-rx", NULL)) {
+ /* Sync Tx with Rx clocks */
+ sai->synchronous[RX] = false;
+ sai->synchronous[TX] = true;
+ } else if (of_find_property(np, "fsl,sai-asynchronous", NULL)) {
+ /* Discard all settings for asynchronous mode */
+ sai->synchronous[RX] = false;
+ sai->synchronous[TX] = false;
+ fsl_sai_dai.symmetric_rates = 0;
+ fsl_sai_dai.symmetric_channels = 0;
+ fsl_sai_dai.symmetric_samplebits = 0;
+ }
+
sai->dma_params_rx.addr = res->start + FSL_SAI_RDR;
sai->dma_params_tx.addr = res->start + FSL_SAI_TDR;
sai->dma_params_rx.maxburst = FSL_SAI_MAXBURST_RX;
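
The probe additions above accept two new, mutually exclusive properties and keep the old behaviour when neither is present, so existing DTs are unaffected. The standalone sketch below (not part of the patch) tabulates the flags each case ends up with; the property names and flag values come from the code above, the table layout is illustrative.

    /* Standalone sketch, not part of the patch. */
    #include <stdio.h>

    struct sai_mode {
        const char *dt_property;
        int sync_tx;    /* sai->synchronous[TX] */
        int sync_rx;    /* sai->synchronous[RX] */
        int symmetric;  /* symmetric_rates/channels/samplebits */
    };

    int main(void)
    {
        static const struct sai_mode modes[] = {
            { "(none, old binding)",    0, 1, 1 },
            { "fsl,sai-synchronous-rx", 1, 0, 1 },
            { "fsl,sai-asynchronous",   0, 0, 0 },
        };
        unsigned int i;

        for (i = 0; i < sizeof(modes) / sizeof(modes[0]); i++)
            printf("%-26s synchronous[TX]=%d [RX]=%d symmetric=%d\n",
                   modes[i].dt_property, modes[i].sync_tx,
                   modes[i].sync_rx, modes[i].symmetric);
        return 0;
    }
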
diff --git a/sound/soc/fsl/fsl_sai.h b/sound/soc/fsl/fsl_sai.h
index 0e6c9f595d75..34667209b607 100644
--- a/sound/soc/fsl/fsl_sai.h
+++ b/sound/soc/fsl/fsl_sai.h
@@ -48,6 +48,7 @@
/* SAI Transmit/Recieve Control Register */
#define FSL_SAI_CSR_TERE BIT(31)
#define FSL_SAI_CSR_FR BIT(25)
+#define FSL_SAI_CSR_SR BIT(24)
#define FSL_SAI_CSR_xF_SHIFT 16
#define FSL_SAI_CSR_xF_W_SHIFT 18
#define FSL_SAI_CSR_xF_MASK (0x1f << FSL_SAI_CSR_xF_SHIFT)
@@ -131,13 +132,16 @@ struct fsl_sai {
struct clk *bus_clk;
struct clk *mclk_clk[FSL_SAI_MCLK_MAX];
- bool big_endian_regs;
- bool big_endian_data;
+ bool is_lsb_first;
bool is_dsp_mode;
bool sai_on_imx;
+ bool synchronous[2];
struct snd_dmaengine_dai_dma_data dma_params_rx;
struct snd_dmaengine_dai_dma_data dma_params_tx;
};
+#define TX 1
+#define RX 0
+
#endif /* __FSL_SAI_H */
diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c
index 70acfe4a9bd5..9b791621294c 100644
--- a/sound/soc/fsl/fsl_spdif.c
+++ b/sound/soc/fsl/fsl_spdif.c
@@ -15,7 +15,6 @@
#include <linux/bitrev.h>
#include <linux/clk.h>
-#include <linux/clk-private.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
@@ -1040,7 +1039,7 @@ static bool fsl_spdif_writeable_reg(struct device *dev, unsigned int reg)
}
}
-static struct regmap_config fsl_spdif_regmap_config = {
+static const struct regmap_config fsl_spdif_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
@@ -1184,9 +1183,6 @@ static int fsl_spdif_probe(struct platform_device *pdev)
memcpy(&spdif_priv->cpu_dai_drv, &fsl_spdif_dai, sizeof(fsl_spdif_dai));
spdif_priv->cpu_dai_drv.name = spdif_priv->name;
- if (of_property_read_bool(np, "big-endian"))
- fsl_spdif_regmap_config.val_format_endian = REGMAP_ENDIAN_BIG;
-
/* Get the addresses and IRQ */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
regs = devm_ioremap_resource(&pdev->dev, res);
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index de6ab06f58a5..e6955170dc42 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -169,6 +169,7 @@ struct fsl_ssi_private {
u8 i2s_mode;
bool use_dma;
bool use_dual_fifo;
+ bool has_ipg_clk_name;
unsigned int fifo_depth;
struct fsl_ssi_rxtx_reg_val rxtx_reg_val;
@@ -259,6 +260,11 @@ static bool fsl_ssi_is_i2s_master(struct fsl_ssi_private *ssi_private)
SND_SOC_DAIFMT_CBS_CFS;
}
+static bool fsl_ssi_is_i2s_cbm_cfs(struct fsl_ssi_private *ssi_private)
+{
+ return (ssi_private->dai_fmt & SND_SOC_DAIFMT_MASTER_MASK) ==
+ SND_SOC_DAIFMT_CBM_CFS;
+}
/**
* fsl_ssi_isr: SSI interrupt handler
*
@@ -525,6 +531,11 @@ static int fsl_ssi_startup(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct fsl_ssi_private *ssi_private =
snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ int ret;
+
+ ret = clk_prepare_enable(ssi_private->clk);
+ if (ret)
+ return ret;
/* When using dual fifo mode, it is safer to ensure an even period
* size. If appearing to an odd number while DMA always starts its
@@ -539,6 +550,21 @@ static int fsl_ssi_startup(struct snd_pcm_substream *substream,
}
/**
+ * fsl_ssi_shutdown: shutdown the SSI
+ *
+ */
+static void fsl_ssi_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct fsl_ssi_private *ssi_private =
+ snd_soc_dai_get_drvdata(rtd->cpu_dai);
+
+ clk_disable_unprepare(ssi_private->clk);
+}
+
+/**
* fsl_ssi_set_bclk - configure Digital Audio Interface bit clock
*
* Note: This function can be only called when using SSI as DAI master
@@ -705,6 +731,23 @@ static int fsl_ssi_hw_params(struct snd_pcm_substream *substream,
}
}
+ if (!fsl_ssi_is_ac97(ssi_private)) {
+ u8 i2smode;
+ /*
+ * Switch to normal net mode in order to have a frame sync
+ * signal every 32 bits instead of 16 bits
+ */
+ if (fsl_ssi_is_i2s_cbm_cfs(ssi_private) && sample_size == 16)
+ i2smode = CCSR_SSI_SCR_I2S_MODE_NORMAL |
+ CCSR_SSI_SCR_NET;
+ else
+ i2smode = ssi_private->i2s_mode;
+
+ regmap_update_bits(regs, CCSR_SSI_SCR,
+ CCSR_SSI_SCR_NET | CCSR_SSI_SCR_I2S_MODE_MASK,
+ channels == 1 ? 0 : i2smode);
+ }
+
/*
* FIXME: The documentation says that SxCCR[WL] should not be
* modified while the SSI is enabled. The only time this can
@@ -724,11 +767,6 @@ static int fsl_ssi_hw_params(struct snd_pcm_substream *substream,
regmap_update_bits(regs, CCSR_SSI_SRCCR, CCSR_SSI_SxCCR_WL_MASK,
wl);
- if (!fsl_ssi_is_ac97(ssi_private))
- regmap_update_bits(regs, CCSR_SSI_SCR,
- CCSR_SSI_SCR_NET | CCSR_SSI_SCR_I2S_MODE_MASK,
- channels == 1 ? 0 : ssi_private->i2s_mode);
-
return 0;
}
@@ -781,6 +819,7 @@ static int _fsl_ssi_set_dai_fmt(struct device *dev,
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFS:
case SND_SOC_DAIFMT_CBS_CFS:
ssi_private->i2s_mode |= CCSR_SSI_SCR_I2S_MODE_MASTER;
regmap_update_bits(regs, CCSR_SSI_STCCR,
@@ -854,6 +893,11 @@ static int _fsl_ssi_set_dai_fmt(struct device *dev,
case SND_SOC_DAIFMT_CBM_CFM:
scr &= ~CCSR_SSI_SCR_SYS_CLK_EN;
break;
+ case SND_SOC_DAIFMT_CBM_CFS:
+ strcr &= ~CCSR_SSI_STCR_TXDIR;
+ strcr |= CCSR_SSI_STCR_TFDIR;
+ scr &= ~CCSR_SSI_SCR_SYS_CLK_EN;
+ break;
default:
return -EINVAL;
}
@@ -1021,6 +1065,7 @@ static int fsl_ssi_dai_probe(struct snd_soc_dai *dai)
static const struct snd_soc_dai_ops fsl_ssi_dai_ops = {
.startup = fsl_ssi_startup,
+ .shutdown = fsl_ssi_shutdown,
.hw_params = fsl_ssi_hw_params,
.hw_free = fsl_ssi_hw_free,
.set_fmt = fsl_ssi_set_dai_fmt,
@@ -1146,17 +1191,22 @@ static int fsl_ssi_imx_probe(struct platform_device *pdev,
u32 dmas[4];
int ret;
- ssi_private->clk = devm_clk_get(&pdev->dev, NULL);
+ if (ssi_private->has_ipg_clk_name)
+ ssi_private->clk = devm_clk_get(&pdev->dev, "ipg");
+ else
+ ssi_private->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(ssi_private->clk)) {
ret = PTR_ERR(ssi_private->clk);
dev_err(&pdev->dev, "could not get clock: %d\n", ret);
return ret;
}
- ret = clk_prepare_enable(ssi_private->clk);
- if (ret) {
- dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
- return ret;
+ if (!ssi_private->has_ipg_clk_name) {
+ ret = clk_prepare_enable(ssi_private->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
+ return ret;
+ }
}
/* For those SLAVE implementations, we ingore non-baudclk cases
@@ -1214,8 +1264,9 @@ static int fsl_ssi_imx_probe(struct platform_device *pdev,
return 0;
error_pcm:
- clk_disable_unprepare(ssi_private->clk);
+ if (!ssi_private->has_ipg_clk_name)
+ clk_disable_unprepare(ssi_private->clk);
return ret;
}
@@ -1224,7 +1275,8 @@ static void fsl_ssi_imx_clean(struct platform_device *pdev,
{
if (!ssi_private->use_dma)
imx_pcm_fiq_exit(pdev);
- clk_disable_unprepare(ssi_private->clk);
+ if (!ssi_private->has_ipg_clk_name)
+ clk_disable_unprepare(ssi_private->clk);
}
static int fsl_ssi_probe(struct platform_device *pdev)
@@ -1263,9 +1315,6 @@ static int fsl_ssi_probe(struct platform_device *pdev)
if (sprop) {
if (!strcmp(sprop, "ac97-slave"))
ssi_private->dai_fmt = SND_SOC_DAIFMT_AC97;
- else if (!strcmp(sprop, "i2s-slave"))
- ssi_private->dai_fmt = SND_SOC_DAIFMT_I2S |
- SND_SOC_DAIFMT_CBM_CFM;
}
ssi_private->use_dma = !of_property_read_bool(np,
@@ -1299,8 +1348,16 @@ static int fsl_ssi_probe(struct platform_device *pdev)
return -ENOMEM;
}
- ssi_private->regs = devm_regmap_init_mmio(&pdev->dev, iomem,
+ ret = of_property_match_string(np, "clock-names", "ipg");
+ if (ret < 0) {
+ ssi_private->has_ipg_clk_name = false;
+ ssi_private->regs = devm_regmap_init_mmio(&pdev->dev, iomem,
&fsl_ssi_regconfig);
+ } else {
+ ssi_private->has_ipg_clk_name = true;
+ ssi_private->regs = devm_regmap_init_mmio_clk(&pdev->dev,
+ "ipg", iomem, &fsl_ssi_regconfig);
+ }
if (IS_ERR(ssi_private->regs)) {
dev_err(&pdev->dev, "Failed to init register map\n");
return PTR_ERR(ssi_private->regs);
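
The fsl_ssi_hw_params() change above moves the SCR network/I2S mode update earlier and adds one special case: when the DAI format is SND_SOC_DAIFMT_CBM_CFS (the codec drives the bit clock, the SSI drives the frame sync) and samples are 16-bit, it programs normal mode plus network mode so the frame sync still toggles every 32 bits. The sketch below walks through that decision; the CCSR_SSI_* names are quoted from the patch, the helper and its string results are illustrative only.

    /* Standalone sketch, not part of the patch. */
    #include <stdbool.h>
    #include <stdio.h>

    static const char *ssi_scr_mode(bool ac97, bool i2s_cbm_cfs,
                                    int sample_size, int channels)
    {
        if (ac97)
            return "(SCR left untouched)";
        if (channels == 1)
            return "0 (normal mode, network off)";
        if (i2s_cbm_cfs && sample_size == 16)
            /* keep a 32-bit frame: normal mode + network mode */
            return "CCSR_SSI_SCR_I2S_MODE_NORMAL | CCSR_SSI_SCR_NET";
        return "ssi_private->i2s_mode";
    }

    int main(void)
    {
        printf("stereo, CBM_CFS, 16-bit : %s\n", ssi_scr_mode(false, true, 16, 2));
        printf("stereo, CBS_CFS, 24-bit : %s\n", ssi_scr_mode(false, false, 24, 2));
        printf("mono                    : %s\n", ssi_scr_mode(false, false, 16, 1));
        return 0;
    }
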
diff --git a/sound/soc/fsl/imx-es8328.c b/sound/soc/fsl/imx-es8328.c
new file mode 100644
index 000000000000..f8cf10e16ce9
--- /dev/null
+++ b/sound/soc/fsl/imx-es8328.c
@@ -0,0 +1,233 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ * Copyright 2012 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/i2c.h>
+#include <linux/of_gpio.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+
+#include "imx-audmux.h"
+
+#define DAI_NAME_SIZE 32
+#define MUX_PORT_MAX 7
+
+struct imx_es8328_data {
+ struct device *dev;
+ struct snd_soc_dai_link dai;
+ struct snd_soc_card card;
+ char codec_dai_name[DAI_NAME_SIZE];
+ char platform_name[DAI_NAME_SIZE];
+ int jack_gpio;
+};
+
+static struct snd_soc_jack_gpio headset_jack_gpios[] = {
+ {
+ .gpio = -1,
+ .name = "headset-gpio",
+ .report = SND_JACK_HEADSET,
+ .invert = 0,
+ .debounce_time = 200,
+ },
+};
+
+static struct snd_soc_jack headset_jack;
+
+static int imx_es8328_dai_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct imx_es8328_data *data = container_of(rtd->card,
+ struct imx_es8328_data, card);
+ int ret = 0;
+
+ /* Headphone jack detection */
+ if (gpio_is_valid(data->jack_gpio)) {
+ ret = snd_soc_jack_new(rtd->codec, "Headphone",
+ SND_JACK_HEADPHONE | SND_JACK_BTN_0,
+ &headset_jack);
+ if (ret)
+ return ret;
+
+ headset_jack_gpios[0].gpio = data->jack_gpio;
+ ret = snd_soc_jack_add_gpios(&headset_jack,
+ ARRAY_SIZE(headset_jack_gpios),
+ headset_jack_gpios);
+ }
+
+ return ret;
+}
+
+static const struct snd_soc_dapm_widget imx_es8328_dapm_widgets[] = {
+ SND_SOC_DAPM_MIC("Mic Jack", NULL),
+ SND_SOC_DAPM_HP("Headphone", NULL),
+ SND_SOC_DAPM_SPK("Speaker", NULL),
+ SND_SOC_DAPM_REGULATOR_SUPPLY("audio-amp", 1, 0),
+};
+
+static int imx_es8328_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *ssi_np = NULL, *codec_np = NULL;
+ struct platform_device *ssi_pdev;
+ struct imx_es8328_data *data;
+ u32 int_port, ext_port;
+ int ret;
+ struct device *dev = &pdev->dev;
+
+ ret = of_property_read_u32(np, "mux-int-port", &int_port);
+ if (ret) {
+ dev_err(dev, "mux-int-port missing or invalid\n");
+ goto fail;
+ }
+ if (int_port > MUX_PORT_MAX || int_port == 0) {
+ dev_err(dev, "mux-int-port: hardware only has %d mux ports\n",
+ MUX_PORT_MAX);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ ret = of_property_read_u32(np, "mux-ext-port", &ext_port);
+ if (ret) {
+ dev_err(dev, "mux-ext-port missing or invalid\n");
+ goto fail;
+ }
+ if (ext_port > MUX_PORT_MAX || ext_port == 0) {
+ dev_err(dev, "mux-ext-port: hardware only has %d mux ports\n",
+ MUX_PORT_MAX);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ /*
+ * The port numbering in the hardware manual starts at 1, while
+ * the audmux API expects it to start at 0.
+ */
+ int_port--;
+ ext_port--;
+ ret = imx_audmux_v2_configure_port(int_port,
+ IMX_AUDMUX_V2_PTCR_SYN |
+ IMX_AUDMUX_V2_PTCR_TFSEL(ext_port) |
+ IMX_AUDMUX_V2_PTCR_TCSEL(ext_port) |
+ IMX_AUDMUX_V2_PTCR_TFSDIR |
+ IMX_AUDMUX_V2_PTCR_TCLKDIR,
+ IMX_AUDMUX_V2_PDCR_RXDSEL(ext_port));
+ if (ret) {
+ dev_err(dev, "audmux internal port setup failed\n");
+ return ret;
+ }
+ ret = imx_audmux_v2_configure_port(ext_port,
+ IMX_AUDMUX_V2_PTCR_SYN,
+ IMX_AUDMUX_V2_PDCR_RXDSEL(int_port));
+ if (ret) {
+ dev_err(dev, "audmux external port setup failed\n");
+ return ret;
+ }
+
+ ssi_np = of_parse_phandle(pdev->dev.of_node, "ssi-controller", 0);
+ codec_np = of_parse_phandle(pdev->dev.of_node, "audio-codec", 0);
+ if (!ssi_np || !codec_np) {
+ dev_err(dev, "phandle missing or invalid\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ ssi_pdev = of_find_device_by_node(ssi_np);
+ if (!ssi_pdev) {
+ dev_err(dev, "failed to find SSI platform device\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ data->dev = dev;
+
+ data->jack_gpio = of_get_named_gpio(pdev->dev.of_node, "jack-gpio", 0);
+
+ data->dai.name = "hifi";
+ data->dai.stream_name = "hifi";
+ data->dai.codec_dai_name = "es8328-hifi-analog";
+ data->dai.codec_of_node = codec_np;
+ data->dai.cpu_of_node = ssi_np;
+ data->dai.platform_of_node = ssi_np;
+ data->dai.init = &imx_es8328_dai_init;
+ data->dai.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBM_CFM;
+
+ data->card.dev = dev;
+ data->card.dapm_widgets = imx_es8328_dapm_widgets;
+ data->card.num_dapm_widgets = ARRAY_SIZE(imx_es8328_dapm_widgets);
+ ret = snd_soc_of_parse_card_name(&data->card, "model");
+ if (ret) {
+ dev_err(dev, "Unable to parse card name\n");
+ goto fail;
+ }
+ ret = snd_soc_of_parse_audio_routing(&data->card, "audio-routing");
+ if (ret) {
+ dev_err(dev, "Unable to parse routing: %d\n", ret);
+ goto fail;
+ }
+ data->card.num_links = 1;
+ data->card.owner = THIS_MODULE;
+ data->card.dai_link = &data->dai;
+
+ ret = snd_soc_register_card(&data->card);
+ if (ret) {
+ dev_err(dev, "Unable to register: %d\n", ret);
+ goto fail;
+ }
+
+ platform_set_drvdata(pdev, data);
+fail:
+ of_node_put(ssi_np);
+ of_node_put(codec_np);
+
+ return ret;
+}
+
+static int imx_es8328_remove(struct platform_device *pdev)
+{
+ struct imx_es8328_data *data = platform_get_drvdata(pdev);
+
+ snd_soc_jack_free_gpios(&headset_jack, ARRAY_SIZE(headset_jack_gpios),
+ headset_jack_gpios);
+
+ snd_soc_unregister_card(&data->card);
+
+ return 0;
+}
+
+static const struct of_device_id imx_es8328_dt_ids[] = {
+ { .compatible = "fsl,imx-audio-es8328", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_es8328_dt_ids);
+
+static struct platform_driver imx_es8328_driver = {
+ .driver = {
+ .name = "imx-es8328",
+ .of_match_table = imx_es8328_dt_ids,
+ },
+ .probe = imx_es8328_probe,
+ .remove = imx_es8328_remove,
+};
+module_platform_driver(imx_es8328_driver);
+
+MODULE_AUTHOR("Sean Cross <xobs@kosagi.com>");
+MODULE_DESCRIPTION("Kosagi i.MX6 ES8328 ASoC machine driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:imx-audio-es8328");
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index cef7776b712c..d1b7293c133e 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -10,10 +10,13 @@
*/
#include <linux/clk.h>
#include <linux/device.h>
+#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/string.h>
+#include <sound/jack.h>
#include <sound/simple_card.h>
#include <sound/soc-dai.h>
#include <sound/soc.h>
@@ -25,9 +28,15 @@ struct simple_card_data {
struct asoc_simple_dai codec_dai;
} *dai_props;
unsigned int mclk_fs;
+ int gpio_hp_det;
+ int gpio_mic_det;
struct snd_soc_dai_link dai_link[]; /* dynamically allocated */
};
+#define simple_priv_to_dev(priv) ((priv)->snd_card.dev)
+#define simple_priv_to_link(priv, i) ((priv)->snd_card.dai_link + i)
+#define simple_priv_to_props(priv, i) ((priv)->dai_props + i)
+
static int asoc_simple_card_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
@@ -50,6 +59,32 @@ static struct snd_soc_ops asoc_simple_card_ops = {
.hw_params = asoc_simple_card_hw_params,
};
+static struct snd_soc_jack simple_card_hp_jack;
+static struct snd_soc_jack_pin simple_card_hp_jack_pins[] = {
+ {
+ .pin = "Headphones",
+ .mask = SND_JACK_HEADPHONE,
+ },
+};
+static struct snd_soc_jack_gpio simple_card_hp_jack_gpio = {
+ .name = "Headphone detection",
+ .report = SND_JACK_HEADPHONE,
+ .debounce_time = 150,
+};
+
+static struct snd_soc_jack simple_card_mic_jack;
+static struct snd_soc_jack_pin simple_card_mic_jack_pins[] = {
+ {
+ .pin = "Mic Jack",
+ .mask = SND_JACK_MICROPHONE,
+ },
+};
+static struct snd_soc_jack_gpio simple_card_mic_jack_gpio = {
+ .name = "Mic detection",
+ .report = SND_JACK_MICROPHONE,
+ .debounce_time = 150,
+};
+
static int __asoc_simple_card_dai_init(struct snd_soc_dai *dai,
struct asoc_simple_dai *set)
{
@@ -105,42 +140,70 @@ static int asoc_simple_card_dai_init(struct snd_soc_pcm_runtime *rtd)
if (ret < 0)
return ret;
+ if (gpio_is_valid(priv->gpio_hp_det)) {
+ snd_soc_jack_new(codec->codec, "Headphones", SND_JACK_HEADPHONE,
+ &simple_card_hp_jack);
+ snd_soc_jack_add_pins(&simple_card_hp_jack,
+ ARRAY_SIZE(simple_card_hp_jack_pins),
+ simple_card_hp_jack_pins);
+
+ simple_card_hp_jack_gpio.gpio = priv->gpio_hp_det;
+ snd_soc_jack_add_gpios(&simple_card_hp_jack, 1,
+ &simple_card_hp_jack_gpio);
+ }
+
+ if (gpio_is_valid(priv->gpio_mic_det)) {
+ snd_soc_jack_new(codec->codec, "Mic Jack", SND_JACK_MICROPHONE,
+ &simple_card_mic_jack);
+ snd_soc_jack_add_pins(&simple_card_mic_jack,
+ ARRAY_SIZE(simple_card_mic_jack_pins),
+ simple_card_mic_jack_pins);
+ simple_card_mic_jack_gpio.gpio = priv->gpio_mic_det;
+ snd_soc_jack_add_gpios(&simple_card_mic_jack, 1,
+ &simple_card_mic_jack_gpio);
+ }
return 0;
}
static int
asoc_simple_card_sub_parse_of(struct device_node *np,
struct asoc_simple_dai *dai,
- const struct device_node **p_node,
- const char **name)
+ struct device_node **p_node,
+ const char **name,
+ int *args_count)
{
- struct device_node *node;
+ struct of_phandle_args args;
struct clk *clk;
u32 val;
int ret;
/*
- * get node via "sound-dai = <&phandle port>"
+ * Get node via "sound-dai = <&phandle port>"
* it will be used as xxx_of_node on soc_bind_dai_link()
*/
- node = of_parse_phandle(np, "sound-dai", 0);
- if (!node)
- return -ENODEV;
- *p_node = node;
+ ret = of_parse_phandle_with_args(np, "sound-dai",
+ "#sound-dai-cells", 0, &args);
+ if (ret)
+ return ret;
+
+ *p_node = args.np;
+
+ if (args_count)
+ *args_count = args.args_count;
- /* get dai->name */
+ /* Get dai->name */
ret = snd_soc_of_get_dai_name(np, name);
if (ret < 0)
return ret;
- /* parse TDM slot */
+ /* Parse TDM slot */
ret = snd_soc_of_parse_tdm_slot(np, &dai->slots, &dai->slot_width);
if (ret)
return ret;
/*
- * dai->sysclk come from
- * "clocks = <&xxx>" (if system has common clock)
+ * Parse dai->sysclk come from "clocks = <&xxx>"
+ * (if system has common clock)
* or "system-clock-frequency = <xxx>"
* or device's module clock.
*/
@@ -155,7 +218,7 @@ asoc_simple_card_sub_parse_of(struct device_node *np,
} else if (!of_property_read_u32(np, "system-clock-frequency", &val)) {
dai->sysclk = val;
} else {
- clk = of_clk_get(node, 0);
+ clk = of_clk_get(args.np, 0);
if (!IS_ERR(clk))
dai->sysclk = clk_get_rate(clk);
}
@@ -163,12 +226,14 @@ asoc_simple_card_sub_parse_of(struct device_node *np,
return 0;
}
-static int simple_card_dai_link_of(struct device_node *node,
- struct device *dev,
- struct snd_soc_dai_link *dai_link,
- struct simple_dai_props *dai_props,
- bool is_top_level_node)
+static int asoc_simple_card_dai_link_of(struct device_node *node,
+ struct simple_card_data *priv,
+ int idx,
+ bool is_top_level_node)
{
+ struct device *dev = simple_priv_to_dev(priv);
+ struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, idx);
+ struct simple_dai_props *dai_props = simple_priv_to_props(priv, idx);
struct device_node *np = NULL;
struct device_node *bitclkmaster = NULL;
struct device_node *framemaster = NULL;
@@ -176,8 +241,9 @@ static int simple_card_dai_link_of(struct device_node *node,
char *name;
char prop[128];
char *prefix = "";
- int ret;
+ int ret, cpu_args;
+ /* For single DAI link & old style of DT node */
if (is_top_level_node)
prefix = "simple-audio-card,";
@@ -195,7 +261,8 @@ static int simple_card_dai_link_of(struct device_node *node,
ret = asoc_simple_card_sub_parse_of(np, &dai_props->cpu_dai,
&dai_link->cpu_of_node,
- &dai_link->cpu_dai_name);
+ &dai_link->cpu_dai_name,
+ &cpu_args);
if (ret < 0)
goto dai_link_of_err;
@@ -226,14 +293,16 @@ static int simple_card_dai_link_of(struct device_node *node,
ret = asoc_simple_card_sub_parse_of(np, &dai_props->codec_dai,
&dai_link->codec_of_node,
- &dai_link->codec_dai_name);
+ &dai_link->codec_dai_name, NULL);
if (ret < 0)
goto dai_link_of_err;
if (strlen(prefix) && !bitclkmaster && !framemaster) {
- /* No dai-link level and master setting was not found from
- sound node level, revert back to legacy DT parsing and
- take the settings from codec node. */
+ /*
+ * No DAI link level and master setting was found
+ * from sound node level, revert back to legacy DT
+ * parsing and take the settings from codec node.
+ */
dev_dbg(dev, "%s: Revert to legacy daifmt parsing\n",
__func__);
dai_props->cpu_dai.fmt = dai_props->codec_dai.fmt =
@@ -262,10 +331,10 @@ static int simple_card_dai_link_of(struct device_node *node,
goto dai_link_of_err;
}
- /* simple-card assumes platform == cpu */
+ /* Simple Card assumes platform == cpu */
dai_link->platform_of_node = dai_link->cpu_of_node;
- /* Link name is created from CPU/CODEC dai name */
+ /* DAI link name is created from CPU/CODEC dai name */
name = devm_kzalloc(dev,
strlen(dai_link->cpu_dai_name) +
strlen(dai_link->codec_dai_name) + 2,
@@ -274,6 +343,7 @@ static int simple_card_dai_link_of(struct device_node *node,
dai_link->codec_dai_name);
dai_link->name = dai_link->stream_name = name;
dai_link->ops = &asoc_simple_card_ops;
+ dai_link->init = asoc_simple_card_dai_init;
dev_dbg(dev, "\tname : %s\n", dai_link->stream_name);
dev_dbg(dev, "\tcpu : %s / %04x / %d\n",
@@ -285,6 +355,18 @@ static int simple_card_dai_link_of(struct device_node *node,
dai_props->codec_dai.fmt,
dai_props->codec_dai.sysclk);
+ /*
+ * soc_bind_dai_link() will check the cpu name after
+ * of_node matching if dai_link has a cpu_dai_name.
+ * However, it will never match if the name was created by
+ * fmt_single_name(), so remove cpu_dai_name when cpu_args
+ * is 0. See:
+ * fmt_single_name()
+ * fmt_multiple_name()
+ */
+ if (!cpu_args)
+ dai_link->cpu_dai_name = NULL;
+
dai_link_of_err:
if (np)
of_node_put(np);
@@ -296,19 +378,19 @@ dai_link_of_err:
}
static int asoc_simple_card_parse_of(struct device_node *node,
- struct simple_card_data *priv,
- struct device *dev,
- int multi)
+ struct simple_card_data *priv)
{
- struct snd_soc_dai_link *dai_link = priv->snd_card.dai_link;
- struct simple_dai_props *dai_props = priv->dai_props;
+ struct device *dev = simple_priv_to_dev(priv);
u32 val;
int ret;
- /* parsing the card name from DT */
+ if (!node)
+ return -EINVAL;
+
+ /* Parse the card name from DT */
snd_soc_of_parse_card_name(&priv->snd_card, "simple-audio-card,name");
- /* off-codec widgets */
+ /* The off-codec widgets */
if (of_property_read_bool(node, "simple-audio-card,widgets")) {
ret = snd_soc_of_parse_audio_simple_widgets(&priv->snd_card,
"simple-audio-card,widgets");
@@ -332,32 +414,45 @@ static int asoc_simple_card_parse_of(struct device_node *node,
dev_dbg(dev, "New simple-card: %s\n", priv->snd_card.name ?
priv->snd_card.name : "");
- if (multi) {
+ /* Single/Multi DAI link(s) & new style of DT node */
+ if (of_get_child_by_name(node, "simple-audio-card,dai-link")) {
struct device_node *np = NULL;
- int i;
- for (i = 0; (np = of_get_next_child(node, np)); i++) {
+ int i = 0;
+
+ for_each_child_of_node(node, np) {
dev_dbg(dev, "\tlink %d:\n", i);
- ret = simple_card_dai_link_of(np, dev, dai_link + i,
- dai_props + i, false);
+ ret = asoc_simple_card_dai_link_of(np, priv,
+ i, false);
if (ret < 0) {
of_node_put(np);
return ret;
}
+ i++;
}
} else {
- ret = simple_card_dai_link_of(node, dev, dai_link, dai_props,
- true);
+ /* For single DAI link & old style of DT node */
+ ret = asoc_simple_card_dai_link_of(node, priv, 0, true);
if (ret < 0)
return ret;
}
+ priv->gpio_hp_det = of_get_named_gpio(node,
+ "simple-audio-card,hp-det-gpio", 0);
+ if (priv->gpio_hp_det == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ priv->gpio_mic_det = of_get_named_gpio(node,
+ "simple-audio-card,mic-det-gpio", 0);
+ if (priv->gpio_mic_det == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
if (!priv->snd_card.name)
priv->snd_card.name = priv->snd_card.dai_link->name;
return 0;
}
-/* update the reference count of the devices nodes at end of probe */
+/* Decrease the reference count of the device nodes */
static int asoc_simple_card_unref(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
@@ -384,34 +479,32 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
struct snd_soc_dai_link *dai_link;
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
- int num_links, multi, ret;
+ int num_links, ret;
- /* get the number of DAI links */
- if (np && of_get_child_by_name(np, "simple-audio-card,dai-link")) {
+ /* Get the number of DAI links */
+ if (np && of_get_child_by_name(np, "simple-audio-card,dai-link"))
num_links = of_get_child_count(np);
- multi = 1;
- } else {
+ else
num_links = 1;
- multi = 0;
- }
- /* allocate the private data and the DAI link array */
+ /* Allocate the private data and the DAI link array */
priv = devm_kzalloc(dev,
sizeof(*priv) + sizeof(*dai_link) * num_links,
GFP_KERNEL);
if (!priv)
return -ENOMEM;
- /*
- * init snd_soc_card
- */
+ /* Init snd_soc_card */
priv->snd_card.owner = THIS_MODULE;
priv->snd_card.dev = dev;
dai_link = priv->dai_link;
priv->snd_card.dai_link = dai_link;
priv->snd_card.num_links = num_links;
- /* get room for the other properties */
+ priv->gpio_hp_det = -ENOENT;
+ priv->gpio_mic_det = -ENOENT;
+
+ /* Get room for the other properties */
priv->dai_props = devm_kzalloc(dev,
sizeof(*priv->dai_props) * num_links,
GFP_KERNEL);
@@ -420,25 +513,13 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
if (np && of_device_is_available(np)) {
- ret = asoc_simple_card_parse_of(np, priv, dev, multi);
+ ret = asoc_simple_card_parse_of(np, priv);
if (ret < 0) {
if (ret != -EPROBE_DEFER)
dev_err(dev, "parse error %d\n", ret);
goto err;
}
- /*
- * soc_bind_dai_link() will check cpu name
- * after of_node matching if dai_link has cpu_dai_name.
- * but, it will never match if name was created by fmt_single_name()
- * remove cpu_dai_name to escape name matching.
- * see
- * fmt_single_name()
- * fmt_multiple_name()
- */
- if (num_links == 1)
- dai_link->cpu_dai_name = NULL;
-
} else {
struct asoc_simple_card_info *cinfo;
@@ -464,6 +545,7 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
dai_link->codec_name = cinfo->codec;
dai_link->cpu_dai_name = cinfo->cpu_dai.name;
dai_link->codec_dai_name = cinfo->codec_dai.name;
+ dai_link->init = asoc_simple_card_dai_init;
memcpy(&priv->dai_props->cpu_dai, &cinfo->cpu_dai,
sizeof(priv->dai_props->cpu_dai));
memcpy(&priv->dai_props->codec_dai, &cinfo->codec_dai,
@@ -473,11 +555,6 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
priv->dai_props->codec_dai.fmt |= cinfo->daifmt;
}
- /*
- * init snd_soc_dai_link
- */
- dai_link->init = asoc_simple_card_dai_init;
-
snd_soc_card_set_drvdata(&priv->snd_card, priv);
ret = devm_snd_soc_register_card(&pdev->dev, &priv->snd_card);
@@ -491,6 +568,16 @@ err:
static int asoc_simple_card_remove(struct platform_device *pdev)
{
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+ struct simple_card_data *priv = snd_soc_card_get_drvdata(card);
+
+ if (gpio_is_valid(priv->gpio_hp_det))
+ snd_soc_jack_free_gpios(&simple_card_hp_jack, 1,
+ &simple_card_hp_jack_gpio);
+ if (gpio_is_valid(priv->gpio_mic_det))
+ snd_soc_jack_free_gpios(&simple_card_mic_jack, 1,
+ &simple_card_mic_jack_gpio);
+
return asoc_simple_card_unref(pdev);
}
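
The simple-card rework above switches to of_parse_phandle_with_args() so the parser can see how many argument cells the "sound-dai" phandle carries. When there are none (#sound-dai-cells = <0>), the CPU component registers a single DAI whose name comes from fmt_single_name(), so the parsed cpu_dai_name can never match and is dropped in favour of plain of_node matching. A standalone sketch of that fix-up; the struct and sample name below are illustrative stand-ins, not the real snd_soc_dai_link.

    /* Standalone sketch, not part of the patch. */
    #include <stddef.h>
    #include <stdio.h>

    struct fake_dai_link {
        const char *cpu_dai_name;
        const void *cpu_of_node;
    };

    static void fixup_cpu_dai_name(struct fake_dai_link *link, int cpu_args)
    {
        if (!cpu_args)                      /* #sound-dai-cells = <0> */
            link->cpu_dai_name = NULL;      /* match by of_node only */
    }

    int main(void)
    {
        struct fake_dai_link link = { "some-cpu-dai", (const void *)0x1 };

        fixup_cpu_dai_name(&link, 0);
        printf("cpu_dai_name after fix-up: %s\n",
               link.cpu_dai_name ? link.cpu_dai_name : "(none, of_node match)");
        return 0;
    }
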
diff --git a/sound/soc/intel/Makefile b/sound/soc/intel/Makefile
index 7acbfc43a0c6..f841786dad15 100644
--- a/sound/soc/intel/Makefile
+++ b/sound/soc/intel/Makefile
@@ -2,7 +2,8 @@
snd-soc-sst-dsp-objs := sst-dsp.o sst-firmware.o
snd-soc-sst-acpi-objs := sst-acpi.o
-snd-soc-sst-mfld-platform-objs := sst-mfld-platform-pcm.o sst-mfld-platform-compress.o
+snd-soc-sst-mfld-platform-objs := sst-mfld-platform-pcm.o \
+ sst-mfld-platform-compress.o sst-atom-controls.o
snd-soc-mfld-machine-objs := mfld_machine.o
obj-$(CONFIG_SND_SST_MFLD_PLATFORM) += snd-soc-sst-mfld-platform.o
diff --git a/sound/soc/intel/byt-max98090.c b/sound/soc/intel/byt-max98090.c
index b8b8af571ef1..d52681e7225e 100644
--- a/sound/soc/intel/byt-max98090.c
+++ b/sound/soc/intel/byt-max98090.c
@@ -139,6 +139,7 @@ static struct snd_soc_card byt_max98090_card = {
.num_dapm_routes = ARRAY_SIZE(byt_max98090_audio_map),
.controls = byt_max98090_controls,
.num_controls = ARRAY_SIZE(byt_max98090_controls),
+ .fully_routed = true,
};
static int byt_max98090_probe(struct platform_device *pdev)
diff --git a/sound/soc/intel/byt-rt5640.c b/sound/soc/intel/byt-rt5640.c
index 234a58de3c53..e03abdf21c1b 100644
--- a/sound/soc/intel/byt-rt5640.c
+++ b/sound/soc/intel/byt-rt5640.c
@@ -17,6 +17,7 @@
#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/device.h>
+#include <linux/dmi.h>
#include <linux/slab.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -36,8 +37,6 @@ static const struct snd_soc_dapm_widget byt_rt5640_widgets[] = {
static const struct snd_soc_dapm_route byt_rt5640_audio_map[] = {
{"Headset Mic", NULL, "MICBIAS1"},
{"IN2P", NULL, "Headset Mic"},
- {"IN2N", NULL, "Headset Mic"},
- {"DMIC1", NULL, "Internal Mic"},
{"Headphone", NULL, "HPOL"},
{"Headphone", NULL, "HPOR"},
{"Speaker", NULL, "SPOLP"},
@@ -46,6 +45,31 @@ static const struct snd_soc_dapm_route byt_rt5640_audio_map[] = {
{"Speaker", NULL, "SPORN"},
};
+static const struct snd_soc_dapm_route byt_rt5640_intmic_dmic1_map[] = {
+ {"DMIC1", NULL, "Internal Mic"},
+};
+
+static const struct snd_soc_dapm_route byt_rt5640_intmic_dmic2_map[] = {
+ {"DMIC2", NULL, "Internal Mic"},
+};
+
+static const struct snd_soc_dapm_route byt_rt5640_intmic_in1_map[] = {
+ {"Internal Mic", NULL, "MICBIAS1"},
+ {"IN1P", NULL, "Internal Mic"},
+};
+
+enum {
+ BYT_RT5640_DMIC1_MAP,
+ BYT_RT5640_DMIC2_MAP,
+ BYT_RT5640_IN1_MAP,
+};
+
+#define BYT_RT5640_MAP(quirk) ((quirk) & 0xff)
+#define BYT_RT5640_DMIC_EN BIT(16)
+
+static unsigned long byt_rt5640_quirk = BYT_RT5640_DMIC1_MAP |
+ BYT_RT5640_DMIC_EN;
+
static const struct snd_kcontrol_new byt_rt5640_controls[] = {
SOC_DAPM_PIN_SWITCH("Headphone"),
SOC_DAPM_PIN_SWITCH("Headset Mic"),
@@ -77,12 +101,41 @@ static int byt_rt5640_hw_params(struct snd_pcm_substream *substream,
return 0;
}
+static int byt_rt5640_quirk_cb(const struct dmi_system_id *id)
+{
+ byt_rt5640_quirk = (unsigned long)id->driver_data;
+ return 1;
+}
+
+static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+ {
+ .callback = byt_rt5640_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "T100TA"),
+ },
+ .driver_data = (unsigned long *)BYT_RT5640_IN1_MAP,
+ },
+ {
+ .callback = byt_rt5640_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "DellInc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Venue 8 Pro 5830"),
+ },
+ .driver_data = (unsigned long *)(BYT_RT5640_DMIC2_MAP |
+ BYT_RT5640_DMIC_EN),
+ },
+ {}
+};
+
static int byt_rt5640_init(struct snd_soc_pcm_runtime *runtime)
{
int ret;
struct snd_soc_codec *codec = runtime->codec;
struct snd_soc_dapm_context *dapm = &codec->dapm;
struct snd_soc_card *card = runtime->card;
+ const struct snd_soc_dapm_route *custom_map;
+ int num_routes;
card->dapm.idle_bias_off = true;
@@ -93,6 +146,31 @@ static int byt_rt5640_init(struct snd_soc_pcm_runtime *runtime)
return ret;
}
+ dmi_check_system(byt_rt5640_quirk_table);
+ switch (BYT_RT5640_MAP(byt_rt5640_quirk)) {
+ case BYT_RT5640_IN1_MAP:
+ custom_map = byt_rt5640_intmic_in1_map;
+ num_routes = ARRAY_SIZE(byt_rt5640_intmic_in1_map);
+ break;
+ case BYT_RT5640_DMIC2_MAP:
+ custom_map = byt_rt5640_intmic_dmic2_map;
+ num_routes = ARRAY_SIZE(byt_rt5640_intmic_dmic2_map);
+ break;
+ default:
+ custom_map = byt_rt5640_intmic_dmic1_map;
+ num_routes = ARRAY_SIZE(byt_rt5640_intmic_dmic1_map);
+ }
+
+ ret = snd_soc_dapm_add_routes(dapm, custom_map, num_routes);
+ if (ret)
+ return ret;
+
+ if (byt_rt5640_quirk & BYT_RT5640_DMIC_EN) {
+ ret = rt5640_dmic_enable(codec, 0, 0);
+ if (ret)
+ return ret;
+ }
+
snd_soc_dapm_ignore_suspend(dapm, "HPOL");
snd_soc_dapm_ignore_suspend(dapm, "HPOR");
@@ -131,6 +209,7 @@ static struct snd_soc_card byt_rt5640_card = {
.num_dapm_widgets = ARRAY_SIZE(byt_rt5640_widgets),
.dapm_routes = byt_rt5640_audio_map,
.num_dapm_routes = ARRAY_SIZE(byt_rt5640_audio_map),
+ .fully_routed = true,
};
static int byt_rt5640_probe(struct platform_device *pdev)
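
The DMI quirk table above packs two independent settings into one unsigned long: the low byte selects which internal-microphone route map to add, and bit 16 says whether the codec's DMIC interface should be enabled. A standalone sketch of the encoding; the macro names and bit layout are the ones from the patch, the sample value is illustrative.

    /* Standalone sketch, not part of the patch. */
    #include <stdio.h>

    #define BIT(nr)                 (1UL << (nr))
    #define BYT_RT5640_MAP(quirk)   ((quirk) & 0xff)
    #define BYT_RT5640_DMIC_EN      BIT(16)

    enum {
        BYT_RT5640_DMIC1_MAP,
        BYT_RT5640_DMIC2_MAP,
        BYT_RT5640_IN1_MAP,
    };

    int main(void)
    {
        /* e.g. the Venue 8 Pro entry: DMIC2 routing with the DMIC enabled */
        unsigned long quirk = BYT_RT5640_DMIC2_MAP | BYT_RT5640_DMIC_EN;

        printf("route map index = %lu, dmic enabled = %d\n",
               BYT_RT5640_MAP(quirk), !!(quirk & BYT_RT5640_DMIC_EN));
        return 0;
    }
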
diff --git a/sound/soc/intel/sst-atom-controls.c b/sound/soc/intel/sst-atom-controls.c
new file mode 100644
index 000000000000..7104a34181a9
--- /dev/null
+++ b/sound/soc/intel/sst-atom-controls.c
@@ -0,0 +1,218 @@
+/*
+ * sst-atom-controls.c - Intel MID Platform driver DPCM ALSA controls for Mrfld
+ *
+ * Copyright (C) 2013-14 Intel Corp
+ * Author: Omair Mohammed Abdullah <omair.m.abdullah@intel.com>
+ * Vinod Koul <vinod.koul@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+#include <sound/soc.h>
+#include <sound/tlv.h>
+#include "sst-mfld-platform.h"
+#include "sst-atom-controls.h"
+
+static int sst_fill_byte_control(struct sst_data *drv,
+ u8 ipc_msg, u8 block,
+ u8 task_id, u8 pipe_id,
+ u16 len, void *cmd_data)
+{
+ struct snd_sst_bytes_v2 *byte_data = drv->byte_stream;
+
+ byte_data->type = SST_CMD_BYTES_SET;
+ byte_data->ipc_msg = ipc_msg;
+ byte_data->block = block;
+ byte_data->task_id = task_id;
+ byte_data->pipe_id = pipe_id;
+
+ if (len > SST_MAX_BIN_BYTES - sizeof(*byte_data)) {
+ dev_err(&drv->pdev->dev, "command length too big (%u)", len);
+ return -EINVAL;
+ }
+ byte_data->len = len;
+ memcpy(byte_data->bytes, cmd_data, len);
+ print_hex_dump_bytes("writing to lpe: ", DUMP_PREFIX_OFFSET,
+ byte_data, len + sizeof(*byte_data));
+ return 0;
+}
+
+static int sst_fill_and_send_cmd_unlocked(struct sst_data *drv,
+ u8 ipc_msg, u8 block, u8 task_id, u8 pipe_id,
+ void *cmd_data, u16 len)
+{
+ int ret = 0;
+
+ ret = sst_fill_byte_control(drv, ipc_msg,
+ block, task_id, pipe_id, len, cmd_data);
+ if (ret < 0)
+ return ret;
+ return sst->ops->send_byte_stream(sst->dev, drv->byte_stream);
+}
+
+/**
+ * sst_fill_and_send_cmd - generate the IPC message and send it to the FW
+ * @ipc_msg: type of IPC (CMD, SET_PARAMS, GET_PARAMS)
+ * @cmd_data: the IPC payload
+ */
+static int sst_fill_and_send_cmd(struct sst_data *drv,
+ u8 ipc_msg, u8 block, u8 task_id, u8 pipe_id,
+ void *cmd_data, u16 len)
+{
+ int ret;
+
+ mutex_lock(&drv->lock);
+ ret = sst_fill_and_send_cmd_unlocked(drv, ipc_msg, block,
+ task_id, pipe_id, cmd_data, len);
+ mutex_unlock(&drv->lock);
+
+ return ret;
+}
+
+static int sst_send_algo_cmd(struct sst_data *drv,
+ struct sst_algo_control *bc)
+{
+ int len, ret = 0;
+ struct sst_cmd_set_params *cmd;
+
+ /* bc->max includes sizeof algos + length field */
+ len = sizeof(cmd->dst) + sizeof(cmd->command_id) + bc->max;
+
+ cmd = kzalloc(len, GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ SST_FILL_DESTINATION(2, cmd->dst, bc->pipe_id, bc->module_id);
+ cmd->command_id = bc->cmd_id;
+ memcpy(cmd->params, bc->params, bc->max);
+
+ ret = sst_fill_and_send_cmd_unlocked(drv, SST_IPC_IA_SET_PARAMS,
+ SST_FLAG_BLOCKED, bc->task_id, 0, cmd, len);
+ kfree(cmd);
+ return ret;
+}
+
+static int sst_algo_bytes_ctl_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ struct sst_algo_control *bc = (void *)kcontrol->private_value;
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+ uinfo->count = bc->max;
+
+ return 0;
+}
+
+static int sst_algo_control_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct sst_algo_control *bc = (void *)kcontrol->private_value;
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+
+ switch (bc->type) {
+ case SST_ALGO_PARAMS:
+ memcpy(ucontrol->value.bytes.data, bc->params, bc->max);
+ break;
+ default:
+ dev_err(component->dev, "Invalid Input- algo type:%d\n",
+ bc->type);
+ return -EINVAL;
+
+ }
+ return 0;
+}
+
+static int sst_algo_control_set(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int ret = 0;
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct sst_data *drv = snd_soc_component_get_drvdata(cmpnt);
+ struct sst_algo_control *bc = (void *)kcontrol->private_value;
+
+ dev_dbg(cmpnt->dev, "control_name=%s\n", kcontrol->id.name);
+ mutex_lock(&drv->lock);
+ switch (bc->type) {
+ case SST_ALGO_PARAMS:
+ memcpy(bc->params, ucontrol->value.bytes.data, bc->max);
+ break;
+ default:
+ mutex_unlock(&drv->lock);
+ dev_err(cmpnt->dev, "Invalid Input- algo type:%d\n",
+ bc->type);
+ return -EINVAL;
+ }
+ /* if pipe is enabled, need to send the algo params from here */
+ if (bc->w && bc->w->power)
+ ret = sst_send_algo_cmd(drv, bc);
+ mutex_unlock(&drv->lock);
+
+ return ret;
+}
+
+static const struct snd_kcontrol_new sst_algo_controls[] = {
+ SST_ALGO_KCONTROL_BYTES("media_loop1_out", "fir", 272, SST_MODULE_ID_FIR_24,
+ SST_PATH_INDEX_MEDIA_LOOP1_OUT, 0, SST_TASK_SBA, SBA_VB_SET_FIR),
+ SST_ALGO_KCONTROL_BYTES("media_loop1_out", "iir", 300, SST_MODULE_ID_IIR_24,
+ SST_PATH_INDEX_MEDIA_LOOP1_OUT, 0, SST_TASK_SBA, SBA_VB_SET_IIR),
+ SST_ALGO_KCONTROL_BYTES("media_loop1_out", "mdrp", 286, SST_MODULE_ID_MDRP,
+ SST_PATH_INDEX_MEDIA_LOOP1_OUT, 0, SST_TASK_SBA, SBA_SET_MDRP),
+ SST_ALGO_KCONTROL_BYTES("media_loop2_out", "fir", 272, SST_MODULE_ID_FIR_24,
+ SST_PATH_INDEX_MEDIA_LOOP2_OUT, 0, SST_TASK_SBA, SBA_VB_SET_FIR),
+ SST_ALGO_KCONTROL_BYTES("media_loop2_out", "iir", 300, SST_MODULE_ID_IIR_24,
+ SST_PATH_INDEX_MEDIA_LOOP2_OUT, 0, SST_TASK_SBA, SBA_VB_SET_IIR),
+ SST_ALGO_KCONTROL_BYTES("media_loop2_out", "mdrp", 286, SST_MODULE_ID_MDRP,
+ SST_PATH_INDEX_MEDIA_LOOP2_OUT, 0, SST_TASK_SBA, SBA_SET_MDRP),
+ SST_ALGO_KCONTROL_BYTES("sprot_loop_out", "lpro", 192, SST_MODULE_ID_SPROT,
+ SST_PATH_INDEX_SPROT_LOOP_OUT, 0, SST_TASK_SBA, SBA_VB_LPRO),
+ SST_ALGO_KCONTROL_BYTES("codec_in0", "dcr", 52, SST_MODULE_ID_FILT_DCR,
+ SST_PATH_INDEX_CODEC_IN0, 0, SST_TASK_SBA, SBA_VB_SET_IIR),
+ SST_ALGO_KCONTROL_BYTES("codec_in1", "dcr", 52, SST_MODULE_ID_FILT_DCR,
+ SST_PATH_INDEX_CODEC_IN1, 0, SST_TASK_SBA, SBA_VB_SET_IIR),
+
+};
+
+static int sst_algo_control_init(struct device *dev)
+{
+ int i = 0;
+ struct sst_algo_control *bc;
+ /* allocate space to cache the algo parameters in the driver */
+ for (i = 0; i < ARRAY_SIZE(sst_algo_controls); i++) {
+ bc = (struct sst_algo_control *)sst_algo_controls[i].private_value;
+ bc->params = devm_kzalloc(dev, bc->max, GFP_KERNEL);
+ if (bc->params == NULL)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+int sst_dsp_init_v2_dpcm(struct snd_soc_platform *platform)
+{
+ int ret = 0;
+ struct sst_data *drv = snd_soc_platform_get_drvdata(platform);
+
+ drv->byte_stream = devm_kzalloc(platform->dev,
+ SST_MAX_BIN_BYTES, GFP_KERNEL);
+ if (!drv->byte_stream)
+ return -ENOMEM;
+
+ /* Initialize algo control params */
+ ret = sst_algo_control_init(platform->dev);
+ if (ret)
+ return ret;
+ ret = snd_soc_add_platform_controls(platform, sst_algo_controls,
+ ARRAY_SIZE(sst_algo_controls));
+ return ret;
+}
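
sst_fill_byte_control() above builds a small header in drv->byte_stream and copies the command payload behind it, rejecting anything that would overflow the SST_MAX_BIN_BYTES IPC buffer. The sketch below mirrors only that bounds check and copy in plain user-space C; the header layout is not the real snd_sst_bytes_v2 definition and SST_MAX_BIN_BYTES is given an assumed value for the demo.

    /* Standalone sketch, not part of the patch. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define SST_MAX_BIN_BYTES   1024    /* assumed size for the demo */

    struct fake_bytes_hdr {
        uint8_t type, ipc_msg, block, task_id, pipe_id;
        uint16_t len;
        uint8_t bytes[];                /* payload copied after the header */
    };

    static int fill_byte_control(void *stream, const void *cmd_data, uint16_t len)
    {
        struct fake_bytes_hdr *hdr = stream;

        if (len > SST_MAX_BIN_BYTES - sizeof(*hdr))
            return -1;                  /* command length too big */
        hdr->len = len;
        memcpy(hdr->bytes, cmd_data, len);
        return 0;
    }

    int main(void)
    {
        static uint8_t stream[SST_MAX_BIN_BYTES];
        uint8_t cmd[64] = { 0 };

        printf("64-byte payload accepted:   %d\n",
               fill_byte_control(stream, cmd, sizeof(cmd)));
        printf("2000-byte payload accepted: %d\n",
               fill_byte_control(stream, cmd, 2000));
        return 0;
    }
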
diff --git a/sound/soc/intel/sst-atom-controls.h b/sound/soc/intel/sst-atom-controls.h
index 14063ab8c7c5..a73e894b175c 100644
--- a/sound/soc/intel/sst-atom-controls.h
+++ b/sound/soc/intel/sst-atom-controls.h
@@ -1,4 +1,6 @@
/*
+ * sst-atom-controls.h - Intel MID Platform driver header file
+ *
* Copyright (C) 2013-14 Intel Corp
* Author: Ramesh Babu <ramesh.babu.koul@intel.com>
* Omair M Abdullah <omair.m.abdullah@intel.com>
@@ -18,13 +20,423 @@
*
*/
-#ifndef __SST_CONTROLS_V2_H__
-#define __SST_CONTROLS_V2_H__
+#ifndef __SST_ATOM_CONTROLS_H__
+#define __SST_ATOM_CONTROLS_H__
enum {
MERR_DPCM_AUDIO = 0,
MERR_DPCM_COMPR,
};
+/* define a bit for each mixer input */
+#define SST_MIX_IP(x) (x)
+
+#define SST_IP_CODEC0 SST_MIX_IP(2)
+#define SST_IP_CODEC1 SST_MIX_IP(3)
+#define SST_IP_LOOP0 SST_MIX_IP(4)
+#define SST_IP_LOOP1 SST_MIX_IP(5)
+#define SST_IP_LOOP2 SST_MIX_IP(6)
+#define SST_IP_PROBE SST_MIX_IP(7)
+#define SST_IP_VOIP SST_MIX_IP(12)
+#define SST_IP_PCM0 SST_MIX_IP(13)
+#define SST_IP_PCM1 SST_MIX_IP(14)
+#define SST_IP_MEDIA0 SST_MIX_IP(17)
+#define SST_IP_MEDIA1 SST_MIX_IP(18)
+#define SST_IP_MEDIA2 SST_MIX_IP(19)
+#define SST_IP_MEDIA3 SST_MIX_IP(20)
+
+#define SST_IP_LAST SST_IP_MEDIA3
+
+#define SST_SWM_INPUT_COUNT (SST_IP_LAST + 1)
+#define SST_CMD_SWM_MAX_INPUTS 6
+
+#define SST_PATH_ID_SHIFT 8
+#define SST_DEFAULT_LOCATION_ID 0xFFFF
+#define SST_DEFAULT_CELL_NBR 0xFF
+#define SST_DEFAULT_MODULE_ID 0xFFFF
+
+/*
+ * Audio DSP Path Ids. Specified by the audio DSP FW
+ */
+enum sst_path_index {
+ SST_PATH_INDEX_CODEC_OUT0 = (0x02 << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_CODEC_OUT1 = (0x03 << SST_PATH_ID_SHIFT),
+
+ SST_PATH_INDEX_SPROT_LOOP_OUT = (0x04 << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_MEDIA_LOOP1_OUT = (0x05 << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_MEDIA_LOOP2_OUT = (0x06 << SST_PATH_ID_SHIFT),
+
+ SST_PATH_INDEX_VOIP_OUT = (0x0C << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_PCM0_OUT = (0x0D << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_PCM1_OUT = (0x0E << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_PCM2_OUT = (0x0F << SST_PATH_ID_SHIFT),
+
+ SST_PATH_INDEX_MEDIA0_OUT = (0x12 << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_MEDIA1_OUT = (0x13 << SST_PATH_ID_SHIFT),
+
+
+ /* Start of input paths */
+ SST_PATH_INDEX_CODEC_IN0 = (0x82 << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_CODEC_IN1 = (0x83 << SST_PATH_ID_SHIFT),
+
+ SST_PATH_INDEX_SPROT_LOOP_IN = (0x84 << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_MEDIA_LOOP1_IN = (0x85 << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_MEDIA_LOOP2_IN = (0x86 << SST_PATH_ID_SHIFT),
+
+ SST_PATH_INDEX_VOIP_IN = (0x8C << SST_PATH_ID_SHIFT),
+
+ SST_PATH_INDEX_PCM0_IN = (0x8D << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_PCM1_IN = (0x8E << SST_PATH_ID_SHIFT),
+
+ SST_PATH_INDEX_MEDIA0_IN = (0x8F << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_MEDIA1_IN = (0x90 << SST_PATH_ID_SHIFT),
+ SST_PATH_INDEX_MEDIA2_IN = (0x91 << SST_PATH_ID_SHIFT),
+
+ SST_PATH_INDEX_MEDIA3_IN = (0x9C << SST_PATH_ID_SHIFT),
+
+ SST_PATH_INDEX_RESERVED = (0xFF << SST_PATH_ID_SHIFT),
+};
+
+/*
+ * path IDs
+ */
+enum sst_swm_inputs {
+ SST_SWM_IN_CODEC0 = (SST_PATH_INDEX_CODEC_IN0 | SST_DEFAULT_CELL_NBR),
+ SST_SWM_IN_CODEC1 = (SST_PATH_INDEX_CODEC_IN1 | SST_DEFAULT_CELL_NBR),
+ SST_SWM_IN_SPROT_LOOP = (SST_PATH_INDEX_SPROT_LOOP_IN | SST_DEFAULT_CELL_NBR),
+ SST_SWM_IN_MEDIA_LOOP1 = (SST_PATH_INDEX_MEDIA_LOOP1_IN | SST_DEFAULT_CELL_NBR),
+ SST_SWM_IN_MEDIA_LOOP2 = (SST_PATH_INDEX_MEDIA_LOOP2_IN | SST_DEFAULT_CELL_NBR),
+ SST_SWM_IN_VOIP = (SST_PATH_INDEX_VOIP_IN | SST_DEFAULT_CELL_NBR),
+ SST_SWM_IN_PCM0 = (SST_PATH_INDEX_PCM0_IN | SST_DEFAULT_CELL_NBR),
+ SST_SWM_IN_PCM1 = (SST_PATH_INDEX_PCM1_IN | SST_DEFAULT_CELL_NBR),
+ SST_SWM_IN_MEDIA0 = (SST_PATH_INDEX_MEDIA0_IN | SST_DEFAULT_CELL_NBR), /* Part of Media Mixer */
+ SST_SWM_IN_MEDIA1 = (SST_PATH_INDEX_MEDIA1_IN | SST_DEFAULT_CELL_NBR), /* Part of Media Mixer */
+ SST_SWM_IN_MEDIA2 = (SST_PATH_INDEX_MEDIA2_IN | SST_DEFAULT_CELL_NBR), /* Part of Media Mixer */
+ SST_SWM_IN_MEDIA3 = (SST_PATH_INDEX_MEDIA3_IN | SST_DEFAULT_CELL_NBR), /* Part of Media Mixer */
+ SST_SWM_IN_END = (SST_PATH_INDEX_RESERVED | SST_DEFAULT_CELL_NBR)
+};
+
+/*
+ * SWM output path IDs (path index ORed with the default cell number)
+ */
+enum sst_swm_outputs {
+ SST_SWM_OUT_CODEC0 = (SST_PATH_INDEX_CODEC_OUT0 | SST_DEFAULT_CELL_NBR),
+ SST_SWM_OUT_CODEC1 = (SST_PATH_INDEX_CODEC_OUT1 | SST_DEFAULT_CELL_NBR),
+ SST_SWM_OUT_SPROT_LOOP = (SST_PATH_INDEX_SPROT_LOOP_OUT | SST_DEFAULT_CELL_NBR),
+ SST_SWM_OUT_MEDIA_LOOP1 = (SST_PATH_INDEX_MEDIA_LOOP1_OUT | SST_DEFAULT_CELL_NBR),
+ SST_SWM_OUT_MEDIA_LOOP2 = (SST_PATH_INDEX_MEDIA_LOOP2_OUT | SST_DEFAULT_CELL_NBR),
+ SST_SWM_OUT_VOIP = (SST_PATH_INDEX_VOIP_OUT | SST_DEFAULT_CELL_NBR),
+ SST_SWM_OUT_PCM0 = (SST_PATH_INDEX_PCM0_OUT | SST_DEFAULT_CELL_NBR),
+ SST_SWM_OUT_PCM1 = (SST_PATH_INDEX_PCM1_OUT | SST_DEFAULT_CELL_NBR),
+ SST_SWM_OUT_PCM2 = (SST_PATH_INDEX_PCM2_OUT | SST_DEFAULT_CELL_NBR),
+ SST_SWM_OUT_MEDIA0 = (SST_PATH_INDEX_MEDIA0_OUT | SST_DEFAULT_CELL_NBR), /* Part of Media Mixer */
+ SST_SWM_OUT_MEDIA1 = (SST_PATH_INDEX_MEDIA1_OUT | SST_DEFAULT_CELL_NBR), /* Part of Media Mixer */
+ SST_SWM_OUT_END = (SST_PATH_INDEX_RESERVED | SST_DEFAULT_CELL_NBR),
+};
+
+enum sst_ipc_msg {
+ SST_IPC_IA_CMD = 1,
+ SST_IPC_IA_SET_PARAMS,
+ SST_IPC_IA_GET_PARAMS,
+};
+
+enum sst_cmd_type {
+ SST_CMD_BYTES_SET = 1,
+ SST_CMD_BYTES_GET = 2,
+};
+
+enum sst_task {
+ SST_TASK_SBA = 1,
+ SST_TASK_MMX,
+};
+
+enum sst_type {
+ SST_TYPE_CMD = 1,
+ SST_TYPE_PARAMS,
+};
+
+enum sst_flag {
+ SST_FLAG_BLOCKED = 1,
+ SST_FLAG_NONBLOCK,
+};
+
+/*
+ * Enumeration for indexing the gain cells in VB_SET_GAIN DSP command
+ */
+enum sst_gain_index {
+ /* GAIN IDs for SB task start here */
+ SST_GAIN_INDEX_CODEC_OUT0,
+ SST_GAIN_INDEX_CODEC_OUT1,
+ SST_GAIN_INDEX_CODEC_IN0,
+ SST_GAIN_INDEX_CODEC_IN1,
+
+ SST_GAIN_INDEX_SPROT_LOOP_OUT,
+ SST_GAIN_INDEX_MEDIA_LOOP1_OUT,
+ SST_GAIN_INDEX_MEDIA_LOOP2_OUT,
+
+ SST_GAIN_INDEX_PCM0_IN_LEFT,
+ SST_GAIN_INDEX_PCM0_IN_RIGHT,
+
+ SST_GAIN_INDEX_PCM1_OUT_LEFT,
+ SST_GAIN_INDEX_PCM1_OUT_RIGHT,
+ SST_GAIN_INDEX_PCM1_IN_LEFT,
+ SST_GAIN_INDEX_PCM1_IN_RIGHT,
+ SST_GAIN_INDEX_PCM2_OUT_LEFT,
+
+ SST_GAIN_INDEX_PCM2_OUT_RIGHT,
+ SST_GAIN_INDEX_VOIP_OUT,
+ SST_GAIN_INDEX_VOIP_IN,
+
+ /* Gain IDs for MMX task start here */
+ SST_GAIN_INDEX_MEDIA0_IN_LEFT,
+ SST_GAIN_INDEX_MEDIA0_IN_RIGHT,
+ SST_GAIN_INDEX_MEDIA1_IN_LEFT,
+ SST_GAIN_INDEX_MEDIA1_IN_RIGHT,
+
+ SST_GAIN_INDEX_MEDIA2_IN_LEFT,
+ SST_GAIN_INDEX_MEDIA2_IN_RIGHT,
+
+ SST_GAIN_INDEX_GAIN_END
+};
+
+/*
+ * Audio DSP module IDs specified by FW spec
+ * TODO: Update with all modules
+ */
+enum sst_module_id {
+ SST_MODULE_ID_PCM = 0x0001,
+ SST_MODULE_ID_MP3 = 0x0002,
+ SST_MODULE_ID_MP24 = 0x0003,
+ SST_MODULE_ID_AAC = 0x0004,
+ SST_MODULE_ID_AACP = 0x0005,
+ SST_MODULE_ID_EAACP = 0x0006,
+ SST_MODULE_ID_WMA9 = 0x0007,
+ SST_MODULE_ID_WMA10 = 0x0008,
+ SST_MODULE_ID_WMA10P = 0x0009,
+ SST_MODULE_ID_RA = 0x000A,
+ SST_MODULE_ID_DDAC3 = 0x000B,
+ SST_MODULE_ID_TRUE_HD = 0x000C,
+ SST_MODULE_ID_HD_PLUS = 0x000D,
+
+ SST_MODULE_ID_SRC = 0x0064,
+ SST_MODULE_ID_DOWNMIX = 0x0066,
+ SST_MODULE_ID_GAIN_CELL = 0x0067,
+ SST_MODULE_ID_SPROT = 0x006D,
+ SST_MODULE_ID_BASS_BOOST = 0x006E,
+ SST_MODULE_ID_STEREO_WDNG = 0x006F,
+ SST_MODULE_ID_AV_REMOVAL = 0x0070,
+ SST_MODULE_ID_MIC_EQ = 0x0071,
+ SST_MODULE_ID_SPL = 0x0072,
+ SST_MODULE_ID_ALGO_VTSV = 0x0073,
+ SST_MODULE_ID_NR = 0x0076,
+ SST_MODULE_ID_BWX = 0x0077,
+ SST_MODULE_ID_DRP = 0x0078,
+ SST_MODULE_ID_MDRP = 0x0079,
+
+ SST_MODULE_ID_ANA = 0x007A,
+ SST_MODULE_ID_AEC = 0x007B,
+ SST_MODULE_ID_NR_SNS = 0x007C,
+ SST_MODULE_ID_SER = 0x007D,
+ SST_MODULE_ID_AGC = 0x007E,
+
+ SST_MODULE_ID_CNI = 0x007F,
+ SST_MODULE_ID_CONTEXT_ALGO_AWARE = 0x0080,
+ SST_MODULE_ID_FIR_24 = 0x0081,
+ SST_MODULE_ID_IIR_24 = 0x0082,
+
+ SST_MODULE_ID_ASRC = 0x0083,
+ SST_MODULE_ID_TONE_GEN = 0x0084,
+ SST_MODULE_ID_BMF = 0x0086,
+ SST_MODULE_ID_EDL = 0x0087,
+ SST_MODULE_ID_GLC = 0x0088,
+
+ SST_MODULE_ID_FIR_16 = 0x0089,
+ SST_MODULE_ID_IIR_16 = 0x008A,
+ SST_MODULE_ID_DNR = 0x008B,
+
+ SST_MODULE_ID_VIRTUALIZER = 0x008C,
+ SST_MODULE_ID_VISUALIZATION = 0x008D,
+ SST_MODULE_ID_LOUDNESS_OPTIMIZER = 0x008E,
+ SST_MODULE_ID_REVERBERATION = 0x008F,
+
+ SST_MODULE_ID_CNI_TX = 0x0090,
+ SST_MODULE_ID_REF_LINE = 0x0091,
+ SST_MODULE_ID_VOLUME = 0x0092,
+ SST_MODULE_ID_FILT_DCR = 0x0094,
+ SST_MODULE_ID_SLV = 0x009A,
+ SST_MODULE_ID_NLF = 0x009B,
+ SST_MODULE_ID_TNR = 0x009C,
+ SST_MODULE_ID_WNR = 0x009D,
+
+ SST_MODULE_ID_LOG = 0xFF00,
+
+ SST_MODULE_ID_TASK = 0xFFFF,
+};
+
+enum sst_cmd {
+ SBA_IDLE = 14,
+ SBA_VB_SET_SPEECH_PATH = 26,
+ MMX_SET_GAIN = 33,
+ SBA_VB_SET_GAIN = 33,
+ FBA_VB_RX_CNI = 35,
+ MMX_SET_GAIN_TIMECONST = 36,
+ SBA_VB_SET_TIMECONST = 36,
+ SBA_VB_START = 85,
+ SBA_SET_SWM = 114,
+ SBA_SET_MDRP = 116,
+ SBA_HW_SET_SSP = 117,
+ SBA_SET_MEDIA_LOOP_MAP = 118,
+ SBA_SET_MEDIA_PATH = 119,
+ MMX_SET_MEDIA_PATH = 119,
+ SBA_VB_LPRO = 126,
+ SBA_VB_SET_FIR = 128,
+ SBA_VB_SET_IIR = 129,
+ SBA_SET_SSP_SLOT_MAP = 130,
+};
+
+enum sst_dsp_switch {
+ SST_SWITCH_OFF = 0,
+ SST_SWITCH_ON = 3,
+};
+
+enum sst_path_switch {
+ SST_PATH_OFF = 0,
+ SST_PATH_ON = 1,
+};
+
+enum sst_swm_state {
+ SST_SWM_OFF = 0,
+ SST_SWM_ON = 3,
+};
+
+#define SST_FILL_LOCATION_IDS(dst, cell_idx, pipe_id) do { \
+ dst.location_id.p.cell_nbr_idx = (cell_idx); \
+ dst.location_id.p.path_id = (pipe_id); \
+ } while (0)
+#define SST_FILL_LOCATION_ID(dst, loc_id) (\
+ dst.location_id.f = (loc_id))
+#define SST_FILL_MODULE_ID(dst, mod_id) (\
+ dst.module_id = (mod_id))
+
+#define SST_FILL_DESTINATION1(dst, id) do { \
+ SST_FILL_LOCATION_ID(dst, (id) & 0xFFFF); \
+ SST_FILL_MODULE_ID(dst, ((id) & 0xFFFF0000) >> 16); \
+ } while (0)
+#define SST_FILL_DESTINATION2(dst, loc_id, mod_id) do { \
+ SST_FILL_LOCATION_ID(dst, loc_id); \
+ SST_FILL_MODULE_ID(dst, mod_id); \
+ } while (0)
+#define SST_FILL_DESTINATION3(dst, cell_idx, path_id, mod_id) do { \
+ SST_FILL_LOCATION_IDS(dst, cell_idx, path_id); \
+ SST_FILL_MODULE_ID(dst, mod_id); \
+ } while (0)
+
+#define SST_FILL_DESTINATION(level, dst, ...) \
+ SST_FILL_DESTINATION##level(dst, __VA_ARGS__)
+#define SST_FILL_DEFAULT_DESTINATION(dst) \
+ SST_FILL_DESTINATION(2, dst, SST_DEFAULT_LOCATION_ID, SST_DEFAULT_MODULE_ID)
+
+struct sst_destination_id {
+ union sst_location_id {
+ struct {
+ u8 cell_nbr_idx; /* module index */
+ u8 path_id; /* pipe_id */
+ } __packed p; /* part */
+ u16 f; /* full */
+ } __packed location_id;
+ u16 module_id;
+} __packed;
+struct sst_dsp_header {
+ struct sst_destination_id dst;
+ u16 command_id;
+ u16 length;
+} __packed;
+
+/*
+ *
+ * Common Commands
+ *
+ */
+struct sst_cmd_generic {
+ struct sst_dsp_header header;
+} __packed;
+struct sst_cmd_set_params {
+ struct sst_destination_id dst;
+ u16 command_id;
+ char params[0];
+} __packed;
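[Editor's note, not part of the patch: a minimal sketch of how the destination
helpers above compose. SST_FILL_DESTINATION(level, ...) token-pastes the level
onto SST_FILL_DESTINATION<level>, so a command header can be aimed at a
module/path pair in one call. The particular IDs below are arbitrary example
values:

	struct sst_cmd_generic cmd = {};

	/* target the gain-cell module on the PCM0 input path (example values) */
	SST_FILL_DESTINATION(2, cmd.header.dst,
			     SST_SWM_IN_PCM0, SST_MODULE_ID_GAIN_CELL);
	cmd.header.command_id = SBA_VB_SET_GAIN;
	cmd.header.length = 0;
]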
+#define SST_CONTROL_NAME(xpname, xmname, xinstance, xtype) \
+ xpname " " xmname " " #xinstance " " xtype
+
+#define SST_COMBO_CONTROL_NAME(xpname, xmname, xinstance, xtype, xsubmodule) \
+ xpname " " xmname " " #xinstance " " xtype " " xsubmodule
+enum sst_algo_kcontrol_type {
+ SST_ALGO_PARAMS,
+ SST_ALGO_BYPASS,
+};
+
+struct sst_algo_control {
+ enum sst_algo_kcontrol_type type;
+ int max;
+ u16 module_id;
+ u16 pipe_id;
+ u16 task_id;
+ u16 cmd_id;
+ bool bypass;
+ unsigned char *params;
+ struct snd_soc_dapm_widget *w;
+};
+
+/* size of the control = size of params + size of length field */
+#define SST_ALGO_CTL_VALUE(xcount, xtype, xpipe, xmod, xtask, xcmd) \
+ (struct sst_algo_control){ \
+ .max = xcount + sizeof(u16), .type = xtype, .module_id = xmod, \
+ .pipe_id = xpipe, .task_id = xtask, .cmd_id = xcmd, \
+ }
+
+#define SST_ALGO_KCONTROL(xname, xcount, xmod, xpipe, \
+ xtask, xcmd, xtype, xinfo, xget, xput) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+ .name = xname, \
+ .info = xinfo, .get = xget, .put = xput, \
+ .private_value = (unsigned long)& \
+ SST_ALGO_CTL_VALUE(xcount, xtype, xpipe, \
+ xmod, xtask, xcmd), \
+}
+
+#define SST_ALGO_KCONTROL_BYTES(xpname, xmname, xcount, xmod, \
+ xpipe, xinstance, xtask, xcmd) \
+ SST_ALGO_KCONTROL(SST_CONTROL_NAME(xpname, xmname, xinstance, "params"), \
+ xcount, xmod, xpipe, xtask, xcmd, SST_ALGO_PARAMS, \
+ sst_algo_bytes_ctl_info, \
+ sst_algo_control_get, sst_algo_control_set)
+
+#define SST_ALGO_KCONTROL_BOOL(xpname, xmname, xmod, xpipe, xinstance, xtask) \
+ SST_ALGO_KCONTROL(SST_CONTROL_NAME(xpname, xmname, xinstance, "bypass"), \
+ 0, xmod, xpipe, xtask, 0, SST_ALGO_BYPASS, \
+ snd_soc_info_bool_ext, \
+ sst_algo_control_get, sst_algo_control_set)
+
+#define SST_ALGO_BYPASS_PARAMS(xpname, xmname, xcount, xmod, xpipe, \
+ xinstance, xtask, xcmd) \
+ SST_ALGO_KCONTROL_BOOL(xpname, xmname, xmod, xpipe, xinstance, xtask), \
+ SST_ALGO_KCONTROL_BYTES(xpname, xmname, xcount, xmod, xpipe, xinstance, xtask, xcmd)
+
+#define SST_COMBO_ALGO_KCONTROL_BYTES(xpname, xmname, xsubmod, xcount, xmod, \
+ xpipe, xinstance, xtask, xcmd) \
+ SST_ALGO_KCONTROL(SST_COMBO_CONTROL_NAME(xpname, xmname, xinstance, "params", \
+ xsubmod), \
+ xcount, xmod, xpipe, xtask, xcmd, SST_ALGO_PARAMS, \
+ sst_algo_bytes_ctl_info, \
+ sst_algo_control_get, sst_algo_control_set)
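[Editor's note, not part of the patch: illustrative use of the kcontrol helpers
above. SST_ALGO_BYPASS_PARAMS expands to a bypass switch plus a bytes control
sharing one name prefix; the get/put/info handlers referenced by the macros are
provided by the driver. The control name, byte count and IDs below are made-up
example values:

	static const struct snd_kcontrol_new example_algo_controls[] = {
		/* creates "media_loop1_out fir 0 bypass" and
		 * "media_loop1_out fir 0 params" */
		SST_ALGO_BYPASS_PARAMS("media_loop1_out", "fir", 272,
				       SST_MODULE_ID_FIR_24,
				       SST_PATH_INDEX_MEDIA_LOOP1_OUT,
				       0, SST_TASK_SBA, SBA_VB_SET_FIR),
	};
]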
+
+
+struct sst_enum {
+ bool tx;
+ unsigned short reg;
+ unsigned int max;
+ const char * const *texts;
+ struct snd_soc_dapm_widget *w;
+};
#endif
diff --git a/sound/soc/intel/sst-haswell-pcm.c b/sound/soc/intel/sst-haswell-pcm.c
index 61bf6da4bb02..33fc5c3abf55 100644
--- a/sound/soc/intel/sst-haswell-pcm.c
+++ b/sound/soc/intel/sst-haswell-pcm.c
@@ -138,11 +138,10 @@ static inline unsigned int hsw_ipc_to_mixer(u32 value)
static int hsw_stream_volume_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_platform *platform = snd_soc_kcontrol_platform(kcontrol);
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct hsw_priv_data *pdata = snd_soc_component_get_drvdata(cmpnt);
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
- struct hsw_priv_data *pdata =
- snd_soc_platform_get_drvdata(platform);
struct hsw_pcm_data *pcm_data = &pdata->pcm[mc->reg];
struct sst_hsw *hsw = pdata->hsw;
u32 volume;
@@ -176,11 +175,10 @@ static int hsw_stream_volume_put(struct snd_kcontrol *kcontrol,
static int hsw_stream_volume_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_platform *platform = snd_soc_kcontrol_platform(kcontrol);
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct hsw_priv_data *pdata = snd_soc_component_get_drvdata(cmpnt);
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
- struct hsw_priv_data *pdata =
- snd_soc_platform_get_drvdata(platform);
struct hsw_pcm_data *pcm_data = &pdata->pcm[mc->reg];
struct sst_hsw *hsw = pdata->hsw;
u32 volume;
@@ -208,8 +206,8 @@ static int hsw_stream_volume_get(struct snd_kcontrol *kcontrol,
static int hsw_volume_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_platform *platform = snd_soc_kcontrol_platform(kcontrol);
- struct hsw_priv_data *pdata = snd_soc_platform_get_drvdata(platform);
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct hsw_priv_data *pdata = snd_soc_component_get_drvdata(cmpnt);
struct sst_hsw *hsw = pdata->hsw;
u32 volume;
@@ -233,8 +231,8 @@ static int hsw_volume_put(struct snd_kcontrol *kcontrol,
static int hsw_volume_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_platform *platform = snd_soc_kcontrol_platform(kcontrol);
- struct hsw_priv_data *pdata = snd_soc_platform_get_drvdata(platform);
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct hsw_priv_data *pdata = snd_soc_component_get_drvdata(cmpnt);
struct sst_hsw *hsw = pdata->hsw;
unsigned int volume = 0;
@@ -778,20 +776,11 @@ static const struct snd_soc_dapm_route graph[] = {
static int hsw_pcm_probe(struct snd_soc_platform *platform)
{
+ struct hsw_priv_data *priv_data = snd_soc_platform_get_drvdata(platform);
struct sst_pdata *pdata = dev_get_platdata(platform->dev);
- struct hsw_priv_data *priv_data;
- struct device *dma_dev;
+ struct device *dma_dev = pdata->dma_dev;
int i, ret = 0;
- if (!pdata)
- return -ENODEV;
-
- dma_dev = pdata->dma_dev;
-
- priv_data = devm_kzalloc(platform->dev, sizeof(*priv_data), GFP_KERNEL);
- priv_data->hsw = pdata->dsp;
- snd_soc_platform_set_drvdata(platform, priv_data);
-
/* allocate DSP buffer page tables */
for (i = 0; i < ARRAY_SIZE(hsw_dais); i++) {
@@ -848,27 +837,38 @@ static struct snd_soc_platform_driver hsw_soc_platform = {
.ops = &hsw_pcm_ops,
.pcm_new = hsw_pcm_new,
.pcm_free = hsw_pcm_free,
- .controls = hsw_volume_controls,
- .num_controls = ARRAY_SIZE(hsw_volume_controls),
- .dapm_widgets = widgets,
- .num_dapm_widgets = ARRAY_SIZE(widgets),
- .dapm_routes = graph,
- .num_dapm_routes = ARRAY_SIZE(graph),
};
static const struct snd_soc_component_driver hsw_dai_component = {
- .name = "haswell-dai",
+ .name = "haswell-dai",
+ .controls = hsw_volume_controls,
+ .num_controls = ARRAY_SIZE(hsw_volume_controls),
+ .dapm_widgets = widgets,
+ .num_dapm_widgets = ARRAY_SIZE(widgets),
+ .dapm_routes = graph,
+ .num_dapm_routes = ARRAY_SIZE(graph),
};
static int hsw_pcm_dev_probe(struct platform_device *pdev)
{
struct sst_pdata *sst_pdata = dev_get_platdata(&pdev->dev);
+ struct hsw_priv_data *priv_data;
int ret;
+ if (!sst_pdata)
+ return -EINVAL;
+
+ priv_data = devm_kzalloc(&pdev->dev, sizeof(*priv_data), GFP_KERNEL);
+ if (!priv_data)
+ return -ENOMEM;
+
ret = sst_hsw_dsp_init(&pdev->dev, sst_pdata);
if (ret < 0)
return -ENODEV;
+ priv_data->hsw = sst_pdata->dsp;
+ platform_set_drvdata(pdev, priv_data);
+
ret = snd_soc_register_platform(&pdev->dev, &hsw_soc_platform);
if (ret < 0)
goto err_plat;
diff --git a/sound/soc/intel/sst-mfld-platform-compress.c b/sound/soc/intel/sst-mfld-platform-compress.c
index 29c059ca19e8..59467775c9b8 100644
--- a/sound/soc/intel/sst-mfld-platform-compress.c
+++ b/sound/soc/intel/sst-mfld-platform-compress.c
@@ -86,7 +86,7 @@ static int sst_platform_compr_free(struct snd_compr_stream *cstream)
/*need to check*/
str_id = stream->id;
if (str_id)
- ret_val = stream->compr_ops->close(str_id);
+ ret_val = stream->compr_ops->close(sst->dev, str_id);
module_put(sst->dev->driver->owner);
kfree(stream);
pr_debug("%s: %d\n", __func__, ret_val);
@@ -158,7 +158,7 @@ static int sst_platform_compr_set_params(struct snd_compr_stream *cstream,
cb.drain_cb_param = cstream;
cb.drain_notify = sst_drain_notify;
- retval = stream->compr_ops->open(&str_params, &cb);
+ retval = stream->compr_ops->open(sst->dev, &str_params, &cb);
if (retval < 0) {
pr_err("stream allocation failed %d\n", retval);
return retval;
@@ -170,10 +170,30 @@ static int sst_platform_compr_set_params(struct snd_compr_stream *cstream,
static int sst_platform_compr_trigger(struct snd_compr_stream *cstream, int cmd)
{
- struct sst_runtime_stream *stream =
- cstream->runtime->private_data;
-
- return stream->compr_ops->control(cmd, stream->id);
+ struct sst_runtime_stream *stream = cstream->runtime->private_data;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ if (stream->compr_ops->stream_start)
+ return stream->compr_ops->stream_start(sst->dev, stream->id);
+ case SNDRV_PCM_TRIGGER_STOP:
+ if (stream->compr_ops->stream_drop)
+ return stream->compr_ops->stream_drop(sst->dev, stream->id);
+ case SND_COMPR_TRIGGER_DRAIN:
+ if (stream->compr_ops->stream_drain)
+ return stream->compr_ops->stream_drain(sst->dev, stream->id);
+ case SND_COMPR_TRIGGER_PARTIAL_DRAIN:
+ if (stream->compr_ops->stream_partial_drain)
+ return stream->compr_ops->stream_partial_drain(sst->dev, stream->id);
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ if (stream->compr_ops->stream_pause)
+ return stream->compr_ops->stream_pause(sst->dev, stream->id);
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ if (stream->compr_ops->stream_pause_release)
+ return stream->compr_ops->stream_pause_release(sst->dev, stream->id);
+ default:
+ return -EINVAL;
+ }
}
static int sst_platform_compr_pointer(struct snd_compr_stream *cstream,
@@ -182,7 +202,7 @@ static int sst_platform_compr_pointer(struct snd_compr_stream *cstream,
struct sst_runtime_stream *stream;
stream = cstream->runtime->private_data;
- stream->compr_ops->tstamp(stream->id, tstamp);
+ stream->compr_ops->tstamp(sst->dev, stream->id, tstamp);
tstamp->byte_offset = tstamp->copied_total %
(u32)cstream->runtime->buffer_size;
pr_debug("calc bytes offset/copied bytes as %d\n", tstamp->byte_offset);
@@ -195,7 +215,7 @@ static int sst_platform_compr_ack(struct snd_compr_stream *cstream,
struct sst_runtime_stream *stream;
stream = cstream->runtime->private_data;
- stream->compr_ops->ack(stream->id, (unsigned long)bytes);
+ stream->compr_ops->ack(sst->dev, stream->id, (unsigned long)bytes);
stream->bytes_written += bytes;
return 0;
@@ -225,7 +245,7 @@ static int sst_platform_compr_set_metadata(struct snd_compr_stream *cstream,
struct sst_runtime_stream *stream =
cstream->runtime->private_data;
- return stream->compr_ops->set_metadata(stream->id, metadata);
+ return stream->compr_ops->set_metadata(sst->dev, stream->id, metadata);
}
struct snd_compr_ops sst_platform_compr_ops = {
diff --git a/sound/soc/intel/sst-mfld-platform-pcm.c b/sound/soc/intel/sst-mfld-platform-pcm.c
index 706212a6a68c..aa9b600dfc9b 100644
--- a/sound/soc/intel/sst-mfld-platform-pcm.c
+++ b/sound/soc/intel/sst-mfld-platform-pcm.c
@@ -43,12 +43,12 @@ int sst_register_dsp(struct sst_device *dev)
return -ENODEV;
mutex_lock(&sst_lock);
if (sst) {
- pr_err("we already have a device %s\n", sst->name);
+ dev_err(dev->dev, "we already have a device %s\n", sst->name);
module_put(dev->dev->driver->owner);
mutex_unlock(&sst_lock);
return -EEXIST;
}
- pr_debug("registering device %s\n", dev->name);
+ dev_dbg(dev->dev, "registering device %s\n", dev->name);
sst = dev;
mutex_unlock(&sst_lock);
return 0;
@@ -70,7 +70,7 @@ int sst_unregister_dsp(struct sst_device *dev)
}
module_put(sst->dev->driver->owner);
- pr_debug("unreg %s\n", sst->name);
+ dev_dbg(dev->dev, "unreg %s\n", sst->name);
sst = NULL;
mutex_unlock(&sst_lock);
return 0;
@@ -252,7 +252,7 @@ int sst_fill_stream_params(void *substream,
}
static int sst_platform_alloc_stream(struct snd_pcm_substream *substream,
- struct snd_soc_platform *platform)
+ struct snd_soc_dai *dai)
{
struct sst_runtime_stream *stream =
substream->runtime->private_data;
@@ -260,7 +260,7 @@ static int sst_platform_alloc_stream(struct snd_pcm_substream *substream,
struct snd_sst_params str_params = {0};
struct snd_sst_alloc_params_ext alloc_params = {0};
int ret_val = 0;
- struct sst_data *ctx = snd_soc_platform_get_drvdata(platform);
+ struct sst_data *ctx = snd_soc_dai_get_drvdata(dai);
/* set codec params and inform SST driver the same */
sst_fill_pcm_params(substream, &param);
@@ -277,7 +277,7 @@ static int sst_platform_alloc_stream(struct snd_pcm_substream *substream,
stream->stream_info.str_id = str_params.stream_id;
- ret_val = stream->ops->open(&str_params);
+ ret_val = stream->ops->open(sst->dev, &str_params);
if (ret_val <= 0)
return ret_val;
@@ -306,22 +306,31 @@ static int sst_platform_init_stream(struct snd_pcm_substream *substream)
{
struct sst_runtime_stream *stream =
substream->runtime->private_data;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
int ret_val;
- pr_debug("setting buffer ptr param\n");
+ dev_dbg(rtd->dev, "setting buffer ptr param\n");
sst_set_stream_status(stream, SST_PLATFORM_INIT);
stream->stream_info.period_elapsed = sst_period_elapsed;
stream->stream_info.arg = substream;
stream->stream_info.buffer_ptr = 0;
stream->stream_info.sfreq = substream->runtime->rate;
- ret_val = stream->ops->device_control(
- SST_SND_STREAM_INIT, &stream->stream_info);
+ ret_val = stream->ops->stream_init(sst->dev, &stream->stream_info);
if (ret_val)
- pr_err("control_set ret error %d\n", ret_val);
+ dev_err(rtd->dev, "control_set ret error %d\n", ret_val);
return ret_val;
}
-/* end -- helper functions */
+
+static int power_up_sst(struct sst_runtime_stream *stream)
+{
+ return stream->ops->power(sst->dev, true);
+}
+
+static void power_down_sst(struct sst_runtime_stream *stream)
+{
+ stream->ops->power(sst->dev, false);
+}
static int sst_media_open(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
@@ -339,7 +348,7 @@ static int sst_media_open(struct snd_pcm_substream *substream,
mutex_lock(&sst_lock);
if (!sst ||
!try_module_get(sst->dev->driver->owner)) {
- pr_err("no device available to run\n");
+ dev_err(dai->dev, "no device available to run\n");
ret_val = -ENODEV;
goto out_ops;
}
@@ -352,6 +361,10 @@ static int sst_media_open(struct snd_pcm_substream *substream,
/* allocate memory for SST API set */
runtime->private_data = stream;
+ ret_val = power_up_sst(stream);
+ if (ret_val < 0)
+ return ret_val;
+
/* Make sure, that the period size is always even */
snd_pcm_hw_constraint_step(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_PERIODS, 2);
@@ -371,26 +384,29 @@ static void sst_media_close(struct snd_pcm_substream *substream,
int ret_val = 0, str_id;
stream = substream->runtime->private_data;
+ power_down_sst(stream);
+
str_id = stream->stream_info.str_id;
if (str_id)
- ret_val = stream->ops->close(str_id);
+ ret_val = stream->ops->close(sst->dev, str_id);
module_put(sst->dev->driver->owner);
kfree(stream);
}
-static inline unsigned int get_current_pipe_id(struct snd_soc_platform *platform,
+static inline unsigned int get_current_pipe_id(struct snd_soc_dai *dai,
struct snd_pcm_substream *substream)
{
- struct sst_data *sst = snd_soc_platform_get_drvdata(platform);
+ struct sst_data *sst = snd_soc_dai_get_drvdata(dai);
struct sst_dev_stream_map *map = sst->pdata->pdev_strm_map;
struct sst_runtime_stream *stream =
substream->runtime->private_data;
u32 str_id = stream->stream_info.str_id;
unsigned int pipe_id;
+
pipe_id = map[str_id].device_id;
- pr_debug("%s: got pipe_id = %#x for str_id = %d\n",
- __func__, pipe_id, str_id);
+ dev_dbg(dai->dev, "got pipe_id = %#x for str_id = %d\n",
+ pipe_id, str_id);
return pipe_id;
}
@@ -403,12 +419,11 @@ static int sst_media_prepare(struct snd_pcm_substream *substream,
stream = substream->runtime->private_data;
str_id = stream->stream_info.str_id;
if (stream->stream_info.str_id) {
- ret_val = stream->ops->device_control(
- SST_SND_DROP, &str_id);
+ ret_val = stream->ops->stream_drop(sst->dev, str_id);
return ret_val;
}
- ret_val = sst_platform_alloc_stream(substream, dai->platform);
+ ret_val = sst_platform_alloc_stream(substream, dai);
if (ret_val <= 0)
return ret_val;
snprintf(substream->pcm->id, sizeof(substream->pcm->id),
@@ -461,37 +476,40 @@ static int sst_platform_pcm_trigger(struct snd_pcm_substream *substream,
{
int ret_val = 0, str_id;
struct sst_runtime_stream *stream;
- int str_cmd, status;
+ int status;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
- pr_debug("sst_platform_pcm_trigger called\n");
+ dev_dbg(rtd->dev, "sst_platform_pcm_trigger called\n");
+ if (substream->pcm->internal)
+ return 0;
stream = substream->runtime->private_data;
str_id = stream->stream_info.str_id;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
- pr_debug("sst: Trigger Start\n");
- str_cmd = SST_SND_START;
+ dev_dbg(rtd->dev, "sst: Trigger Start\n");
status = SST_PLATFORM_RUNNING;
stream->stream_info.arg = substream;
+ ret_val = stream->ops->stream_start(sst->dev, str_id);
break;
case SNDRV_PCM_TRIGGER_STOP:
- pr_debug("sst: in stop\n");
- str_cmd = SST_SND_DROP;
+ dev_dbg(rtd->dev, "sst: in stop\n");
status = SST_PLATFORM_DROPPED;
+ ret_val = stream->ops->stream_drop(sst->dev, str_id);
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- pr_debug("sst: in pause\n");
- str_cmd = SST_SND_PAUSE;
+ dev_dbg(rtd->dev, "sst: in pause\n");
status = SST_PLATFORM_PAUSED;
+ ret_val = stream->ops->stream_pause(sst->dev, str_id);
break;
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- pr_debug("sst: in pause release\n");
- str_cmd = SST_SND_RESUME;
+ dev_dbg(rtd->dev, "sst: in pause release\n");
status = SST_PLATFORM_RUNNING;
+ ret_val = stream->ops->stream_pause_release(sst->dev, str_id);
break;
default:
return -EINVAL;
}
- ret_val = stream->ops->device_control(str_cmd, &str_id);
+
if (!ret_val)
sst_set_stream_status(stream, status);
@@ -505,16 +523,16 @@ static snd_pcm_uframes_t sst_platform_pcm_pointer
struct sst_runtime_stream *stream;
int ret_val, status;
struct pcm_stream_info *str_info;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
stream = substream->runtime->private_data;
status = sst_get_stream_status(stream);
if (status == SST_PLATFORM_INIT)
return 0;
str_info = &stream->stream_info;
- ret_val = stream->ops->device_control(
- SST_SND_BUFFER_POINTER, str_info);
+ ret_val = stream->ops->stream_read_tstamp(sst->dev, str_info);
if (ret_val) {
- pr_err("sst: error code = %d\n", ret_val);
+ dev_err(rtd->dev, "sst: error code = %d\n", ret_val);
return ret_val;
}
substream->runtime->delay = str_info->pcm_delay;
@@ -530,7 +548,7 @@ static struct snd_pcm_ops sst_platform_ops = {
static void sst_pcm_free(struct snd_pcm *pcm)
{
- pr_debug("sst_pcm_free called\n");
+ dev_dbg(pcm->dev, "sst_pcm_free called\n");
snd_pcm_lib_preallocate_free_for_all(pcm);
}
@@ -547,14 +565,20 @@ static int sst_pcm_new(struct snd_soc_pcm_runtime *rtd)
snd_dma_continuous_data(GFP_DMA),
SST_MIN_BUFFER, SST_MAX_BUFFER);
if (retval) {
- pr_err("dma buffer allocationf fail\n");
+ dev_err(rtd->dev, "dma buffer allocation failed\n");
return retval;
}
}
return retval;
}
-static struct snd_soc_platform_driver sst_soc_platform_drv = {
+static int sst_soc_probe(struct snd_soc_platform *platform)
+{
+ return sst_dsp_init_v2_dpcm(platform);
+}
+
+static struct snd_soc_platform_driver sst_soc_platform_drv = {
+ .probe = sst_soc_probe,
.ops = &sst_platform_ops,
.compr_ops = &sst_platform_compr_ops,
.pcm_new = sst_pcm_new,
@@ -574,13 +598,11 @@ static int sst_platform_probe(struct platform_device *pdev)
drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
if (drv == NULL) {
- pr_err("kzalloc failed\n");
return -ENOMEM;
}
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (pdata == NULL) {
- pr_err("kzalloc failed for pdata\n");
return -ENOMEM;
}
@@ -592,14 +614,14 @@ static int sst_platform_probe(struct platform_device *pdev)
ret = snd_soc_register_platform(&pdev->dev, &sst_soc_platform_drv);
if (ret) {
- pr_err("registering soc platform failed\n");
+ dev_err(&pdev->dev, "registering soc platform failed\n");
return ret;
}
ret = snd_soc_register_component(&pdev->dev, &sst_component,
sst_platform_dai, ARRAY_SIZE(sst_platform_dai));
if (ret) {
- pr_err("registering cpu dais failed\n");
+ dev_err(&pdev->dev, "registering cpu dais failed\n");
snd_soc_unregister_platform(&pdev->dev);
}
return ret;
@@ -610,7 +632,7 @@ static int sst_platform_remove(struct platform_device *pdev)
snd_soc_unregister_component(&pdev->dev);
snd_soc_unregister_platform(&pdev->dev);
- pr_debug("sst_platform_remove success\n");
+ dev_dbg(&pdev->dev, "sst_platform_remove success\n");
return 0;
}
diff --git a/sound/soc/intel/sst-mfld-platform.h b/sound/soc/intel/sst-mfld-platform.h
index 6c6a42c08e24..19f83ec51613 100644
--- a/sound/soc/intel/sst-mfld-platform.h
+++ b/sound/soc/intel/sst-mfld-platform.h
@@ -54,20 +54,6 @@ enum sst_drv_status {
SST_PLATFORM_DROPPED,
};
-enum sst_controls {
- SST_SND_ALLOC = 0x00,
- SST_SND_PAUSE = 0x01,
- SST_SND_RESUME = 0x02,
- SST_SND_DROP = 0x03,
- SST_SND_FREE = 0x04,
- SST_SND_BUFFER_POINTER = 0x05,
- SST_SND_STREAM_INIT = 0x06,
- SST_SND_START = 0x07,
- SST_SET_BYTE_STREAM = 0x100A,
- SST_GET_BYTE_STREAM = 0x100B,
- SST_MAX_CONTROLS = SST_GET_BYTE_STREAM,
-};
-
enum sst_stream_ops {
STREAM_OPS_PLAYBACK = 0,
STREAM_OPS_CAPTURE,
@@ -113,24 +99,37 @@ struct sst_compress_cb {
struct compress_sst_ops {
const char *name;
- int (*open) (struct snd_sst_params *str_params,
- struct sst_compress_cb *cb);
- int (*control) (unsigned int cmd, unsigned int str_id);
- int (*tstamp) (unsigned int str_id, struct snd_compr_tstamp *tstamp);
- int (*ack) (unsigned int str_id, unsigned long bytes);
- int (*close) (unsigned int str_id);
- int (*get_caps) (struct snd_compr_caps *caps);
- int (*get_codec_caps) (struct snd_compr_codec_caps *codec);
- int (*set_metadata) (unsigned int str_id,
+ int (*open)(struct device *dev,
+ struct snd_sst_params *str_params, struct sst_compress_cb *cb);
+ int (*stream_start)(struct device *dev, unsigned int str_id);
+ int (*stream_drop)(struct device *dev, unsigned int str_id);
+ int (*stream_drain)(struct device *dev, unsigned int str_id);
+ int (*stream_partial_drain)(struct device *dev, unsigned int str_id);
+ int (*stream_pause)(struct device *dev, unsigned int str_id);
+ int (*stream_pause_release)(struct device *dev, unsigned int str_id);
+
+ int (*tstamp)(struct device *dev, unsigned int str_id,
+ struct snd_compr_tstamp *tstamp);
+ int (*ack)(struct device *dev, unsigned int str_id,
+ unsigned long bytes);
+ int (*close)(struct device *dev, unsigned int str_id);
+ int (*get_caps)(struct snd_compr_caps *caps);
+ int (*get_codec_caps)(struct snd_compr_codec_caps *codec);
+ int (*set_metadata)(struct device *dev, unsigned int str_id,
struct snd_compr_metadata *mdata);
-
};
struct sst_ops {
- int (*open) (struct snd_sst_params *str_param);
- int (*device_control) (int cmd, void *arg);
- int (*set_generic_params)(enum sst_controls cmd, void *arg);
- int (*close) (unsigned int str_id);
+ int (*open)(struct device *dev, struct snd_sst_params *str_param);
+ int (*stream_init)(struct device *dev, struct pcm_stream_info *str_info);
+ int (*stream_start)(struct device *dev, int str_id);
+ int (*stream_drop)(struct device *dev, int str_id);
+ int (*stream_pause)(struct device *dev, int str_id);
+ int (*stream_pause_release)(struct device *dev, int str_id);
+ int (*stream_read_tstamp)(struct device *dev, struct pcm_stream_info *str_info);
+ int (*send_byte_stream)(struct device *dev, struct snd_sst_bytes_v2 *bytes);
+ int (*close)(struct device *dev, unsigned int str_id);
+ int (*power)(struct device *dev, bool state);
};
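[Editor's note, not part of the patch: a hedged sketch of how a DSP driver now
fills struct sst_ops after this change. Every callback takes the DSP's struct
device first, so the provider recovers its context with dev_get_drvdata()
instead of a file-local global. The context type, callback and helper names
below are hypothetical:

	/* hypothetical provider context; real drivers use their own type */
	struct example_sst_ctx { int dummy; };

	static int example_stream_start(struct device *dev, int str_id)
	{
		struct example_sst_ctx *ctx = dev_get_drvdata(dev);

		dev_dbg(dev, "start stream %d (ctx %p)\n", str_id, ctx);
		return 0;
	}

	static struct sst_ops example_ops = {
		.stream_start = example_stream_start,
		/* remaining callbacks follow the same (dev, ...) pattern */
	};
]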
struct sst_runtime_stream {
@@ -152,6 +151,8 @@ struct sst_device {
};
struct sst_data;
+
+int sst_dsp_init_v2_dpcm(struct snd_soc_platform *platform);
void sst_set_stream_status(struct sst_runtime_stream *stream, int state);
int sst_fill_stream_params(void *substream, const struct sst_data *ctx,
struct snd_sst_params *str_params, bool is_compress);
@@ -166,6 +167,7 @@ struct sst_algo_int_control_v2 {
struct sst_data {
struct platform_device *pdev;
struct sst_platform_data *pdata;
+ struct snd_sst_bytes_v2 *byte_stream;
struct mutex lock;
};
int sst_register_dsp(struct sst_device *sst);
diff --git a/sound/soc/omap/rx51.c b/sound/soc/omap/rx51.c
index 943922c79f78..b10ae8074461 100644
--- a/sound/soc/omap/rx51.c
+++ b/sound/soc/omap/rx51.c
@@ -168,7 +168,7 @@ static int rx51_spk_event(struct snd_soc_dapm_widget *w,
static int rx51_hp_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
- struct snd_soc_codec *codec = w->dapm->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
if (SND_SOC_DAPM_EVENT_ON(event))
tpa6130a2_stereo_enable(codec, 1);
diff --git a/sound/soc/rockchip/Kconfig b/sound/soc/rockchip/Kconfig
index c196a466eef6..78fc159559b0 100644
--- a/sound/soc/rockchip/Kconfig
+++ b/sound/soc/rockchip/Kconfig
@@ -2,11 +2,10 @@ config SND_SOC_ROCKCHIP
tristate "ASoC support for Rockchip"
depends on COMPILE_TEST || ARCH_ROCKCHIP
select SND_SOC_GENERIC_DMAENGINE_PCM
- select SND_ROCKCHIP_I2S
help
Say Y or M if you want to add support for codecs attached to
the Rockchip SoCs' Audio interfaces. You will also need to
select the audio interfaces to support below.
-config SND_ROCKCHIP_I2S
+config SND_SOC_ROCKCHIP_I2S
tristate
diff --git a/sound/soc/rockchip/Makefile b/sound/soc/rockchip/Makefile
index 1006418e1394..b9219092b47f 100644
--- a/sound/soc/rockchip/Makefile
+++ b/sound/soc/rockchip/Makefile
@@ -1,4 +1,4 @@
# ROCKCHIP Platform Support
snd-soc-i2s-objs := rockchip_i2s.o
-obj-$(CONFIG_SND_ROCKCHIP_I2S) += snd-soc-i2s.o
+obj-$(CONFIG_SND_SOC_ROCKCHIP_I2S) += snd-soc-i2s.o
diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
index fb9e05c9f471..f373e37f8305 100644
--- a/sound/soc/rockchip/rockchip_i2s.c
+++ b/sound/soc/rockchip/rockchip_i2s.c
@@ -108,8 +108,10 @@ static void rockchip_snd_txctrl(struct rk_i2s_dev *i2s, int on)
while (val) {
regmap_read(i2s->regmap, I2S_CLR, &val);
retry--;
- if (!retry)
+ if (!retry) {
dev_warn(i2s->dev, "fail to clear\n");
+ break;
+ }
}
}
}
@@ -244,16 +246,6 @@ static int rockchip_i2s_hw_params(struct snd_pcm_substream *substream,
regmap_update_bits(i2s->regmap, I2S_TXCR, I2S_TXCR_VDW_MASK, val);
regmap_update_bits(i2s->regmap, I2S_RXCR, I2S_RXCR_VDW_MASK, val);
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- dai->playback_dma_data = &i2s->playback_dma_data;
- regmap_update_bits(i2s->regmap, I2S_DMACR, I2S_DMACR_TDL_MASK,
- I2S_DMACR_TDL(1) | I2S_DMACR_TDE_ENABLE);
- } else {
- dai->capture_dma_data = &i2s->capture_dma_data;
- regmap_update_bits(i2s->regmap, I2S_DMACR, I2S_DMACR_RDL_MASK,
- I2S_DMACR_RDL(1) | I2S_DMACR_RDE_ENABLE);
- }
-
return 0;
}
@@ -301,6 +293,16 @@ static int rockchip_i2s_set_sysclk(struct snd_soc_dai *cpu_dai, int clk_id,
return ret;
}
+static int rockchip_i2s_dai_probe(struct snd_soc_dai *dai)
+{
+ struct rk_i2s_dev *i2s = snd_soc_dai_get_drvdata(dai);
+
+ dai->capture_dma_data = &i2s->capture_dma_data;
+ dai->playback_dma_data = &i2s->playback_dma_data;
+
+ return 0;
+}
+
static const struct snd_soc_dai_ops rockchip_i2s_dai_ops = {
.hw_params = rockchip_i2s_hw_params,
.set_sysclk = rockchip_i2s_set_sysclk,
@@ -309,7 +311,9 @@ static const struct snd_soc_dai_ops rockchip_i2s_dai_ops = {
};
static struct snd_soc_dai_driver rockchip_i2s_dai = {
+ .probe = rockchip_i2s_dai_probe,
.playback = {
+ .stream_name = "Playback",
.channels_min = 2,
.channels_max = 8,
.rates = SNDRV_PCM_RATE_8000_192000,
@@ -319,6 +323,7 @@ static struct snd_soc_dai_driver rockchip_i2s_dai = {
SNDRV_PCM_FMTBIT_S24_LE),
},
.capture = {
+ .stream_name = "Capture",
.channels_min = 2,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_8000_192000,
@@ -420,6 +425,11 @@ static int rockchip_i2s_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Can't retrieve i2s bus clock\n");
return PTR_ERR(i2s->hclk);
}
+ ret = clk_prepare_enable(i2s->hclk);
+ if (ret) {
+ dev_err(i2s->dev, "hclock enable failed %d\n", ret);
+ return ret;
+ }
i2s->mclk = devm_clk_get(&pdev->dev, "i2s_clk");
if (IS_ERR(i2s->mclk)) {
diff --git a/sound/soc/samsung/idma.c b/sound/soc/samsung/idma.c
index db6cefa18017..0e8dd985fcb3 100644
--- a/sound/soc/samsung/idma.c
+++ b/sound/soc/samsung/idma.c
@@ -351,7 +351,7 @@ static void idma_free(struct snd_pcm *pcm)
if (!buf->area)
return;
- iounmap(buf->area);
+ iounmap((void __iomem *)buf->area);
buf->area = NULL;
buf->addr = 0;
@@ -369,7 +369,7 @@ static int preallocate_idma_buffer(struct snd_pcm *pcm, int stream)
buf->dev.type = SNDRV_DMA_TYPE_CONTINUOUS;
buf->addr = idma.lp_tx_addr;
buf->bytes = idma_hardware.buffer_bytes_max;
- buf->area = (unsigned char *)ioremap(buf->addr, buf->bytes);
+ buf->area = (unsigned char * __force)ioremap(buf->addr, buf->bytes);
return 0;
}
diff --git a/sound/soc/samsung/odroidx2_max98090.c b/sound/soc/samsung/odroidx2_max98090.c
index 278edf9e2a87..3c8f60423e82 100644
--- a/sound/soc/samsung/odroidx2_max98090.c
+++ b/sound/soc/samsung/odroidx2_max98090.c
@@ -66,12 +66,12 @@ static struct snd_soc_card odroidx2 = {
.late_probe = odroidx2_late_probe,
};
-struct odroidx2_drv_data odroidx2_drvdata = {
+static const struct odroidx2_drv_data odroidx2_drvdata = {
.dapm_widgets = odroidx2_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(odroidx2_dapm_widgets),
};
-struct odroidx2_drv_data odroidu3_drvdata = {
+static const struct odroidx2_drv_data odroidu3_drvdata = {
.dapm_widgets = odroidu3_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(odroidu3_dapm_widgets),
};
diff --git a/sound/soc/samsung/speyside.c b/sound/soc/samsung/speyside.c
index 9902efcb8ea1..a05482651aae 100644
--- a/sound/soc/samsung/speyside.c
+++ b/sound/soc/samsung/speyside.c
@@ -228,10 +228,12 @@ static struct snd_soc_dai_link speyside_dai[] = {
},
};
-static int speyside_wm9081_init(struct snd_soc_dapm_context *dapm)
+static int speyside_wm9081_init(struct snd_soc_component *component)
{
+ struct snd_soc_codec *codec = snd_soc_component_to_codec(component);
+
/* At any time the WM9081 is active it will have this clock */
- return snd_soc_codec_set_sysclk(dapm->codec, WM9081_SYSCLK_MCLK, 0,
+ return snd_soc_codec_set_sysclk(codec, WM9081_SYSCLK_MCLK, 0,
MCLK_AUDIO_RATE, 0);
}
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
index c76344350e44..66fddec9543d 100644
--- a/sound/soc/sh/fsi.c
+++ b/sound/soc/sh/fsi.c
@@ -1297,9 +1297,14 @@ static int fsi_dma_transfer(struct fsi_priv *fsi, struct fsi_stream *io)
struct snd_pcm_substream *substream = io->substream;
struct dma_async_tx_descriptor *desc;
int is_play = fsi_stream_is_play(fsi, io);
- enum dma_data_direction dir = is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ enum dma_transfer_direction dir;
int ret = -EIO;
+ if (is_play)
+ dir = DMA_MEM_TO_DEV;
+ else
+ dir = DMA_DEV_TO_MEM;
+
desc = dmaengine_prep_dma_cyclic(io->chan,
substream->runtime->dma_addr,
snd_pcm_lib_buffer_bytes(substream),
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index 19f78963e8b9..1922ec57d10a 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -798,10 +798,8 @@ if (name##_node) { \
mod_parse(src);
mod_parse(dvc);
- if (playback)
- of_node_put(playback);
- if (capture)
- of_node_put(capture);
+ of_node_put(playback);
+ of_node_put(capture);
}
dai_i++;
diff --git a/sound/soc/sh/siu_pcm.c b/sound/soc/sh/siu_pcm.c
index 488f9becb44f..32eb6da2d2bd 100644
--- a/sound/soc/sh/siu_pcm.c
+++ b/sound/soc/sh/siu_pcm.c
@@ -139,7 +139,7 @@ static int siu_pcm_wr_set(struct siu_port *port_info,
desc->callback = siu_dma_tx_complete;
desc->callback_param = siu_stream;
- cookie = desc->tx_submit(desc);
+ cookie = dmaengine_submit(desc);
if (cookie < 0) {
dev_err(dev, "Failed to submit a dma transfer\n");
return cookie;
@@ -189,7 +189,7 @@ static int siu_pcm_rd_set(struct siu_port *port_info,
desc->callback = siu_dma_tx_complete;
desc->callback_param = siu_stream;
- cookie = desc->tx_submit(desc);
+ cookie = dmaengine_submit(desc);
if (cookie < 0) {
dev_err(dev, "Failed to submit dma descriptor\n");
return cookie;
diff --git a/sound/soc/sirf/sirf-usp.c b/sound/soc/sirf/sirf-usp.c
index 3a730374e259..186dc7f33a55 100644
--- a/sound/soc/sirf/sirf-usp.c
+++ b/sound/soc/sirf/sirf-usp.c
@@ -100,6 +100,16 @@ static int sirf_usp_pcm_set_dai_fmt(struct snd_soc_dai *dai,
return -EINVAL;
}
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ usp->daifmt_format |= (fmt & SND_SOC_DAIFMT_INV_MASK);
+ break;
+ default:
+ return -EINVAL;
+ }
+
return 0;
}
@@ -177,7 +187,7 @@ static int sirf_usp_pcm_hw_params(struct snd_pcm_substream *substream,
shifter_len = data_len;
- switch (usp->daifmt_format) {
+ switch (usp->daifmt_format & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
regmap_update_bits(usp->regmap, USP_RX_FRAME_CTRL,
USP_I2S_SYNC_CHG, USP_I2S_SYNC_CHG);
@@ -193,6 +203,18 @@ static int sirf_usp_pcm_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
+ switch (usp->daifmt_format & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ regmap_update_bits(usp->regmap, USP_MODE1,
+ USP_RXD_ACT_EDGE_FALLING | USP_TXD_ACT_EDGE_FALLING,
+ USP_RXD_ACT_EDGE_FALLING);
+ break;
+ default:
+ return -EINVAL;
+ }
+
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
regmap_update_bits(usp->regmap, USP_TX_FRAME_CTRL,
USP_TXC_DATA_LEN_MASK | USP_TXC_FRAME_LEN_MASK
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index d074aa91b023..4c8f8a23a0e9 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -270,79 +270,54 @@ static const struct file_operations codec_reg_fops = {
.llseek = default_llseek,
};
-static struct dentry *soc_debugfs_create_dir(struct dentry *parent,
- const char *fmt, ...)
+static void soc_init_component_debugfs(struct snd_soc_component *component)
{
- struct dentry *de;
- va_list ap;
- char *s;
+ if (component->debugfs_prefix) {
+ char *name;
- va_start(ap, fmt);
- s = kvasprintf(GFP_KERNEL, fmt, ap);
- va_end(ap);
+ name = kasprintf(GFP_KERNEL, "%s:%s",
+ component->debugfs_prefix, component->name);
+ if (name) {
+ component->debugfs_root = debugfs_create_dir(name,
+ component->card->debugfs_card_root);
+ kfree(name);
+ }
+ } else {
+ component->debugfs_root = debugfs_create_dir(component->name,
+ component->card->debugfs_card_root);
+ }
- if (!s)
- return NULL;
+ if (!component->debugfs_root) {
+ dev_warn(component->dev,
+ "ASoC: Failed to create component debugfs directory\n");
+ return;
+ }
- de = debugfs_create_dir(s, parent);
- kfree(s);
+ snd_soc_dapm_debugfs_init(snd_soc_component_get_dapm(component),
+ component->debugfs_root);
- return de;
+ if (component->init_debugfs)
+ component->init_debugfs(component);
}
-static void soc_init_codec_debugfs(struct snd_soc_codec *codec)
+static void soc_cleanup_component_debugfs(struct snd_soc_component *component)
{
- struct dentry *debugfs_card_root = codec->component.card->debugfs_card_root;
+ debugfs_remove_recursive(component->debugfs_root);
+}
- codec->debugfs_codec_root = soc_debugfs_create_dir(debugfs_card_root,
- "codec:%s",
- codec->component.name);
- if (!codec->debugfs_codec_root) {
- dev_warn(codec->dev,
- "ASoC: Failed to create codec debugfs directory\n");
- return;
- }
+static void soc_init_codec_debugfs(struct snd_soc_component *component)
+{
+ struct snd_soc_codec *codec = snd_soc_component_to_codec(component);
- debugfs_create_bool("cache_sync", 0444, codec->debugfs_codec_root,
+ debugfs_create_bool("cache_sync", 0444, codec->component.debugfs_root,
&codec->cache_sync);
- debugfs_create_bool("cache_only", 0444, codec->debugfs_codec_root,
- &codec->cache_only);
codec->debugfs_reg = debugfs_create_file("codec_reg", 0644,
- codec->debugfs_codec_root,
+ codec->component.debugfs_root,
codec, &codec_reg_fops);
if (!codec->debugfs_reg)
dev_warn(codec->dev,
"ASoC: Failed to create codec register debugfs file\n");
-
- snd_soc_dapm_debugfs_init(&codec->dapm, codec->debugfs_codec_root);
-}
-
-static void soc_cleanup_codec_debugfs(struct snd_soc_codec *codec)
-{
- debugfs_remove_recursive(codec->debugfs_codec_root);
-}
-
-static void soc_init_platform_debugfs(struct snd_soc_platform *platform)
-{
- struct dentry *debugfs_card_root = platform->component.card->debugfs_card_root;
-
- platform->debugfs_platform_root = soc_debugfs_create_dir(debugfs_card_root,
- "platform:%s",
- platform->component.name);
- if (!platform->debugfs_platform_root) {
- dev_warn(platform->dev,
- "ASoC: Failed to create platform debugfs directory\n");
- return;
- }
-
- snd_soc_dapm_debugfs_init(&platform->component.dapm,
- platform->debugfs_platform_root);
-}
-
-static void soc_cleanup_platform_debugfs(struct snd_soc_platform *platform)
-{
- debugfs_remove_recursive(platform->debugfs_platform_root);
}
static ssize_t codec_list_read_file(struct file *file, char __user *user_buf,
@@ -474,19 +449,15 @@ static void soc_cleanup_card_debugfs(struct snd_soc_card *card)
#else
-static inline void soc_init_codec_debugfs(struct snd_soc_codec *codec)
-{
-}
+#define soc_init_codec_debugfs NULL
-static inline void soc_cleanup_codec_debugfs(struct snd_soc_codec *codec)
+static inline void soc_init_component_debugfs(
+ struct snd_soc_component *component)
{
}
-static inline void soc_init_platform_debugfs(struct snd_soc_platform *platform)
-{
-}
-
-static inline void soc_cleanup_platform_debugfs(struct snd_soc_platform *platform)
+static inline void soc_cleanup_component_debugfs(
+ struct snd_soc_component *component)
{
}
@@ -579,10 +550,8 @@ int snd_soc_suspend(struct device *dev)
struct snd_soc_codec *codec;
int i, j;
- /* If the initialization of this soc device failed, there is no codec
- * associated with it. Just bail out in this case.
- */
- if (list_empty(&card->codec_dev_list))
+ /* If the card is not initialized yet there is nothing to do */
+ if (!card->instantiated)
return 0;
/* Due to the resume being scheduled into a workqueue we could
@@ -668,7 +637,7 @@ int snd_soc_suspend(struct device *dev)
list_for_each_entry(codec, &card->codec_dev_list, card_list) {
/* If there are paths active then the CODEC will be held with
* bias _ON and should not be suspended. */
- if (!codec->suspended && codec->driver->suspend) {
+ if (!codec->suspended) {
switch (codec->dapm.bias_level) {
case SND_SOC_BIAS_STANDBY:
/*
@@ -682,8 +651,10 @@ int snd_soc_suspend(struct device *dev)
"ASoC: idle_bias_off CODEC on over suspend\n");
break;
}
+
case SND_SOC_BIAS_OFF:
- codec->driver->suspend(codec);
+ if (codec->driver->suspend)
+ codec->driver->suspend(codec);
codec->suspended = 1;
codec->cache_sync = 1;
if (codec->component.regmap)
@@ -757,11 +728,12 @@ static void soc_resume_deferred(struct work_struct *work)
* left with bias OFF or STANDBY and suspended so we must now
* resume. Otherwise the suspend was suppressed.
*/
- if (codec->driver->resume && codec->suspended) {
+ if (codec->suspended) {
switch (codec->dapm.bias_level) {
case SND_SOC_BIAS_STANDBY:
case SND_SOC_BIAS_OFF:
- codec->driver->resume(codec);
+ if (codec->driver->resume)
+ codec->driver->resume(codec);
codec->suspended = 0;
break;
default:
@@ -835,10 +807,8 @@ int snd_soc_resume(struct device *dev)
struct snd_soc_card *card = dev_get_drvdata(dev);
int i, ac97_control = 0;
- /* If the initialization of this soc device failed, there is no codec
- * associated with it. Just bail out in this case.
- */
- if (list_empty(&card->codec_dev_list))
+ /* If the card is not initialized yet there is nothing to do */
+ if (!card->instantiated)
return 0;
/* activate pins from sleep state */
@@ -887,35 +857,40 @@ EXPORT_SYMBOL_GPL(snd_soc_resume);
static const struct snd_soc_dai_ops null_dai_ops = {
};
-static struct snd_soc_codec *soc_find_codec(
- const struct device_node *codec_of_node,
- const char *codec_name)
+static struct snd_soc_component *soc_find_component(
+ const struct device_node *of_node, const char *name)
{
- struct snd_soc_codec *codec;
+ struct snd_soc_component *component;
- list_for_each_entry(codec, &codec_list, list) {
- if (codec_of_node) {
- if (codec->dev->of_node != codec_of_node)
- continue;
- } else {
- if (strcmp(codec->component.name, codec_name))
- continue;
+ list_for_each_entry(component, &component_list, list) {
+ if (of_node) {
+ if (component->dev->of_node == of_node)
+ return component;
+ } else if (strcmp(component->name, name) == 0) {
+ return component;
}
-
- return codec;
}
return NULL;
}
-static struct snd_soc_dai *soc_find_codec_dai(struct snd_soc_codec *codec,
- const char *codec_dai_name)
+static struct snd_soc_dai *snd_soc_find_dai(
+ const struct snd_soc_dai_link_component *dlc)
{
- struct snd_soc_dai *codec_dai;
+ struct snd_soc_component *component;
+ struct snd_soc_dai *dai;
- list_for_each_entry(codec_dai, &codec->component.dai_list, list) {
- if (!strcmp(codec_dai->name, codec_dai_name)) {
- return codec_dai;
+ /* Find CPU DAI from registered DAIs*/
+ list_for_each_entry(component, &component_list, list) {
+ if (dlc->of_node && component->dev->of_node != dlc->of_node)
+ continue;
+ if (dlc->name && strcmp(dev_name(component->dev), dlc->name))
+ continue;
+ list_for_each_entry(dai, &component->dai_list, list) {
+ if (dlc->dai_name && strcmp(dai->name, dlc->dai_name))
+ continue;
+
+ return dai;
}
}
@@ -926,33 +901,19 @@ static int soc_bind_dai_link(struct snd_soc_card *card, int num)
{
struct snd_soc_dai_link *dai_link = &card->dai_link[num];
struct snd_soc_pcm_runtime *rtd = &card->rtd[num];
- struct snd_soc_component *component;
struct snd_soc_dai_link_component *codecs = dai_link->codecs;
+ struct snd_soc_dai_link_component cpu_dai_component;
struct snd_soc_dai **codec_dais = rtd->codec_dais;
struct snd_soc_platform *platform;
- struct snd_soc_dai *cpu_dai;
const char *platform_name;
int i;
dev_dbg(card->dev, "ASoC: binding %s at idx %d\n", dai_link->name, num);
- /* Find CPU DAI from registered DAIs*/
- list_for_each_entry(component, &component_list, list) {
- if (dai_link->cpu_of_node &&
- component->dev->of_node != dai_link->cpu_of_node)
- continue;
- if (dai_link->cpu_name &&
- strcmp(dev_name(component->dev), dai_link->cpu_name))
- continue;
- list_for_each_entry(cpu_dai, &component->dai_list, list) {
- if (dai_link->cpu_dai_name &&
- strcmp(cpu_dai->name, dai_link->cpu_dai_name))
- continue;
-
- rtd->cpu_dai = cpu_dai;
- }
- }
-
+ cpu_dai_component.name = dai_link->cpu_name;
+ cpu_dai_component.of_node = dai_link->cpu_of_node;
+ cpu_dai_component.dai_name = dai_link->cpu_dai_name;
+ rtd->cpu_dai = snd_soc_find_dai(&cpu_dai_component);
if (!rtd->cpu_dai) {
dev_err(card->dev, "ASoC: CPU DAI %s not registered\n",
dai_link->cpu_dai_name);
@@ -963,15 +924,7 @@ static int soc_bind_dai_link(struct snd_soc_card *card, int num)
/* Find CODEC from registered CODECs */
for (i = 0; i < rtd->num_codecs; i++) {
- struct snd_soc_codec *codec;
- codec = soc_find_codec(codecs[i].of_node, codecs[i].name);
- if (!codec) {
- dev_err(card->dev, "ASoC: CODEC %s not registered\n",
- codecs[i].name);
- return -EPROBE_DEFER;
- }
-
- codec_dais[i] = soc_find_codec_dai(codec, codecs[i].dai_name);
+ codec_dais[i] = snd_soc_find_dai(&codecs[i]);
if (!codec_dais[i]) {
dev_err(card->dev, "ASoC: CODEC DAI %s not registered\n",
codecs[i].dai_name);
@@ -1012,68 +965,46 @@ static int soc_bind_dai_link(struct snd_soc_card *card, int num)
return 0;
}
-static int soc_remove_platform(struct snd_soc_platform *platform)
+static void soc_remove_component(struct snd_soc_component *component)
{
- int ret;
-
- if (platform->driver->remove) {
- ret = platform->driver->remove(platform);
- if (ret < 0)
- dev_err(platform->dev, "ASoC: failed to remove %d\n",
- ret);
- }
-
- /* Make sure all DAPM widgets are freed */
- snd_soc_dapm_free(&platform->component.dapm);
-
- soc_cleanup_platform_debugfs(platform);
- platform->probed = 0;
- module_put(platform->dev->driver->owner);
-
- return 0;
-}
+ if (!component->probed)
+ return;
-static void soc_remove_codec(struct snd_soc_codec *codec)
-{
- int err;
+ /* This is a HACK and will be removed soon */
+ if (component->codec)
+ list_del(&component->codec->card_list);
- if (codec->driver->remove) {
- err = codec->driver->remove(codec);
- if (err < 0)
- dev_err(codec->dev, "ASoC: failed to remove %d\n", err);
- }
+ if (component->remove)
+ component->remove(component);
- /* Make sure all DAPM widgets are freed */
- snd_soc_dapm_free(&codec->dapm);
+ snd_soc_dapm_free(snd_soc_component_get_dapm(component));
- soc_cleanup_codec_debugfs(codec);
- codec->probed = 0;
- list_del(&codec->card_list);
- module_put(codec->dev->driver->owner);
+ soc_cleanup_component_debugfs(component);
+ component->probed = 0;
+ module_put(component->dev->driver->owner);
}
-static void soc_remove_codec_dai(struct snd_soc_dai *codec_dai, int order)
+static void soc_remove_dai(struct snd_soc_dai *dai, int order)
{
int err;
- if (codec_dai && codec_dai->probed &&
- codec_dai->driver->remove_order == order) {
- if (codec_dai->driver->remove) {
- err = codec_dai->driver->remove(codec_dai);
+ if (dai && dai->probed &&
+ dai->driver->remove_order == order) {
+ if (dai->driver->remove) {
+ err = dai->driver->remove(dai);
if (err < 0)
- dev_err(codec_dai->dev,
+ dev_err(dai->dev,
"ASoC: failed to remove %s: %d\n",
- codec_dai->name, err);
+ dai->name, err);
}
- codec_dai->probed = 0;
+ dai->probed = 0;
}
}
static void soc_remove_link_dais(struct snd_soc_card *card, int num, int order)
{
struct snd_soc_pcm_runtime *rtd = &card->rtd[num];
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- int i, err;
+ int i;
/* unregister the rtd device */
if (rtd->dev_registered) {
@@ -1085,22 +1016,9 @@ static void soc_remove_link_dais(struct snd_soc_card *card, int num, int order)
/* remove the CODEC DAI */
for (i = 0; i < rtd->num_codecs; i++)
- soc_remove_codec_dai(rtd->codec_dais[i], order);
+ soc_remove_dai(rtd->codec_dais[i], order);
- /* remove the cpu_dai */
- if (cpu_dai && cpu_dai->probed &&
- cpu_dai->driver->remove_order == order) {
- if (cpu_dai->driver->remove) {
- err = cpu_dai->driver->remove(cpu_dai);
- if (err < 0)
- dev_err(cpu_dai->dev,
- "ASoC: failed to remove %s: %d\n",
- cpu_dai->name, err);
- }
- cpu_dai->probed = 0;
- if (!cpu_dai->codec)
- module_put(cpu_dai->dev->driver->owner);
- }
+ soc_remove_dai(rtd->cpu_dai, order);
}
static void soc_remove_link_components(struct snd_soc_card *card, int num,
@@ -1109,29 +1027,24 @@ static void soc_remove_link_components(struct snd_soc_card *card, int num,
struct snd_soc_pcm_runtime *rtd = &card->rtd[num];
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
struct snd_soc_platform *platform = rtd->platform;
- struct snd_soc_codec *codec;
+ struct snd_soc_component *component;
int i;
/* remove the platform */
- if (platform && platform->probed &&
- platform->driver->remove_order == order) {
- soc_remove_platform(platform);
- }
+ if (platform && platform->component.driver->remove_order == order)
+ soc_remove_component(&platform->component);
/* remove the CODEC-side CODEC */
for (i = 0; i < rtd->num_codecs; i++) {
- codec = rtd->codec_dais[i]->codec;
- if (codec && codec->probed &&
- codec->driver->remove_order == order)
- soc_remove_codec(codec);
+ component = rtd->codec_dais[i]->component;
+ if (component->driver->remove_order == order)
+ soc_remove_component(component);
}
/* remove any CPU-side CODEC */
if (cpu_dai) {
- codec = cpu_dai->codec;
- if (codec && codec->probed &&
- codec->driver->remove_order == order)
- soc_remove_codec(codec);
+ if (cpu_dai->component->driver->remove_order == order)
+ soc_remove_component(cpu_dai->component);
}
}
@@ -1173,137 +1086,78 @@ static void soc_set_name_prefix(struct snd_soc_card *card,
}
}
-static int soc_probe_codec(struct snd_soc_card *card,
- struct snd_soc_codec *codec)
+static int soc_probe_component(struct snd_soc_card *card,
+ struct snd_soc_component *component)
{
- int ret = 0;
- const struct snd_soc_codec_driver *driver = codec->driver;
+ struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component);
struct snd_soc_dai *dai;
+ int ret;
- codec->component.card = card;
- codec->dapm.card = card;
- soc_set_name_prefix(card, &codec->component);
+ if (component->probed)
+ return 0;
+
+ component->card = card;
+ dapm->card = card;
+ soc_set_name_prefix(card, component);
- if (!try_module_get(codec->dev->driver->owner))
+ if (!try_module_get(component->dev->driver->owner))
return -ENODEV;
- soc_init_codec_debugfs(codec);
+ soc_init_component_debugfs(component);
- if (driver->dapm_widgets) {
- ret = snd_soc_dapm_new_controls(&codec->dapm,
- driver->dapm_widgets,
- driver->num_dapm_widgets);
+ if (component->dapm_widgets) {
+ ret = snd_soc_dapm_new_controls(dapm, component->dapm_widgets,
+ component->num_dapm_widgets);
if (ret != 0) {
- dev_err(codec->dev,
+ dev_err(component->dev,
"Failed to create new controls %d\n", ret);
goto err_probe;
}
}
- /* Create DAPM widgets for each DAI stream */
- list_for_each_entry(dai, &codec->component.dai_list, list) {
- ret = snd_soc_dapm_new_dai_widgets(&codec->dapm, dai);
-
+ list_for_each_entry(dai, &component->dai_list, list) {
+ ret = snd_soc_dapm_new_dai_widgets(dapm, dai);
if (ret != 0) {
- dev_err(codec->dev,
+ dev_err(component->dev,
"Failed to create DAI widgets %d\n", ret);
goto err_probe;
}
}
- codec->dapm.idle_bias_off = driver->idle_bias_off;
-
- if (driver->probe) {
- ret = driver->probe(codec);
+ if (component->probe) {
+ ret = component->probe(component);
if (ret < 0) {
- dev_err(codec->dev,
- "ASoC: failed to probe CODEC %d\n", ret);
+ dev_err(component->dev,
+ "ASoC: failed to probe component %d\n", ret);
goto err_probe;
}
- WARN(codec->dapm.idle_bias_off &&
- codec->dapm.bias_level != SND_SOC_BIAS_OFF,
+
+ WARN(dapm->idle_bias_off &&
+ dapm->bias_level != SND_SOC_BIAS_OFF,
"codec %s can not start from non-off bias with idle_bias_off==1\n",
- codec->component.name);
+ component->name);
}
- if (driver->controls)
- snd_soc_add_codec_controls(codec, driver->controls,
- driver->num_controls);
- if (driver->dapm_routes)
- snd_soc_dapm_add_routes(&codec->dapm, driver->dapm_routes,
- driver->num_dapm_routes);
-
- /* mark codec as probed and add to card codec list */
- codec->probed = 1;
- list_add(&codec->card_list, &card->codec_dev_list);
- list_add(&codec->dapm.list, &card->dapm_list);
-
- return 0;
-
-err_probe:
- soc_cleanup_codec_debugfs(codec);
- module_put(codec->dev->driver->owner);
-
- return ret;
-}
-
-static int soc_probe_platform(struct snd_soc_card *card,
- struct snd_soc_platform *platform)
-{
- int ret = 0;
- const struct snd_soc_platform_driver *driver = platform->driver;
- struct snd_soc_component *component;
- struct snd_soc_dai *dai;
-
- platform->component.card = card;
- platform->component.dapm.card = card;
+ if (component->controls)
+ snd_soc_add_component_controls(component, component->controls,
+ component->num_controls);
+ if (component->dapm_routes)
+ snd_soc_dapm_add_routes(dapm, component->dapm_routes,
+ component->num_dapm_routes);
- if (!try_module_get(platform->dev->driver->owner))
- return -ENODEV;
-
- soc_init_platform_debugfs(platform);
+ component->probed = 1;
+ list_add(&dapm->list, &card->dapm_list);
- if (driver->dapm_widgets)
- snd_soc_dapm_new_controls(&platform->component.dapm,
- driver->dapm_widgets, driver->num_dapm_widgets);
-
- /* Create DAPM widgets for each DAI stream */
- list_for_each_entry(component, &component_list, list) {
- if (component->dev != platform->dev)
- continue;
- list_for_each_entry(dai, &component->dai_list, list)
- snd_soc_dapm_new_dai_widgets(&platform->component.dapm,
- dai);
- }
-
- platform->component.dapm.idle_bias_off = 1;
-
- if (driver->probe) {
- ret = driver->probe(platform);
- if (ret < 0) {
- dev_err(platform->dev,
- "ASoC: failed to probe platform %d\n", ret);
- goto err_probe;
- }
- }
-
- if (driver->controls)
- snd_soc_add_platform_controls(platform, driver->controls,
- driver->num_controls);
- if (driver->dapm_routes)
- snd_soc_dapm_add_routes(&platform->component.dapm,
- driver->dapm_routes, driver->num_dapm_routes);
-
- /* mark platform as probed and add to card platform list */
- platform->probed = 1;
- list_add(&platform->component.dapm.list, &card->dapm_list);
+ /* This is a HACK and will be removed soon */
+ if (component->codec)
+ list_add(&component->codec->card_list, &card->codec_dev_list);
return 0;
err_probe:
- soc_cleanup_platform_debugfs(platform);
- module_put(platform->dev->driver->owner);
+ soc_cleanup_component_debugfs(component);
+ module_put(component->dev->driver->owner);
return ret;
}
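
With codec, platform and CPU-side probing funneled through soc_probe_component(), controls, DAPM widgets and routes declared in the generic driver struct are registered by the core in one place. A minimal sketch of a driver relying on that (hypothetical names, not part of this patch):

	static const struct snd_soc_codec_driver foo_codec_driver = {
		.probe            = foo_codec_probe,   /* optional, forwarded via component->probe */
		.controls         = foo_snd_controls,
		.num_controls     = ARRAY_SIZE(foo_snd_controls),
		.dapm_widgets     = foo_dapm_widgets,
		.num_dapm_widgets = ARRAY_SIZE(foo_dapm_widgets),
		.dapm_routes      = foo_dapm_routes,
		.num_dapm_routes  = ARRAY_SIZE(foo_dapm_routes),
		.idle_bias_off    = true,
	};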
@@ -1342,17 +1196,21 @@ static int soc_post_component_init(struct snd_soc_pcm_runtime *rtd,
}
rtd->dev_registered = 1;
- /* add DAPM sysfs entries for this codec */
- ret = snd_soc_dapm_sys_add(rtd->dev);
- if (ret < 0)
- dev_err(rtd->dev,
- "ASoC: failed to add codec dapm sysfs entries: %d\n", ret);
+ if (rtd->codec) {
+ /* add DAPM sysfs entries for this codec */
+ ret = snd_soc_dapm_sys_add(rtd->dev);
+ if (ret < 0)
+ dev_err(rtd->dev,
+ "ASoC: failed to add codec dapm sysfs entries: %d\n",
+ ret);
- /* add codec sysfs entries */
- ret = device_create_file(rtd->dev, &dev_attr_codec_reg);
- if (ret < 0)
- dev_err(rtd->dev,
- "ASoC: failed to add codec sysfs files: %d\n", ret);
+ /* add codec sysfs entries */
+ ret = device_create_file(rtd->dev, &dev_attr_codec_reg);
+ if (ret < 0)
+ dev_err(rtd->dev,
+ "ASoC: failed to add codec sysfs files: %d\n",
+ ret);
+ }
return 0;
}
@@ -1361,33 +1219,31 @@ static int soc_probe_link_components(struct snd_soc_card *card, int num,
int order)
{
struct snd_soc_pcm_runtime *rtd = &card->rtd[num];
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
struct snd_soc_platform *platform = rtd->platform;
+ struct snd_soc_component *component;
int i, ret;
/* probe the CPU-side component, if it is a CODEC */
- if (cpu_dai->codec &&
- !cpu_dai->codec->probed &&
- cpu_dai->codec->driver->probe_order == order) {
- ret = soc_probe_codec(card, cpu_dai->codec);
+ component = rtd->cpu_dai->component;
+ if (component->driver->probe_order == order) {
+ ret = soc_probe_component(card, component);
if (ret < 0)
return ret;
}
/* probe the CODEC-side components */
for (i = 0; i < rtd->num_codecs; i++) {
- if (!rtd->codec_dais[i]->codec->probed &&
- rtd->codec_dais[i]->codec->driver->probe_order == order) {
- ret = soc_probe_codec(card, rtd->codec_dais[i]->codec);
+ component = rtd->codec_dais[i]->component;
+ if (component->driver->probe_order == order) {
+ ret = soc_probe_component(card, component);
if (ret < 0)
return ret;
}
}
/* probe the platform */
- if (!platform->probed &&
- platform->driver->probe_order == order) {
- ret = soc_probe_platform(card, platform);
+ if (platform->component.driver->probe_order == order) {
+ ret = soc_probe_component(card, &platform->component);
if (ret < 0)
return ret;
}
@@ -1482,18 +1338,12 @@ static int soc_probe_link_dais(struct snd_soc_card *card, int num, int order)
/* probe the cpu_dai */
if (!cpu_dai->probed &&
cpu_dai->driver->probe_order == order) {
- if (!cpu_dai->codec) {
- if (!try_module_get(cpu_dai->dev->driver->owner))
- return -ENODEV;
- }
-
if (cpu_dai->driver->probe) {
ret = cpu_dai->driver->probe(cpu_dai);
if (ret < 0) {
dev_err(cpu_dai->dev,
"ASoC: failed to probe CPU DAI %s: %d\n",
cpu_dai->name, ret);
- module_put(cpu_dai->dev->driver->owner);
return ret;
}
}
@@ -1654,17 +1504,24 @@ static int soc_bind_aux_dev(struct snd_soc_card *card, int num)
{
struct snd_soc_pcm_runtime *rtd = &card->rtd_aux[num];
struct snd_soc_aux_dev *aux_dev = &card->aux_dev[num];
- const char *codecname = aux_dev->codec_name;
+ const char *name = aux_dev->codec_name;
- rtd->codec = soc_find_codec(aux_dev->codec_of_node, codecname);
- if (!rtd->codec) {
+ rtd->component = soc_find_component(aux_dev->codec_of_node, name);
+ if (!rtd->component) {
if (aux_dev->codec_of_node)
- codecname = of_node_full_name(aux_dev->codec_of_node);
+ name = of_node_full_name(aux_dev->codec_of_node);
- dev_err(card->dev, "ASoC: %s not registered\n", codecname);
+ dev_err(card->dev, "ASoC: %s not registered\n", name);
return -EPROBE_DEFER;
}
+ /*
+ * Some places still reference rtd->codec, so we have to keep that
+ * initialized if the component is a CODEC. Once all those references
+ * have been removed, this code can be removed as well.
+ */
+ rtd->codec = rtd->component->codec;
+
return 0;
}
@@ -1674,18 +1531,13 @@ static int soc_probe_aux_dev(struct snd_soc_card *card, int num)
struct snd_soc_aux_dev *aux_dev = &card->aux_dev[num];
int ret;
- if (rtd->codec->probed) {
- dev_err(rtd->codec->dev, "ASoC: codec already probed\n");
- return -EBUSY;
- }
-
- ret = soc_probe_codec(card, rtd->codec);
+ ret = soc_probe_component(card, rtd->component);
if (ret < 0)
return ret;
/* do machine specific initialization */
if (aux_dev->init) {
- ret = aux_dev->init(&rtd->codec->dapm);
+ ret = aux_dev->init(rtd->component);
if (ret < 0) {
dev_err(card->dev, "ASoC: failed to init %s: %d\n",
aux_dev->name, ret);
@@ -1699,7 +1551,7 @@ static int soc_probe_aux_dev(struct snd_soc_card *card, int num)
static void soc_remove_aux_dev(struct snd_soc_card *card, int num)
{
struct snd_soc_pcm_runtime *rtd = &card->rtd_aux[num];
- struct snd_soc_codec *codec = rtd->codec;
+ struct snd_soc_component *component = rtd->component;
/* unregister the rtd device */
if (rtd->dev_registered) {
@@ -1708,8 +1560,8 @@ static void soc_remove_aux_dev(struct snd_soc_card *card, int num)
rtd->dev_registered = 0;
}
- if (codec && codec->probed)
- soc_remove_codec(codec);
+ if (component && component->probed)
+ soc_remove_component(component);
}
static int snd_soc_init_codec_cache(struct snd_soc_codec *codec)
@@ -2107,19 +1959,14 @@ static struct platform_driver soc_driver = {
int snd_soc_new_ac97_codec(struct snd_soc_codec *codec,
struct snd_ac97_bus_ops *ops, int num)
{
- mutex_lock(&codec->mutex);
-
codec->ac97 = kzalloc(sizeof(struct snd_ac97), GFP_KERNEL);
- if (codec->ac97 == NULL) {
- mutex_unlock(&codec->mutex);
+ if (codec->ac97 == NULL)
return -ENOMEM;
- }
codec->ac97->bus = kzalloc(sizeof(struct snd_ac97_bus), GFP_KERNEL);
if (codec->ac97->bus == NULL) {
kfree(codec->ac97);
codec->ac97 = NULL;
- mutex_unlock(&codec->mutex);
return -ENOMEM;
}
@@ -2132,7 +1979,6 @@ int snd_soc_new_ac97_codec(struct snd_soc_codec *codec,
*/
codec->ac97_created = 1;
- mutex_unlock(&codec->mutex);
return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_new_ac97_codec);
@@ -2302,7 +2148,6 @@ EXPORT_SYMBOL_GPL(snd_soc_set_ac97_ops_of_reset);
*/
void snd_soc_free_ac97_codec(struct snd_soc_codec *codec)
{
- mutex_lock(&codec->mutex);
#ifdef CONFIG_SND_SOC_AC97_BUS
soc_unregister_ac97_codec(codec);
#endif
@@ -2310,7 +2155,6 @@ void snd_soc_free_ac97_codec(struct snd_soc_codec *codec)
kfree(codec->ac97);
codec->ac97 = NULL;
codec->ac97_created = 0;
- mutex_unlock(&codec->mutex);
}
EXPORT_SYMBOL_GPL(snd_soc_free_ac97_codec);
@@ -3027,9 +2871,10 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
unsigned int val, val_mask;
int ret;
- val = ((ucontrol->value.integer.value[0] + min) & mask);
if (invert)
- val = max - val;
+ val = (max - ucontrol->value.integer.value[0]) & mask;
+ else
+ val = ((ucontrol->value.integer.value[0] + min) & mask);
val_mask = mask << shift;
val = val << shift;
@@ -3038,9 +2883,10 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
return ret;
if (snd_soc_volsw_is_stereo(mc)) {
- val = ((ucontrol->value.integer.value[1] + min) & mask);
if (invert)
- val = max - val;
+ val = (max - ucontrol->value.integer.value[1]) & mask;
+ else
+ val = ((ucontrol->value.integer.value[1] + min) & mask);
val_mask = mask << shift;
val = val << shift;
@@ -3085,8 +2931,9 @@ int snd_soc_get_volsw_range(struct snd_kcontrol *kcontrol,
if (invert)
ucontrol->value.integer.value[0] =
max - ucontrol->value.integer.value[0];
- ucontrol->value.integer.value[0] =
- ucontrol->value.integer.value[0] - min;
+ else
+ ucontrol->value.integer.value[0] =
+ ucontrol->value.integer.value[0] - min;
if (snd_soc_volsw_is_stereo(mc)) {
ret = snd_soc_component_read(component, rreg, &val);
@@ -3097,8 +2944,9 @@ int snd_soc_get_volsw_range(struct snd_kcontrol *kcontrol,
if (invert)
ucontrol->value.integer.value[1] =
max - ucontrol->value.integer.value[1];
- ucontrol->value.integer.value[1] =
- ucontrol->value.integer.value[1] - min;
+ else
+ ucontrol->value.integer.value[1] =
+ ucontrol->value.integer.value[1] - min;
}
return 0;
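
The rewritten put side maps an inverted _RANGE control's userspace value (0 .. max-min) onto the register range [min, max] instead of [0, max-min], and the get side now subtracts min only in the non-inverted case, keeping the two symmetric. A standalone illustration with made-up numbers (not taken from any real control):

	#include <stdio.h>

	int main(void)
	{
		/* made-up control: legal register range is [min, max] */
		const int min = 4, max = 12, mask = 0xff;
		int user;

		/* userspace works with values 0 .. (max - min) */
		for (user = 0; user <= max - min; user++) {
			int new_reg = (max - user) & mask;            /* stays within [4, 12] */
			int old_reg = max - ((user + min) & mask);    /* drifts down to 0 */
			printf("user=%d new=%d old=%d\n", user, new_reg, old_reg);
		}
		return 0;
	}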
@@ -3928,8 +3776,11 @@ EXPORT_SYMBOL_GPL(snd_soc_register_card);
*/
int snd_soc_unregister_card(struct snd_soc_card *card)
{
- if (card->instantiated)
+ if (card->instantiated) {
+ card->instantiated = false;
+ snd_soc_dapm_shutdown(card);
soc_cleanup_card_resources(card);
+ }
dev_dbg(card->dev, "ASoC: Unregistered card '%s'\n", card->name);
return 0;
@@ -4116,6 +3967,8 @@ static int snd_soc_component_initialize(struct snd_soc_component *component,
component->dev = dev;
component->driver = driver;
+ component->probe = component->driver->probe;
+ component->remove = component->driver->remove;
if (!component->dapm_ptr)
component->dapm_ptr = &component->dapm;
@@ -4124,19 +3977,42 @@ static int snd_soc_component_initialize(struct snd_soc_component *component,
dapm->dev = dev;
dapm->component = component;
dapm->bias_level = SND_SOC_BIAS_OFF;
+ dapm->idle_bias_off = true;
if (driver->seq_notifier)
dapm->seq_notifier = snd_soc_component_seq_notifier;
if (driver->stream_event)
dapm->stream_event = snd_soc_component_stream_event;
+ component->controls = driver->controls;
+ component->num_controls = driver->num_controls;
+ component->dapm_widgets = driver->dapm_widgets;
+ component->num_dapm_widgets = driver->num_dapm_widgets;
+ component->dapm_routes = driver->dapm_routes;
+ component->num_dapm_routes = driver->num_dapm_routes;
+
INIT_LIST_HEAD(&component->dai_list);
mutex_init(&component->io_mutex);
return 0;
}
+static void snd_soc_component_init_regmap(struct snd_soc_component *component)
+{
+ if (!component->regmap)
+ component->regmap = dev_get_regmap(component->dev, NULL);
+ if (component->regmap) {
+ int val_bytes = regmap_get_val_bytes(component->regmap);
+ /* Errors are legitimate for non-integer byte multiples */
+ if (val_bytes > 0)
+ component->val_bytes = val_bytes;
+ }
+}
+
static void snd_soc_component_add_unlocked(struct snd_soc_component *component)
{
+ if (!component->write && !component->read)
+ snd_soc_component_init_regmap(component);
+
list_add(&component->list, &component_list);
}
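
Regmap discovery now happens when the component is added: if a component registers neither a read nor a write callback, the core falls back to dev_get_regmap(), so most drivers only have to create the regmap against their struct device. A sketch of the usual driver side (hypothetical I2C codec, not from this patch):

	static int foo_i2c_probe(struct i2c_client *i2c,
				 const struct i2c_device_id *id)
	{
		struct regmap *regmap;

		/* the ASoC core will find this regmap via dev_get_regmap()
		 * when the component is added */
		regmap = devm_regmap_init_i2c(i2c, &foo_regmap_config);
		if (IS_ERR(regmap))
			return PTR_ERR(regmap);

		return snd_soc_register_codec(&i2c->dev, &foo_codec_driver,
					      &foo_dai, 1);
	}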
@@ -4225,22 +4101,18 @@ found:
}
EXPORT_SYMBOL_GPL(snd_soc_unregister_component);
-static int snd_soc_platform_drv_write(struct snd_soc_component *component,
- unsigned int reg, unsigned int val)
+static int snd_soc_platform_drv_probe(struct snd_soc_component *component)
{
struct snd_soc_platform *platform = snd_soc_component_to_platform(component);
- return platform->driver->write(platform, reg, val);
+ return platform->driver->probe(platform);
}
-static int snd_soc_platform_drv_read(struct snd_soc_component *component,
- unsigned int reg, unsigned int *val)
+static void snd_soc_platform_drv_remove(struct snd_soc_component *component)
{
struct snd_soc_platform *platform = snd_soc_component_to_platform(component);
- *val = platform->driver->read(platform, reg);
-
- return 0;
+ platform->driver->remove(platform);
}
/**
@@ -4261,10 +4133,15 @@ int snd_soc_add_platform(struct device *dev, struct snd_soc_platform *platform,
platform->dev = dev;
platform->driver = platform_drv;
- if (platform_drv->write)
- platform->component.write = snd_soc_platform_drv_write;
- if (platform_drv->read)
- platform->component.read = snd_soc_platform_drv_read;
+
+ if (platform_drv->probe)
+ platform->component.probe = snd_soc_platform_drv_probe;
+ if (platform_drv->remove)
+ platform->component.remove = snd_soc_platform_drv_remove;
+
+#ifdef CONFIG_DEBUG_FS
+ platform->component.debugfs_prefix = "platform";
+#endif
mutex_lock(&client_mutex);
snd_soc_component_add_unlocked(&platform->component);
@@ -4315,10 +4192,10 @@ void snd_soc_remove_platform(struct snd_soc_platform *platform)
snd_soc_component_del_unlocked(&platform->component);
mutex_unlock(&client_mutex);
- snd_soc_component_cleanup(&platform->component);
-
dev_dbg(platform->dev, "ASoC: Unregistered platform '%s'\n",
platform->component.name);
+
+ snd_soc_component_cleanup(&platform->component);
}
EXPORT_SYMBOL_GPL(snd_soc_remove_platform);
@@ -4386,6 +4263,20 @@ static void fixup_codec_formats(struct snd_soc_pcm_stream *stream)
stream->formats |= codec_format_map[i];
}
+static int snd_soc_codec_drv_probe(struct snd_soc_component *component)
+{
+ struct snd_soc_codec *codec = snd_soc_component_to_codec(component);
+
+ return codec->driver->probe(codec);
+}
+
+static void snd_soc_codec_drv_remove(struct snd_soc_component *component)
+{
+ struct snd_soc_codec *codec = snd_soc_component_to_codec(component);
+
+ codec->driver->remove(codec);
+}
+
static int snd_soc_codec_drv_write(struct snd_soc_component *component,
unsigned int reg, unsigned int val)
{
@@ -4424,7 +4315,6 @@ int snd_soc_register_codec(struct device *dev,
{
struct snd_soc_codec *codec;
struct snd_soc_dai *dai;
- struct regmap *regmap;
int ret, i;
dev_dbg(dev, "codec register %s\n", dev_name(dev));
@@ -4434,18 +4324,37 @@ int snd_soc_register_codec(struct device *dev,
return -ENOMEM;
codec->component.dapm_ptr = &codec->dapm;
+ codec->component.codec = codec;
ret = snd_soc_component_initialize(&codec->component,
&codec_drv->component_driver, dev);
if (ret)
goto err_free;
+ if (codec_drv->controls) {
+ codec->component.controls = codec_drv->controls;
+ codec->component.num_controls = codec_drv->num_controls;
+ }
+ if (codec_drv->dapm_widgets) {
+ codec->component.dapm_widgets = codec_drv->dapm_widgets;
+ codec->component.num_dapm_widgets = codec_drv->num_dapm_widgets;
+ }
+ if (codec_drv->dapm_routes) {
+ codec->component.dapm_routes = codec_drv->dapm_routes;
+ codec->component.num_dapm_routes = codec_drv->num_dapm_routes;
+ }
+
+ if (codec_drv->probe)
+ codec->component.probe = snd_soc_codec_drv_probe;
+ if (codec_drv->remove)
+ codec->component.remove = snd_soc_codec_drv_remove;
if (codec_drv->write)
codec->component.write = snd_soc_codec_drv_write;
if (codec_drv->read)
codec->component.read = snd_soc_codec_drv_read;
codec->component.ignore_pmdown_time = codec_drv->ignore_pmdown_time;
- codec->dapm.codec = codec;
+ codec->dapm.idle_bias_off = codec_drv->idle_bias_off;
+ codec->dapm.suspend_bias_off = codec_drv->suspend_bias_off;
if (codec_drv->seq_notifier)
codec->dapm.seq_notifier = codec_drv->seq_notifier;
if (codec_drv->set_bias_level)
@@ -4455,23 +4364,13 @@ int snd_soc_register_codec(struct device *dev,
codec->component.val_bytes = codec_drv->reg_word_size;
mutex_init(&codec->mutex);
- if (!codec->component.write) {
- if (codec_drv->get_regmap)
- regmap = codec_drv->get_regmap(dev);
- else
- regmap = dev_get_regmap(dev, NULL);
-
- if (regmap) {
- ret = snd_soc_component_init_io(&codec->component,
- regmap);
- if (ret) {
- dev_err(codec->dev,
- "Failed to set cache I/O:%d\n",
- ret);
- goto err_cleanup;
- }
- }
- }
+#ifdef CONFIG_DEBUG_FS
+ codec->component.init_debugfs = soc_init_codec_debugfs;
+ codec->component.debugfs_prefix = "codec";
+#endif
+
+ if (codec_drv->get_regmap)
+ codec->component.regmap = codec_drv->get_regmap(dev);
for (i = 0; i < num_dai; i++) {
fixup_codec_formats(&dai_drv[i].playback);
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 177bd8639ef9..c61cb9cedbcd 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -326,12 +326,13 @@ static struct list_head *dapm_kcontrol_get_path_list(
list_for_each_entry(path, dapm_kcontrol_get_path_list(kcontrol), \
list_kcontrol)
-static unsigned int dapm_kcontrol_get_value(const struct snd_kcontrol *kcontrol)
+unsigned int dapm_kcontrol_get_value(const struct snd_kcontrol *kcontrol)
{
struct dapm_kcontrol_data *data = snd_kcontrol_chip(kcontrol);
return data->value;
}
+EXPORT_SYMBOL_GPL(dapm_kcontrol_get_value);
static bool dapm_kcontrol_set_value(const struct snd_kcontrol *kcontrol,
unsigned int value)
@@ -591,9 +592,9 @@ static int dapm_create_or_share_mixmux_kcontrol(struct snd_soc_dapm_widget *w,
int shared;
struct snd_kcontrol *kcontrol;
bool wname_in_long_name, kcname_in_long_name;
- char *long_name;
+ char *long_name = NULL;
const char *name;
- int ret;
+ int ret = 0;
prefix = soc_dapm_prefix(dapm);
if (prefix)
@@ -652,15 +653,17 @@ static int dapm_create_or_share_mixmux_kcontrol(struct snd_soc_dapm_widget *w,
kcontrol = snd_soc_cnew(&w->kcontrol_news[kci], NULL, name,
prefix);
- kfree(long_name);
- if (!kcontrol)
- return -ENOMEM;
+ if (!kcontrol) {
+ ret = -ENOMEM;
+ goto exit_free;
+ }
+
kcontrol->private_free = dapm_kcontrol_free;
ret = dapm_kcontrol_data_alloc(w, kcontrol);
if (ret) {
snd_ctl_free_one(kcontrol);
- return ret;
+ goto exit_free;
}
ret = snd_ctl_add(card, kcontrol);
@@ -668,17 +671,18 @@ static int dapm_create_or_share_mixmux_kcontrol(struct snd_soc_dapm_widget *w,
dev_err(dapm->dev,
"ASoC: failed to add widget %s dapm kcontrol %s: %d\n",
w->name, name, ret);
- return ret;
+ goto exit_free;
}
}
ret = dapm_kcontrol_add_widget(kcontrol, w);
- if (ret)
- return ret;
+ if (ret == 0)
+ w->kcontrols[kci] = kcontrol;
- w->kcontrols[kci] = kcontrol;
+exit_free:
+ kfree(long_name);
- return 0;
+ return ret;
}
/* create new dapm mixer control */
@@ -1683,6 +1687,22 @@ static void dapm_power_one_widget(struct snd_soc_dapm_widget *w,
}
}
+static bool dapm_idle_bias_off(struct snd_soc_dapm_context *dapm)
+{
+ if (dapm->idle_bias_off)
+ return true;
+
+ switch (snd_power_get_state(dapm->card->snd_card)) {
+ case SNDRV_CTL_POWER_D3hot:
+ case SNDRV_CTL_POWER_D3cold:
+ return dapm->suspend_bias_off;
+ default:
+ break;
+ }
+
+ return false;
+}
+
/*
* Scan each dapm widget for complete audio path.
* A complete path is a route that has valid endpoints i.e.:-
@@ -1706,7 +1726,7 @@ static int dapm_power_widgets(struct snd_soc_card *card, int event)
trace_snd_soc_dapm_start(card);
list_for_each_entry(d, &card->dapm_list, list) {
- if (d->idle_bias_off)
+ if (dapm_idle_bias_off(d))
d->target_bias_level = SND_SOC_BIAS_OFF;
else
d->target_bias_level = SND_SOC_BIAS_STANDBY;
@@ -1772,7 +1792,7 @@ static int dapm_power_widgets(struct snd_soc_card *card, int event)
if (d->target_bias_level > bias)
bias = d->target_bias_level;
list_for_each_entry(d, &card->dapm_list, list)
- if (!d->idle_bias_off)
+ if (!dapm_idle_bias_off(d))
d->target_bias_level = bias;
trace_snd_soc_dapm_walk_done(card);
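
dapm_idle_bias_off() lets a context that normally idles at STANDBY fall back to BIAS_OFF while the card is suspended (D3hot/D3cold), keyed off the new suspend_bias_off flag that snd_soc_register_codec() copies from the codec driver elsewhere in this patch. A sketch of a codec opting in (hypothetical driver, not from this patch):

	/* Sketch: a codec that stays at STANDBY while the card is active,
	 * but can be powered fully off across system suspend. */
	static const struct snd_soc_codec_driver bar_codec_driver = {
		.set_bias_level   = bar_set_bias_level,
		.idle_bias_off    = false,
		.suspend_bias_off = true,   /* consulted by dapm_idle_bias_off() */
	};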
@@ -3109,7 +3129,8 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
}
w->dapm = dapm;
- w->codec = dapm->codec;
+ if (dapm->component)
+ w->codec = dapm->component->codec;
INIT_LIST_HEAD(&w->sources);
INIT_LIST_HEAD(&w->sinks);
INIT_LIST_HEAD(&w->list);
diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
index 6307f85e871b..b329b84bc5af 100644
--- a/sound/soc/soc-generic-dmaengine-pcm.c
+++ b/sound/soc/soc-generic-dmaengine-pcm.c
@@ -336,10 +336,12 @@ static const struct snd_pcm_ops dmaengine_pcm_ops = {
};
static const struct snd_soc_platform_driver dmaengine_pcm_platform = {
+ .component_driver = {
+ .probe_order = SND_SOC_COMP_ORDER_LATE,
+ },
.ops = &dmaengine_pcm_ops,
.pcm_new = dmaengine_pcm_new,
.pcm_free = dmaengine_pcm_free,
- .probe_order = SND_SOC_COMP_ORDER_LATE,
};
static const char * const dmaengine_pcm_dma_channel_names[] = {
diff --git a/sound/soc/soc-io.c b/sound/soc/soc-io.c
index 7767fbd73eb7..9b3939049cef 100644
--- a/sound/soc/soc-io.c
+++ b/sound/soc/soc-io.c
@@ -271,31 +271,3 @@ int snd_soc_platform_write(struct snd_soc_platform *platform,
return snd_soc_component_write(&platform->component, reg, val);
}
EXPORT_SYMBOL_GPL(snd_soc_platform_write);
-
-/**
- * snd_soc_component_init_io() - Initialize regmap IO
- *
- * @component: component to initialize
- * @regmap: regmap instance to use for IO operations
- *
- * Return: 0 on success, a negative error code otherwise
- */
-int snd_soc_component_init_io(struct snd_soc_component *component,
- struct regmap *regmap)
-{
- int ret;
-
- if (!regmap)
- return -EINVAL;
-
- ret = regmap_get_val_bytes(regmap);
- /* Errors are legitimate for non-integer byte
- * multiples */
- if (ret > 0)
- component->val_bytes = ret;
-
- component->regmap = regmap;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_soc_component_init_io);
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 642c86240752..002311afdeaa 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -352,7 +352,7 @@ static void soc_pcm_apply_msb(struct snd_pcm_substream *substream)
} else {
for (i = 0; i < rtd->num_codecs; i++) {
codec_dai = rtd->codec_dais[i];
- if (codec_dai->driver->playback.sig_bits == 0) {
+ if (codec_dai->driver->capture.sig_bits == 0) {
bits = 0;
break;
}
diff --git a/sound/soc/tegra/tegra_max98090.c b/sound/soc/tegra/tegra_max98090.c
index b86cd9936ef1..01921d7e73fa 100644
--- a/sound/soc/tegra/tegra_max98090.c
+++ b/sound/soc/tegra/tegra_max98090.c
@@ -42,6 +42,7 @@
struct tegra_max98090 {
struct tegra_asoc_utils_data util_data;
int gpio_hp_det;
+ int gpio_mic_det;
};
static int tegra_max98090_asoc_hw_params(struct snd_pcm_substream *substream,
@@ -112,6 +113,22 @@ static struct snd_soc_jack_gpio tegra_max98090_hp_jack_gpio = {
.invert = 1,
};
+static struct snd_soc_jack tegra_max98090_mic_jack;
+
+static struct snd_soc_jack_pin tegra_max98090_mic_jack_pins[] = {
+ {
+ .pin = "Mic Jack",
+ .mask = SND_JACK_MICROPHONE,
+ },
+};
+
+static struct snd_soc_jack_gpio tegra_max98090_mic_jack_gpio = {
+ .name = "Mic detection",
+ .report = SND_JACK_MICROPHONE,
+ .debounce_time = 150,
+ .invert = 1,
+};
+
static const struct snd_soc_dapm_widget tegra_max98090_dapm_widgets[] = {
SND_SOC_DAPM_HP("Headphones", NULL),
SND_SOC_DAPM_SPK("Speakers", NULL),
@@ -141,6 +158,19 @@ static int tegra_max98090_asoc_init(struct snd_soc_pcm_runtime *rtd)
&tegra_max98090_hp_jack_gpio);
}
+ if (gpio_is_valid(machine->gpio_mic_det)) {
+ snd_soc_jack_new(codec, "Mic Jack", SND_JACK_MICROPHONE,
+ &tegra_max98090_mic_jack);
+ snd_soc_jack_add_pins(&tegra_max98090_mic_jack,
+ ARRAY_SIZE(tegra_max98090_mic_jack_pins),
+ tegra_max98090_mic_jack_pins);
+
+ tegra_max98090_mic_jack_gpio.gpio = machine->gpio_mic_det;
+ snd_soc_jack_add_gpios(&tegra_max98090_mic_jack,
+ 1,
+ &tegra_max98090_mic_jack_gpio);
+ }
+
return 0;
}
@@ -153,6 +183,11 @@ static int tegra_max98090_card_remove(struct snd_soc_card *card)
&tegra_max98090_hp_jack_gpio);
}
+ if (gpio_is_valid(machine->gpio_mic_det)) {
+ snd_soc_jack_free_gpios(&tegra_max98090_mic_jack, 1,
+ &tegra_max98090_mic_jack_gpio);
+ }
+
return 0;
}
@@ -201,6 +236,11 @@ static int tegra_max98090_probe(struct platform_device *pdev)
if (machine->gpio_hp_det == -EPROBE_DEFER)
return -EPROBE_DEFER;
+ machine->gpio_mic_det =
+ of_get_named_gpio(np, "nvidia,mic-det-gpios", 0);
+ if (machine->gpio_mic_det == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
ret = snd_soc_of_parse_card_name(card, "nvidia,model");
if (ret)
goto err;
diff --git a/sound/soc/txx9/txx9aclc.c b/sound/soc/txx9/txx9aclc.c
index f0829de28708..cd71fd889d8b 100644
--- a/sound/soc/txx9/txx9aclc.c
+++ b/sound/soc/txx9/txx9aclc.c
@@ -16,6 +16,7 @@
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
+#include <linux/dmaengine.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -137,7 +138,7 @@ txx9aclc_dma_submit(struct txx9aclc_dmadata *dmadata, dma_addr_t buf_dma_addr)
}
desc->callback = txx9aclc_dma_complete;
desc->callback_param = dmadata;
- desc->tx_submit(desc);
+ dmaengine_submit(desc);
return desc;
}
@@ -160,7 +161,7 @@ static void txx9aclc_dma_tasklet(unsigned long data)
void __iomem *base = drvdata->base;
spin_unlock_irqrestore(&dmadata->dma_lock, flags);
- chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+ dmaengine_terminate_all(chan);
/* first time */
for (i = 0; i < NR_DMA_CHAIN; i++) {
desc = txx9aclc_dma_submit(dmadata,
@@ -169,7 +170,7 @@ static void txx9aclc_dma_tasklet(unsigned long data)
return;
}
dmadata->dmacount = NR_DMA_CHAIN;
- chan->device->device_issue_pending(chan);
+ dma_async_issue_pending(chan);
spin_lock_irqsave(&dmadata->dma_lock, flags);
__raw_writel(ctlbit, base + ACCTLEN);
dmadata->frag_count = NR_DMA_CHAIN % dmadata->frags;
@@ -188,7 +189,7 @@ static void txx9aclc_dma_tasklet(unsigned long data)
dmadata->frag_count * dmadata->frag_bytes);
if (!desc)
return;
- chan->device->device_issue_pending(chan);
+ dma_async_issue_pending(chan);
spin_lock_irqsave(&dmadata->dma_lock, flags);
dmadata->frag_count++;
@@ -266,7 +267,7 @@ static int txx9aclc_pcm_close(struct snd_pcm_substream *substream)
struct dma_chan *chan = dmadata->dma_chan;
dmadata->frag_count = -1;
- chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+ dmaengine_terminate_all(chan);
return 0;
}
@@ -398,8 +399,7 @@ static int txx9aclc_pcm_remove(struct snd_soc_platform *platform)
struct dma_chan *chan = dmadata->dma_chan;
if (chan) {
dmadata->frag_count = -1;
- chan->device->device_control(chan,
- DMA_TERMINATE_ALL, 0);
+ dmaengine_terminate_all(chan);
dma_release_channel(chan);
}
dev->dmadata[i].dma_chan = NULL;
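
The txx9aclc conversion replaces direct device_control()/tx_submit()/device_issue_pending() calls with the generic dmaengine inline helpers. The usual slave submit/issue/terminate sequence, sketched independently of this driver (desc, chan, buf_dma, len and the my_* names are placeholders):

	desc = dmaengine_prep_slave_single(chan, buf_dma, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -ENOMEM;
	desc->callback = my_dma_complete;
	desc->callback_param = my_data;
	cookie = dmaengine_submit(desc);        /* was desc->tx_submit(desc) */
	dma_async_issue_pending(chan);          /* was device_issue_pending() */

	/* ... and on stop/teardown: */
	dmaengine_terminate_all(chan);          /* was device_control(DMA_TERMINATE_ALL, 0) */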
diff --git a/sound/usb/caiaq/audio.c b/sound/usb/caiaq/audio.c
index 7103b0908d13..272844746135 100644
--- a/sound/usb/caiaq/audio.c
+++ b/sound/usb/caiaq/audio.c
@@ -816,6 +816,11 @@ int snd_usb_caiaq_audio_init(struct snd_usb_caiaqdev *cdev)
return -EINVAL;
}
+ if (cdev->n_streams < 2) {
+ dev_err(dev, "bogus number of streams: %d\n", cdev->n_streams);
+ return -EINVAL;
+ }
+
ret = snd_pcm_new(cdev->chip.card, cdev->product_name, 0,
cdev->n_audio_out, cdev->n_audio_in, &cdev->pcm);
diff --git a/sound/usb/midi.c b/sound/usb/midi.c
index b2b6f398a4e1..d3d49525a16b 100644
--- a/sound/usb/midi.c
+++ b/sound/usb/midi.c
@@ -1506,6 +1506,12 @@ static struct port_info {
PORT_INFO(vendor, product, num, name, 0, \
SNDRV_SEQ_PORT_TYPE_MIDI_GENERIC | \
SNDRV_SEQ_PORT_TYPE_HARDWARE)
+#define GM_SYNTH_PORT(vendor, product, num, name, voices) \
+ PORT_INFO(vendor, product, num, name, voices, \
+ SNDRV_SEQ_PORT_TYPE_MIDI_GENERIC | \
+ SNDRV_SEQ_PORT_TYPE_MIDI_GM | \
+ SNDRV_SEQ_PORT_TYPE_HARDWARE | \
+ SNDRV_SEQ_PORT_TYPE_SYNTHESIZER)
#define ROLAND_SYNTH_PORT(vendor, product, num, name, voices) \
PORT_INFO(vendor, product, num, name, voices, \
SNDRV_SEQ_PORT_TYPE_MIDI_GENERIC | \
@@ -1525,6 +1531,11 @@ static struct port_info {
SNDRV_SEQ_PORT_TYPE_MIDI_MT32 | \
SNDRV_SEQ_PORT_TYPE_HARDWARE | \
SNDRV_SEQ_PORT_TYPE_SYNTHESIZER)
+ /* Yamaha MOTIF XF */
+ GM_SYNTH_PORT(0x0499, 0x105c, 0, "%s Tone Generator", 128),
+ CONTROL_PORT(0x0499, 0x105c, 1, "%s Remote Control"),
+ EXTERNAL_PORT(0x0499, 0x105c, 2, "%s Thru"),
+ CONTROL_PORT(0x0499, 0x105c, 3, "%s Editor"),
/* Roland UA-100 */
CONTROL_PORT(0x0582, 0x0000, 2, "%s Control"),
/* Roland SC-8850 */
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 19a921eb75f1..d2aa45a8d895 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1174,5 +1174,21 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
}
}
+ /* XMOS based USB DACs */
+ switch (chip->usb_id) {
+ /* iFi Audio micro/nano iDSD */
+ case USB_ID(0x20b1, 0x3008):
+ if (fp->altsetting == 2)
+ return SNDRV_PCM_FMTBIT_DSD_U32_LE;
+ break;
+ /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */
+ case USB_ID(0x20b1, 0x2009):
+ if (fp->altsetting == 3)
+ return SNDRV_PCM_FMTBIT_DSD_U32_LE;
+ break;
+ default:
+ break;
+ }
+
return 0;
}
diff --git a/tools/lib/api/Makefile b/tools/lib/api/Makefile
index ce00f7ee6455..36c08b1f4afb 100644
--- a/tools/lib/api/Makefile
+++ b/tools/lib/api/Makefile
@@ -10,9 +10,14 @@ LIB_OBJS=
LIB_H += fs/debugfs.h
LIB_H += fs/fs.h
+# See comment below about piggybacking...
+LIB_H += fd/array.h
LIB_OBJS += $(OUTPUT)fs/debugfs.o
LIB_OBJS += $(OUTPUT)fs/fs.o
+# XXX piggybacking here, need to introduce libapikfd, or rename this
+# to plain libapik.a and make it have all the api goodies
+LIB_OBJS += $(OUTPUT)fd/array.o
LIBFILE = libapikfs.a
@@ -29,7 +34,7 @@ $(LIBFILE): $(LIB_OBJS)
$(LIB_OBJS): $(LIB_H)
libapi_dirs:
- $(QUIET_MKDIR)mkdir -p $(OUTPUT)fs/
+ $(QUIET_MKDIR)mkdir -p $(OUTPUT)fd $(OUTPUT)fs
$(OUTPUT)%.o: %.c libapi_dirs
$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $<
diff --git a/tools/lib/api/fd/array.c b/tools/lib/api/fd/array.c
new file mode 100644
index 000000000000..0e636c4339b8
--- /dev/null
+++ b/tools/lib/api/fd/array.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2014, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Released under the GPL v2. (and only v2, not any later version)
+ */
+#include "array.h"
+#include <errno.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+void fdarray__init(struct fdarray *fda, int nr_autogrow)
+{
+ fda->entries = NULL;
+ fda->priv = NULL;
+ fda->nr = fda->nr_alloc = 0;
+ fda->nr_autogrow = nr_autogrow;
+}
+
+int fdarray__grow(struct fdarray *fda, int nr)
+{
+ void *priv;
+ int nr_alloc = fda->nr_alloc + nr;
+ size_t psize = sizeof(fda->priv[0]) * nr_alloc;
+ size_t size = sizeof(struct pollfd) * nr_alloc;
+ struct pollfd *entries = realloc(fda->entries, size);
+
+ if (entries == NULL)
+ return -ENOMEM;
+
+ priv = realloc(fda->priv, psize);
+ if (priv == NULL) {
+ free(entries);
+ return -ENOMEM;
+ }
+
+ fda->nr_alloc = nr_alloc;
+ fda->entries = entries;
+ fda->priv = priv;
+ return 0;
+}
+
+struct fdarray *fdarray__new(int nr_alloc, int nr_autogrow)
+{
+ struct fdarray *fda = calloc(1, sizeof(*fda));
+
+ if (fda != NULL) {
+ if (fdarray__grow(fda, nr_alloc)) {
+ free(fda);
+ fda = NULL;
+ } else {
+ fda->nr_autogrow = nr_autogrow;
+ }
+ }
+
+ return fda;
+}
+
+void fdarray__exit(struct fdarray *fda)
+{
+ free(fda->entries);
+ free(fda->priv);
+ fdarray__init(fda, 0);
+}
+
+void fdarray__delete(struct fdarray *fda)
+{
+ fdarray__exit(fda);
+ free(fda);
+}
+
+int fdarray__add(struct fdarray *fda, int fd, short revents)
+{
+ int pos = fda->nr;
+
+ if (fda->nr == fda->nr_alloc &&
+ fdarray__grow(fda, fda->nr_autogrow) < 0)
+ return -ENOMEM;
+
+ fda->entries[fda->nr].fd = fd;
+ fda->entries[fda->nr].events = revents;
+ fda->nr++;
+ return pos;
+}
+
+int fdarray__filter(struct fdarray *fda, short revents,
+ void (*entry_destructor)(struct fdarray *fda, int fd))
+{
+ int fd, nr = 0;
+
+ if (fda->nr == 0)
+ return 0;
+
+ for (fd = 0; fd < fda->nr; ++fd) {
+ if (fda->entries[fd].revents & revents) {
+ if (entry_destructor)
+ entry_destructor(fda, fd);
+
+ continue;
+ }
+
+ if (fd != nr) {
+ fda->entries[nr] = fda->entries[fd];
+ fda->priv[nr] = fda->priv[fd];
+ }
+
+ ++nr;
+ }
+
+ return fda->nr = nr;
+}
+
+int fdarray__poll(struct fdarray *fda, int timeout)
+{
+ return poll(fda->entries, fda->nr, timeout);
+}
+
+int fdarray__fprintf(struct fdarray *fda, FILE *fp)
+{
+ int fd, printed = fprintf(fp, "%d [ ", fda->nr);
+
+ for (fd = 0; fd < fda->nr; ++fd)
+ printed += fprintf(fp, "%s%d", fd ? ", " : "", fda->entries[fd].fd);
+
+ return printed + fprintf(fp, " ]");
+}
diff --git a/tools/lib/api/fd/array.h b/tools/lib/api/fd/array.h
new file mode 100644
index 000000000000..45db01818f45
--- /dev/null
+++ b/tools/lib/api/fd/array.h
@@ -0,0 +1,46 @@
+#ifndef __API_FD_ARRAY__
+#define __API_FD_ARRAY__
+
+#include <stdio.h>
+
+struct pollfd;
+
+/**
+ * struct fdarray: Array of file descriptors
+ *
+ * @priv: Per array entry priv area, users should access just its contents,
+ * not set it to anything, as it is kept in sync with @entries, being
+ * realloc'ed, for instance, in fdarray__{grow,filter}.
+ *
+ * I.e. using 'fda->priv[N].idx = value' where N < fda->nr is ok,
+ * but doing 'fda->priv = malloc(M)' is not allowed.
+ */
+struct fdarray {
+ int nr;
+ int nr_alloc;
+ int nr_autogrow;
+ struct pollfd *entries;
+ union {
+ int idx;
+ } *priv;
+};
+
+void fdarray__init(struct fdarray *fda, int nr_autogrow);
+void fdarray__exit(struct fdarray *fda);
+
+struct fdarray *fdarray__new(int nr_alloc, int nr_autogrow);
+void fdarray__delete(struct fdarray *fda);
+
+int fdarray__add(struct fdarray *fda, int fd, short revents);
+int fdarray__poll(struct fdarray *fda, int timeout);
+int fdarray__filter(struct fdarray *fda, short revents,
+ void (*entry_destructor)(struct fdarray *fda, int fd));
+int fdarray__grow(struct fdarray *fda, int extra);
+int fdarray__fprintf(struct fdarray *fda, FILE *fp);
+
+static inline int fdarray__available_entries(struct fdarray *fda)
+{
+ return fda->nr_alloc - fda->nr;
+}
+
+#endif /* __API_FD_ARRAY__ */
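
A small usage sketch based only on the API declared above (the include path is an assumption about how it is built within tools/):

	#include <poll.h>
	#include <stdio.h>
	#include <unistd.h>
	#include "fd/array.h"        /* relative to tools/lib/api, assumption */

	int main(void)
	{
		struct fdarray fda;
		int pfd[2];

		fdarray__init(&fda, 2);              /* autogrow by 2 entries when full */
		if (pipe(pfd))
			return 1;
		fdarray__add(&fda, pfd[0], POLLIN);
		if (write(pfd[1], "x", 1) != 1)
			return 1;

		if (fdarray__poll(&fda, 100) > 0)
			fdarray__fprintf(&fda, stdout);
		putchar('\n');

		fdarray__filter(&fda, POLLIN, NULL); /* drop entries that fired, no destructor */
		fdarray__exit(&fda);
		return 0;
	}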
diff --git a/tools/perf/.gitignore b/tools/perf/.gitignore
index 782d86e961b9..717221e98450 100644
--- a/tools/perf/.gitignore
+++ b/tools/perf/.gitignore
@@ -15,6 +15,7 @@ perf.data
perf.data.old
output.svg
perf-archive
+perf-with-kcore
tags
TAGS
cscope*
diff --git a/tools/perf/Documentation/perf-probe.txt b/tools/perf/Documentation/perf-probe.txt
index 1513935c399b..aaa869be3dc1 100644
--- a/tools/perf/Documentation/perf-probe.txt
+++ b/tools/perf/Documentation/perf-probe.txt
@@ -104,6 +104,9 @@ OPTIONS
Specify path to the executable or shared library file for user
space tracing. Can also be used with --funcs option.
+--demangle-kernel::
+ Demangle kernel symbols.
+
In the absence of -m/-x options, perf probe checks if the first argument after
the options is an absolute path name. If it's an absolute path, perf probe
uses it as a target module/target user space binary to probe.
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index d2b59af62bc0..0927bf4e6c2a 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -147,7 +147,7 @@ OPTIONS
-w::
--column-widths=<width[,width...]>::
Force each column width to the provided list, for large terminal
- readability.
+ readability. 0 means no limit (default behavior).
-t::
--field-separator=::
@@ -276,6 +276,9 @@ OPTIONS
Demangle symbol names to human readable form. It's enabled by default,
disable with --no-demangle.
+--demangle-kernel::
+ Demangle kernel symbol names to human readable form (for C++ kernels).
+
--mem-mode::
Use the data addresses of samples in addition to instruction addresses
to build the histograms. To generate meaningful output, the perf.data
diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt
index 180ae02137a5..3265b1070518 100644
--- a/tools/perf/Documentation/perf-top.txt
+++ b/tools/perf/Documentation/perf-top.txt
@@ -98,6 +98,9 @@ Default is to monitor all CPUS.
--hide_user_symbols::
Hide user symbols.
+--demangle-kernel::
+ Demangle kernel symbols.
+
-D::
--dump-symtab::
Dump the symbol table used for profiling.
@@ -193,6 +196,12 @@ Default is to monitor all CPUS.
sum of shown entries will always be 100%. "absolute" means it retains
the original value before and after the filter is applied.
+-w::
+--column-widths=<width[,width...]>::
+ Force each column width to the provided list, for large terminal
+ readability. 0 means no limit (default behavior).
+
+
INTERACTIVE PROMPTING KEYS
--------------------------
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 2240974b7745..262916f4a377 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -126,6 +126,7 @@ PYRF_OBJS =
SCRIPT_SH =
SCRIPT_SH += perf-archive.sh
+SCRIPT_SH += perf-with-kcore.sh
grep-libs = $(filter -l%,$(1))
strip-libs = $(filter-out -l%,$(1))
@@ -263,6 +264,7 @@ LIB_H += util/xyarray.h
LIB_H += util/header.h
LIB_H += util/help.h
LIB_H += util/session.h
+LIB_H += util/ordered-events.h
LIB_H += util/strbuf.h
LIB_H += util/strlist.h
LIB_H += util/strfilter.h
@@ -347,6 +349,7 @@ LIB_OBJS += $(OUTPUT)util/machine.o
LIB_OBJS += $(OUTPUT)util/map.o
LIB_OBJS += $(OUTPUT)util/pstack.o
LIB_OBJS += $(OUTPUT)util/session.o
+LIB_OBJS += $(OUTPUT)util/ordered-events.o
LIB_OBJS += $(OUTPUT)util/comm.o
LIB_OBJS += $(OUTPUT)util/thread.o
LIB_OBJS += $(OUTPUT)util/thread_map.o
@@ -399,6 +402,7 @@ LIB_OBJS += $(OUTPUT)tests/perf-record.o
LIB_OBJS += $(OUTPUT)tests/rdpmc.o
LIB_OBJS += $(OUTPUT)tests/evsel-roundtrip-name.o
LIB_OBJS += $(OUTPUT)tests/evsel-tp-sched.o
+LIB_OBJS += $(OUTPUT)tests/fdarray.o
LIB_OBJS += $(OUTPUT)tests/pmu.o
LIB_OBJS += $(OUTPUT)tests/hists_common.o
LIB_OBJS += $(OUTPUT)tests/hists_link.o
@@ -423,6 +427,7 @@ endif
endif
LIB_OBJS += $(OUTPUT)tests/mmap-thread-lookup.o
LIB_OBJS += $(OUTPUT)tests/thread-mg-share.o
+LIB_OBJS += $(OUTPUT)tests/switch-tracking.o
BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o
BUILTIN_OBJS += $(OUTPUT)builtin-bench.o
@@ -765,7 +770,7 @@ $(LIBTRACEEVENT)-clean:
install-traceevent-plugins: $(LIBTRACEEVENT)
$(QUIET_SUBDIR0)$(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) install_plugins
-LIBAPIKFS_SOURCES = $(wildcard $(LIB_PATH)fs/*.[ch])
+LIBAPIKFS_SOURCES = $(wildcard $(LIB_PATH)fs/*.[ch] $(LIB_PATH)fd/*.[ch])
# if subdir is set, we've been called from above so target has been built
# already
@@ -875,6 +880,8 @@ install-bin: all install-gtk
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
$(call QUIET_INSTALL, perf-archive) \
$(INSTALL) $(OUTPUT)perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
+ $(call QUIET_INSTALL, perf-with-kcore) \
+ $(INSTALL) $(OUTPUT)perf-with-kcore -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
ifndef NO_LIBPERL
$(call QUIET_INSTALL, perl-scripts) \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'; \
@@ -920,7 +927,7 @@ config-clean:
@$(MAKE) -C config/feature-checks clean >/dev/null
clean: $(LIBTRACEEVENT)-clean $(LIBAPIKFS)-clean config-clean
- $(call QUIET_CLEAN, core-objs) $(RM) $(LIB_OBJS) $(BUILTIN_OBJS) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf.o $(LANG_BINDINGS) $(GTK_OBJS)
+ $(call QUIET_CLEAN, core-objs) $(RM) $(LIB_OBJS) $(BUILTIN_OBJS) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf-with-kcore $(OUTPUT)perf.o $(LANG_BINDINGS) $(GTK_OBJS)
$(call QUIET_CLEAN, core-progs) $(RM) $(ALL_PROGRAMS) perf
$(call QUIET_CLEAN, core-gen) $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS $(OUTPUT)PERF-FEATURES $(OUTPUT)util/*-bison* $(OUTPUT)util/*-flex*
$(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) clean
diff --git a/tools/perf/arch/arm/tests/dwarf-unwind.c b/tools/perf/arch/arm/tests/dwarf-unwind.c
index 9f870d27cb39..62eff847f91c 100644
--- a/tools/perf/arch/arm/tests/dwarf-unwind.c
+++ b/tools/perf/arch/arm/tests/dwarf-unwind.c
@@ -3,6 +3,7 @@
#include "thread.h"
#include "map.h"
#include "event.h"
+#include "debug.h"
#include "tests/tests.h"
#define STACK_SIZE 8192
diff --git a/tools/perf/arch/arm/util/unwind-libunwind.c b/tools/perf/arch/arm/util/unwind-libunwind.c
index 729ed69a6664..62c397ed3d97 100644
--- a/tools/perf/arch/arm/util/unwind-libunwind.c
+++ b/tools/perf/arch/arm/util/unwind-libunwind.c
@@ -3,6 +3,7 @@
#include <libunwind.h>
#include "perf_regs.h"
#include "../../util/unwind.h"
+#include "../../util/debug.h"
int libunwind__arch_reg_id(int regnum)
{
diff --git a/tools/perf/arch/arm64/include/perf_regs.h b/tools/perf/arch/arm64/include/perf_regs.h
index e9441b9e2a30..1d3f39c3aa56 100644
--- a/tools/perf/arch/arm64/include/perf_regs.h
+++ b/tools/perf/arch/arm64/include/perf_regs.h
@@ -6,6 +6,8 @@
#include <asm/perf_regs.h>
#define PERF_REGS_MASK ((1ULL << PERF_REG_ARM64_MAX) - 1)
+#define PERF_REGS_MAX PERF_REG_ARM64_MAX
+
#define PERF_REG_IP PERF_REG_ARM64_PC
#define PERF_REG_SP PERF_REG_ARM64_SP
diff --git a/tools/perf/arch/arm64/util/unwind-libunwind.c b/tools/perf/arch/arm64/util/unwind-libunwind.c
index 436ee43859dc..a87afa91a99e 100644
--- a/tools/perf/arch/arm64/util/unwind-libunwind.c
+++ b/tools/perf/arch/arm64/util/unwind-libunwind.c
@@ -3,6 +3,7 @@
#include <libunwind.h>
#include "perf_regs.h"
#include "../../util/unwind.h"
+#include "../../util/debug.h"
int libunwind__arch_reg_id(int regnum)
{
diff --git a/tools/perf/arch/common.c b/tools/perf/arch/common.c
index 42faf369211c..49776f190abf 100644
--- a/tools/perf/arch/common.c
+++ b/tools/perf/arch/common.c
@@ -12,6 +12,11 @@ const char *const arm_triplets[] = {
NULL
};
+const char *const arm64_triplets[] = {
+ "aarch64-linux-android-",
+ NULL
+};
+
const char *const powerpc_triplets[] = {
"powerpc-unknown-linux-gnu-",
"powerpc64-unknown-linux-gnu-",
@@ -105,6 +110,8 @@ static const char *normalize_arch(char *arch)
return "x86";
if (!strcmp(arch, "sun4u") || !strncmp(arch, "sparc", 5))
return "sparc";
+ if (!strcmp(arch, "aarch64") || !strcmp(arch, "arm64"))
+ return "arm64";
if (!strncmp(arch, "arm", 3) || !strcmp(arch, "sa110"))
return "arm";
if (!strncmp(arch, "s390", 4))
@@ -159,6 +166,8 @@ static int perf_session_env__lookup_binutils_path(struct perf_session_env *env,
if (!strcmp(arch, "arm"))
path_list = arm_triplets;
+ else if (!strcmp(arch, "arm64"))
+ path_list = arm64_triplets;
else if (!strcmp(arch, "powerpc"))
path_list = powerpc_triplets;
else if (!strcmp(arch, "sh"))
diff --git a/tools/perf/arch/powerpc/Makefile b/tools/perf/arch/powerpc/Makefile
index b92219b1900d..6f7782bea5dd 100644
--- a/tools/perf/arch/powerpc/Makefile
+++ b/tools/perf/arch/powerpc/Makefile
@@ -1,6 +1,6 @@
ifndef NO_DWARF
PERF_HAVE_DWARF_REGS := 1
LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o
+LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/skip-callchain-idx.o
endif
LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/header.o
-LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/skip-callchain-idx.o
diff --git a/tools/perf/arch/powerpc/util/skip-callchain-idx.c b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
index a7c23a4b3778..d73ef8bb08c7 100644
--- a/tools/perf/arch/powerpc/util/skip-callchain-idx.c
+++ b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
@@ -15,6 +15,7 @@
#include "util/thread.h"
#include "util/callchain.h"
+#include "util/debug.h"
/*
* When saving the callchain on Power, the kernel conservatively saves
diff --git a/tools/perf/bench/futex-hash.c b/tools/perf/bench/futex-hash.c
index a84206e9c4aa..fc9bebd2cca0 100644
--- a/tools/perf/bench/futex-hash.c
+++ b/tools/perf/bench/futex-hash.c
@@ -26,6 +26,7 @@ static unsigned int nsecs = 10;
/* amount of futexes per thread */
static unsigned int nfutexes = 1024;
static bool fshared = false, done = false, silent = false;
+static int futex_flag = 0;
struct timeval start, end, runtime;
static pthread_mutex_t thread_lock;
@@ -75,8 +76,7 @@ static void *workerfn(void *arg)
* such as internal waitqueue handling, thus enlarging
* the critical region protected by hb->lock.
*/
- ret = futex_wait(&w->futex[i], 1234, NULL,
- fshared ? 0 : FUTEX_PRIVATE_FLAG);
+ ret = futex_wait(&w->futex[i], 1234, NULL, futex_flag);
if (!silent &&
(!ret || errno != EAGAIN || errno != EWOULDBLOCK))
warn("Non-expected futex return call");
@@ -135,6 +135,9 @@ int bench_futex_hash(int argc, const char **argv,
if (!worker)
goto errmem;
+ if (!fshared)
+ futex_flag = FUTEX_PRIVATE_FLAG;
+
printf("Run summary [PID %d]: %d threads, each operating on %d [%s] futexes for %d secs.\n\n",
getpid(), nthreads, nfutexes, fshared ? "shared":"private", nsecs);
diff --git a/tools/perf/bench/futex-requeue.c b/tools/perf/bench/futex-requeue.c
index 732403bfd31a..bedff6b5b3cf 100644
--- a/tools/perf/bench/futex-requeue.c
+++ b/tools/perf/bench/futex-requeue.c
@@ -30,16 +30,18 @@ static u_int32_t futex1 = 0, futex2 = 0;
static unsigned int nrequeue = 1;
static pthread_t *worker;
-static bool done = 0, silent = 0;
+static bool done = false, silent = false, fshared = false;
static pthread_mutex_t thread_lock;
static pthread_cond_t thread_parent, thread_worker;
static struct stats requeuetime_stats, requeued_stats;
static unsigned int ncpus, threads_starting, nthreads = 0;
+static int futex_flag = 0;
static const struct option options[] = {
OPT_UINTEGER('t', "threads", &nthreads, "Specify amount of threads"),
OPT_UINTEGER('q', "nrequeue", &nrequeue, "Specify amount of threads to requeue at once"),
OPT_BOOLEAN( 's', "silent", &silent, "Silent mode: do not display data/details"),
+ OPT_BOOLEAN( 'S', "shared", &fshared, "Use shared futexes instead of private ones"),
OPT_END()
};
@@ -70,7 +72,7 @@ static void *workerfn(void *arg __maybe_unused)
pthread_cond_wait(&thread_worker, &thread_lock);
pthread_mutex_unlock(&thread_lock);
- futex_wait(&futex1, 0, NULL, FUTEX_PRIVATE_FLAG);
+ futex_wait(&futex1, 0, NULL, futex_flag);
return NULL;
}
@@ -127,9 +129,12 @@ int bench_futex_requeue(int argc, const char **argv,
if (!worker)
err(EXIT_FAILURE, "calloc");
- printf("Run summary [PID %d]: Requeuing %d threads (from %p to %p), "
- "%d at a time.\n\n",
- getpid(), nthreads, &futex1, &futex2, nrequeue);
+ if (!fshared)
+ futex_flag = FUTEX_PRIVATE_FLAG;
+
+ printf("Run summary [PID %d]: Requeuing %d threads (from [%s] %p to %p), "
+ "%d at a time.\n\n", getpid(), nthreads,
+ fshared ? "shared":"private", &futex1, &futex2, nrequeue);
init_stats(&requeued_stats);
init_stats(&requeuetime_stats);
@@ -156,16 +161,20 @@ int bench_futex_requeue(int argc, const char **argv,
/* Ok, all threads are patiently blocked, start requeueing */
gettimeofday(&start, NULL);
- for (nrequeued = 0; nrequeued < nthreads; nrequeued += nrequeue)
+ for (nrequeued = 0; nrequeued < nthreads; nrequeued += nrequeue) {
/*
* Do not wakeup any tasks blocked on futex1, allowing
* us to really measure futex_wait functionality.
*/
- futex_cmp_requeue(&futex1, 0, &futex2, 0, nrequeue,
- FUTEX_PRIVATE_FLAG);
+ futex_cmp_requeue(&futex1, 0, &futex2, 0,
+ nrequeue, futex_flag);
+ }
gettimeofday(&end, NULL);
timersub(&end, &start, &runtime);
+ if (nrequeued > nthreads)
+ nrequeued = nthreads;
+
update_stats(&requeued_stats, nrequeued);
update_stats(&requeuetime_stats, runtime.tv_usec);
@@ -175,7 +184,7 @@ int bench_futex_requeue(int argc, const char **argv,
}
/* everybody should be blocked on futex2, wake'em up */
- nrequeued = futex_wake(&futex2, nthreads, FUTEX_PRIVATE_FLAG);
+ nrequeued = futex_wake(&futex2, nthreads, futex_flag);
if (nthreads != nrequeued)
warnx("couldn't wakeup all tasks (%d/%d)", nrequeued, nthreads);
@@ -184,7 +193,6 @@ int bench_futex_requeue(int argc, const char **argv,
if (ret)
err(EXIT_FAILURE, "pthread_join");
}
-
}
/* cleanup & report results */
diff --git a/tools/perf/bench/futex-wake.c b/tools/perf/bench/futex-wake.c
index 50022cbce87e..929f762be47e 100644
--- a/tools/perf/bench/futex-wake.c
+++ b/tools/perf/bench/futex-wake.c
@@ -31,16 +31,18 @@ static u_int32_t futex1 = 0;
static unsigned int nwakes = 1;
pthread_t *worker;
-static bool done = false, silent = false;
+static bool done = false, silent = false, fshared = false;
static pthread_mutex_t thread_lock;
static pthread_cond_t thread_parent, thread_worker;
static struct stats waketime_stats, wakeup_stats;
static unsigned int ncpus, threads_starting, nthreads = 0;
+static int futex_flag = 0;
static const struct option options[] = {
OPT_UINTEGER('t', "threads", &nthreads, "Specify amount of threads"),
OPT_UINTEGER('w', "nwakes", &nwakes, "Specify amount of threads to wake at once"),
OPT_BOOLEAN( 's', "silent", &silent, "Silent mode: do not display data/details"),
+ OPT_BOOLEAN( 'S', "shared", &fshared, "Use shared futexes instead of private ones"),
OPT_END()
};
@@ -58,7 +60,7 @@ static void *workerfn(void *arg __maybe_unused)
pthread_cond_wait(&thread_worker, &thread_lock);
pthread_mutex_unlock(&thread_lock);
- futex_wait(&futex1, 0, NULL, FUTEX_PRIVATE_FLAG);
+ futex_wait(&futex1, 0, NULL, futex_flag);
return NULL;
}
@@ -130,9 +132,12 @@ int bench_futex_wake(int argc, const char **argv,
if (!worker)
err(EXIT_FAILURE, "calloc");
- printf("Run summary [PID %d]: blocking on %d threads (at futex %p), "
+ if (!fshared)
+ futex_flag = FUTEX_PRIVATE_FLAG;
+
+ printf("Run summary [PID %d]: blocking on %d threads (at [%s] futex %p), "
"waking up %d at a time.\n\n",
- getpid(), nthreads, &futex1, nwakes);
+ getpid(), nthreads, fshared ? "shared":"private", &futex1, nwakes);
init_stats(&wakeup_stats);
init_stats(&waketime_stats);
@@ -160,7 +165,7 @@ int bench_futex_wake(int argc, const char **argv,
/* Ok, all threads are patiently blocked, start waking folks up */
gettimeofday(&start, NULL);
while (nwoken != nthreads)
- nwoken += futex_wake(&futex1, nwakes, FUTEX_PRIVATE_FLAG);
+ nwoken += futex_wake(&futex1, nwakes, futex_flag);
gettimeofday(&end, NULL);
timersub(&end, &start, &runtime);
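
Both benchmarks now default to private futexes and only fall back to shared ones with -S; the flag is OR'ed into the futex op word. Roughly what the wait wrapper amounts to (standalone sketch, not the benchmarks' own helper):

	#include <linux/futex.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int my_futex_wait(unsigned int *uaddr, unsigned int val, int opflags)
	{
		/* opflags is 0 for shared futexes, FUTEX_PRIVATE_FLAG otherwise;
		 * the private variant skips the cross-process key lookup */
		return syscall(SYS_futex, uaddr, FUTEX_WAIT | opflags, val,
			       NULL, NULL, 0);
	}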
diff --git a/tools/perf/bench/sched-messaging.c b/tools/perf/bench/sched-messaging.c
index 52a56599a543..d7f281c2828d 100644
--- a/tools/perf/bench/sched-messaging.c
+++ b/tools/perf/bench/sched-messaging.c
@@ -26,7 +26,7 @@
#include <sys/socket.h>
#include <sys/wait.h>
#include <sys/time.h>
-#include <sys/poll.h>
+#include <poll.h>
#include <limits.h>
#include <err.h>
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 1ec429fef2be..be5939418425 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -36,7 +36,8 @@
struct perf_annotate {
struct perf_tool tool;
- bool force, use_tui, use_stdio, use_gtk;
+ struct perf_session *session;
+ bool use_tui, use_stdio, use_gtk;
bool full_paths;
bool print_line;
bool skip_missing;
@@ -188,18 +189,9 @@ find_next:
static int __cmd_annotate(struct perf_annotate *ann)
{
int ret;
- struct perf_session *session;
+ struct perf_session *session = ann->session;
struct perf_evsel *pos;
u64 total_nr_samples;
- struct perf_data_file file = {
- .path = input_name,
- .mode = PERF_DATA_MODE_READ,
- .force = ann->force,
- };
-
- session = perf_session__new(&file, false, &ann->tool);
- if (session == NULL)
- return -ENOMEM;
machines__set_symbol_filter(&session->machines, symbol__annotate_init);
@@ -207,22 +199,22 @@ static int __cmd_annotate(struct perf_annotate *ann)
ret = perf_session__cpu_bitmap(session, ann->cpu_list,
ann->cpu_bitmap);
if (ret)
- goto out_delete;
+ goto out;
}
if (!objdump_path) {
ret = perf_session_env__lookup_objdump(&session->header.env);
if (ret)
- goto out_delete;
+ goto out;
}
ret = perf_session__process_events(session, &ann->tool);
if (ret)
- goto out_delete;
+ goto out;
if (dump_trace) {
perf_session__fprintf_nr_events(session, stdout);
- goto out_delete;
+ goto out;
}
if (verbose > 3)
@@ -250,8 +242,8 @@ static int __cmd_annotate(struct perf_annotate *ann)
}
if (total_nr_samples == 0) {
- ui__error("The %s file has no samples!\n", file.path);
- goto out_delete;
+ ui__error("The %s file has no samples!\n", session->file->path);
+ goto out;
}
if (use_browser == 2) {
@@ -261,24 +253,12 @@ static int __cmd_annotate(struct perf_annotate *ann)
"perf_gtk__show_annotations");
if (show_annotations == NULL) {
ui__error("GTK browser not found!\n");
- goto out_delete;
+ goto out;
}
show_annotations();
}
-out_delete:
- /*
- * Speed up the exit process, for large files this can
- * take quite a while.
- *
- * XXX Enable this when using valgrind or if we ever
- * librarize this command.
- *
- * Also experiment with obstacks to see how much speed
- * up we'll get here.
- *
- * perf_session__delete(session);
- */
+out:
return ret;
}
@@ -297,10 +277,14 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused)
.comm = perf_event__process_comm,
.exit = perf_event__process_exit,
.fork = perf_event__process_fork,
- .ordered_samples = true,
+ .ordered_events = true,
.ordering_requires_timestamps = true,
},
};
+ struct perf_data_file file = {
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ };
const struct option options[] = {
OPT_STRING('i', "input", &input_name, "file",
"input file name"),
@@ -308,7 +292,7 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused)
"only consider symbols in these dsos"),
OPT_STRING('s', "symbol", &annotate.sym_hist_filter, "symbol",
"symbol to annotate"),
- OPT_BOOLEAN('f', "force", &annotate.force, "don't complain, do it"),
+ OPT_BOOLEAN('f', "force", &file.force, "don't complain, do it"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
@@ -341,6 +325,7 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused)
"Show event group information together"),
OPT_END()
};
+ int ret;
argc = parse_options(argc, argv, options, annotate_usage, 0);
@@ -353,11 +338,16 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused)
setup_browser(true);
+ annotate.session = perf_session__new(&file, false, &annotate.tool);
+ if (annotate.session == NULL)
+ return -1;
+
symbol_conf.priv_size = sizeof(struct annotation);
symbol_conf.try_vmlinux_path = true;
- if (symbol__init() < 0)
- return -1;
+ ret = symbol__init(&annotate.session->header.env);
+ if (ret < 0)
+ goto out_delete;
if (setup_sorting() < 0)
usage_with_options(annotate_usage, options);
@@ -373,5 +363,20 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused)
annotate.sym_hist_filter = argv[0];
}
- return __cmd_annotate(&annotate);
+ ret = __cmd_annotate(&annotate);
+
+out_delete:
+ /*
+ * Speed up the exit process, for large files this can
+ * take quite a while.
+ *
+ * XXX Enable this when using valgrind or if we ever
+ * librarize this command.
+ *
+ * Also experiment with obstacks to see how much speed
+ * up we'll get here.
+ *
+ * perf_session__delete(session);
+ */
+ return ret;
}
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index 2a2c78f80876..70385756da63 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -246,20 +246,9 @@ static bool dso__missing_buildid_cache(struct dso *dso, int parm __maybe_unused)
return true;
}
-static int build_id_cache__fprintf_missing(const char *filename, bool force, FILE *fp)
+static int build_id_cache__fprintf_missing(struct perf_session *session, FILE *fp)
{
- struct perf_data_file file = {
- .path = filename,
- .mode = PERF_DATA_MODE_READ,
- .force = force,
- };
- struct perf_session *session = perf_session__new(&file, false, NULL);
- if (session == NULL)
- return -1;
-
perf_session__fprintf_dsos_buildid(session, fp, dso__missing_buildid_cache, 0);
- perf_session__delete(session);
-
return 0;
}
@@ -302,6 +291,12 @@ int cmd_buildid_cache(int argc, const char **argv,
*missing_filename = NULL,
*update_name_list_str = NULL,
*kcore_filename;
+ char sbuf[STRERR_BUFSIZE];
+
+ struct perf_data_file file = {
+ .mode = PERF_DATA_MODE_READ,
+ };
+ struct perf_session *session = NULL;
const struct option buildid_cache_options[] = {
OPT_STRING('a', "add", &add_name_list_str,
@@ -326,8 +321,17 @@ int cmd_buildid_cache(int argc, const char **argv,
argc = parse_options(argc, argv, buildid_cache_options,
buildid_cache_usage, 0);
- if (symbol__init() < 0)
- return -1;
+ if (missing_filename) {
+ file.path = missing_filename;
+ file.force = force;
+
+ session = perf_session__new(&file, false, NULL);
+ if (session == NULL)
+ return -1;
+ }
+
+ if (symbol__init(session ? &session->header.env : NULL) < 0)
+ goto out;
setup_pager();
@@ -344,7 +348,7 @@ int cmd_buildid_cache(int argc, const char **argv,
continue;
}
pr_warning("Couldn't add %s: %s\n",
- pos->s, strerror(errno));
+ pos->s, strerror_r(errno, sbuf, sizeof(sbuf)));
}
strlist__delete(list);
@@ -362,7 +366,7 @@ int cmd_buildid_cache(int argc, const char **argv,
continue;
}
pr_warning("Couldn't remove %s: %s\n",
- pos->s, strerror(errno));
+ pos->s, strerror_r(errno, sbuf, sizeof(sbuf)));
}
strlist__delete(list);
@@ -370,7 +374,7 @@ int cmd_buildid_cache(int argc, const char **argv,
}
if (missing_filename)
- ret = build_id_cache__fprintf_missing(missing_filename, force, stdout);
+ ret = build_id_cache__fprintf_missing(session, stdout);
if (update_name_list_str) {
list = strlist__new(true, update_name_list_str);
@@ -383,7 +387,7 @@ int cmd_buildid_cache(int argc, const char **argv,
continue;
}
pr_warning("Couldn't update %s: %s\n",
- pos->s, strerror(errno));
+ pos->s, strerror_r(errno, sbuf, sizeof(sbuf)));
}
strlist__delete(list);
@@ -394,5 +398,9 @@ int cmd_buildid_cache(int argc, const char **argv,
build_id_cache__add_kcore(kcore_filename, debugdir, force))
pr_warning("Couldn't add %s\n", kcore_filename);
+out:
+ if (session)
+ perf_session__delete(session);
+
return ret;
}
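
cmd_buildid_cache() now creates the session only when --missing was given, so its single exit path has to cope with session being NULL. The same shape in a self-contained form; session_new() and session_delete() are stand-ins, not perf API:

#include <stdlib.h>

struct session { int unused; };

static struct session *session_new(void) { return calloc(1, sizeof(struct session)); }
static void session_delete(struct session *s) { free(s); }

int main(int argc, char **argv)
{
	struct session *session = NULL;
	int ret = -1;

	if (argc > 1) {			/* stands in for "missing_filename was given" */
		session = session_new();
		if (session == NULL)
			return -1;
	}

	/* ... the real work would run here ... */
	ret = 0;

	if (session)			/* the out: label in the patch above */
		session_delete(session);
	return ret;
}
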
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 9a5a035cb426..a3ce19f7aebd 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -360,7 +360,7 @@ static struct perf_tool tool = {
.exit = perf_event__process_exit,
.fork = perf_event__process_fork,
.lost = perf_event__process_lost,
- .ordered_samples = true,
+ .ordered_events = true,
.ordering_requires_timestamps = true,
};
@@ -683,7 +683,7 @@ static int __cmd_diff(void)
d->session = perf_session__new(&d->file, false, &tool);
if (!d->session) {
pr_err("Failed to open %s\n", d->file.path);
- ret = -ENOMEM;
+ ret = -1;
goto out_delete;
}
@@ -1143,7 +1143,7 @@ int cmd_diff(int argc, const char **argv, const char *prefix __maybe_unused)
argc = parse_options(argc, argv, options, diff_usage, 0);
- if (symbol__init() < 0)
+ if (symbol__init(NULL) < 0)
return -1;
if (data_init(argc, argv) < 0)
diff --git a/tools/perf/builtin-evlist.c b/tools/perf/builtin-evlist.c
index 66e12f55c052..0f93f859b782 100644
--- a/tools/perf/builtin-evlist.c
+++ b/tools/perf/builtin-evlist.c
@@ -28,7 +28,7 @@ static int __cmd_evlist(const char *file_name, struct perf_attr_details *details
session = perf_session__new(&file, 0, NULL);
if (session == NULL)
- return -ENOMEM;
+ return -1;
evlist__for_each(session->evlist, pos)
perf_evsel__fprintf(pos, details, stdout);
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
index 0384d930480b..25d20628212e 100644
--- a/tools/perf/builtin-help.c
+++ b/tools/perf/builtin-help.c
@@ -103,6 +103,8 @@ static int check_emacsclient_version(void)
static void exec_woman_emacs(const char *path, const char *page)
{
+ char sbuf[STRERR_BUFSIZE];
+
if (!check_emacsclient_version()) {
/* This works only with emacsclient version >= 22. */
struct strbuf man_page = STRBUF_INIT;
@@ -111,16 +113,19 @@ static void exec_woman_emacs(const char *path, const char *page)
path = "emacsclient";
strbuf_addf(&man_page, "(woman \"%s\")", page);
execlp(path, "emacsclient", "-e", man_page.buf, NULL);
- warning("failed to exec '%s': %s", path, strerror(errno));
+ warning("failed to exec '%s': %s", path,
+ strerror_r(errno, sbuf, sizeof(sbuf)));
}
}
static void exec_man_konqueror(const char *path, const char *page)
{
const char *display = getenv("DISPLAY");
+
if (display && *display) {
struct strbuf man_page = STRBUF_INIT;
const char *filename = "kfmclient";
+ char sbuf[STRERR_BUFSIZE];
/* It's simpler to launch konqueror using kfmclient. */
if (path) {
@@ -139,24 +144,31 @@ static void exec_man_konqueror(const char *path, const char *page)
path = "kfmclient";
strbuf_addf(&man_page, "man:%s(1)", page);
execlp(path, filename, "newTab", man_page.buf, NULL);
- warning("failed to exec '%s': %s", path, strerror(errno));
+ warning("failed to exec '%s': %s", path,
+ strerror_r(errno, sbuf, sizeof(sbuf)));
}
}
static void exec_man_man(const char *path, const char *page)
{
+ char sbuf[STRERR_BUFSIZE];
+
if (!path)
path = "man";
execlp(path, "man", page, NULL);
- warning("failed to exec '%s': %s", path, strerror(errno));
+ warning("failed to exec '%s': %s", path,
+ strerror_r(errno, sbuf, sizeof(sbuf)));
}
static void exec_man_cmd(const char *cmd, const char *page)
{
struct strbuf shell_cmd = STRBUF_INIT;
+ char sbuf[STRERR_BUFSIZE];
+
strbuf_addf(&shell_cmd, "%s %s", cmd, page);
execl("/bin/sh", "sh", "-c", shell_cmd.buf, NULL);
- warning("failed to exec '%s': %s", cmd, strerror(errno));
+ warning("failed to exec '%s': %s", cmd,
+ strerror_r(errno, sbuf, sizeof(sbuf)));
}
static void add_man_viewer(const char *name)
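
builtin-help.c, like most files in this patch, drops strerror(), whose return value can point at a shared static buffer, in favour of strerror_r() writing into a caller-owned sbuf. A standalone version of the same call shape; it assumes glibc's GNU strerror_r(), which returns a char * as these call sites expect (the portable XSI variant returns int instead), and the local STRERR_BUFSIZE only stands in for perf's own constant:

#define _GNU_SOURCE		/* GNU strerror_r(): returns char *, as used above */
#include <errno.h>
#include <stdio.h>
#include <string.h>

#define STRERR_BUFSIZE 128	/* stand-in for perf's constant */

int main(void)
{
	char sbuf[STRERR_BUFSIZE];

	errno = ENOENT;
	/* The message lands in the caller-supplied buffer, so concurrent
	 * callers no longer trample a shared static string. */
	fprintf(stderr, "failed to exec '%s': %s\n", "man",
		strerror_r(errno, sbuf, sizeof(sbuf)));
	return 0;
}
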
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 9a02807387d6..de99ca1bb942 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -23,6 +23,7 @@
struct perf_inject {
struct perf_tool tool;
+ struct perf_session *session;
bool build_ids;
bool sched_stat;
const char *input_name;
@@ -340,12 +341,8 @@ static int perf_evsel__check_stype(struct perf_evsel *evsel,
static int __cmd_inject(struct perf_inject *inject)
{
- struct perf_session *session;
int ret = -EINVAL;
- struct perf_data_file file = {
- .path = inject->input_name,
- .mode = PERF_DATA_MODE_READ,
- };
+ struct perf_session *session = inject->session;
struct perf_data_file *file_out = &inject->output;
signal(SIGINT, sig_handler);
@@ -357,16 +354,12 @@ static int __cmd_inject(struct perf_inject *inject)
inject->tool.tracing_data = perf_event__repipe_tracing_data;
}
- session = perf_session__new(&file, true, &inject->tool);
- if (session == NULL)
- return -ENOMEM;
-
if (inject->build_ids) {
inject->tool.sample = perf_event__inject_buildid;
} else if (inject->sched_stat) {
struct perf_evsel *evsel;
- inject->tool.ordered_samples = true;
+ inject->tool.ordered_events = true;
evlist__for_each(session->evlist, evsel) {
const char *name = perf_evsel__name(evsel);
@@ -396,8 +389,6 @@ static int __cmd_inject(struct perf_inject *inject)
perf_session__write_header(session, session->evlist, file_out->fd, true);
}
- perf_session__delete(session);
-
return ret;
}
@@ -427,6 +418,11 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused)
.mode = PERF_DATA_MODE_WRITE,
},
};
+ struct perf_data_file file = {
+ .mode = PERF_DATA_MODE_READ,
+ };
+ int ret;
+
const struct option options[] = {
OPT_BOOLEAN('b', "build-ids", &inject.build_ids,
"Inject build-ids into the output stream"),
@@ -461,8 +457,17 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused)
return -1;
}
- if (symbol__init() < 0)
+ file.path = inject.input_name;
+ inject.session = perf_session__new(&file, true, &inject.tool);
+ if (inject.session == NULL)
+ return -1;
+
+ if (symbol__init(&inject.session->header.env) < 0)
return -1;
- return __cmd_inject(&inject);
+ ret = __cmd_inject(&inject);
+
+ perf_session__delete(inject.session);
+
+ return ret;
}
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index bef3376bfaf3..f295141025bc 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -256,7 +256,9 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
static struct perf_tool perf_kmem = {
.sample = process_sample_event,
.comm = perf_event__process_comm,
- .ordered_samples = true,
+ .mmap = perf_event__process_mmap,
+ .mmap2 = perf_event__process_mmap2,
+ .ordered_events = true,
};
static double fragmentation(unsigned long n_req, unsigned long n_alloc)
@@ -403,10 +405,9 @@ static void sort_result(void)
__sort_result(&root_caller_stat, &root_caller_sorted, &caller_sort);
}
-static int __cmd_kmem(void)
+static int __cmd_kmem(struct perf_session *session)
{
int err = -EINVAL;
- struct perf_session *session;
const struct perf_evsel_str_handler kmem_tracepoints[] = {
{ "kmem:kmalloc", perf_evsel__process_alloc_event, },
{ "kmem:kmem_cache_alloc", perf_evsel__process_alloc_event, },
@@ -415,34 +416,22 @@ static int __cmd_kmem(void)
{ "kmem:kfree", perf_evsel__process_free_event, },
{ "kmem:kmem_cache_free", perf_evsel__process_free_event, },
};
- struct perf_data_file file = {
- .path = input_name,
- .mode = PERF_DATA_MODE_READ,
- };
-
- session = perf_session__new(&file, false, &perf_kmem);
- if (session == NULL)
- return -ENOMEM;
-
- if (perf_session__create_kernel_maps(session) < 0)
- goto out_delete;
if (!perf_session__has_traces(session, "kmem record"))
- goto out_delete;
+ goto out;
if (perf_session__set_tracepoints_handlers(session, kmem_tracepoints)) {
pr_err("Initializing perf session tracepoint handlers failed\n");
- return -1;
+ goto out;
}
setup_pager();
err = perf_session__process_events(session, &perf_kmem);
if (err != 0)
- goto out_delete;
+ goto out;
sort_result();
print_result(session);
-out_delete:
- perf_session__delete(session);
+out:
return err;
}
@@ -689,29 +678,46 @@ int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused)
NULL,
NULL
};
+ struct perf_session *session;
+ struct perf_data_file file = {
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ };
+ int ret = -1;
+
argc = parse_options_subcommand(argc, argv, kmem_options,
kmem_subcommands, kmem_usage, 0);
if (!argc)
usage_with_options(kmem_usage, kmem_options);
- symbol__init();
-
if (!strncmp(argv[0], "rec", 3)) {
+ symbol__init(NULL);
return __cmd_record(argc, argv);
- } else if (!strcmp(argv[0], "stat")) {
+ }
+
+ session = perf_session__new(&file, false, &perf_kmem);
+ if (session == NULL)
+ return -1;
+
+ symbol__init(&session->header.env);
+
+ if (!strcmp(argv[0], "stat")) {
if (cpu__setup_cpunode_map())
- return -1;
+ goto out_delete;
if (list_empty(&caller_sort))
setup_sorting(&caller_sort, default_sort_order);
if (list_empty(&alloc_sort))
setup_sorting(&alloc_sort, default_sort_order);
- return __cmd_kmem();
+ ret = __cmd_kmem(session);
} else
usage_with_options(kmem_usage, kmem_options);
- return 0;
+out_delete:
+ perf_session__delete(session);
+
+ return ret;
}
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 43367eb00510..d8bf2271f4ea 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -543,14 +543,12 @@ static void print_vcpu_info(struct perf_kvm_stat *kvm)
pr_info("Analyze events for ");
- if (kvm->live) {
- if (kvm->opts.target.system_wide)
- pr_info("all VMs, ");
- else if (kvm->opts.target.pid)
- pr_info("pid(s) %s, ", kvm->opts.target.pid);
- else
- pr_info("dazed and confused on what is monitored, ");
- }
+ if (kvm->opts.target.system_wide)
+ pr_info("all VMs, ");
+ else if (kvm->opts.target.pid)
+ pr_info("pid(s) %s, ", kvm->opts.target.pid);
+ else
+ pr_info("dazed and confused on what is monitored, ");
if (vcpu == -1)
pr_info("all VCPUs:\n\n");
@@ -592,8 +590,8 @@ static void print_result(struct perf_kvm_stat *kvm)
pr_info("%9s ", "Samples%");
pr_info("%9s ", "Time%");
- pr_info("%10s ", "Min Time");
- pr_info("%10s ", "Max Time");
+ pr_info("%11s ", "Min Time");
+ pr_info("%11s ", "Max Time");
pr_info("%16s ", "Avg time");
pr_info("\n\n");
@@ -610,8 +608,8 @@ static void print_result(struct perf_kvm_stat *kvm)
pr_info("%10llu ", (unsigned long long)ecount);
pr_info("%8.2f%% ", (double)ecount / kvm->total_count * 100);
pr_info("%8.2f%% ", (double)etime / kvm->total_time * 100);
- pr_info("%8" PRIu64 "us ", min / 1000);
- pr_info("%8" PRIu64 "us ", max / 1000);
+ pr_info("%9.2fus ", (double)min / 1e3);
+ pr_info("%9.2fus ", (double)max / 1e3);
pr_info("%9.2fus ( +-%7.2f%% )", (double)etime / ecount/1e3,
kvm_event_rel_stddev(vcpu, event));
pr_info("\n");
@@ -732,7 +730,7 @@ static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
return -1;
}
- err = perf_session_queue_event(kvm->session, event, &sample, 0);
+ err = perf_session_queue_event(kvm->session, event, &kvm->tool, &sample, 0);
/*
* FIXME: Here we can't consume the event, as perf_session_queue_event will
* point to it, and it'll get possibly overwritten by the kernel.
@@ -785,7 +783,7 @@ static int perf_kvm__mmap_read(struct perf_kvm_stat *kvm)
/* flush queue after each round in which we processed events */
if (ntotal) {
- kvm->session->ordered_samples.next_flush = flush_time;
+ kvm->session->ordered_events.next_flush = flush_time;
err = kvm->tool.finished_round(&kvm->tool, NULL, kvm->session);
if (err) {
if (kvm->lost_events)
@@ -885,15 +883,11 @@ static int fd_set_nonblock(int fd)
return 0;
}
-static
-int perf_kvm__handle_stdin(struct termios *tc_now, struct termios *tc_save)
+static int perf_kvm__handle_stdin(void)
{
int c;
- tcsetattr(0, TCSANOW, tc_now);
c = getc(stdin);
- tcsetattr(0, TCSAFLUSH, tc_save);
-
if (c == 'q')
return 1;
@@ -904,7 +898,7 @@ static int kvm_events_live_report(struct perf_kvm_stat *kvm)
{
struct pollfd *pollfds = NULL;
int nr_fds, nr_stdin, ret, err = -EINVAL;
- struct termios tc, save;
+ struct termios save;
/* live flag must be set first */
kvm->live = true;
@@ -919,26 +913,14 @@ static int kvm_events_live_report(struct perf_kvm_stat *kvm)
goto out;
}
+ set_term_quiet_input(&save);
init_kvm_event_record(kvm);
- tcgetattr(0, &save);
- tc = save;
- tc.c_lflag &= ~(ICANON | ECHO);
- tc.c_cc[VMIN] = 0;
- tc.c_cc[VTIME] = 0;
-
signal(SIGINT, sig_handler);
signal(SIGTERM, sig_handler);
- /* copy pollfds -- need to add timerfd and stdin */
- nr_fds = kvm->evlist->nr_fds;
- pollfds = zalloc(sizeof(struct pollfd) * (nr_fds + 2));
- if (!pollfds) {
- err = -ENOMEM;
- goto out;
- }
- memcpy(pollfds, kvm->evlist->pollfd,
- sizeof(struct pollfd) * kvm->evlist->nr_fds);
+ /* use pollfds -- need to add timerfd and stdin */
+ nr_fds = kvm->evlist->pollfd.nr;
/* add timer fd */
if (perf_kvm__timerfd_create(kvm) < 0) {
@@ -946,17 +928,21 @@ static int kvm_events_live_report(struct perf_kvm_stat *kvm)
goto out;
}
- pollfds[nr_fds].fd = kvm->timerfd;
- pollfds[nr_fds].events = POLLIN;
+ if (perf_evlist__add_pollfd(kvm->evlist, kvm->timerfd))
+ goto out;
+
nr_fds++;
- pollfds[nr_fds].fd = fileno(stdin);
- pollfds[nr_fds].events = POLLIN;
+ if (perf_evlist__add_pollfd(kvm->evlist, fileno(stdin)))
+ goto out;
+
nr_stdin = nr_fds;
nr_fds++;
if (fd_set_nonblock(fileno(stdin)) != 0)
goto out;
+ pollfds = kvm->evlist->pollfd.entries;
+
/* everything is good - enable the events and process */
perf_evlist__enable(kvm->evlist);
@@ -972,7 +958,7 @@ static int kvm_events_live_report(struct perf_kvm_stat *kvm)
goto out;
if (pollfds[nr_stdin].revents & POLLIN)
- done = perf_kvm__handle_stdin(&tc, &save);
+ done = perf_kvm__handle_stdin();
if (!rc && !done)
err = poll(pollfds, nr_fds, 100);
@@ -989,7 +975,7 @@ out:
if (kvm->timerfd >= 0)
close(kvm->timerfd);
- free(pollfds);
+ tcsetattr(0, TCSAFLUSH, &save);
return err;
}
@@ -998,6 +984,7 @@ static int kvm_live_open_events(struct perf_kvm_stat *kvm)
int err, rc = -1;
struct perf_evsel *pos;
struct perf_evlist *evlist = kvm->evlist;
+ char sbuf[STRERR_BUFSIZE];
perf_evlist__config(evlist, &kvm->opts);
@@ -1034,12 +1021,14 @@ static int kvm_live_open_events(struct perf_kvm_stat *kvm)
err = perf_evlist__open(evlist);
if (err < 0) {
- printf("Couldn't create the events: %s\n", strerror(errno));
+ printf("Couldn't create the events: %s\n",
+ strerror_r(errno, sbuf, sizeof(sbuf)));
goto out;
}
if (perf_evlist__mmap(evlist, kvm->opts.mmap_pages, false) < 0) {
- ui__error("Failed to mmap the events: %s\n", strerror(errno));
+ ui__error("Failed to mmap the events: %s\n",
+ strerror_r(errno, sbuf, sizeof(sbuf)));
perf_evlist__close(evlist);
goto out;
}
@@ -1058,7 +1047,7 @@ static int read_events(struct perf_kvm_stat *kvm)
struct perf_tool eops = {
.sample = process_sample_event,
.comm = perf_event__process_comm,
- .ordered_samples = true,
+ .ordered_events = true,
};
struct perf_data_file file = {
.path = kvm->file_name,
@@ -1069,9 +1058,11 @@ static int read_events(struct perf_kvm_stat *kvm)
kvm->session = perf_session__new(&file, false, &kvm->tool);
if (!kvm->session) {
pr_err("Initializing perf session failed\n");
- return -EINVAL;
+ return -1;
}
+ symbol__init(&kvm->session->header.env);
+
if (!perf_session__has_traces(kvm->session, "kvm record"))
return -EINVAL;
@@ -1088,8 +1079,8 @@ static int read_events(struct perf_kvm_stat *kvm)
static int parse_target_str(struct perf_kvm_stat *kvm)
{
- if (kvm->pid_str) {
- kvm->pid_list = intlist__new(kvm->pid_str);
+ if (kvm->opts.target.pid) {
+ kvm->pid_list = intlist__new(kvm->opts.target.pid);
if (kvm->pid_list == NULL) {
pr_err("Error parsing process id string\n");
return -EINVAL;
@@ -1191,7 +1182,7 @@ kvm_events_report(struct perf_kvm_stat *kvm, int argc, const char **argv)
OPT_STRING('k', "key", &kvm->sort_key, "sort-key",
"key for sorting: sample(sort by samples number)"
" time (sort by avg time)"),
- OPT_STRING('p', "pid", &kvm->pid_str, "pid",
+ OPT_STRING('p', "pid", &kvm->opts.target.pid, "pid",
"analyze events only for given process id(s)"),
OPT_END()
};
@@ -1201,8 +1192,6 @@ kvm_events_report(struct perf_kvm_stat *kvm, int argc, const char **argv)
NULL
};
- symbol__init();
-
if (argc) {
argc = parse_options(argc, argv,
kvm_events_report_options,
@@ -1212,6 +1201,9 @@ kvm_events_report(struct perf_kvm_stat *kvm, int argc, const char **argv)
kvm_events_report_options);
}
+ if (!kvm->opts.target.pid)
+ kvm->opts.target.system_wide = true;
+
return kvm_events_report_vcpu(kvm);
}
@@ -1311,7 +1303,7 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
kvm->tool.exit = perf_event__process_exit;
kvm->tool.fork = perf_event__process_fork;
kvm->tool.lost = process_lost_event;
- kvm->tool.ordered_samples = true;
+ kvm->tool.ordered_events = true;
perf_tool__fill_defaults(&kvm->tool);
/* set defaults */
@@ -1322,7 +1314,7 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
kvm->opts.target.uid_str = NULL;
kvm->opts.target.uid = UINT_MAX;
- symbol__init();
+ symbol__init(NULL);
disable_buildid_cache();
use_browser = 0;
@@ -1369,7 +1361,7 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
*/
kvm->session = perf_session__new(&file, false, &kvm->tool);
if (kvm->session == NULL) {
- err = -ENOMEM;
+ err = -1;
goto out;
}
kvm->session->evlist = kvm->evlist;
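
The removed tcgetattr()/tcsetattr() block is centralised in a set_term_quiet_input() helper (the same open-coded sequence disappears from builtin-top.c further down), and callers restore the terminal with tcsetattr(0, TCSAFLUSH, &save) on the way out. A self-contained sketch of what such a helper presumably does, reconstructed from the code it replaces:

#include <stdio.h>
#include <termios.h>

/* Reconstructed from the removed block above: canonical mode and echo off,
 * reads return immediately instead of waiting for a newline. */
static void quiet_input(struct termios *save)
{
	struct termios tc;

	tcgetattr(0, save);
	tc = *save;
	tc.c_lflag &= ~(ICANON | ECHO);
	tc.c_cc[VMIN] = 0;
	tc.c_cc[VTIME] = 0;
	tcsetattr(0, TCSANOW, &tc);
}

int main(void)
{
	struct termios save;
	int c;

	quiet_input(&save);
	c = getc(stdin);			/* unbuffered, unechoed; EOF if no key yet */
	tcsetattr(0, TCSAFLUSH, &save);		/* restore, as the callers above do */
	return c == 'q';
}
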
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index 6148afc995c6..e7ec71589da6 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -852,7 +852,7 @@ static int __cmd_report(bool display_info)
struct perf_tool eops = {
.sample = process_sample_event,
.comm = perf_event__process_comm,
- .ordered_samples = true,
+ .ordered_events = true,
};
struct perf_data_file file = {
.path = input_name,
@@ -862,9 +862,11 @@ static int __cmd_report(bool display_info)
session = perf_session__new(&file, false, &eops);
if (!session) {
pr_err("Initializing perf session failed\n");
- return -ENOMEM;
+ return -1;
}
+ symbol__init(&session->header.env);
+
if (!perf_session__has_traces(session, "lock record"))
goto out_delete;
@@ -974,7 +976,6 @@ int cmd_lock(int argc, const char **argv, const char *prefix __maybe_unused)
unsigned int i;
int rc = 0;
- symbol__init();
for (i = 0; i < LOCKHASH_SIZE; i++)
INIT_LIST_HEAD(lockhash_table + i);
diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c
index 4a1a6c94a5eb..24db6ffe2957 100644
--- a/tools/perf/builtin-mem.c
+++ b/tools/perf/builtin-mem.c
@@ -124,7 +124,7 @@ static int report_raw_events(struct perf_mem *mem)
&mem->tool);
if (session == NULL)
- return -ENOMEM;
+ return -1;
if (mem->cpu_list) {
ret = perf_session__cpu_bitmap(session, mem->cpu_list,
@@ -133,7 +133,7 @@ static int report_raw_events(struct perf_mem *mem)
goto out_delete;
}
- if (symbol__init() < 0)
+ if (symbol__init(&session->header.env) < 0)
return -1;
printf("# PID, TID, IP, ADDR, LOCAL WEIGHT, DSRC, SYMBOL\n");
@@ -194,7 +194,7 @@ int cmd_mem(int argc, const char **argv, const char *prefix __maybe_unused)
.lost = perf_event__process_lost,
.fork = perf_event__process_fork,
.build_id = perf_event__process_build_id,
- .ordered_samples = true,
+ .ordered_events = true,
},
.input_name = "perf.data",
};
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index c63fa2925075..04412b4770a2 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -290,8 +290,11 @@ static void cleanup_params(void)
static void pr_err_with_code(const char *msg, int err)
{
+ char sbuf[STRERR_BUFSIZE];
+
pr_err("%s", msg);
- pr_debug(" Reason: %s (Code: %d)", strerror(-err), err);
+ pr_debug(" Reason: %s (Code: %d)",
+ strerror_r(-err, sbuf, sizeof(sbuf)), err);
pr_err("\n");
}
@@ -373,6 +376,8 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
"target executable name or path", opt_set_target),
OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle,
"Disable symbol demangling"),
+ OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
+ "Enable kernel symbol demangling"),
OPT_END()
};
int ret;
@@ -467,7 +472,8 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
usage_with_options(probe_usage, options);
}
- ret = show_line_range(&params.line_range, params.target);
+ ret = show_line_range(&params.line_range, params.target,
+ params.uprobes);
if (ret < 0)
pr_err_with_code(" Error: Failed to show lines.", ret);
return ret;
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 4869050e7194..44c6f3d55ce7 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -65,8 +65,9 @@ static int process_synthesized_event(struct perf_tool *tool,
return record__write(rec, event, event->header.size);
}
-static int record__mmap_read(struct record *rec, struct perf_mmap *md)
+static int record__mmap_read(struct record *rec, int idx)
{
+ struct perf_mmap *md = &rec->evlist->mmap[idx];
unsigned int head = perf_mmap__read_head(md);
unsigned int old = md->prev;
unsigned char *data = md->base + page_size;
@@ -102,8 +103,7 @@ static int record__mmap_read(struct record *rec, struct perf_mmap *md)
}
md->prev = old;
- perf_mmap__write_tail(md, old);
-
+ perf_evlist__mmap_consume(rec->evlist, idx);
out:
return rc;
}
@@ -161,7 +161,7 @@ try_again:
if (perf_evlist__apply_filters(evlist)) {
error("failed to set filter with %d (%s)\n", errno,
- strerror(errno));
+ strerror_r(errno, msg, sizeof(msg)));
rc = -1;
goto out;
}
@@ -175,7 +175,8 @@ try_again:
"(current value: %u)\n", opts->mmap_pages);
rc = -errno;
} else {
- pr_err("failed to mmap with %d (%s)\n", errno, strerror(errno));
+ pr_err("failed to mmap with %d (%s)\n", errno,
+ strerror_r(errno, msg, sizeof(msg)));
rc = -errno;
}
goto out;
@@ -244,7 +245,7 @@ static int record__mmap_read_all(struct record *rec)
for (i = 0; i < rec->evlist->nr_mmaps; i++) {
if (rec->evlist->mmap[i].base) {
- if (record__mmap_read(rec, &rec->evlist->mmap[i]) != 0) {
+ if (record__mmap_read(rec, i) != 0) {
rc = -1;
goto out;
}
@@ -307,7 +308,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
struct record_opts *opts = &rec->opts;
struct perf_data_file *file = &rec->file;
struct perf_session *session;
- bool disabled = false;
+ bool disabled = false, draining = false;
rec->progname = argv[0];
@@ -456,9 +457,9 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
}
if (hits == rec->samples) {
- if (done)
+ if (done || draining)
break;
- err = poll(rec->evlist->pollfd, rec->evlist->nr_fds, -1);
+ err = perf_evlist__poll(rec->evlist, -1);
/*
* Propagate error, only if there's any. Ignore positive
* number of returned events and interrupt error.
@@ -466,6 +467,9 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
if (err > 0 || (err < 0 && errno == EINTR))
err = 0;
waking++;
+
+ if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
+ draining = true;
}
/*
@@ -480,7 +484,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
}
if (forks && workload_exec_errno) {
- char msg[512];
+ char msg[STRERR_BUFSIZE];
const char *emsg = strerror_r(workload_exec_errno, msg, sizeof(msg));
pr_err("Workload failed: %s\n", emsg);
err = -1;
@@ -620,145 +624,56 @@ error:
return ret;
}
-#ifdef HAVE_DWARF_UNWIND_SUPPORT
-static int get_stack_size(char *str, unsigned long *_size)
-{
- char *endptr;
- unsigned long size;
- unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));
-
- size = strtoul(str, &endptr, 0);
-
- do {
- if (*endptr)
- break;
-
- size = round_up(size, sizeof(u64));
- if (!size || size > max_size)
- break;
-
- *_size = size;
- return 0;
-
- } while (0);
-
- pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
- max_size, str);
- return -1;
-}
-#endif /* HAVE_DWARF_UNWIND_SUPPORT */
-
-int record_parse_callchain(const char *arg, struct record_opts *opts)
-{
- char *tok, *name, *saveptr = NULL;
- char *buf;
- int ret = -1;
-
- /* We need buffer that we know we can write to. */
- buf = malloc(strlen(arg) + 1);
- if (!buf)
- return -ENOMEM;
-
- strcpy(buf, arg);
-
- tok = strtok_r((char *)buf, ",", &saveptr);
- name = tok ? : (char *)buf;
-
- do {
- /* Framepointer style */
- if (!strncmp(name, "fp", sizeof("fp"))) {
- if (!strtok_r(NULL, ",", &saveptr)) {
- opts->call_graph = CALLCHAIN_FP;
- ret = 0;
- } else
- pr_err("callchain: No more arguments "
- "needed for -g fp\n");
- break;
-
-#ifdef HAVE_DWARF_UNWIND_SUPPORT
- /* Dwarf style */
- } else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
- const unsigned long default_stack_dump_size = 8192;
-
- ret = 0;
- opts->call_graph = CALLCHAIN_DWARF;
- opts->stack_dump_size = default_stack_dump_size;
-
- tok = strtok_r(NULL, ",", &saveptr);
- if (tok) {
- unsigned long size = 0;
-
- ret = get_stack_size(tok, &size);
- opts->stack_dump_size = size;
- }
-#endif /* HAVE_DWARF_UNWIND_SUPPORT */
- } else {
- pr_err("callchain: Unknown --call-graph option "
- "value: %s\n", arg);
- break;
- }
-
- } while (0);
-
- free(buf);
- return ret;
-}
-
-static void callchain_debug(struct record_opts *opts)
+static void callchain_debug(void)
{
static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF" };
- pr_debug("callchain: type %s\n", str[opts->call_graph]);
+ pr_debug("callchain: type %s\n", str[callchain_param.record_mode]);
- if (opts->call_graph == CALLCHAIN_DWARF)
+ if (callchain_param.record_mode == CALLCHAIN_DWARF)
pr_debug("callchain: stack dump size %d\n",
- opts->stack_dump_size);
+ callchain_param.dump_size);
}
-int record_parse_callchain_opt(const struct option *opt,
+int record_parse_callchain_opt(const struct option *opt __maybe_unused,
const char *arg,
int unset)
{
- struct record_opts *opts = opt->value;
int ret;
- opts->call_graph_enabled = !unset;
+ callchain_param.enabled = !unset;
/* --no-call-graph */
if (unset) {
- opts->call_graph = CALLCHAIN_NONE;
+ callchain_param.record_mode = CALLCHAIN_NONE;
pr_debug("callchain: disabled\n");
return 0;
}
- ret = record_parse_callchain(arg, opts);
+ ret = parse_callchain_record_opt(arg);
if (!ret)
- callchain_debug(opts);
+ callchain_debug();
return ret;
}
-int record_callchain_opt(const struct option *opt,
+int record_callchain_opt(const struct option *opt __maybe_unused,
const char *arg __maybe_unused,
int unset __maybe_unused)
{
- struct record_opts *opts = opt->value;
+ callchain_param.enabled = true;
- opts->call_graph_enabled = !unset;
+ if (callchain_param.record_mode == CALLCHAIN_NONE)
+ callchain_param.record_mode = CALLCHAIN_FP;
- if (opts->call_graph == CALLCHAIN_NONE)
- opts->call_graph = CALLCHAIN_FP;
-
- callchain_debug(opts);
+ callchain_debug();
return 0;
}
static int perf_record_config(const char *var, const char *value, void *cb)
{
- struct record *rec = cb;
-
if (!strcmp(var, "record.call-graph"))
- return record_parse_callchain(value, &rec->opts);
+ var = "call-graph.record-mode"; /* fall-through */
return perf_default_config(var, value, cb);
}
@@ -781,6 +696,7 @@ static const char * const record_usage[] = {
*/
static struct record record = {
.opts = {
+ .sample_time = true,
.mmap_pages = UINT_MAX,
.user_freq = UINT_MAX,
.user_interval = ULLONG_MAX,
@@ -907,7 +823,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
usage_with_options(record_usage, record_options);
}
- symbol__init();
+ symbol__init(NULL);
if (symbol_conf.kptr_restrict)
pr_warning(
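
record.call-graph handling moves into the shared callchain_param and parse_callchain_record_opt() code, and perf_record_config() shrinks to a key rename followed by delegation to the default handler. The idiom in isolation; default_config() and record_config() are hypothetical stand-ins for perf's config callbacks:

#include <stdio.h>
#include <string.h>

static int default_config(const char *var, const char *value)
{
	printf("handling %s = %s\n", var, value);
	return 0;
}

static int record_config(const char *var, const char *value)
{
	if (!strcmp(var, "record.call-graph"))
		var = "call-graph.record-mode";	/* fall through to shared handling */

	return default_config(var, value);
}

int main(void)
{
	return record_config("record.call-graph", "dwarf");
}
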
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 21d830bafff3..ac145fae0521 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -58,17 +58,19 @@ struct report {
const char *symbol_filter_str;
float min_percent;
u64 nr_entries;
+ u64 queue_size;
DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
};
static int report__config(const char *var, const char *value, void *cb)
{
+ struct report *rep = cb;
+
if (!strcmp(var, "report.group")) {
symbol_conf.event_group = perf_config_bool(var, value);
return 0;
}
if (!strcmp(var, "report.percent-limit")) {
- struct report *rep = cb;
rep->min_percent = strtof(value, NULL);
return 0;
}
@@ -76,6 +78,10 @@ static int report__config(const char *var, const char *value, void *cb)
symbol_conf.cumulate_callchain = perf_config_bool(var, value);
return 0;
}
+ if (!strcmp(var, "report.queue-size")) {
+ rep->queue_size = perf_config_u64(var, value);
+ return 0;
+ }
return perf_default_config(var, value, cb);
}
@@ -578,7 +584,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
.attr = perf_event__process_attr,
.tracing_data = perf_event__process_tracing_data,
.build_id = perf_event__process_build_id,
- .ordered_samples = true,
+ .ordered_events = true,
.ordering_requires_timestamps = true,
},
.max_stack = PERF_MAX_STACK_DEPTH,
@@ -674,6 +680,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
"objdump binary to use for disassembly and annotations"),
OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle,
"Disable symbol demangling"),
+ OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
+ "Enable kernel symbol demangling"),
OPT_BOOLEAN(0, "mem-mode", &report.mem_mode, "mem access profile"),
OPT_CALLBACK(0, "percent-limit", &report, "percent",
"Don't show entries under that percent", parse_percent_limit),
@@ -712,14 +720,19 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
repeat:
session = perf_session__new(&file, false, &report.tool);
if (session == NULL)
- return -ENOMEM;
+ return -1;
+
+ if (report.queue_size) {
+ ordered_events__set_alloc_size(&session->ordered_events,
+ report.queue_size);
+ }
report.session = session;
has_br_stack = perf_header__has_feat(&session->header,
HEADER_BRANCH_STACK);
- if (branch_mode == -1 && has_br_stack) {
+ if ((branch_mode == -1 && has_br_stack) || branch_mode == 1) {
sort__mode = SORT_MODE__BRANCH;
symbol_conf.cumulate_callchain = false;
}
@@ -787,7 +800,7 @@ repeat:
}
}
- if (symbol__init() < 0)
+ if (symbol__init(&session->header.env) < 0)
goto error;
if (argc) {
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index f83c08c0dd87..9c9287fbf8e9 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -428,6 +428,7 @@ static u64 get_cpu_usage_nsec_parent(void)
static int self_open_counters(void)
{
struct perf_event_attr attr;
+ char sbuf[STRERR_BUFSIZE];
int fd;
memset(&attr, 0, sizeof(attr));
@@ -440,7 +441,8 @@ static int self_open_counters(void)
if (fd < 0)
pr_err("Error: sys_perf_event_open() syscall returned "
- "with %d (%s)\n", fd, strerror(errno));
+ "with %d (%s)\n", fd,
+ strerror_r(errno, sbuf, sizeof(sbuf)));
return fd;
}
@@ -1462,6 +1464,8 @@ static int perf_sched__read_events(struct perf_sched *sched,
return -1;
}
+ symbol__init(&session->header.env);
+
if (perf_session__set_tracepoints_handlers(session, handlers))
goto out_delete;
@@ -1662,7 +1666,7 @@ int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
.comm = perf_event__process_comm,
.lost = perf_event__process_lost,
.fork = perf_sched__process_fork_event,
- .ordered_samples = true,
+ .ordered_events = true,
},
.cmp_pid = LIST_HEAD_INIT(sched.cmp_pid),
.sort_list = LIST_HEAD_INIT(sched.sort_list),
@@ -1747,7 +1751,6 @@ int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
if (!strcmp(argv[0], "script"))
return cmd_script(argc, argv, prefix);
- symbol__init();
if (!strncmp(argv[0], "rec", 3)) {
return __cmd_record(argc, argv);
} else if (!strncmp(argv[0], "lat", 3)) {
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index f57035b89c15..b9b9e58a6c39 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -184,10 +184,6 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel,
if (perf_evsel__check_stype(evsel, PERF_SAMPLE_IP, "IP",
PERF_OUTPUT_IP))
return -EINVAL;
-
- if (!no_callchain &&
- !(attr->sample_type & PERF_SAMPLE_CALLCHAIN))
- symbol_conf.use_callchain = false;
}
if (PRINT_FIELD(ADDR) &&
@@ -290,6 +286,19 @@ static int perf_session__check_output_opt(struct perf_session *session)
set_print_ip_opts(&evsel->attr);
}
+ if (!no_callchain) {
+ bool use_callchain = false;
+
+ evlist__for_each(session->evlist, evsel) {
+ if (evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
+ use_callchain = true;
+ break;
+ }
+ }
+ if (!use_callchain)
+ symbol_conf.use_callchain = false;
+ }
+
/*
* set default for tracepoints to print symbols only
* if callchains are present
@@ -476,6 +485,11 @@ static int default_start_script(const char *script __maybe_unused,
return 0;
}
+static int default_flush_script(void)
+{
+ return 0;
+}
+
static int default_stop_script(void)
{
return 0;
@@ -489,6 +503,7 @@ static int default_generate_script(struct pevent *pevent __maybe_unused,
static struct scripting_ops default_scripting_ops = {
.start_script = default_start_script,
+ .flush_script = default_flush_script,
.stop_script = default_stop_script,
.process_event = process_event,
.generate_script = default_generate_script,
@@ -504,6 +519,11 @@ static void setup_scripting(void)
scripting_ops = &default_scripting_ops;
}
+static int flush_scripting(void)
+{
+ return scripting_ops->flush_script();
+}
+
static int cleanup_scripting(void)
{
pr_debug("\nperf script stopped\n");
@@ -1471,12 +1491,13 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
bool show_full_info = false;
bool header = false;
bool header_only = false;
+ bool script_started = false;
char *rec_script_path = NULL;
char *rep_script_path = NULL;
struct perf_session *session;
char *script_path = NULL;
const char **__argv;
- int i, j, err;
+ int i, j, err = 0;
struct perf_script script = {
.tool = {
.sample = process_sample_event,
@@ -1488,7 +1509,7 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
.attr = process_attr,
.tracing_data = perf_event__process_tracing_data,
.build_id = perf_event__process_build_id,
- .ordered_samples = true,
+ .ordered_events = true,
.ordering_requires_timestamps = true,
},
};
@@ -1718,26 +1739,28 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
exit(-1);
}
- if (symbol__init() < 0)
- return -1;
if (!script_name)
setup_pager();
session = perf_session__new(&file, false, &script.tool);
if (session == NULL)
- return -ENOMEM;
+ return -1;
if (header || header_only) {
perf_session__fprintf_info(session, stdout, show_full_info);
if (header_only)
- return 0;
+ goto out_delete;
}
+ if (symbol__init(&session->header.env) < 0)
+ goto out_delete;
+
script.session = session;
if (cpu_list) {
- if (perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap))
- return -1;
+ err = perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap);
+ if (err < 0)
+ goto out_delete;
}
if (!no_callchain)
@@ -1752,53 +1775,62 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
if (output_set_by_user()) {
fprintf(stderr,
"custom fields not supported for generated scripts");
- return -1;
+ err = -EINVAL;
+ goto out_delete;
}
input = open(file.path, O_RDONLY); /* input_name */
if (input < 0) {
+ err = -errno;
perror("failed to open file");
- return -1;
+ goto out_delete;
}
err = fstat(input, &perf_stat);
if (err < 0) {
perror("failed to stat file");
- return -1;
+ goto out_delete;
}
if (!perf_stat.st_size) {
fprintf(stderr, "zero-sized file, nothing to do!\n");
- return 0;
+ goto out_delete;
}
scripting_ops = script_spec__lookup(generate_script_lang);
if (!scripting_ops) {
fprintf(stderr, "invalid language specifier");
- return -1;
+ err = -ENOENT;
+ goto out_delete;
}
err = scripting_ops->generate_script(session->tevent.pevent,
"perf-script");
- goto out;
+ goto out_delete;
}
if (script_name) {
err = scripting_ops->start_script(script_name, argc, argv);
if (err)
- goto out;
+ goto out_delete;
pr_debug("perf script started with script %s\n\n", script_name);
+ script_started = true;
}
err = perf_session__check_output_opt(session);
if (err < 0)
- goto out;
+ goto out_delete;
err = __cmd_script(&script);
+ flush_scripting();
+
+out_delete:
perf_session__delete(session);
- cleanup_scripting();
+
+ if (script_started)
+ cleanup_scripting();
out:
return err;
}
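
A flush_script hook joins scripting_ops, with a do-nothing default so flush_scripting() can be called unconditionally after __cmd_script(). A hypothetical miniature of that ops table, not perf's actual struct layout:

#include <stdio.h>

struct scripting_ops {
	int (*start_script)(const char *name);
	int (*flush_script)(void);
	int (*stop_script)(void);
};

static int default_start(const char *name) { printf("start %s\n", name); return 0; }
static int default_flush(void) { return 0; }	/* nothing buffered to flush */
static int default_stop(void)  { return 0; }

static struct scripting_ops default_ops = {
	.start_script = default_start,
	.flush_script = default_flush,
	.stop_script  = default_stop,
};

int main(void)
{
	struct scripting_ops *ops = &default_ops;

	ops->start_script("example");
	ops->flush_script();	/* safe even when the backend buffers nothing */
	return ops->stop_script();
}
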
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 3e80aa10cfd8..b22c62f80078 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -593,7 +593,7 @@ static int __run_perf_stat(int argc, const char **argv)
if (perf_evlist__apply_filters(evsel_list)) {
error("failed to set filter with %d (%s)\n", errno,
- strerror(errno));
+ strerror_r(errno, msg, sizeof(msg)));
return -1;
}
@@ -732,7 +732,7 @@ static void aggr_printout(struct perf_evsel *evsel, int id, int nr)
}
}
-static void nsec_printout(int cpu, int nr, struct perf_evsel *evsel, double avg)
+static void nsec_printout(int id, int nr, struct perf_evsel *evsel, double avg)
{
double msecs = avg / 1e6;
const char *fmt_v, *fmt_n;
@@ -741,7 +741,7 @@ static void nsec_printout(int cpu, int nr, struct perf_evsel *evsel, double avg)
fmt_v = csv_output ? "%.6f%s" : "%18.6f%s";
fmt_n = csv_output ? "%s" : "%-25s";
- aggr_printout(evsel, cpu, nr);
+ aggr_printout(evsel, id, nr);
scnprintf(name, sizeof(name), "%s%s",
perf_evsel__name(evsel), csv_output ? "" : " (msec)");
@@ -947,11 +947,12 @@ static void print_ll_cache_misses(int cpu,
fprintf(output, " of all LL-cache hits ");
}
-static void abs_printout(int cpu, int nr, struct perf_evsel *evsel, double avg)
+static void abs_printout(int id, int nr, struct perf_evsel *evsel, double avg)
{
double total, ratio = 0.0, total2;
double sc = evsel->scale;
const char *fmt;
+ int cpu = cpu_map__id_to_cpu(id);
if (csv_output) {
fmt = sc != 1.0 ? "%.2f%s" : "%.0f%s";
@@ -962,7 +963,7 @@ static void abs_printout(int cpu, int nr, struct perf_evsel *evsel, double avg)
fmt = sc != 1.0 ? "%18.2f%s" : "%18.0f%s";
}
- aggr_printout(evsel, cpu, nr);
+ aggr_printout(evsel, id, nr);
if (aggr_mode == AGGR_GLOBAL)
cpu = 0;
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 2f1a5220c090..35b425b6293f 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -1605,7 +1605,9 @@ static int __cmd_timechart(struct timechart *tchart, const char *output_name)
int ret = -EINVAL;
if (session == NULL)
- return -ENOMEM;
+ return -1;
+
+ symbol__init(&session->header.env);
(void)perf_header__process_sections(&session->header,
perf_data_file__fd(session->file),
@@ -1920,7 +1922,7 @@ int cmd_timechart(int argc, const char **argv,
.fork = process_fork_event,
.exit = process_exit_event,
.sample = process_sample_event,
- .ordered_samples = true,
+ .ordered_events = true,
},
.proc_num = 15,
.min_time = 1000000,
@@ -1982,8 +1984,6 @@ int cmd_timechart(int argc, const char **argv,
return -1;
}
- symbol__init();
-
if (argc && !strncmp(argv[0], "rec", 3)) {
argc = parse_options(argc, argv, record_options, record_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 377971dc89a3..fc3d55f832ac 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -59,7 +59,7 @@
#include <sys/syscall.h>
#include <sys/ioctl.h>
-#include <sys/poll.h>
+#include <poll.h>
#include <sys/prctl.h>
#include <sys/wait.h>
#include <sys/uio.h>
@@ -276,11 +276,17 @@ static void perf_top__print_sym_table(struct perf_top *top)
return;
}
+ if (top->zero) {
+ hists__delete_entries(&top->sym_evsel->hists);
+ } else {
+ hists__decay_entries(&top->sym_evsel->hists,
+ top->hide_user_symbols,
+ top->hide_kernel_symbols);
+ }
+
hists__collapse_resort(&top->sym_evsel->hists, NULL);
hists__output_resort(&top->sym_evsel->hists);
- hists__decay_entries(&top->sym_evsel->hists,
- top->hide_user_symbols,
- top->hide_kernel_symbols);
+
hists__output_recalc_col_len(&top->sym_evsel->hists,
top->print_entries - printed);
putchar('\n');
@@ -427,18 +433,13 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c)
if (!perf_top__key_mapped(top, c)) {
struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
- struct termios tc, save;
+ struct termios save;
perf_top__print_mapped_keys(top);
fprintf(stdout, "\nEnter selection, or unmapped key to continue: ");
fflush(stdout);
- tcgetattr(0, &save);
- tc = save;
- tc.c_lflag &= ~(ICANON | ECHO);
- tc.c_cc[VMIN] = 0;
- tc.c_cc[VTIME] = 0;
- tcsetattr(0, TCSANOW, &tc);
+ set_term_quiet_input(&save);
poll(&stdin_poll, 1, -1);
c = getc(stdin);
@@ -542,11 +543,16 @@ static void perf_top__sort_new_samples(void *arg)
if (t->evlist->selected != NULL)
t->sym_evsel = t->evlist->selected;
+ if (t->zero) {
+ hists__delete_entries(&t->sym_evsel->hists);
+ } else {
+ hists__decay_entries(&t->sym_evsel->hists,
+ t->hide_user_symbols,
+ t->hide_kernel_symbols);
+ }
+
hists__collapse_resort(&t->sym_evsel->hists, NULL);
hists__output_resort(&t->sym_evsel->hists);
- hists__decay_entries(&t->sym_evsel->hists,
- t->hide_user_symbols,
- t->hide_kernel_symbols);
}
static void *display_thread_tui(void *arg)
@@ -577,23 +583,32 @@ static void *display_thread_tui(void *arg)
return NULL;
}
+static void display_sig(int sig __maybe_unused)
+{
+ done = 1;
+}
+
+static void display_setup_sig(void)
+{
+ signal(SIGSEGV, display_sig);
+ signal(SIGFPE, display_sig);
+ signal(SIGINT, display_sig);
+ signal(SIGQUIT, display_sig);
+ signal(SIGTERM, display_sig);
+}
+
static void *display_thread(void *arg)
{
struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
- struct termios tc, save;
+ struct termios save;
struct perf_top *top = arg;
int delay_msecs, c;
- tcgetattr(0, &save);
- tc = save;
- tc.c_lflag &= ~(ICANON | ECHO);
- tc.c_cc[VMIN] = 0;
- tc.c_cc[VTIME] = 0;
-
+ display_setup_sig();
pthread__unblock_sigwinch();
repeat:
delay_msecs = top->delay_secs * 1000;
- tcsetattr(0, TCSANOW, &tc);
+ set_term_quiet_input(&save);
/* trash return*/
getc(stdin);
@@ -620,13 +635,16 @@ repeat:
}
}
+ tcsetattr(0, TCSAFLUSH, &save);
return NULL;
}
-static int symbol_filter(struct map *map __maybe_unused, struct symbol *sym)
+static int symbol_filter(struct map *map, struct symbol *sym)
{
const char *name = sym->name;
+ if (!map->dso->kernel)
+ return 0;
/*
* ppc64 uses function descriptors and appends a '.' to the
* start of every instruction address. Remove it.
@@ -876,7 +894,7 @@ try_again:
if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
ui__error("Failed to mmap with %d (%s)\n",
- errno, strerror(errno));
+ errno, strerror_r(errno, msg, sizeof(msg)));
goto out_err;
}
@@ -911,7 +929,7 @@ static int __cmd_top(struct perf_top *top)
top->session = perf_session__new(NULL, false, NULL);
if (top->session == NULL)
- return -ENOMEM;
+ return -1;
machines__set_symbol_filter(&top->session->machines, symbol_filter);
@@ -946,7 +964,7 @@ static int __cmd_top(struct perf_top *top)
perf_evlist__enable(top->evlist);
/* Wait for a minimal set of events before starting the snapshot */
- poll(top->evlist->pollfd, top->evlist->nr_fds, 100);
+ perf_evlist__poll(top->evlist, 100);
perf_top__mmap_read(top);
@@ -963,7 +981,7 @@ static int __cmd_top(struct perf_top *top)
param.sched_priority = top->realtime_prio;
if (sched_setscheduler(0, SCHED_FIFO, &param)) {
ui__error("Could not set realtime priority.\n");
- goto out_delete;
+ goto out_join;
}
}
@@ -973,10 +991,12 @@ static int __cmd_top(struct perf_top *top)
perf_top__mmap_read(top);
if (hits == top->samples)
- ret = poll(top->evlist->pollfd, top->evlist->nr_fds, 100);
+ ret = perf_evlist__poll(top->evlist, 100);
}
ret = 0;
+out_join:
+ pthread_join(thread, NULL);
out_delete:
perf_session__delete(top->session);
top->session = NULL;
@@ -1000,10 +1020,8 @@ parse_callchain_opt(const struct option *opt, const char *arg, int unset)
static int perf_top_config(const char *var, const char *value, void *cb)
{
- struct perf_top *top = cb;
-
if (!strcmp(var, "top.call-graph"))
- return record_parse_callchain(value, &top->record_opts);
+ var = "call-graph.record-mode"; /* fall-through */
if (!strcmp(var, "top.children")) {
symbol_conf.cumulate_callchain = perf_config_bool(var, value);
return 0;
@@ -1122,6 +1140,8 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
"Interleave source code with assembly code (default)"),
OPT_BOOLEAN(0, "asm-raw", &symbol_conf.annotate_asm_raw,
"Display raw encoding of assembly instructions (default)"),
+ OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
+ "Enable kernel symbol demangling"),
OPT_STRING(0, "objdump", &objdump_path, "path",
"objdump binary to use for disassembly and annotations"),
OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
@@ -1131,6 +1151,9 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
"Don't show entries under that percent", parse_percent_limit),
OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
"How to display percentage of filtered entries", parse_filter_percentage),
+ OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str,
+ "width[,width...]",
+ "don't try to adjust column width, use these fixed values"),
OPT_END()
};
const char * const top_usage[] = {
@@ -1217,7 +1240,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
symbol_conf.priv_size = sizeof(struct annotation);
symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
- if (symbol__init() < 0)
+ if (symbol__init(NULL) < 0)
return -1;
sort__setup_elide(stdout);
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index a6c375224f46..09bcf2393910 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -402,6 +402,31 @@ static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size,
#define SCA_MMAP_FLAGS syscall_arg__scnprintf_mmap_flags
+static size_t syscall_arg__scnprintf_mremap_flags(char *bf, size_t size,
+ struct syscall_arg *arg)
+{
+ int printed = 0, flags = arg->val;
+
+#define P_MREMAP_FLAG(n) \
+ if (flags & MREMAP_##n) { \
+ printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+ flags &= ~MREMAP_##n; \
+ }
+
+ P_MREMAP_FLAG(MAYMOVE);
+#ifdef MREMAP_FIXED
+ P_MREMAP_FLAG(FIXED);
+#endif
+#undef P_MREMAP_FLAG
+
+ if (flags)
+ printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
+
+ return printed;
+}
+
+#define SCA_MREMAP_FLAGS syscall_arg__scnprintf_mremap_flags
+
static size_t syscall_arg__scnprintf_madvise_behavior(char *bf, size_t size,
struct syscall_arg *arg)
{
@@ -1004,6 +1029,7 @@ static struct syscall_fmt {
[2] = SCA_MMAP_PROT, /* prot */ }, },
{ .name = "mremap", .hexret = true,
.arg_scnprintf = { [0] = SCA_HEX, /* addr */
+ [3] = SCA_MREMAP_FLAGS, /* flags */
[4] = SCA_HEX, /* new_addr */ }, },
{ .name = "munlock", .errmsg = true,
.arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
@@ -1385,7 +1411,7 @@ static int trace__tool_process(struct perf_tool *tool,
static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
{
- int err = symbol__init();
+ int err = symbol__init(NULL);
if (err)
return err;
@@ -1669,7 +1695,7 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
union perf_event *event __maybe_unused,
struct perf_sample *sample)
{
- int ret;
+ long ret;
u64 duration = 0;
struct thread *thread;
int id = perf_evsel__sc_tp_uint(evsel, id, sample);
@@ -1722,9 +1748,9 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
if (sc->fmt == NULL) {
signed_print:
- fprintf(trace->output, ") = %d", ret);
+ fprintf(trace->output, ") = %ld", ret);
} else if (ret < 0 && sc->fmt->errmsg) {
- char bf[256];
+ char bf[STRERR_BUFSIZE];
const char *emsg = strerror_r(-ret, bf, sizeof(bf)),
*e = audit_errno_to_name(-ret);
@@ -1732,7 +1758,7 @@ signed_print:
} else if (ret == 0 && sc->fmt->timeout)
fprintf(trace->output, ") = 0 Timeout");
else if (sc->fmt->hexret)
- fprintf(trace->output, ") = %#x", ret);
+ fprintf(trace->output, ") = %#lx", ret);
else
goto signed_print;
@@ -2018,6 +2044,8 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
int err = -1, i;
unsigned long before;
const bool forks = argc > 0;
+ bool draining = false;
+ char sbuf[STRERR_BUFSIZE];
trace->live = true;
@@ -2079,7 +2107,8 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
err = perf_evlist__mmap(evlist, trace->opts.mmap_pages, false);
if (err < 0) {
- fprintf(trace->output, "Couldn't mmap the events: %s\n", strerror(errno));
+ fprintf(trace->output, "Couldn't mmap the events: %s\n",
+ strerror_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist;
}
@@ -2143,8 +2172,12 @@ next_event:
if (trace->nr_events == before) {
int timeout = done ? 100 : -1;
- if (poll(evlist->pollfd, evlist->nr_fds, timeout) > 0)
+ if (!draining && perf_evlist__poll(evlist, timeout) > 0) {
+ if (perf_evlist__filter_pollfd(evlist, POLLERR | POLLHUP) == 0)
+ draining = true;
+
goto again;
+ }
} else {
goto again;
}
@@ -2209,18 +2242,18 @@ static int trace__replay(struct trace *trace)
trace->tool.tracing_data = perf_event__process_tracing_data;
trace->tool.build_id = perf_event__process_build_id;
- trace->tool.ordered_samples = true;
+ trace->tool.ordered_events = true;
trace->tool.ordering_requires_timestamps = true;
/* add tid to output */
trace->multiple_threads = true;
- if (symbol__init() < 0)
- return -1;
-
session = perf_session__new(&file, false, &trace->tool);
if (session == NULL)
- return -ENOMEM;
+ return -1;
+
+ if (symbol__init(&session->header.env) < 0)
+ goto out;
trace->host = &session->machines.host;
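
Both builtin-record.c and builtin-trace.c gain a "draining" flag: once perf_evlist__filter_pollfd() reports that every mapped fd has signalled POLLERR or POLLHUP, the loop stops waiting for new events and only consumes what is already buffered. The same idea with plain poll(2), as a self-contained sketch that does not use perf's evlist wrappers:

#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int pipefd[2];
	struct pollfd fds[1];
	int draining = 0;

	if (pipe(pipefd))
		return 1;
	close(pipefd[1]);			/* the writer goes away immediately */

	fds[0].fd = pipefd[0];
	fds[0].events = POLLIN;

	while (!draining) {
		if (poll(fds, 1, 100) <= 0)
			continue;
		if (fds[0].revents & (POLLERR | POLLHUP)) {
			printf("all fds hung up, draining\n");
			draining = 1;		/* one last pass over the buffers */
		}
	}
	return 0;
}
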
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index 1f67aa02d240..58f609198c6d 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -48,10 +48,6 @@ ifneq ($(ARCH),$(filter $(ARCH),x86 arm))
NO_LIBDW_DWARF_UNWIND := 1
endif
-ifeq ($(ARCH),powerpc)
- CFLAGS += -DHAVE_SKIP_CALLCHAIN_IDX
-endif
-
ifeq ($(LIBUNWIND_LIBS),)
NO_LIBUNWIND := 1
else
@@ -120,6 +116,29 @@ ifdef PARSER_DEBUG
CFLAGS += -DPARSER_DEBUG
endif
+ifndef NO_LIBPYTHON
+ # Try different combinations to accommodate systems that only ship
+ # python[2][-config] in odd combinations, always preferring python2
+ # and python2-config as per PEP 394. If we catch a python[-config]
+ # in version 3, the version check will kill it.
+ PYTHON2 := $(if $(call get-executable,python2),python2,python)
+ override PYTHON := $(call get-executable-or-default,PYTHON,$(PYTHON2))
+ PYTHON2_CONFIG := \
+ $(if $(call get-executable,$(PYTHON)-config),$(PYTHON)-config,python-config)
+ override PYTHON_CONFIG := \
+ $(call get-executable-or-default,PYTHON_CONFIG,$(PYTHON2_CONFIG))
+
+ PYTHON_CONFIG_SQ := $(call shell-sq,$(PYTHON_CONFIG))
+
+ PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) --ldflags 2>/dev/null)
+ PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null)
+
+ FEATURE_CHECK_CFLAGS-libpython := $(PYTHON_EMBED_CCOPTS)
+ FEATURE_CHECK_LDFLAGS-libpython := $(PYTHON_EMBED_LDOPTS)
+ FEATURE_CHECK_CFLAGS-libpython-version := $(PYTHON_EMBED_CCOPTS)
+ FEATURE_CHECK_LDFLAGS-libpython-version := $(PYTHON_EMBED_LDOPTS)
+endif
+
CFLAGS += -fno-omit-frame-pointer
CFLAGS += -ggdb3
CFLAGS += -funwind-tables
@@ -355,6 +374,12 @@ ifndef NO_LIBELF
endif # NO_DWARF
endif # NO_LIBELF
+ifeq ($(ARCH),powerpc)
+ ifndef NO_DWARF
+ CFLAGS += -DHAVE_SKIP_CALLCHAIN_IDX
+ endif
+endif
+
ifndef NO_LIBUNWIND
ifneq ($(feature-libunwind), 1)
msg := $(warning No libunwind found. Please install libunwind-dev[el] >= 1.1 and/or set LIBUNWIND_DIR);
@@ -482,21 +507,14 @@ define disable-python_code
NO_LIBPYTHON := 1
endef
-override PYTHON := \
- $(call get-executable-or-default,PYTHON,python)
-
-ifndef PYTHON
- $(call disable-python,python interpreter)
+ifdef NO_LIBPYTHON
+ $(call disable-python)
else
- PYTHON_WORD := $(call shell-wordify,$(PYTHON))
-
- ifdef NO_LIBPYTHON
- $(call disable-python)
+ ifndef PYTHON
+ $(call disable-python,python interpreter)
else
-
- override PYTHON_CONFIG := \
- $(call get-executable-or-default,PYTHON_CONFIG,$(PYTHON)-config)
+ PYTHON_WORD := $(call shell-wordify,$(PYTHON))
ifndef PYTHON_CONFIG
$(call disable-python,python-config tool)
@@ -635,11 +653,13 @@ else
sysconfdir = $(prefix)/etc
ETC_PERFCONFIG = etc/perfconfig
endif
+ifndef lib
ifeq ($(IS_X86_64),1)
lib = lib64
else
lib = lib
endif
+endif # lib
libdir = $(prefix)/$(lib)
# Shell quote (do not use $(call) to accommodate ancient setups);
diff --git a/tools/perf/config/feature-checks/Makefile b/tools/perf/config/feature-checks/Makefile
index 6088f8d8a434..72ab2984718e 100644
--- a/tools/perf/config/feature-checks/Makefile
+++ b/tools/perf/config/feature-checks/Makefile
@@ -101,25 +101,11 @@ FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS)
test-libperl.bin:
$(BUILD) $(FLAGS_PERL_EMBED)
-override PYTHON := python
-override PYTHON_CONFIG := python-config
-
-escape-for-shell-sq = $(subst ','\'',$(1))
-shell-sq = '$(escape-for-shell-sq)'
-
-PYTHON_CONFIG_SQ = $(call shell-sq,$(PYTHON_CONFIG))
-
-PYTHON_EMBED_LDOPTS = $(shell $(PYTHON_CONFIG_SQ) --ldflags 2>/dev/null)
-PYTHON_EMBED_LDFLAGS = $(call strip-libs,$(PYTHON_EMBED_LDOPTS))
-PYTHON_EMBED_LIBADD = $(call grep-libs,$(PYTHON_EMBED_LDOPTS))
-PYTHON_EMBED_CCOPTS = $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null)
-FLAGS_PYTHON_EMBED = $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS)
-
test-libpython.bin:
- $(BUILD) $(FLAGS_PYTHON_EMBED)
+ $(BUILD)
test-libpython-version.bin:
- $(BUILD) $(FLAGS_PYTHON_EMBED)
+ $(BUILD)
test-libbfd.bin:
$(BUILD) -DPACKAGE='"perf"' -lbfd -lz -liberty -ldl
diff --git a/tools/perf/config/utilities.mak b/tools/perf/config/utilities.mak
index 4d985e0f03f5..7076a62d0ff7 100644
--- a/tools/perf/config/utilities.mak
+++ b/tools/perf/config/utilities.mak
@@ -132,7 +132,7 @@ endef
#
# Usage: bool-value = $(call is-absolute,path)
#
-is-absolute = $(shell echo $(shell-sq) | grep ^/ -q && echo y)
+is-absolute = $(shell echo $(shell-sq) | grep -q ^/ && echo y)
# lookup
#
diff --git a/tools/perf/perf-with-kcore.sh b/tools/perf/perf-with-kcore.sh
new file mode 100644
index 000000000000..c7ff90a90e4e
--- /dev/null
+++ b/tools/perf/perf-with-kcore.sh
@@ -0,0 +1,259 @@
+#!/bin/bash
+# perf-with-kcore: use perf with a copy of kcore
+# Copyright (c) 2014, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+
+set -e
+
+usage()
+{
+ echo "Usage: perf-with-kcore <perf sub-command> <perf.data directory> [<sub-command options> [ -- <workload>]]" >&2
+ echo " <perf sub-command> can be record, script, report or inject" >&2
+ echo " or: perf-with-kcore fix_buildid_cache_permissions" >&2
+ exit 1
+}
+
+find_perf()
+{
+ if [ -n "$PERF" ] ; then
+ return
+ fi
+ PERF=`which perf || true`
+ if [ -z "$PERF" ] ; then
+ echo "Failed to find perf" >&2
+ exit 1
+ fi
+ if [ ! -x "$PERF" ] ; then
+ echo "Failed to find perf" >&2
+ exit 1
+ fi
+ echo "Using $PERF"
+ "$PERF" version
+}
+
+copy_kcore()
+{
+ echo "Copying kcore"
+
+ if [ $EUID -eq 0 ] ; then
+ SUDO=""
+ else
+ SUDO="sudo"
+ fi
+
+ rm -f perf.data.junk
+ ("$PERF" record -o perf.data.junk $PERF_OPTIONS -- sleep 60) >/dev/null 2>/dev/null &
+ PERF_PID=$!
+
+ # Need to make sure that perf has started
+ sleep 1
+
+ KCORE=$(($SUDO "$PERF" buildid-cache -v -f -k /proc/kcore >/dev/null) 2>&1)
+ case "$KCORE" in
+ "kcore added to build-id cache directory "*)
+ KCORE_DIR=${KCORE#"kcore added to build-id cache directory "}
+ ;;
+ *)
+ kill $PERF_PID
+ wait >/dev/null 2>/dev/null || true
+ rm perf.data.junk
+ echo "$KCORE"
+ echo "Failed to find kcore" >&2
+ exit 1
+ ;;
+ esac
+
+ kill $PERF_PID
+ wait >/dev/null 2>/dev/null || true
+ rm perf.data.junk
+
+ $SUDO cp -a "$KCORE_DIR" "$(pwd)/$PERF_DATA_DIR"
+ $SUDO rm -f "$KCORE_DIR/kcore"
+ $SUDO rm -f "$KCORE_DIR/kallsyms"
+ $SUDO rm -f "$KCORE_DIR/modules"
+ $SUDO rmdir "$KCORE_DIR"
+
+ KCORE_DIR_BASENAME=$(basename "$KCORE_DIR")
+ KCORE_DIR="$(pwd)/$PERF_DATA_DIR/$KCORE_DIR_BASENAME"
+
+ $SUDO chown $UID "$KCORE_DIR"
+ $SUDO chown $UID "$KCORE_DIR/kcore"
+ $SUDO chown $UID "$KCORE_DIR/kallsyms"
+ $SUDO chown $UID "$KCORE_DIR/modules"
+
+ $SUDO chgrp $GROUPS "$KCORE_DIR"
+ $SUDO chgrp $GROUPS "$KCORE_DIR/kcore"
+ $SUDO chgrp $GROUPS "$KCORE_DIR/kallsyms"
+ $SUDO chgrp $GROUPS "$KCORE_DIR/modules"
+
+ ln -s "$KCORE_DIR_BASENAME" "$PERF_DATA_DIR/kcore_dir"
+}
+
+fix_buildid_cache_permissions()
+{
+ if [ $EUID -ne 0 ] ; then
+ echo "This script must be run as root via sudo " >&2
+ exit 1
+ fi
+
+ if [ -z "$SUDO_USER" ] ; then
+ echo "This script must be run via sudo" >&2
+ exit 1
+ fi
+
+ USER_HOME=$(bash <<< "echo ~$SUDO_USER")
+
+ if [ "$HOME" != "$USER_HOME" ] ; then
+ echo "Fix unnecessary because root has a home: $HOME" >&2
+ exit 1
+ fi
+
+ echo "Fixing buildid cache permissions"
+
+ find "$USER_HOME/.debug" -xdev -type d ! -user "$SUDO_USER" -ls -exec chown "$SUDO_USER" \{\} \;
+ find "$USER_HOME/.debug" -xdev -type f -links 1 ! -user "$SUDO_USER" -ls -exec chown "$SUDO_USER" \{\} \;
+ find "$USER_HOME/.debug" -xdev -type l ! -user "$SUDO_USER" -ls -exec chown -h "$SUDO_USER" \{\} \;
+
+ if [ -n "$SUDO_GID" ] ; then
+ find "$USER_HOME/.debug" -xdev -type d ! -group "$SUDO_GID" -ls -exec chgrp "$SUDO_GID" \{\} \;
+ find "$USER_HOME/.debug" -xdev -type f -links 1 ! -group "$SUDO_GID" -ls -exec chgrp "$SUDO_GID" \{\} \;
+ find "$USER_HOME/.debug" -xdev -type l ! -group "$SUDO_GID" -ls -exec chgrp -h "$SUDO_GID" \{\} \;
+ fi
+
+ echo "Done"
+}
+
+check_buildid_cache_permissions()
+{
+ if [ $EUID -eq 0 ] ; then
+ return
+ fi
+
+ PERMISSIONS_OK+=$(find "$HOME/.debug" -xdev -type d ! -user "$USER" -print -quit)
+ PERMISSIONS_OK+=$(find "$HOME/.debug" -xdev -type f -links 1 ! -user "$USER" -print -quit)
+ PERMISSIONS_OK+=$(find "$HOME/.debug" -xdev -type l ! -user "$USER" -print -quit)
+
+ PERMISSIONS_OK+=$(find "$HOME/.debug" -xdev -type d ! -group "$GROUPS" -print -quit)
+ PERMISSIONS_OK+=$(find "$HOME/.debug" -xdev -type f -links 1 ! -group "$GROUPS" -print -quit)
+ PERMISSIONS_OK+=$(find "$HOME/.debug" -xdev -type l ! -group "$GROUPS" -print -quit)
+
+ if [ -n "$PERMISSIONS_OK" ] ; then
+ echo "*** WARNING *** buildid cache permissions may need fixing" >&2
+ fi
+}
+
+record()
+{
+ echo "Recording"
+
+ if [ $EUID -ne 0 ] ; then
+
+ if [ "$(cat /proc/sys/kernel/kptr_restrict)" -ne 0 ] ; then
+ echo "*** WARNING *** /proc/sys/kernel/kptr_restrict prevents access to kernel addresses" >&2
+ fi
+
+ if echo "$PERF_OPTIONS" | grep -q ' -a \|^-a \| -a$\|^-a$\| --all-cpus \|^--all-cpus \| --all-cpus$\|^--all-cpus$' ; then
+ echo "*** WARNING *** system-wide tracing without root access will not be able to read all necessary information from /proc" >&2
+ fi
+
+ if echo "$PERF_OPTIONS" | grep -q 'intel_pt\|intel_bts\| -I\|^-I' ; then
+ if [ "$(cat /proc/sys/kernel/perf_event_paranoid)" -gt -1 ] ; then
+ echo "*** WARNING *** /proc/sys/kernel/perf_event_paranoid restricts buffer size and tracepoint (sched_switch) use" >&2
+ fi
+
+ if echo "$PERF_OPTIONS" | grep -q ' --per-thread \|^--per-thread \| --per-thread$\|^--per-thread$' ; then
+ true
+ elif echo "$PERF_OPTIONS" | grep -q ' -t \|^-t \| -t$\|^-t$' ; then
+ true
+ elif [ ! -r /sys/kernel/debug -o ! -x /sys/kernel/debug ] ; then
+ echo "*** WARNING *** /sys/kernel/debug permissions prevent tracepoint (sched_switch) use" >&2
+ fi
+ fi
+ fi
+
+ if [ -z "$1" ] ; then
+ echo "Workload is required for recording" >&2
+ usage
+ fi
+
+ if [ -e "$PERF_DATA_DIR" ] ; then
+ echo "'$PERF_DATA_DIR' exists" >&2
+ exit 1
+ fi
+
+ find_perf
+
+ mkdir "$PERF_DATA_DIR"
+
+ echo "$PERF record -o $PERF_DATA_DIR/perf.data $PERF_OPTIONS -- $*"
+ "$PERF" record -o "$PERF_DATA_DIR/perf.data" $PERF_OPTIONS -- $* || true
+
+ if rmdir "$PERF_DATA_DIR" > /dev/null 2>/dev/null ; then
+ exit 1
+ fi
+
+ copy_kcore
+
+ echo "Done"
+}
+
+subcommand()
+{
+ find_perf
+ check_buildid_cache_permissions
+ echo "$PERF $PERF_SUB_COMMAND -i $PERF_DATA_DIR/perf.data --kallsyms=$PERF_DATA_DIR/kcore_dir/kallsyms $*"
+ "$PERF" $PERF_SUB_COMMAND -i "$PERF_DATA_DIR/perf.data" "--kallsyms=$PERF_DATA_DIR/kcore_dir/kallsyms" $*
+}
+
+if [ "$1" = "fix_buildid_cache_permissions" ] ; then
+ fix_buildid_cache_permissions
+ exit 0
+fi
+
+PERF_SUB_COMMAND=$1
+PERF_DATA_DIR=$2
+shift || true
+shift || true
+
+if [ -z "$PERF_SUB_COMMAND" ] ; then
+ usage
+fi
+
+if [ -z "$PERF_DATA_DIR" ] ; then
+ usage
+fi
+
+case "$PERF_SUB_COMMAND" in
+"record")
+ while [ "$1" != "--" ] ; do
+ PERF_OPTIONS+="$1 "
+ shift || break
+ done
+ if [ "$1" != "--" ] ; then
+ echo "Options and workload are required for recording" >&2
+ usage
+ fi
+ shift
+ record $*
+;;
+"script")
+ subcommand $*
+;;
+"report")
+ subcommand $*
+;;
+"inject")
+ subcommand $*
+;;
+*)
+ usage
+;;
+esac
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 2282d41879a2..452a8474d29d 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -313,6 +313,7 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
int status;
struct stat st;
const char *prefix;
+ char sbuf[STRERR_BUFSIZE];
prefix = NULL;
if (p->option & RUN_SETUP)
@@ -343,7 +344,8 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
status = 1;
/* Check for ENOSPC and EIO errors.. */
if (fflush(stdout)) {
- fprintf(stderr, "write failure on standard output: %s", strerror(errno));
+ fprintf(stderr, "write failure on standard output: %s",
+ strerror_r(errno, sbuf, sizeof(sbuf)));
goto out;
}
if (ferror(stdout)) {
@@ -351,7 +353,8 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
goto out;
}
if (fclose(stdout)) {
- fprintf(stderr, "close failed on standard output: %s", strerror(errno));
+ fprintf(stderr, "close failed on standard output: %s",
+ strerror_r(errno, sbuf, sizeof(sbuf)));
goto out;
}
status = 0;
@@ -466,6 +469,7 @@ void pthread__unblock_sigwinch(void)
int main(int argc, const char **argv)
{
const char *cmd;
+ char sbuf[STRERR_BUFSIZE];
/* The page_size is placed in util object. */
page_size = sysconf(_SC_PAGE_SIZE);
@@ -561,7 +565,7 @@ int main(int argc, const char **argv)
}
fprintf(stderr, "Failed to run command '%s': %s\n",
- cmd, strerror(errno));
+ cmd, strerror_r(errno, sbuf, sizeof(sbuf)));
out:
return 1;
}
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 510c65f72858..220d44e44c1b 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -41,8 +41,6 @@ void pthread__unblock_sigwinch(void);
struct record_opts {
struct target target;
- int call_graph;
- bool call_graph_enabled;
bool group;
bool inherit_stat;
bool no_buffering;
@@ -60,7 +58,6 @@ struct record_opts {
u64 branch_stack;
u64 default_interval;
u64 user_interval;
- u16 stack_dump_size;
bool sample_transaction;
unsigned initial_delay;
};
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index 6f8b01bc6033..ac655b0700e7 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -154,6 +154,18 @@ static struct test {
.func = test__hists_cumulate,
},
{
+ .desc = "Test tracking with sched_switch",
+ .func = test__switch_tracking,
+ },
+ {
+ .desc = "Filter fds with revents mask in a fdarray",
+ .func = test__fdarray__filter,
+ },
+ {
+ .desc = "Add fd to a fdarray, making it autogrow",
+ .func = test__fdarray__add,
+ },
+ {
.func = NULL,
},
};
@@ -185,9 +197,11 @@ static bool perf_test__matches(int curr, int argc, const char *argv[])
static int run_test(struct test *test)
{
int status, err = -1, child = fork();
+ char sbuf[STRERR_BUFSIZE];
if (child < 0) {
- pr_err("failed to fork test: %s\n", strerror(errno));
+ pr_err("failed to fork test: %s\n",
+ strerror_r(errno, sbuf, sizeof(sbuf)));
return -1;
}
@@ -297,7 +311,7 @@ int cmd_test(int argc, const char **argv, const char *prefix __maybe_unused)
symbol_conf.sort_by_name = true;
symbol_conf.try_vmlinux_path = true;
- if (symbol__init() < 0)
+ if (symbol__init(NULL) < 0)
return -1;
if (skip != NULL)
diff --git a/tools/perf/tests/fdarray.c b/tools/perf/tests/fdarray.c
new file mode 100644
index 000000000000..d24b837951d4
--- /dev/null
+++ b/tools/perf/tests/fdarray.c
@@ -0,0 +1,174 @@
+#include <api/fd/array.h>
+#include "util/debug.h"
+#include "tests/tests.h"
+
+static void fdarray__init_revents(struct fdarray *fda, short revents)
+{
+ int fd;
+
+ fda->nr = fda->nr_alloc;
+
+ for (fd = 0; fd < fda->nr; ++fd) {
+ fda->entries[fd].fd = fda->nr - fd;
+ fda->entries[fd].revents = revents;
+ }
+}
+
+static int fdarray__fprintf_prefix(struct fdarray *fda, const char *prefix, FILE *fp)
+{
+ int printed = 0;
+
+ if (!verbose)
+ return 0;
+
+ printed += fprintf(fp, "\n%s: ", prefix);
+ return printed + fdarray__fprintf(fda, fp);
+}
+
+int test__fdarray__filter(void)
+{
+ int nr_fds, expected_fd[2], fd, err = TEST_FAIL;
+ struct fdarray *fda = fdarray__new(5, 5);
+
+ if (fda == NULL) {
+ pr_debug("\nfdarray__new() failed!");
+ goto out;
+ }
+
+ fdarray__init_revents(fda, POLLIN);
+ nr_fds = fdarray__filter(fda, POLLHUP, NULL);
+ if (nr_fds != fda->nr_alloc) {
+ pr_debug("\nfdarray__filter()=%d != %d shouldn't have filtered anything",
+ nr_fds, fda->nr_alloc);
+ goto out_delete;
+ }
+
+ fdarray__init_revents(fda, POLLHUP);
+ nr_fds = fdarray__filter(fda, POLLHUP, NULL);
+ if (nr_fds != 0) {
+ pr_debug("\nfdarray__filter()=%d != %d, should have filtered all fds",
+ nr_fds, fda->nr_alloc);
+ goto out_delete;
+ }
+
+ fdarray__init_revents(fda, POLLHUP);
+ fda->entries[2].revents = POLLIN;
+ expected_fd[0] = fda->entries[2].fd;
+
+ pr_debug("\nfiltering all but fda->entries[2]:");
+ fdarray__fprintf_prefix(fda, "before", stderr);
+ nr_fds = fdarray__filter(fda, POLLHUP, NULL);
+ fdarray__fprintf_prefix(fda, " after", stderr);
+ if (nr_fds != 1) {
+ pr_debug("\nfdarray__filter()=%d != 1, should have left just one event", nr_fds);
+ goto out_delete;
+ }
+
+ if (fda->entries[0].fd != expected_fd[0]) {
+ pr_debug("\nfda->entries[0].fd=%d != %d\n",
+ fda->entries[0].fd, expected_fd[0]);
+ goto out_delete;
+ }
+
+ fdarray__init_revents(fda, POLLHUP);
+ fda->entries[0].revents = POLLIN;
+ expected_fd[0] = fda->entries[0].fd;
+ fda->entries[3].revents = POLLIN;
+ expected_fd[1] = fda->entries[3].fd;
+
+ pr_debug("\nfiltering all but (fda->entries[0], fda->entries[3]):");
+ fdarray__fprintf_prefix(fda, "before", stderr);
+ nr_fds = fdarray__filter(fda, POLLHUP, NULL);
+ fdarray__fprintf_prefix(fda, " after", stderr);
+ if (nr_fds != 2) {
+ pr_debug("\nfdarray__filter()=%d != 2, should have left just two events",
+ nr_fds);
+ goto out_delete;
+ }
+
+ for (fd = 0; fd < 2; ++fd) {
+ if (fda->entries[fd].fd != expected_fd[fd]) {
+ pr_debug("\nfda->entries[%d].fd=%d != %d\n", fd,
+ fda->entries[fd].fd, expected_fd[fd]);
+ goto out_delete;
+ }
+ }
+
+ pr_debug("\n");
+
+ err = 0;
+out_delete:
+ fdarray__delete(fda);
+out:
+ return err;
+}
+
+int test__fdarray__add(void)
+{
+ int err = TEST_FAIL;
+ struct fdarray *fda = fdarray__new(2, 2);
+
+ if (fda == NULL) {
+ pr_debug("\nfdarray__new() failed!");
+ goto out;
+ }
+
+#define FDA_CHECK(_idx, _fd, _revents) \
+ if (fda->entries[_idx].fd != _fd) { \
+ pr_debug("\n%d: fda->entries[%d](%d) != %d!", \
+ __LINE__, _idx, fda->entries[_idx].fd, _fd); \
+ goto out_delete; \
+ } \
+ if (fda->entries[_idx].events != (_revents)) { \
+ pr_debug("\n%d: fda->entries[%d].revents(%d) != %d!", \
+ __LINE__, _idx, fda->entries[_idx].fd, _revents); \
+ goto out_delete; \
+ }
+
+#define FDA_ADD(_idx, _fd, _revents, _nr) \
+ if (fdarray__add(fda, _fd, _revents) < 0) { \
+ pr_debug("\n%d: fdarray__add(fda, %d, %d) failed!", \
+ __LINE__, _fd, _revents); \
+ goto out_delete; \
+ } \
+ if (fda->nr != _nr) { \
+ pr_debug("\n%d: fdarray__add(fda, %d, %d)=%d != %d", \
+ __LINE__, _fd, _revents, fda->nr, _nr); \
+ goto out_delete; \
+ } \
+ FDA_CHECK(_idx, _fd, _revents)
+
+ FDA_ADD(0, 1, POLLIN, 1);
+ FDA_ADD(1, 2, POLLERR, 2);
+
+ fdarray__fprintf_prefix(fda, "before growing array", stderr);
+
+ FDA_ADD(2, 35, POLLHUP, 3);
+
+ if (fda->entries == NULL) {
+ pr_debug("\nfdarray__add(fda, 35, POLLHUP) should have allocated fda->pollfd!");
+ goto out_delete;
+ }
+
+ fdarray__fprintf_prefix(fda, "after 3rd add", stderr);
+
+ FDA_ADD(3, 88, POLLIN | POLLOUT, 4);
+
+ fdarray__fprintf_prefix(fda, "after 4th add", stderr);
+
+ FDA_CHECK(0, 1, POLLIN);
+ FDA_CHECK(1, 2, POLLERR);
+ FDA_CHECK(2, 35, POLLHUP);
+ FDA_CHECK(3, 88, POLLIN | POLLOUT);
+
+#undef FDA_ADD
+#undef FDA_CHECK
+
+ pr_debug("\n");
+
+ err = 0;
+out_delete:
+ fdarray__delete(fda);
+out:
+ return err;
+}
diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c
index 142263492f6f..9b9622a33932 100644
--- a/tools/perf/tests/mmap-basic.c
+++ b/tools/perf/tests/mmap-basic.c
@@ -31,6 +31,7 @@ int test__basic_mmap(void)
unsigned int nr_events[nsyscalls],
expected_nr_events[nsyscalls], i, j;
struct perf_evsel *evsels[nsyscalls], *evsel;
+ char sbuf[STRERR_BUFSIZE];
threads = thread_map__new(-1, getpid(), UINT_MAX);
if (threads == NULL) {
@@ -49,7 +50,7 @@ int test__basic_mmap(void)
sched_setaffinity(0, sizeof(cpu_set), &cpu_set);
if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
pr_debug("sched_setaffinity() failed on CPU %d: %s ",
- cpus->map[0], strerror(errno));
+ cpus->map[0], strerror_r(errno, sbuf, sizeof(sbuf)));
goto out_free_cpus;
}
@@ -79,7 +80,7 @@ int test__basic_mmap(void)
if (perf_evsel__open(evsels[i], cpus, threads) < 0) {
pr_debug("failed to open counter: %s, "
"tweak /proc/sys/kernel/perf_event_paranoid?\n",
- strerror(errno));
+ strerror_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist;
}
@@ -89,7 +90,7 @@ int test__basic_mmap(void)
if (perf_evlist__mmap(evlist, 128, true) < 0) {
pr_debug("failed to mmap events: %d (%s)\n", errno,
- strerror(errno));
+ strerror_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist;
}
diff --git a/tools/perf/tests/open-syscall-all-cpus.c b/tools/perf/tests/open-syscall-all-cpus.c
index 5fecdbd2f5f7..8fa82d1700c7 100644
--- a/tools/perf/tests/open-syscall-all-cpus.c
+++ b/tools/perf/tests/open-syscall-all-cpus.c
@@ -12,6 +12,7 @@ int test__open_syscall_event_on_all_cpus(void)
unsigned int nr_open_calls = 111, i;
cpu_set_t cpu_set;
struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
+ char sbuf[STRERR_BUFSIZE];
if (threads == NULL) {
pr_debug("thread_map__new\n");
@@ -35,7 +36,7 @@ int test__open_syscall_event_on_all_cpus(void)
if (perf_evsel__open(evsel, cpus, threads) < 0) {
pr_debug("failed to open counter: %s, "
"tweak /proc/sys/kernel/perf_event_paranoid?\n",
- strerror(errno));
+ strerror_r(errno, sbuf, sizeof(sbuf)));
goto out_evsel_delete;
}
@@ -56,7 +57,7 @@ int test__open_syscall_event_on_all_cpus(void)
if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
pr_debug("sched_setaffinity() failed on CPU %d: %s ",
cpus->map[cpu],
- strerror(errno));
+ strerror_r(errno, sbuf, sizeof(sbuf)));
goto out_close_fd;
}
for (i = 0; i < ncalls; ++i) {
diff --git a/tools/perf/tests/open-syscall-tp-fields.c b/tools/perf/tests/open-syscall-tp-fields.c
index 0785b64ffd6c..127dcae0b760 100644
--- a/tools/perf/tests/open-syscall-tp-fields.c
+++ b/tools/perf/tests/open-syscall-tp-fields.c
@@ -22,6 +22,7 @@ int test__syscall_open_tp_fields(void)
struct perf_evlist *evlist = perf_evlist__new();
struct perf_evsel *evsel;
int err = -1, i, nr_events = 0, nr_polls = 0;
+ char sbuf[STRERR_BUFSIZE];
if (evlist == NULL) {
pr_debug("%s: perf_evlist__new\n", __func__);
@@ -48,13 +49,15 @@ int test__syscall_open_tp_fields(void)
err = perf_evlist__open(evlist);
if (err < 0) {
- pr_debug("perf_evlist__open: %s\n", strerror(errno));
+ pr_debug("perf_evlist__open: %s\n",
+ strerror_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist;
}
err = perf_evlist__mmap(evlist, UINT_MAX, false);
if (err < 0) {
- pr_debug("perf_evlist__mmap: %s\n", strerror(errno));
+ pr_debug("perf_evlist__mmap: %s\n",
+ strerror_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist;
}
@@ -102,7 +105,7 @@ int test__syscall_open_tp_fields(void)
}
if (nr_events == before)
- poll(evlist->pollfd, evlist->nr_fds, 10);
+ perf_evlist__poll(evlist, 10);
if (++nr_polls > 5) {
pr_debug("%s: no events!\n", __func__);
diff --git a/tools/perf/tests/open-syscall.c b/tools/perf/tests/open-syscall.c
index c1dc7d25f38c..a33b2daae40f 100644
--- a/tools/perf/tests/open-syscall.c
+++ b/tools/perf/tests/open-syscall.c
@@ -9,6 +9,7 @@ int test__open_syscall_event(void)
struct perf_evsel *evsel;
unsigned int nr_open_calls = 111, i;
struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
+ char sbuf[STRERR_BUFSIZE];
if (threads == NULL) {
pr_debug("thread_map__new\n");
@@ -24,7 +25,7 @@ int test__open_syscall_event(void)
if (perf_evsel__open_per_thread(evsel, threads) < 0) {
pr_debug("failed to open counter: %s, "
"tweak /proc/sys/kernel/perf_event_paranoid?\n",
- strerror(errno));
+ strerror_r(errno, sbuf, sizeof(sbuf)));
goto out_evsel_delete;
}
diff --git a/tools/perf/tests/perf-record.c b/tools/perf/tests/perf-record.c
index aca1a83dd13a..7a228a2a070b 100644
--- a/tools/perf/tests/perf-record.c
+++ b/tools/perf/tests/perf-record.c
@@ -59,6 +59,7 @@ int test__PERF_RECORD(void)
int err = -1, errs = 0, i, wakeups = 0;
u32 cpu;
int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };
+ char sbuf[STRERR_BUFSIZE];
if (evlist == NULL || argv == NULL) {
pr_debug("Not enough memory to create evlist\n");
@@ -100,7 +101,8 @@ int test__PERF_RECORD(void)
err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);
if (err < 0) {
- pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno));
+ pr_debug("sched__get_first_possible_cpu: %s\n",
+ strerror_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist;
}
@@ -110,7 +112,8 @@ int test__PERF_RECORD(void)
* So that we can check perf_sample.cpu on all the samples.
*/
if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) {
- pr_debug("sched_setaffinity: %s\n", strerror(errno));
+ pr_debug("sched_setaffinity: %s\n",
+ strerror_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist;
}
@@ -120,7 +123,8 @@ int test__PERF_RECORD(void)
*/
err = perf_evlist__open(evlist);
if (err < 0) {
- pr_debug("perf_evlist__open: %s\n", strerror(errno));
+ pr_debug("perf_evlist__open: %s\n",
+ strerror_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist;
}
@@ -131,7 +135,8 @@ int test__PERF_RECORD(void)
*/
err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
if (err < 0) {
- pr_debug("perf_evlist__mmap: %s\n", strerror(errno));
+ pr_debug("perf_evlist__mmap: %s\n",
+ strerror_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist;
}
@@ -263,7 +268,7 @@ int test__PERF_RECORD(void)
* perf_event_attr.wakeup_events, just PERF_EVENT_SAMPLE does.
*/
if (total_events == before && false)
- poll(evlist->pollfd, evlist->nr_fds, -1);
+ perf_evlist__poll(evlist, -1);
sleep(1);
if (++wakeups > 5) {
diff --git a/tools/perf/tests/pmu.c b/tools/perf/tests/pmu.c
index 12b322fa3475..eeb68bb1972d 100644
--- a/tools/perf/tests/pmu.c
+++ b/tools/perf/tests/pmu.c
@@ -152,7 +152,7 @@ int test__pmu(void)
if (ret)
break;
- ret = perf_pmu__config_terms(&formats, &attr, terms);
+ ret = perf_pmu__config_terms(&formats, &attr, terms, false);
if (ret)
break;
diff --git a/tools/perf/tests/rdpmc.c b/tools/perf/tests/rdpmc.c
index c04d1f268576..d31f2c4d9f64 100644
--- a/tools/perf/tests/rdpmc.c
+++ b/tools/perf/tests/rdpmc.c
@@ -100,6 +100,7 @@ static int __test__rdpmc(void)
};
u64 delta_sum = 0;
struct sigaction sa;
+ char sbuf[STRERR_BUFSIZE];
sigfillset(&sa.sa_mask);
sa.sa_sigaction = segfault_handler;
@@ -109,14 +110,15 @@ static int __test__rdpmc(void)
perf_event_open_cloexec_flag());
if (fd < 0) {
pr_err("Error: sys_perf_event_open() syscall returned "
- "with %d (%s)\n", fd, strerror(errno));
+ "with %d (%s)\n", fd,
+ strerror_r(errno, sbuf, sizeof(sbuf)));
return -1;
}
addr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0);
if (addr == (void *)(-1)) {
pr_err("Error: mmap() syscall returned with (%s)\n",
- strerror(errno));
+ strerror_r(errno, sbuf, sizeof(sbuf)));
goto out_close;
}
diff --git a/tools/perf/tests/sw-clock.c b/tools/perf/tests/sw-clock.c
index 983d6b8562a8..1aa21c90731b 100644
--- a/tools/perf/tests/sw-clock.c
+++ b/tools/perf/tests/sw-clock.c
@@ -22,6 +22,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
volatile int tmp = 0;
u64 total_periods = 0;
int nr_samples = 0;
+ char sbuf[STRERR_BUFSIZE];
union perf_event *event;
struct perf_evsel *evsel;
struct perf_evlist *evlist;
@@ -62,14 +63,15 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
err = -errno;
pr_debug("Couldn't open evlist: %s\nHint: check %s, using %" PRIu64 " in this test.\n",
- strerror(errno), knob, (u64)attr.sample_freq);
+ strerror_r(errno, sbuf, sizeof(sbuf)),
+ knob, (u64)attr.sample_freq);
goto out_delete_evlist;
}
err = perf_evlist__mmap(evlist, 128, true);
if (err < 0) {
pr_debug("failed to mmap event: %d (%s)\n", errno,
- strerror(errno));
+ strerror_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist;
}
diff --git a/tools/perf/tests/switch-tracking.c b/tools/perf/tests/switch-tracking.c
new file mode 100644
index 000000000000..cc68648c7c55
--- /dev/null
+++ b/tools/perf/tests/switch-tracking.c
@@ -0,0 +1,572 @@
+#include <sys/time.h>
+#include <sys/prctl.h>
+#include <time.h>
+#include <stdlib.h>
+
+#include "parse-events.h"
+#include "evlist.h"
+#include "evsel.h"
+#include "thread_map.h"
+#include "cpumap.h"
+#include "tests.h"
+
+static int spin_sleep(void)
+{
+ struct timeval start, now, diff, maxtime;
+ struct timespec ts;
+ int err, i;
+
+ maxtime.tv_sec = 0;
+ maxtime.tv_usec = 50000;
+
+ err = gettimeofday(&start, NULL);
+ if (err)
+ return err;
+
+ /* Spin for 50ms */
+ while (1) {
+ for (i = 0; i < 1000; i++)
+ barrier();
+
+ err = gettimeofday(&now, NULL);
+ if (err)
+ return err;
+
+ timersub(&now, &start, &diff);
+ if (timercmp(&diff, &maxtime, > /* For checkpatch */))
+ break;
+ }
+
+ ts.tv_nsec = 50 * 1000 * 1000;
+ ts.tv_sec = 0;
+
+ /* Sleep for 50ms */
+ err = nanosleep(&ts, NULL);
+ if (err == EINTR)
+ err = 0;
+
+ return err;
+}
+
+struct switch_tracking {
+ struct perf_evsel *switch_evsel;
+ struct perf_evsel *cycles_evsel;
+ pid_t *tids;
+ int nr_tids;
+ int comm_seen[4];
+ int cycles_before_comm_1;
+ int cycles_between_comm_2_and_comm_3;
+ int cycles_after_comm_4;
+};
+
+static int check_comm(struct switch_tracking *switch_tracking,
+ union perf_event *event, const char *comm, int nr)
+{
+ if (event->header.type == PERF_RECORD_COMM &&
+ (pid_t)event->comm.pid == getpid() &&
+ (pid_t)event->comm.tid == getpid() &&
+ strcmp(event->comm.comm, comm) == 0) {
+ if (switch_tracking->comm_seen[nr]) {
+ pr_debug("Duplicate comm event\n");
+ return -1;
+ }
+ switch_tracking->comm_seen[nr] = 1;
+ pr_debug3("comm event: %s nr: %d\n", event->comm.comm, nr);
+ return 1;
+ }
+ return 0;
+}
+
+static int check_cpu(struct switch_tracking *switch_tracking, int cpu)
+{
+ int i, nr = cpu + 1;
+
+ if (cpu < 0)
+ return -1;
+
+ if (!switch_tracking->tids) {
+ switch_tracking->tids = calloc(nr, sizeof(pid_t));
+ if (!switch_tracking->tids)
+ return -1;
+ for (i = 0; i < nr; i++)
+ switch_tracking->tids[i] = -1;
+ switch_tracking->nr_tids = nr;
+ return 0;
+ }
+
+ if (cpu >= switch_tracking->nr_tids) {
+ void *addr;
+
+ addr = realloc(switch_tracking->tids, nr * sizeof(pid_t));
+ if (!addr)
+ return -1;
+ switch_tracking->tids = addr;
+ for (i = switch_tracking->nr_tids; i < nr; i++)
+ switch_tracking->tids[i] = -1;
+ switch_tracking->nr_tids = nr;
+ return 0;
+ }
+
+ return 0;
+}
+
+static int process_sample_event(struct perf_evlist *evlist,
+ union perf_event *event,
+ struct switch_tracking *switch_tracking)
+{
+ struct perf_sample sample;
+ struct perf_evsel *evsel;
+ pid_t next_tid, prev_tid;
+ int cpu, err;
+
+ if (perf_evlist__parse_sample(evlist, event, &sample)) {
+ pr_debug("perf_evlist__parse_sample failed\n");
+ return -1;
+ }
+
+ evsel = perf_evlist__id2evsel(evlist, sample.id);
+ if (evsel == switch_tracking->switch_evsel) {
+ next_tid = perf_evsel__intval(evsel, &sample, "next_pid");
+ prev_tid = perf_evsel__intval(evsel, &sample, "prev_pid");
+ cpu = sample.cpu;
+ pr_debug3("sched_switch: cpu: %d prev_tid %d next_tid %d\n",
+ cpu, prev_tid, next_tid);
+ err = check_cpu(switch_tracking, cpu);
+ if (err)
+ return err;
+ /*
+ * Check for no missing sched_switch events i.e. that the
+ * evsel->system_wide flag has worked.
+ */
+ if (switch_tracking->tids[cpu] != -1 &&
+ switch_tracking->tids[cpu] != prev_tid) {
+ pr_debug("Missing sched_switch events\n");
+ return -1;
+ }
+ switch_tracking->tids[cpu] = next_tid;
+ }
+
+ if (evsel == switch_tracking->cycles_evsel) {
+ pr_debug3("cycles event\n");
+ if (!switch_tracking->comm_seen[0])
+ switch_tracking->cycles_before_comm_1 = 1;
+ if (switch_tracking->comm_seen[1] &&
+ !switch_tracking->comm_seen[2])
+ switch_tracking->cycles_between_comm_2_and_comm_3 = 1;
+ if (switch_tracking->comm_seen[3])
+ switch_tracking->cycles_after_comm_4 = 1;
+ }
+
+ return 0;
+}
+
+static int process_event(struct perf_evlist *evlist, union perf_event *event,
+ struct switch_tracking *switch_tracking)
+{
+ if (event->header.type == PERF_RECORD_SAMPLE)
+ return process_sample_event(evlist, event, switch_tracking);
+
+ if (event->header.type == PERF_RECORD_COMM) {
+ int err, done = 0;
+
+ err = check_comm(switch_tracking, event, "Test COMM 1", 0);
+ if (err < 0)
+ return -1;
+ done += err;
+ err = check_comm(switch_tracking, event, "Test COMM 2", 1);
+ if (err < 0)
+ return -1;
+ done += err;
+ err = check_comm(switch_tracking, event, "Test COMM 3", 2);
+ if (err < 0)
+ return -1;
+ done += err;
+ err = check_comm(switch_tracking, event, "Test COMM 4", 3);
+ if (err < 0)
+ return -1;
+ done += err;
+ if (done != 1) {
+ pr_debug("Unexpected comm event\n");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+struct event_node {
+ struct list_head list;
+ union perf_event *event;
+ u64 event_time;
+};
+
+static int add_event(struct perf_evlist *evlist, struct list_head *events,
+ union perf_event *event)
+{
+ struct perf_sample sample;
+ struct event_node *node;
+
+ node = malloc(sizeof(struct event_node));
+ if (!node) {
+ pr_debug("malloc failed\n");
+ return -1;
+ }
+ node->event = event;
+ list_add(&node->list, events);
+
+ if (perf_evlist__parse_sample(evlist, event, &sample)) {
+ pr_debug("perf_evlist__parse_sample failed\n");
+ return -1;
+ }
+
+ if (!sample.time) {
+ pr_debug("event with no time\n");
+ return -1;
+ }
+
+ node->event_time = sample.time;
+
+ return 0;
+}
+
+static void free_event_nodes(struct list_head *events)
+{
+ struct event_node *node;
+
+ while (!list_empty(events)) {
+ node = list_entry(events->next, struct event_node, list);
+ list_del(&node->list);
+ free(node);
+ }
+}
+
+static int compar(const void *a, const void *b)
+{
+ const struct event_node *nodea = a;
+ const struct event_node *nodeb = b;
+ s64 cmp = nodea->event_time - nodeb->event_time;
+
+ return cmp;
+}
+
+static int process_events(struct perf_evlist *evlist,
+ struct switch_tracking *switch_tracking)
+{
+ union perf_event *event;
+ unsigned pos, cnt = 0;
+ LIST_HEAD(events);
+ struct event_node *events_array, *node;
+ int i, ret;
+
+ for (i = 0; i < evlist->nr_mmaps; i++) {
+ while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
+ cnt += 1;
+ ret = add_event(evlist, &events, event);
+ perf_evlist__mmap_consume(evlist, i);
+ if (ret < 0)
+ goto out_free_nodes;
+ }
+ }
+
+ events_array = calloc(cnt, sizeof(struct event_node));
+ if (!events_array) {
+ pr_debug("calloc failed\n");
+ ret = -1;
+ goto out_free_nodes;
+ }
+
+ pos = 0;
+ list_for_each_entry(node, &events, list)
+ events_array[pos++] = *node;
+
+ qsort(events_array, cnt, sizeof(struct event_node), compar);
+
+ for (pos = 0; pos < cnt; pos++) {
+ ret = process_event(evlist, events_array[pos].event,
+ switch_tracking);
+ if (ret < 0)
+ goto out_free;
+ }
+
+ ret = 0;
+out_free:
+ pr_debug("%u events recorded\n", cnt);
+ free(events_array);
+out_free_nodes:
+ free_event_nodes(&events);
+ return ret;
+}
+
+/**
+ * test__switch_tracking - test using sched_switch and tracking events.
+ *
+ * This function implements a test that checks that sched_switch events and
+ * tracking events can be recorded for a workload (current process) using the
+ * evsel->system_wide and evsel->tracking flags (respectively) with other events
+ * sometimes enabled or disabled.
+ */
+int test__switch_tracking(void)
+{
+ const char *sched_switch = "sched:sched_switch";
+ struct switch_tracking switch_tracking = { .tids = NULL, };
+ struct record_opts opts = {
+ .mmap_pages = UINT_MAX,
+ .user_freq = UINT_MAX,
+ .user_interval = ULLONG_MAX,
+ .freq = 4000,
+ .target = {
+ .uses_mmap = true,
+ },
+ };
+ struct thread_map *threads = NULL;
+ struct cpu_map *cpus = NULL;
+ struct perf_evlist *evlist = NULL;
+ struct perf_evsel *evsel, *cpu_clocks_evsel, *cycles_evsel;
+ struct perf_evsel *switch_evsel, *tracking_evsel;
+ const char *comm;
+ int err = -1;
+
+ threads = thread_map__new(-1, getpid(), UINT_MAX);
+ if (!threads) {
+ pr_debug("thread_map__new failed!\n");
+ goto out_err;
+ }
+
+ cpus = cpu_map__new(NULL);
+ if (!cpus) {
+ pr_debug("cpu_map__new failed!\n");
+ goto out_err;
+ }
+
+ evlist = perf_evlist__new();
+ if (!evlist) {
+ pr_debug("perf_evlist__new failed!\n");
+ goto out_err;
+ }
+
+ perf_evlist__set_maps(evlist, cpus, threads);
+
+ /* First event */
+ err = parse_events(evlist, "cpu-clock:u");
+ if (err) {
+ pr_debug("Failed to parse event dummy:u\n");
+ goto out_err;
+ }
+
+ cpu_clocks_evsel = perf_evlist__last(evlist);
+
+ /* Second event */
+ err = parse_events(evlist, "cycles:u");
+ if (err) {
+ pr_debug("Failed to parse event cycles:u\n");
+ goto out_err;
+ }
+
+ cycles_evsel = perf_evlist__last(evlist);
+
+ /* Third event */
+ if (!perf_evlist__can_select_event(evlist, sched_switch)) {
+ fprintf(stderr, " (no sched_switch)");
+ err = 0;
+ goto out;
+ }
+
+ err = parse_events(evlist, sched_switch);
+ if (err) {
+ pr_debug("Failed to parse event %s\n", sched_switch);
+ goto out_err;
+ }
+
+ switch_evsel = perf_evlist__last(evlist);
+
+ perf_evsel__set_sample_bit(switch_evsel, CPU);
+ perf_evsel__set_sample_bit(switch_evsel, TIME);
+
+ switch_evsel->system_wide = true;
+ switch_evsel->no_aux_samples = true;
+ switch_evsel->immediate = true;
+
+ /* Test moving an event to the front */
+ if (cycles_evsel == perf_evlist__first(evlist)) {
+ pr_debug("cycles event already at front");
+ goto out_err;
+ }
+ perf_evlist__to_front(evlist, cycles_evsel);
+ if (cycles_evsel != perf_evlist__first(evlist)) {
+ pr_debug("Failed to move cycles event to front");
+ goto out_err;
+ }
+
+ perf_evsel__set_sample_bit(cycles_evsel, CPU);
+ perf_evsel__set_sample_bit(cycles_evsel, TIME);
+
+ /* Fourth event */
+ err = parse_events(evlist, "dummy:u");
+ if (err) {
+ pr_debug("Failed to parse event dummy:u\n");
+ goto out_err;
+ }
+
+ tracking_evsel = perf_evlist__last(evlist);
+
+ perf_evlist__set_tracking_event(evlist, tracking_evsel);
+
+ tracking_evsel->attr.freq = 0;
+ tracking_evsel->attr.sample_period = 1;
+
+ perf_evsel__set_sample_bit(tracking_evsel, TIME);
+
+ /* Config events */
+ perf_evlist__config(evlist, &opts);
+
+ /* Check moved event is still at the front */
+ if (cycles_evsel != perf_evlist__first(evlist)) {
+ pr_debug("Front event no longer at front");
+ goto out_err;
+ }
+
+ /* Check tracking event is tracking */
+ if (!tracking_evsel->attr.mmap || !tracking_evsel->attr.comm) {
+ pr_debug("Tracking event not tracking\n");
+ goto out_err;
+ }
+
+ /* Check non-tracking events are not tracking */
+ evlist__for_each(evlist, evsel) {
+ if (evsel != tracking_evsel) {
+ if (evsel->attr.mmap || evsel->attr.comm) {
+ pr_debug("Non-tracking event is tracking\n");
+ goto out_err;
+ }
+ }
+ }
+
+ if (perf_evlist__open(evlist) < 0) {
+ fprintf(stderr, " (not supported)");
+ err = 0;
+ goto out;
+ }
+
+ err = perf_evlist__mmap(evlist, UINT_MAX, false);
+ if (err) {
+ pr_debug("perf_evlist__mmap failed!\n");
+ goto out_err;
+ }
+
+ perf_evlist__enable(evlist);
+
+ err = perf_evlist__disable_event(evlist, cpu_clocks_evsel);
+ if (err) {
+ pr_debug("perf_evlist__disable_event failed!\n");
+ goto out_err;
+ }
+
+ err = spin_sleep();
+ if (err) {
+ pr_debug("spin_sleep failed!\n");
+ goto out_err;
+ }
+
+ comm = "Test COMM 1";
+ err = prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0);
+ if (err) {
+ pr_debug("PR_SET_NAME failed!\n");
+ goto out_err;
+ }
+
+ err = perf_evlist__disable_event(evlist, cycles_evsel);
+ if (err) {
+ pr_debug("perf_evlist__disable_event failed!\n");
+ goto out_err;
+ }
+
+ comm = "Test COMM 2";
+ err = prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0);
+ if (err) {
+ pr_debug("PR_SET_NAME failed!\n");
+ goto out_err;
+ }
+
+ err = spin_sleep();
+ if (err) {
+ pr_debug("spin_sleep failed!\n");
+ goto out_err;
+ }
+
+ comm = "Test COMM 3";
+ err = prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0);
+ if (err) {
+ pr_debug("PR_SET_NAME failed!\n");
+ goto out_err;
+ }
+
+ err = perf_evlist__enable_event(evlist, cycles_evsel);
+ if (err) {
+ pr_debug("perf_evlist__disable_event failed!\n");
+ goto out_err;
+ }
+
+ comm = "Test COMM 4";
+ err = prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0);
+ if (err) {
+ pr_debug("PR_SET_NAME failed!\n");
+ goto out_err;
+ }
+
+ err = spin_sleep();
+ if (err) {
+ pr_debug("spin_sleep failed!\n");
+ goto out_err;
+ }
+
+ perf_evlist__disable(evlist);
+
+ switch_tracking.switch_evsel = switch_evsel;
+ switch_tracking.cycles_evsel = cycles_evsel;
+
+ err = process_events(evlist, &switch_tracking);
+
+ zfree(&switch_tracking.tids);
+
+ if (err)
+ goto out_err;
+
+ /* Check all 4 comm events were seen i.e. that evsel->tracking works */
+ if (!switch_tracking.comm_seen[0] || !switch_tracking.comm_seen[1] ||
+ !switch_tracking.comm_seen[2] || !switch_tracking.comm_seen[3]) {
+ pr_debug("Missing comm events\n");
+ goto out_err;
+ }
+
+ /* Check cycles event got enabled */
+ if (!switch_tracking.cycles_before_comm_1) {
+ pr_debug("Missing cycles events\n");
+ goto out_err;
+ }
+
+ /* Check cycles event got disabled */
+ if (switch_tracking.cycles_between_comm_2_and_comm_3) {
+ pr_debug("cycles events even though event was disabled\n");
+ goto out_err;
+ }
+
+ /* Check cycles event got enabled again */
+ if (!switch_tracking.cycles_after_comm_4) {
+ pr_debug("Missing cycles events\n");
+ goto out_err;
+ }
+out:
+ if (evlist) {
+ perf_evlist__disable(evlist);
+ perf_evlist__delete(evlist);
+ } else {
+ cpu_map__delete(cpus);
+ thread_map__delete(threads);
+ }
+
+ return err;
+
+out_err:
+ err = -1;
+ goto out;
+}
diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c
index 5ff3db318f12..3a8fedef83bc 100644
--- a/tools/perf/tests/task-exit.c
+++ b/tools/perf/tests/task-exit.c
@@ -42,6 +42,7 @@ int test__task_exit(void)
.uses_mmap = true,
};
const char *argv[] = { "true", NULL };
+ char sbuf[STRERR_BUFSIZE];
signal(SIGCHLD, sig_handler);
@@ -82,13 +83,14 @@ int test__task_exit(void)
err = perf_evlist__open(evlist);
if (err < 0) {
- pr_debug("Couldn't open the evlist: %s\n", strerror(-err));
+ pr_debug("Couldn't open the evlist: %s\n",
+ strerror_r(-err, sbuf, sizeof(sbuf)));
goto out_delete_evlist;
}
if (perf_evlist__mmap(evlist, 128, true) < 0) {
pr_debug("failed to mmap events: %d (%s)\n", errno,
- strerror(errno));
+ strerror_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist;
}
@@ -103,7 +105,7 @@ retry:
}
if (!exited || !nr_exit) {
- poll(evlist->pollfd, evlist->nr_fds, -1);
+ perf_evlist__poll(evlist, -1);
goto retry;
}
diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h
index ed64790a395f..00e776a87a9c 100644
--- a/tools/perf/tests/tests.h
+++ b/tools/perf/tests/tests.h
@@ -48,6 +48,9 @@ int test__mmap_thread_lookup(void);
int test__thread_mg_share(void);
int test__hists_output(void);
int test__hists_cumulate(void);
+int test__switch_tracking(void);
+int test__fdarray__filter(void);
+int test__fdarray__add(void);
#if defined(__x86_64__) || defined(__i386__) || defined(__arm__)
#ifdef HAVE_DWARF_UNWIND_SUPPORT
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index a94b11fc5e00..8f60a970404f 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -10,6 +10,7 @@
#include "../../util/pstack.h"
#include "../../util/sort.h"
#include "../../util/util.h"
+#include "../../util/top.h"
#include "../../arch/common.h"
#include "../browser.h"
@@ -228,8 +229,10 @@ static void callchain_node__init_have_children(struct callchain_node *node)
{
struct callchain_list *chain;
- list_for_each_entry(chain, &node->val, list)
+ if (!list_empty(&node->val)) {
+ chain = list_entry(node->val.prev, struct callchain_list, list);
chain->ms.has_children = !RB_EMPTY_ROOT(&node->rb_root);
+ }
callchain_node__init_have_children_rb_tree(node);
}
@@ -474,26 +477,87 @@ static char *callchain_list__sym_name(struct callchain_list *cl,
return bf;
}
+struct callchain_print_arg {
+ /* for hists browser */
+ off_t row_offset;
+ bool is_current_entry;
+
+ /* for file dump */
+ FILE *fp;
+ int printed;
+};
+
+typedef void (*print_callchain_entry_fn)(struct hist_browser *browser,
+ struct callchain_list *chain,
+ const char *str, int offset,
+ unsigned short row,
+ struct callchain_print_arg *arg);
+
+static void hist_browser__show_callchain_entry(struct hist_browser *browser,
+ struct callchain_list *chain,
+ const char *str, int offset,
+ unsigned short row,
+ struct callchain_print_arg *arg)
+{
+ int color, width;
+ char folded_sign = callchain_list__folded(chain);
+
+ color = HE_COLORSET_NORMAL;
+ width = browser->b.width - (offset + 2);
+ if (ui_browser__is_current_entry(&browser->b, row)) {
+ browser->selection = &chain->ms;
+ color = HE_COLORSET_SELECTED;
+ arg->is_current_entry = true;
+ }
+
+ ui_browser__set_color(&browser->b, color);
+ hist_browser__gotorc(browser, row, 0);
+ slsmg_write_nstring(" ", offset);
+ slsmg_printf("%c ", folded_sign);
+ slsmg_write_nstring(str, width);
+}
+
+static void hist_browser__fprintf_callchain_entry(struct hist_browser *b __maybe_unused,
+ struct callchain_list *chain,
+ const char *str, int offset,
+ unsigned short row __maybe_unused,
+ struct callchain_print_arg *arg)
+{
+ char folded_sign = callchain_list__folded(chain);
+
+ arg->printed += fprintf(arg->fp, "%*s%c %s\n", offset, " ",
+ folded_sign, str);
+}
+
+typedef bool (*check_output_full_fn)(struct hist_browser *browser,
+ unsigned short row);
+
+static bool hist_browser__check_output_full(struct hist_browser *browser,
+ unsigned short row)
+{
+ return browser->b.rows == row;
+}
+
+static bool hist_browser__check_dump_full(struct hist_browser *browser __maybe_unused,
+ unsigned short row __maybe_unused)
+{
+ return false;
+}
+
#define LEVEL_OFFSET_STEP 3
-static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *browser,
- struct callchain_node *chain_node,
- u64 total, int level,
- unsigned short row,
- off_t *row_offset,
- bool *is_current_entry)
+static int hist_browser__show_callchain(struct hist_browser *browser,
+ struct rb_root *root, int level,
+ unsigned short row, u64 total,
+ print_callchain_entry_fn print,
+ struct callchain_print_arg *arg,
+ check_output_full_fn is_output_full)
{
struct rb_node *node;
- int first_row = row, width, offset = level * LEVEL_OFFSET_STEP;
- u64 new_total, remaining;
+ int first_row = row, offset = level * LEVEL_OFFSET_STEP;
+ u64 new_total;
- if (callchain_param.mode == CHAIN_GRAPH_REL)
- new_total = chain_node->children_hit;
- else
- new_total = total;
-
- remaining = new_total;
- node = rb_first(&chain_node->rb_root);
+ node = rb_first(root);
while (node) {
struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
struct rb_node *next = rb_next(node);
@@ -503,30 +567,28 @@ static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *browse
int first = true;
int extra_offset = 0;
- remaining -= cumul;
-
list_for_each_entry(chain, &child->val, list) {
char bf[1024], *alloc_str;
const char *str;
- int color;
bool was_first = first;
if (first)
first = false;
- else
+ else if (level > 1)
extra_offset = LEVEL_OFFSET_STEP;
folded_sign = callchain_list__folded(chain);
- if (*row_offset != 0) {
- --*row_offset;
+ if (arg->row_offset != 0) {
+ arg->row_offset--;
goto do_next;
}
alloc_str = NULL;
str = callchain_list__sym_name(chain, bf, sizeof(bf),
browser->show_dso);
- if (was_first) {
- double percent = cumul * 100.0 / new_total;
+
+ if (was_first && level > 1) {
+ double percent = cumul * 100.0 / total;
if (asprintf(&alloc_str, "%2.2f%% %s", percent, str) < 0)
str = "Not enough memory!";
@@ -534,22 +596,11 @@ static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *browse
str = alloc_str;
}
- color = HE_COLORSET_NORMAL;
- width = browser->b.width - (offset + extra_offset + 2);
- if (ui_browser__is_current_entry(&browser->b, row)) {
- browser->selection = &chain->ms;
- color = HE_COLORSET_SELECTED;
- *is_current_entry = true;
- }
+ print(browser, chain, str, offset + extra_offset, row, arg);
- ui_browser__set_color(&browser->b, color);
- hist_browser__gotorc(browser, row, 0);
- slsmg_write_nstring(" ", offset + extra_offset);
- slsmg_printf("%c ", folded_sign);
- slsmg_write_nstring(str, width);
free(alloc_str);
- if (++row == browser->b.rows)
+ if (is_output_full(browser, ++row))
goto out;
do_next:
if (folded_sign == '+')
@@ -558,89 +609,21 @@ do_next:
if (folded_sign == '-') {
const int new_level = level + (extra_offset ? 2 : 1);
- row += hist_browser__show_callchain_node_rb_tree(browser, child, new_total,
- new_level, row, row_offset,
- is_current_entry);
- }
- if (row == browser->b.rows)
- goto out;
- node = next;
- }
-out:
- return row - first_row;
-}
-
-static int hist_browser__show_callchain_node(struct hist_browser *browser,
- struct callchain_node *node,
- int level, unsigned short row,
- off_t *row_offset,
- bool *is_current_entry)
-{
- struct callchain_list *chain;
- int first_row = row,
- offset = level * LEVEL_OFFSET_STEP,
- width = browser->b.width - offset;
- char folded_sign = ' ';
- list_for_each_entry(chain, &node->val, list) {
- char bf[1024], *s;
- int color;
-
- folded_sign = callchain_list__folded(chain);
-
- if (*row_offset != 0) {
- --*row_offset;
- continue;
- }
+ if (callchain_param.mode == CHAIN_GRAPH_REL)
+ new_total = child->children_hit;
+ else
+ new_total = total;
- color = HE_COLORSET_NORMAL;
- if (ui_browser__is_current_entry(&browser->b, row)) {
- browser->selection = &chain->ms;
- color = HE_COLORSET_SELECTED;
- *is_current_entry = true;
+ row += hist_browser__show_callchain(browser, &child->rb_root,
+ new_level, row, new_total,
+ print, arg, is_output_full);
}
-
- s = callchain_list__sym_name(chain, bf, sizeof(bf),
- browser->show_dso);
- hist_browser__gotorc(browser, row, 0);
- ui_browser__set_color(&browser->b, color);
- slsmg_write_nstring(" ", offset);
- slsmg_printf("%c ", folded_sign);
- slsmg_write_nstring(s, width - 2);
-
- if (++row == browser->b.rows)
- goto out;
- }
-
- if (folded_sign == '-')
- row += hist_browser__show_callchain_node_rb_tree(browser, node,
- browser->hists->stats.total_period,
- level + 1, row,
- row_offset,
- is_current_entry);
-out:
- return row - first_row;
-}
-
-static int hist_browser__show_callchain(struct hist_browser *browser,
- struct rb_root *chain,
- int level, unsigned short row,
- off_t *row_offset,
- bool *is_current_entry)
-{
- struct rb_node *nd;
- int first_row = row;
-
- for (nd = rb_first(chain); nd; nd = rb_next(nd)) {
- struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
-
- row += hist_browser__show_callchain_node(browser, node, level,
- row, row_offset,
- is_current_entry);
- if (row == browser->b.rows)
+ if (is_output_full(browser, row))
break;
+ node = next;
}
-
+out:
return row - first_row;
}
@@ -653,17 +636,18 @@ struct hpp_arg {
static int __hpp__slsmg_color_printf(struct perf_hpp *hpp, const char *fmt, ...)
{
struct hpp_arg *arg = hpp->ptr;
- int ret;
+ int ret, len;
va_list args;
double percent;
va_start(args, fmt);
+ len = va_arg(args, int);
percent = va_arg(args, double);
va_end(args);
ui_browser__set_percent_color(arg->b, percent, arg->current_entry);
- ret = scnprintf(hpp->buf, hpp->size, fmt, percent);
+ ret = scnprintf(hpp->buf, hpp->size, fmt, len, percent);
slsmg_printf("%s", hpp->buf);
advance_hpp(hpp, ret);
@@ -677,12 +661,12 @@ static u64 __hpp_get_##_field(struct hist_entry *he) \
} \
\
static int \
-hist_browser__hpp_color_##_type(struct perf_hpp_fmt *fmt __maybe_unused,\
+hist_browser__hpp_color_##_type(struct perf_hpp_fmt *fmt, \
struct perf_hpp *hpp, \
struct hist_entry *he) \
{ \
- return __hpp__fmt(hpp, he, __hpp_get_##_field, " %6.2f%%", \
- __hpp__slsmg_color_printf, true); \
+ return hpp__fmt(fmt, hpp, he, __hpp_get_##_field, " %*.2f%%", \
+ __hpp__slsmg_color_printf, true); \
}
#define __HPP_COLOR_ACC_PERCENT_FN(_type, _field) \
@@ -692,18 +676,20 @@ static u64 __hpp_get_acc_##_field(struct hist_entry *he) \
} \
\
static int \
-hist_browser__hpp_color_##_type(struct perf_hpp_fmt *fmt __maybe_unused,\
+hist_browser__hpp_color_##_type(struct perf_hpp_fmt *fmt, \
struct perf_hpp *hpp, \
struct hist_entry *he) \
{ \
if (!symbol_conf.cumulate_callchain) { \
- int ret = scnprintf(hpp->buf, hpp->size, "%8s", "N/A"); \
+ int len = fmt->user_len ?: fmt->len; \
+ int ret = scnprintf(hpp->buf, hpp->size, \
+ "%*s", len, "N/A"); \
slsmg_printf("%s", hpp->buf); \
\
return ret; \
} \
- return __hpp__fmt(hpp, he, __hpp_get_acc_##_field, " %6.2f%%", \
- __hpp__slsmg_color_printf, true); \
+ return hpp__fmt(fmt, hpp, he, __hpp_get_acc_##_field, \
+ " %*.2f%%", __hpp__slsmg_color_printf, true); \
}
__HPP_COLOR_PERCENT_FN(overhead, period)
@@ -812,10 +798,18 @@ static int hist_browser__show_entry(struct hist_browser *browser,
--row_offset;
if (folded_sign == '-' && row != browser->b.rows) {
- printed += hist_browser__show_callchain(browser, &entry->sorted_chain,
- 1, row, &row_offset,
- &current_entry);
- if (current_entry)
+ u64 total = hists__total_period(entry->hists);
+ struct callchain_print_arg arg = {
+ .row_offset = row_offset,
+ .is_current_entry = current_entry,
+ };
+
+ printed += hist_browser__show_callchain(browser,
+ &entry->sorted_chain, 1, row, total,
+ hist_browser__show_callchain_entry, &arg,
+ hist_browser__check_output_full);
+
+ if (arg.is_current_entry)
browser->he_selection = entry;
}
@@ -847,9 +841,6 @@ static int hists__scnprintf_headers(char *buf, size_t size, struct hists *hists)
if (perf_hpp__should_skip(fmt))
continue;
- /* We need to add the length of the columns header. */
- perf_hpp__reset_width(fmt, hists);
-
ret = fmt->header(fmt, &dummy_hpp, hists_to_evsel(hists));
if (advance_hpp_check(&dummy_hpp, ret))
break;
@@ -1074,113 +1065,21 @@ do_offset:
}
}
-static int hist_browser__fprintf_callchain_node_rb_tree(struct hist_browser *browser,
- struct callchain_node *chain_node,
- u64 total, int level,
- FILE *fp)
-{
- struct rb_node *node;
- int offset = level * LEVEL_OFFSET_STEP;
- u64 new_total, remaining;
- int printed = 0;
-
- if (callchain_param.mode == CHAIN_GRAPH_REL)
- new_total = chain_node->children_hit;
- else
- new_total = total;
-
- remaining = new_total;
- node = rb_first(&chain_node->rb_root);
- while (node) {
- struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
- struct rb_node *next = rb_next(node);
- u64 cumul = callchain_cumul_hits(child);
- struct callchain_list *chain;
- char folded_sign = ' ';
- int first = true;
- int extra_offset = 0;
-
- remaining -= cumul;
-
- list_for_each_entry(chain, &child->val, list) {
- char bf[1024], *alloc_str;
- const char *str;
- bool was_first = first;
-
- if (first)
- first = false;
- else
- extra_offset = LEVEL_OFFSET_STEP;
-
- folded_sign = callchain_list__folded(chain);
-
- alloc_str = NULL;
- str = callchain_list__sym_name(chain, bf, sizeof(bf),
- browser->show_dso);
- if (was_first) {
- double percent = cumul * 100.0 / new_total;
-
- if (asprintf(&alloc_str, "%2.2f%% %s", percent, str) < 0)
- str = "Not enough memory!";
- else
- str = alloc_str;
- }
-
- printed += fprintf(fp, "%*s%c %s\n", offset + extra_offset, " ", folded_sign, str);
- free(alloc_str);
- if (folded_sign == '+')
- break;
- }
-
- if (folded_sign == '-') {
- const int new_level = level + (extra_offset ? 2 : 1);
- printed += hist_browser__fprintf_callchain_node_rb_tree(browser, child, new_total,
- new_level, fp);
- }
-
- node = next;
- }
-
- return printed;
-}
-
-static int hist_browser__fprintf_callchain_node(struct hist_browser *browser,
- struct callchain_node *node,
- int level, FILE *fp)
-{
- struct callchain_list *chain;
- int offset = level * LEVEL_OFFSET_STEP;
- char folded_sign = ' ';
- int printed = 0;
-
- list_for_each_entry(chain, &node->val, list) {
- char bf[1024], *s;
-
- folded_sign = callchain_list__folded(chain);
- s = callchain_list__sym_name(chain, bf, sizeof(bf), browser->show_dso);
- printed += fprintf(fp, "%*s%c %s\n", offset, " ", folded_sign, s);
- }
-
- if (folded_sign == '-')
- printed += hist_browser__fprintf_callchain_node_rb_tree(browser, node,
- browser->hists->stats.total_period,
- level + 1, fp);
- return printed;
-}
-
static int hist_browser__fprintf_callchain(struct hist_browser *browser,
- struct rb_root *chain, int level, FILE *fp)
+ struct hist_entry *he, FILE *fp)
{
- struct rb_node *nd;
- int printed = 0;
+ u64 total = hists__total_period(he->hists);
+ struct callchain_print_arg arg = {
+ .fp = fp,
+ };
- for (nd = rb_first(chain); nd; nd = rb_next(nd)) {
- struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
+ if (symbol_conf.cumulate_callchain)
+ total = he->stat_acc->period;
- printed += hist_browser__fprintf_callchain_node(browser, node, level, fp);
- }
-
- return printed;
+ hist_browser__show_callchain(browser, &he->sorted_chain, 1, 0, total,
+ hist_browser__fprintf_callchain_entry, &arg,
+ hist_browser__check_dump_full);
+ return arg.printed;
}
static int hist_browser__fprintf_entry(struct hist_browser *browser,
@@ -1219,7 +1118,7 @@ static int hist_browser__fprintf_entry(struct hist_browser *browser,
printed += fprintf(fp, "%s\n", rtrim(s));
if (folded_sign == '-')
- printed += hist_browser__fprintf_callchain(browser, &he->sorted_chain, 1, fp);
+ printed += hist_browser__fprintf_callchain(browser, he, fp);
return printed;
}
@@ -1498,6 +1397,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
char buf[64];
char script_opt[64];
int delay_secs = hbt ? hbt->refresh : 0;
+ struct perf_hpp_fmt *fmt;
#define HIST_BROWSER_HELP_COMMON \
"h/?/F1 Show this window\n" \
@@ -1529,6 +1429,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
"P Print histograms to perf.hist.N\n"
"t Zoom into current Thread\n"
"V Verbose (DSO names in callchains, etc)\n"
+ "z Toggle zeroing of samples\n"
"/ Filter symbol by name";
if (browser == NULL)
@@ -1547,6 +1448,12 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
memset(options, 0, sizeof(options));
+ perf_hpp__for_each_format(fmt)
+ perf_hpp__reset_width(fmt, hists);
+
+ if (symbol_conf.col_width_list_str)
+ perf_hpp__set_user_width(symbol_conf.col_width_list_str);
+
while (1) {
const struct thread *thread = NULL;
const struct dso *dso = NULL;
@@ -1623,6 +1530,13 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
case 'F':
symbol_conf.filter_relative ^= 1;
continue;
+ case 'z':
+ if (!is_report_browser(hbt)) {
+ struct perf_top *top = hbt->arg;
+
+ top->zero = !top->zero;
+ }
+ continue;
case K_F1:
case 'h':
case '?':
diff --git a/tools/perf/ui/gtk/hists.c b/tools/perf/ui/gtk/hists.c
index 6ca60e482cdc..f3fa4258b256 100644
--- a/tools/perf/ui/gtk/hists.c
+++ b/tools/perf/ui/gtk/hists.c
@@ -11,6 +11,7 @@
static int __percent_color_snprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
int ret = 0;
+ int len;
va_list args;
double percent;
const char *markup;
@@ -18,6 +19,7 @@ static int __percent_color_snprintf(struct perf_hpp *hpp, const char *fmt, ...)
size_t size = hpp->size;
va_start(args, fmt);
+ len = va_arg(args, int);
percent = va_arg(args, double);
va_end(args);
@@ -25,7 +27,7 @@ static int __percent_color_snprintf(struct perf_hpp *hpp, const char *fmt, ...)
if (markup)
ret += scnprintf(buf, size, markup);
- ret += scnprintf(buf + ret, size - ret, fmt, percent);
+ ret += scnprintf(buf + ret, size - ret, fmt, len, percent);
if (markup)
ret += scnprintf(buf + ret, size - ret, "</span>");
@@ -39,12 +41,12 @@ static u64 he_get_##_field(struct hist_entry *he) \
return he->stat._field; \
} \
\
-static int perf_gtk__hpp_color_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
+static int perf_gtk__hpp_color_##_type(struct perf_hpp_fmt *fmt, \
struct perf_hpp *hpp, \
struct hist_entry *he) \
{ \
- return __hpp__fmt(hpp, he, he_get_##_field, " %6.2f%%", \
- __percent_color_snprintf, true); \
+ return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%", \
+ __percent_color_snprintf, true); \
}
#define __HPP_COLOR_ACC_PERCENT_FN(_type, _field) \
@@ -57,8 +59,8 @@ static int perf_gtk__hpp_color_##_type(struct perf_hpp_fmt *fmt __maybe_unused,
struct perf_hpp *hpp, \
struct hist_entry *he) \
{ \
- return __hpp__fmt_acc(hpp, he, he_get_acc_##_field, " %6.2f%%", \
- __percent_color_snprintf, true); \
+ return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%", \
+ __percent_color_snprintf, true); \
}
__HPP_COLOR_PERCENT_FN(overhead, period)
@@ -205,10 +207,8 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
if (perf_hpp__is_sort_entry(fmt))
sym_col = col_idx;
- fmt->header(fmt, &hpp, hists_to_evsel(hists));
-
gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
- -1, ltrim(s),
+ -1, fmt->name,
renderer, "markup",
col_idx++, NULL);
}
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index 498adb23c02e..2af18376b077 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -15,9 +15,9 @@
__ret; \
})
-int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
- hpp_field_fn get_field, const char *fmt,
- hpp_snprint_fn print_fn, bool fmt_percent)
+static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
+ hpp_field_fn get_field, const char *fmt, int len,
+ hpp_snprint_fn print_fn, bool fmt_percent)
{
int ret;
struct hists *hists = he->hists;
@@ -32,9 +32,9 @@ int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
if (total)
percent = 100.0 * get_field(he) / total;
- ret = hpp__call_print_fn(hpp, print_fn, fmt, percent);
+ ret = hpp__call_print_fn(hpp, print_fn, fmt, len, percent);
} else
- ret = hpp__call_print_fn(hpp, print_fn, fmt, get_field(he));
+ ret = hpp__call_print_fn(hpp, print_fn, fmt, len, get_field(he));
if (perf_evsel__is_group_event(evsel)) {
int prev_idx, idx_delta;
@@ -60,19 +60,19 @@ int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
*/
if (fmt_percent) {
ret += hpp__call_print_fn(hpp, print_fn,
- fmt, 0.0);
+ fmt, len, 0.0);
} else {
ret += hpp__call_print_fn(hpp, print_fn,
- fmt, 0ULL);
+ fmt, len, 0ULL);
}
}
if (fmt_percent) {
- ret += hpp__call_print_fn(hpp, print_fn, fmt,
+ ret += hpp__call_print_fn(hpp, print_fn, fmt, len,
100.0 * period / total);
} else {
ret += hpp__call_print_fn(hpp, print_fn, fmt,
- period);
+ len, period);
}
prev_idx = perf_evsel__group_idx(evsel);
@@ -86,10 +86,10 @@ int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
*/
if (fmt_percent) {
ret += hpp__call_print_fn(hpp, print_fn,
- fmt, 0.0);
+ fmt, len, 0.0);
} else {
ret += hpp__call_print_fn(hpp, print_fn,
- fmt, 0ULL);
+ fmt, len, 0ULL);
}
}
}
@@ -104,16 +104,35 @@ int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
return ret;
}
-int __hpp__fmt_acc(struct perf_hpp *hpp, struct hist_entry *he,
- hpp_field_fn get_field, const char *fmt,
- hpp_snprint_fn print_fn, bool fmt_percent)
+int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he, hpp_field_fn get_field,
+ const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
+{
+ int len = fmt->user_len ?: fmt->len;
+
+ if (symbol_conf.field_sep) {
+ return __hpp__fmt(hpp, he, get_field, fmtstr, 1,
+ print_fn, fmt_percent);
+ }
+
+ if (fmt_percent)
+ len -= 2; /* 2 for a space and a % sign */
+ else
+ len -= 1;
+
+ return __hpp__fmt(hpp, he, get_field, fmtstr, len, print_fn, fmt_percent);
+}
+
+int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he, hpp_field_fn get_field,
+ const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
{
if (!symbol_conf.cumulate_callchain) {
- return snprintf(hpp->buf, hpp->size, "%*s",
- fmt_percent ? 8 : 12, "N/A");
+ int len = fmt->user_len ?: fmt->len;
+ return snprintf(hpp->buf, hpp->size, " %*s", len - 1, "N/A");
}
- return __hpp__fmt(hpp, he, get_field, fmt, print_fn, fmt_percent);
+ return hpp__fmt(fmt, hpp, he, get_field, fmtstr, print_fn, fmt_percent);
}
static int field_cmp(u64 field_a, u64 field_b)
@@ -190,30 +209,26 @@ static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
return ret;
}
-#define __HPP_HEADER_FN(_type, _str, _min_width, _unit_width) \
-static int hpp__header_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
- struct perf_hpp *hpp, \
- struct perf_evsel *evsel) \
-{ \
- int len = _min_width; \
- \
- if (symbol_conf.event_group) \
- len = max(len, evsel->nr_members * _unit_width); \
- \
- return scnprintf(hpp->buf, hpp->size, "%*s", len, _str); \
-}
-
-#define __HPP_WIDTH_FN(_type, _min_width, _unit_width) \
-static int hpp__width_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
- struct perf_hpp *hpp __maybe_unused, \
- struct perf_evsel *evsel) \
-{ \
- int len = _min_width; \
- \
- if (symbol_conf.event_group) \
- len = max(len, evsel->nr_members * _unit_width); \
- \
- return len; \
+static int hpp__width_fn(struct perf_hpp_fmt *fmt,
+ struct perf_hpp *hpp __maybe_unused,
+ struct perf_evsel *evsel)
+{
+ int len = fmt->user_len ?: fmt->len;
+
+ if (symbol_conf.event_group)
+ len = max(len, evsel->nr_members * fmt->len);
+
+ if (len < (int)strlen(fmt->name))
+ len = strlen(fmt->name);
+
+ return len;
+}
+
+static int hpp__header_fn(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct perf_evsel *evsel)
+{
+ int len = hpp__width_fn(fmt, hpp, evsel);
+ return scnprintf(hpp->buf, hpp->size, "%*s", len, fmt->name);
}
static int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
@@ -221,11 +236,12 @@ static int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
va_list args;
ssize_t ssize = hpp->size;
double percent;
- int ret;
+ int ret, len;
va_start(args, fmt);
+ len = va_arg(args, int);
percent = va_arg(args, double);
- ret = value_color_snprintf(hpp->buf, hpp->size, fmt, percent);
+ ret = percent_color_len_snprintf(hpp->buf, hpp->size, fmt, len, percent);
va_end(args);
return (ret >= ssize) ? (ssize - 1) : ret;
@@ -250,20 +266,19 @@ static u64 he_get_##_field(struct hist_entry *he) \
return he->stat._field; \
} \
\
-static int hpp__color_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
+static int hpp__color_##_type(struct perf_hpp_fmt *fmt, \
struct perf_hpp *hpp, struct hist_entry *he) \
{ \
- return __hpp__fmt(hpp, he, he_get_##_field, " %6.2f%%", \
- hpp_color_scnprintf, true); \
+ return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%", \
+ hpp_color_scnprintf, true); \
}
#define __HPP_ENTRY_PERCENT_FN(_type, _field) \
-static int hpp__entry_##_type(struct perf_hpp_fmt *_fmt __maybe_unused, \
+static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \
struct perf_hpp *hpp, struct hist_entry *he) \
{ \
- const char *fmt = symbol_conf.field_sep ? " %.2f" : " %6.2f%%"; \
- return __hpp__fmt(hpp, he, he_get_##_field, fmt, \
- hpp_entry_scnprintf, true); \
+ return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%", \
+ hpp_entry_scnprintf, true); \
}
#define __HPP_SORT_FN(_type, _field) \
@@ -278,20 +293,19 @@ static u64 he_get_acc_##_field(struct hist_entry *he) \
return he->stat_acc->_field; \
} \
\
-static int hpp__color_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
+static int hpp__color_##_type(struct perf_hpp_fmt *fmt, \
struct perf_hpp *hpp, struct hist_entry *he) \
{ \
- return __hpp__fmt_acc(hpp, he, he_get_acc_##_field, " %6.2f%%", \
- hpp_color_scnprintf, true); \
+ return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%", \
+ hpp_color_scnprintf, true); \
}
#define __HPP_ENTRY_ACC_PERCENT_FN(_type, _field) \
-static int hpp__entry_##_type(struct perf_hpp_fmt *_fmt __maybe_unused, \
+static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \
struct perf_hpp *hpp, struct hist_entry *he) \
{ \
- const char *fmt = symbol_conf.field_sep ? " %.2f" : " %6.2f%%"; \
- return __hpp__fmt_acc(hpp, he, he_get_acc_##_field, fmt, \
- hpp_entry_scnprintf, true); \
+ return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%", \
+ hpp_entry_scnprintf, true); \
}
#define __HPP_SORT_ACC_FN(_type, _field) \
@@ -306,12 +320,11 @@ static u64 he_get_raw_##_field(struct hist_entry *he) \
return he->stat._field; \
} \
\
-static int hpp__entry_##_type(struct perf_hpp_fmt *_fmt __maybe_unused, \
+static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \
struct perf_hpp *hpp, struct hist_entry *he) \
{ \
- const char *fmt = symbol_conf.field_sep ? " %"PRIu64 : " %11"PRIu64; \
- return __hpp__fmt(hpp, he, he_get_raw_##_field, fmt, \
- hpp_entry_scnprintf, false); \
+ return hpp__fmt(fmt, hpp, he, he_get_raw_##_field, " %*"PRIu64, \
+ hpp_entry_scnprintf, false); \
}
#define __HPP_SORT_RAW_FN(_type, _field) \
@@ -321,37 +334,29 @@ static int64_t hpp__sort_##_type(struct hist_entry *a, struct hist_entry *b) \
}
-#define HPP_PERCENT_FNS(_type, _str, _field, _min_width, _unit_width) \
-__HPP_HEADER_FN(_type, _str, _min_width, _unit_width) \
-__HPP_WIDTH_FN(_type, _min_width, _unit_width) \
+#define HPP_PERCENT_FNS(_type, _field) \
__HPP_COLOR_PERCENT_FN(_type, _field) \
__HPP_ENTRY_PERCENT_FN(_type, _field) \
__HPP_SORT_FN(_type, _field)
-#define HPP_PERCENT_ACC_FNS(_type, _str, _field, _min_width, _unit_width)\
-__HPP_HEADER_FN(_type, _str, _min_width, _unit_width) \
-__HPP_WIDTH_FN(_type, _min_width, _unit_width) \
+#define HPP_PERCENT_ACC_FNS(_type, _field) \
__HPP_COLOR_ACC_PERCENT_FN(_type, _field) \
__HPP_ENTRY_ACC_PERCENT_FN(_type, _field) \
__HPP_SORT_ACC_FN(_type, _field)
-#define HPP_RAW_FNS(_type, _str, _field, _min_width, _unit_width) \
-__HPP_HEADER_FN(_type, _str, _min_width, _unit_width) \
-__HPP_WIDTH_FN(_type, _min_width, _unit_width) \
+#define HPP_RAW_FNS(_type, _field) \
__HPP_ENTRY_RAW_FN(_type, _field) \
__HPP_SORT_RAW_FN(_type, _field)
-__HPP_HEADER_FN(overhead_self, "Self", 8, 8)
-
-HPP_PERCENT_FNS(overhead, "Overhead", period, 8, 8)
-HPP_PERCENT_FNS(overhead_sys, "sys", period_sys, 8, 8)
-HPP_PERCENT_FNS(overhead_us, "usr", period_us, 8, 8)
-HPP_PERCENT_FNS(overhead_guest_sys, "guest sys", period_guest_sys, 9, 8)
-HPP_PERCENT_FNS(overhead_guest_us, "guest usr", period_guest_us, 9, 8)
-HPP_PERCENT_ACC_FNS(overhead_acc, "Children", period, 8, 8)
+HPP_PERCENT_FNS(overhead, period)
+HPP_PERCENT_FNS(overhead_sys, period_sys)
+HPP_PERCENT_FNS(overhead_us, period_us)
+HPP_PERCENT_FNS(overhead_guest_sys, period_guest_sys)
+HPP_PERCENT_FNS(overhead_guest_us, period_guest_us)
+HPP_PERCENT_ACC_FNS(overhead_acc, period)
-HPP_RAW_FNS(samples, "Samples", nr_events, 12, 12)
-HPP_RAW_FNS(period, "Period", period, 12, 12)
+HPP_RAW_FNS(samples, nr_events)
+HPP_RAW_FNS(period, period)
static int64_t hpp__nop_cmp(struct hist_entry *a __maybe_unused,
struct hist_entry *b __maybe_unused)
@@ -359,47 +364,50 @@ static int64_t hpp__nop_cmp(struct hist_entry *a __maybe_unused,
return 0;
}
-#define HPP__COLOR_PRINT_FNS(_name) \
+#define HPP__COLOR_PRINT_FNS(_name, _fn) \
{ \
- .header = hpp__header_ ## _name, \
- .width = hpp__width_ ## _name, \
- .color = hpp__color_ ## _name, \
- .entry = hpp__entry_ ## _name, \
+ .name = _name, \
+ .header = hpp__header_fn, \
+ .width = hpp__width_fn, \
+ .color = hpp__color_ ## _fn, \
+ .entry = hpp__entry_ ## _fn, \
.cmp = hpp__nop_cmp, \
.collapse = hpp__nop_cmp, \
- .sort = hpp__sort_ ## _name, \
+ .sort = hpp__sort_ ## _fn, \
}
-#define HPP__COLOR_ACC_PRINT_FNS(_name) \
+#define HPP__COLOR_ACC_PRINT_FNS(_name, _fn) \
{ \
- .header = hpp__header_ ## _name, \
- .width = hpp__width_ ## _name, \
- .color = hpp__color_ ## _name, \
- .entry = hpp__entry_ ## _name, \
+ .name = _name, \
+ .header = hpp__header_fn, \
+ .width = hpp__width_fn, \
+ .color = hpp__color_ ## _fn, \
+ .entry = hpp__entry_ ## _fn, \
.cmp = hpp__nop_cmp, \
.collapse = hpp__nop_cmp, \
- .sort = hpp__sort_ ## _name, \
+ .sort = hpp__sort_ ## _fn, \
}
-#define HPP__PRINT_FNS(_name) \
+#define HPP__PRINT_FNS(_name, _fn) \
{ \
- .header = hpp__header_ ## _name, \
- .width = hpp__width_ ## _name, \
- .entry = hpp__entry_ ## _name, \
+ .name = _name, \
+ .header = hpp__header_fn, \
+ .width = hpp__width_fn, \
+ .entry = hpp__entry_ ## _fn, \
.cmp = hpp__nop_cmp, \
.collapse = hpp__nop_cmp, \
- .sort = hpp__sort_ ## _name, \
+ .sort = hpp__sort_ ## _fn, \
}
struct perf_hpp_fmt perf_hpp__format[] = {
- HPP__COLOR_PRINT_FNS(overhead),
- HPP__COLOR_PRINT_FNS(overhead_sys),
- HPP__COLOR_PRINT_FNS(overhead_us),
- HPP__COLOR_PRINT_FNS(overhead_guest_sys),
- HPP__COLOR_PRINT_FNS(overhead_guest_us),
- HPP__COLOR_ACC_PRINT_FNS(overhead_acc),
- HPP__PRINT_FNS(samples),
- HPP__PRINT_FNS(period)
+ HPP__COLOR_PRINT_FNS("Overhead", overhead),
+ HPP__COLOR_PRINT_FNS("sys", overhead_sys),
+ HPP__COLOR_PRINT_FNS("usr", overhead_us),
+ HPP__COLOR_PRINT_FNS("guest sys", overhead_guest_sys),
+ HPP__COLOR_PRINT_FNS("guest usr", overhead_guest_us),
+ HPP__COLOR_ACC_PRINT_FNS("Children", overhead_acc),
+ HPP__PRINT_FNS("Samples", samples),
+ HPP__PRINT_FNS("Period", period)
};
LIST_HEAD(perf_hpp__list);
@@ -444,14 +452,12 @@ void perf_hpp__init(void)
/*
* If user specified field order, no need to setup default fields.
*/
- if (field_order)
+ if (is_strict_order(field_order))
return;
if (symbol_conf.cumulate_callchain) {
perf_hpp__column_enable(PERF_HPP__OVERHEAD_ACC);
-
- perf_hpp__format[PERF_HPP__OVERHEAD].header =
- hpp__header_overhead_self;
+ perf_hpp__format[PERF_HPP__OVERHEAD].name = "Self";
}
perf_hpp__column_enable(PERF_HPP__OVERHEAD);
@@ -513,11 +519,11 @@ void perf_hpp__column_disable(unsigned col)
void perf_hpp__cancel_cumulate(void)
{
- if (field_order)
+ if (is_strict_order(field_order))
return;
perf_hpp__column_disable(PERF_HPP__OVERHEAD_ACC);
- perf_hpp__format[PERF_HPP__OVERHEAD].header = hpp__header_overhead;
+ perf_hpp__format[PERF_HPP__OVERHEAD].name = "Overhead";
}
void perf_hpp__setup_output_field(void)
@@ -622,3 +628,59 @@ unsigned int hists__sort_list_width(struct hists *hists)
return ret;
}
+
+void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists)
+{
+ int idx;
+
+ if (perf_hpp__is_sort_entry(fmt))
+ return perf_hpp__reset_sort_width(fmt, hists);
+
+ for (idx = 0; idx < PERF_HPP__MAX_INDEX; idx++) {
+ if (fmt == &perf_hpp__format[idx])
+ break;
+ }
+
+ if (idx == PERF_HPP__MAX_INDEX)
+ return;
+
+ switch (idx) {
+ case PERF_HPP__OVERHEAD:
+ case PERF_HPP__OVERHEAD_SYS:
+ case PERF_HPP__OVERHEAD_US:
+ case PERF_HPP__OVERHEAD_ACC:
+ fmt->len = 8;
+ break;
+
+ case PERF_HPP__OVERHEAD_GUEST_SYS:
+ case PERF_HPP__OVERHEAD_GUEST_US:
+ fmt->len = 9;
+ break;
+
+ case PERF_HPP__SAMPLES:
+ case PERF_HPP__PERIOD:
+ fmt->len = 12;
+ break;
+
+ default:
+ break;
+ }
+}
+
+void perf_hpp__set_user_width(const char *width_list_str)
+{
+ struct perf_hpp_fmt *fmt;
+ const char *ptr = width_list_str;
+
+ perf_hpp__for_each_format(fmt) {
+ char *p;
+
+ int len = strtol(ptr, &p, 10);
+ fmt->user_len = len;
+
+ if (*p == ',')
+ ptr = p + 1;
+ else
+ break;
+ }
+}
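/*
 * Editor's note (not part of the patch): a minimal, self-contained sketch of
 * the width-list parsing that perf_hpp__set_user_width() above performs over
 * the perf_hpp_fmt list.  set_user_widths() and the plain int array are
 * hypothetical stand-ins used only for illustration.
 */
#include <stdio.h>
#include <stdlib.h>

static void set_user_widths(int *user_len, int ncols, const char *str)
{
	const char *ptr = str;
	int i;

	for (i = 0; i < ncols; i++) {
		char *p;
		int len = strtol(ptr, &p, 10);

		user_len[i] = len;
		if (*p == ',')
			ptr = p + 1;
		else
			break;	/* remaining columns keep their defaults */
	}
}

int main(void)
{
	int user_len[4] = { 0, 0, 0, 0 };

	set_user_widths(user_len, 4, "8,10,30");
	printf("%d %d %d %d\n", user_len[0], user_len[1],
	       user_len[2], user_len[3]);	/* prints: 8 10 30 0 */
	return 0;
}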
diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c
index 40af0acb4fe9..15b451acbde6 100644
--- a/tools/perf/ui/stdio/hist.c
+++ b/tools/perf/ui/stdio/hist.c
@@ -395,10 +395,12 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
init_rem_hits();
-
perf_hpp__for_each_format(fmt)
perf_hpp__reset_width(fmt, hists);
+ if (symbol_conf.col_width_list_str)
+ perf_hpp__set_user_width(symbol_conf.col_width_list_str);
+
if (!show_header)
goto print_entries;
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 809b4c50beae..36437527dbb3 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -232,9 +232,16 @@ static int mov__parse(struct ins_operands *ops)
return -1;
target = ++s;
+ comment = strchr(s, '#');
- while (s[0] != '\0' && !isspace(s[0]))
- ++s;
+ if (comment != NULL)
+ s = comment - 1;
+ else
+ s = strchr(s, '\0') - 1;
+
+ while (s > target && isspace(s[0]))
+ --s;
+ s++;
prev = *s;
*s = '\0';
@@ -244,7 +251,6 @@ static int mov__parse(struct ins_operands *ops)
if (ops->target.raw == NULL)
goto out_free_source;
- comment = strchr(s, '#');
if (comment == NULL)
return 0;
@@ -899,10 +905,8 @@ int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize)
struct kcore_extract kce;
bool delete_extract = false;
- if (filename) {
- snprintf(symfs_filename, sizeof(symfs_filename), "%s%s",
- symbol_conf.symfs, filename);
- }
+ if (filename)
+ symbol__join_symfs(symfs_filename, filename);
if (filename == NULL) {
if (dso->has_build_id) {
@@ -922,8 +926,7 @@ fallback:
* DSO is the same as when 'perf record' ran.
*/
filename = (char *)dso->long_name;
- snprintf(symfs_filename, sizeof(symfs_filename), "%s%s",
- symbol_conf.symfs, filename);
+ symbol__join_symfs(symfs_filename, filename);
free_filename = false;
}
diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h
index 7b176dd02e1a..5cf9e1b5989d 100644
--- a/tools/perf/util/cache.h
+++ b/tools/perf/util/cache.h
@@ -22,6 +22,7 @@ typedef int (*config_fn_t)(const char *, const char *, void *);
extern int perf_default_config(const char *, const char *, void *);
extern int perf_config(config_fn_t fn, void *);
extern int perf_config_int(const char *, const char *);
+extern u64 perf_config_u64(const char *, const char *);
extern int perf_config_bool(const char *, const char *);
extern int config_error_nonbool(const char *);
extern const char *perf_config_dirname(const char *, const char *);
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 437ee09727e6..c84d3f8dcb75 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -25,77 +25,172 @@
__thread struct callchain_cursor callchain_cursor;
-int
-parse_callchain_report_opt(const char *arg)
+#ifdef HAVE_DWARF_UNWIND_SUPPORT
+static int get_stack_size(const char *str, unsigned long *_size)
{
- char *tok, *tok2;
char *endptr;
+ unsigned long size;
+ unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));
- symbol_conf.use_callchain = true;
+ size = strtoul(str, &endptr, 0);
- if (!arg)
+ do {
+ if (*endptr)
+ break;
+
+ size = round_up(size, sizeof(u64));
+ if (!size || size > max_size)
+ break;
+
+ *_size = size;
return 0;
- tok = strtok((char *)arg, ",");
- if (!tok)
- return -1;
+ } while (0);
- /* get the output mode */
- if (!strncmp(tok, "graph", strlen(arg))) {
- callchain_param.mode = CHAIN_GRAPH_ABS;
+ pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
+ max_size, str);
+ return -1;
+}
+#endif /* HAVE_DWARF_UNWIND_SUPPORT */
- } else if (!strncmp(tok, "flat", strlen(arg))) {
- callchain_param.mode = CHAIN_FLAT;
- } else if (!strncmp(tok, "fractal", strlen(arg))) {
- callchain_param.mode = CHAIN_GRAPH_REL;
- } else if (!strncmp(tok, "none", strlen(arg))) {
- callchain_param.mode = CHAIN_NONE;
- symbol_conf.use_callchain = false;
- return 0;
- } else {
- return -1;
- }
+int parse_callchain_record_opt(const char *arg)
+{
+ char *tok, *name, *saveptr = NULL;
+ char *buf;
+ int ret = -1;
+
+ /* We need a buffer that we know we can write to. */
+ buf = malloc(strlen(arg) + 1);
+ if (!buf)
+ return -ENOMEM;
+
+ strcpy(buf, arg);
+
+ tok = strtok_r((char *)buf, ",", &saveptr);
+ name = tok ? : (char *)buf;
+
+ do {
+ /* Framepointer style */
+ if (!strncmp(name, "fp", sizeof("fp"))) {
+ if (!strtok_r(NULL, ",", &saveptr)) {
+ callchain_param.record_mode = CALLCHAIN_FP;
+ ret = 0;
+ } else
+ pr_err("callchain: No more arguments "
+ "needed for -g fp\n");
+ break;
- /* get the min percentage */
- tok = strtok(NULL, ",");
- if (!tok)
- goto setup;
+#ifdef HAVE_DWARF_UNWIND_SUPPORT
+ /* Dwarf style */
+ } else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
+ const unsigned long default_stack_dump_size = 8192;
- callchain_param.min_percent = strtod(tok, &endptr);
- if (tok == endptr)
- return -1;
+ ret = 0;
+ callchain_param.record_mode = CALLCHAIN_DWARF;
+ callchain_param.dump_size = default_stack_dump_size;
- /* get the print limit */
- tok2 = strtok(NULL, ",");
- if (!tok2)
- goto setup;
+ tok = strtok_r(NULL, ",", &saveptr);
+ if (tok) {
+ unsigned long size = 0;
- if (tok2[0] != 'c') {
- callchain_param.print_limit = strtoul(tok2, &endptr, 0);
- tok2 = strtok(NULL, ",");
- if (!tok2)
- goto setup;
+ ret = get_stack_size(tok, &size);
+ callchain_param.dump_size = size;
+ }
+#endif /* HAVE_DWARF_UNWIND_SUPPORT */
+ } else {
+ pr_err("callchain: Unknown --call-graph option "
+ "value: %s\n", arg);
+ break;
+ }
+
+ } while (0);
+
+ free(buf);
+ return ret;
+}
+
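/*
 * Editor's note (not part of the patch): the strings the parser above
 * accepts, as they would arrive from the --call-graph record option or from
 * the new call-graph.record-mode config key.  The wrapper function is
 * hypothetical and only illustrates the accepted values.
 */
static void example_record_modes(void)
{
	parse_callchain_record_opt("fp");	   /* CALLCHAIN_FP */
#ifdef HAVE_DWARF_UNWIND_SUPPORT
	parse_callchain_record_opt("dwarf");	   /* CALLCHAIN_DWARF, default 8192-byte dump */
	parse_callchain_record_opt("dwarf,4096");  /* CALLCHAIN_DWARF, 4096-byte dump */
#endif
}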
+static int parse_callchain_mode(const char *value)
+{
+ if (!strncmp(value, "graph", strlen(value))) {
+ callchain_param.mode = CHAIN_GRAPH_ABS;
+ return 0;
+ }
+ if (!strncmp(value, "flat", strlen(value))) {
+ callchain_param.mode = CHAIN_FLAT;
+ return 0;
}
+ if (!strncmp(value, "fractal", strlen(value))) {
+ callchain_param.mode = CHAIN_GRAPH_REL;
+ return 0;
+ }
+ return -1;
+}
- /* get the call chain order */
- if (!strncmp(tok2, "caller", strlen("caller")))
+static int parse_callchain_order(const char *value)
+{
+ if (!strncmp(value, "caller", strlen(value))) {
callchain_param.order = ORDER_CALLER;
- else if (!strncmp(tok2, "callee", strlen("callee")))
+ return 0;
+ }
+ if (!strncmp(value, "callee", strlen(value))) {
callchain_param.order = ORDER_CALLEE;
- else
- return -1;
+ return 0;
+ }
+ return -1;
+}
- /* Get the sort key */
- tok2 = strtok(NULL, ",");
- if (!tok2)
- goto setup;
- if (!strncmp(tok2, "function", strlen("function")))
+static int parse_callchain_sort_key(const char *value)
+{
+ if (!strncmp(value, "function", strlen(value))) {
callchain_param.key = CCKEY_FUNCTION;
- else if (!strncmp(tok2, "address", strlen("address")))
+ return 0;
+ }
+ if (!strncmp(value, "address", strlen(value))) {
callchain_param.key = CCKEY_ADDRESS;
- else
- return -1;
-setup:
+ return 0;
+ }
+ return -1;
+}
+
+int
+parse_callchain_report_opt(const char *arg)
+{
+ char *tok;
+ char *endptr;
+ bool minpcnt_set = false;
+
+ symbol_conf.use_callchain = true;
+
+ if (!arg)
+ return 0;
+
+ while ((tok = strtok((char *)arg, ",")) != NULL) {
+ if (!strncmp(tok, "none", strlen(tok))) {
+ callchain_param.mode = CHAIN_NONE;
+ symbol_conf.use_callchain = false;
+ return 0;
+ }
+
+ if (!parse_callchain_mode(tok) ||
+ !parse_callchain_order(tok) ||
+ !parse_callchain_sort_key(tok)) {
+ /* parsing ok - move on to the next */
+ } else if (!minpcnt_set) {
+ /* try to get the min percent */
+ callchain_param.min_percent = strtod(tok, &endptr);
+ if (tok == endptr)
+ return -1;
+ minpcnt_set = true;
+ } else {
+ /* try the print limit last */
+ callchain_param.print_limit = strtoul(tok, &endptr, 0);
+ if (tok == endptr)
+ return -1;
+ }
+
+ arg = NULL;
+ }
+
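/*
 * Editor's note (not part of the patch): with the rewritten loop above the
 * report option tokens may come in any order; the first token that is not a
 * mode, order or sort key is taken as the minimum percent, the next one as
 * the print limit.  All of the following are therefore accepted (the wrapper
 * function is hypothetical):
 */
static void example_report_opts(void)
{
	parse_callchain_report_opt("graph,0.5,caller,function");
	parse_callchain_report_opt("fractal,callee");
	parse_callchain_report_opt("flat,1,128");
}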
if (callchain_register_param(&callchain_param) < 0) {
pr_err("Can't register callchain params\n");
return -1;
@@ -103,6 +198,47 @@ setup:
return 0;
}
+int perf_callchain_config(const char *var, const char *value)
+{
+ char *endptr;
+
+ if (prefixcmp(var, "call-graph."))
+ return 0;
+ var += sizeof("call-graph.") - 1;
+
+ if (!strcmp(var, "record-mode"))
+ return parse_callchain_record_opt(value);
+#ifdef HAVE_DWARF_UNWIND_SUPPORT
+ if (!strcmp(var, "dump-size")) {
+ unsigned long size = 0;
+ int ret;
+
+ ret = get_stack_size(value, &size);
+ callchain_param.dump_size = size;
+
+ return ret;
+ }
+#endif
+ if (!strcmp(var, "print-type"))
+ return parse_callchain_mode(value);
+ if (!strcmp(var, "order"))
+ return parse_callchain_order(value);
+ if (!strcmp(var, "sort-key"))
+ return parse_callchain_sort_key(value);
+ if (!strcmp(var, "threshold")) {
+ callchain_param.min_percent = strtod(value, &endptr);
+ if (value == endptr)
+ return -1;
+ }
+ if (!strcmp(var, "print-limit")) {
+ callchain_param.print_limit = strtod(value, &endptr);
+ if (value == endptr)
+ return -1;
+ }
+
+ return 0;
+}
+
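/*
 * Editor's note (not part of the patch): the keys perf_callchain_config()
 * dispatches on, written as direct calls for illustration.  The values are
 * hypothetical; in practice they come from a [call-graph] perfconfig section
 * via perf_default_config().
 */
static void example_callchain_config(void)
{
	perf_callchain_config("call-graph.record-mode", "dwarf");
	perf_callchain_config("call-graph.dump-size",   "8192"); /* honoured only with HAVE_DWARF_UNWIND_SUPPORT */
	perf_callchain_config("call-graph.print-type",  "graph");
	perf_callchain_config("call-graph.order",       "callee");
	perf_callchain_config("call-graph.sort-key",    "function");
	perf_callchain_config("call-graph.threshold",   "0.5");
	perf_callchain_config("call-graph.print-limit", "128");
}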
static void
rb_insert_callchain(struct rb_root *root, struct callchain_node *chain,
enum chain_mode mode)
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index da43619d6173..2a1f5a46543a 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -54,6 +54,9 @@ enum chain_key {
};
struct callchain_param {
+ bool enabled;
+ enum perf_call_graph_mode record_mode;
+ u32 dump_size;
enum chain_mode mode;
u32 print_limit;
double min_percent;
@@ -154,7 +157,6 @@ static inline void callchain_cursor_advance(struct callchain_cursor *cursor)
struct option;
struct hist_entry;
-int record_parse_callchain(const char *arg, struct record_opts *opts);
int record_parse_callchain_opt(const struct option *opt, const char *arg, int unset);
int record_callchain_opt(const struct option *opt, const char *arg, int unset);
@@ -166,7 +168,9 @@ int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *
bool hide_unresolved);
extern const char record_callchain_help[];
+int parse_callchain_record_opt(const char *arg);
int parse_callchain_report_opt(const char *arg);
+int perf_callchain_config(const char *var, const char *value);
static inline void callchain_cursor_snapshot(struct callchain_cursor *dest,
struct callchain_cursor *src)
diff --git a/tools/perf/util/cloexec.c b/tools/perf/util/cloexec.c
index c5d05ec17220..47b78b3f0325 100644
--- a/tools/perf/util/cloexec.c
+++ b/tools/perf/util/cloexec.c
@@ -1,7 +1,9 @@
+#include <sched.h>
#include "util.h"
#include "../perf.h"
#include "cloexec.h"
#include "asm/bug.h"
+#include "debug.h"
static unsigned long flag = PERF_FLAG_FD_CLOEXEC;
@@ -9,15 +11,30 @@ static int perf_flag_probe(void)
{
/* use 'safest' configuration as used in perf_evsel__fallback() */
struct perf_event_attr attr = {
- .type = PERF_COUNT_SW_CPU_CLOCK,
+ .type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_CPU_CLOCK,
+ .exclude_kernel = 1,
};
int fd;
int err;
+ int cpu;
+ pid_t pid = -1;
+ char sbuf[STRERR_BUFSIZE];
- /* check cloexec flag */
- fd = sys_perf_event_open(&attr, 0, -1, -1,
- PERF_FLAG_FD_CLOEXEC);
+ cpu = sched_getcpu();
+ if (cpu < 0)
+ cpu = 0;
+
+ while (1) {
+ /* check cloexec flag */
+ fd = sys_perf_event_open(&attr, pid, cpu, -1,
+ PERF_FLAG_FD_CLOEXEC);
+ if (fd < 0 && pid == -1 && errno == EACCES) {
+ pid = 0;
+ continue;
+ }
+ break;
+ }
err = errno;
if (fd >= 0) {
@@ -25,17 +42,17 @@ static int perf_flag_probe(void)
return 1;
}
- WARN_ONCE(err != EINVAL,
+ WARN_ONCE(err != EINVAL && err != EBUSY,
"perf_event_open(..., PERF_FLAG_FD_CLOEXEC) failed with unexpected error %d (%s)\n",
- err, strerror(err));
+ err, strerror_r(err, sbuf, sizeof(sbuf)));
/* not supported, confirm error related to PERF_FLAG_FD_CLOEXEC */
- fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
+ fd = sys_perf_event_open(&attr, pid, cpu, -1, 0);
err = errno;
- if (WARN_ONCE(fd < 0,
+ if (WARN_ONCE(fd < 0 && err != EBUSY,
"perf_event_open(..., 0) failed unexpectedly with error %d (%s)\n",
- err, strerror(err)))
+ err, strerror_r(err, sbuf, sizeof(sbuf))))
return -1;
close(fd);
diff --git a/tools/perf/util/color.c b/tools/perf/util/color.c
index 87b8672eb413..f4654183d391 100644
--- a/tools/perf/util/color.c
+++ b/tools/perf/util/color.c
@@ -335,3 +335,19 @@ int percent_color_snprintf(char *bf, size_t size, const char *fmt, ...)
va_end(args);
return value_color_snprintf(bf, size, fmt, percent);
}
+
+int percent_color_len_snprintf(char *bf, size_t size, const char *fmt, ...)
+{
+ va_list args;
+ int len;
+ double percent;
+ const char *color;
+
+ va_start(args, fmt);
+ len = va_arg(args, int);
+ percent = va_arg(args, double);
+ va_end(args);
+
+ color = get_percent_color(percent);
+ return color_snprintf(bf, size, color, fmt, len, percent);
+}
diff --git a/tools/perf/util/color.h b/tools/perf/util/color.h
index 7ff30a62a132..0a594b8a0c26 100644
--- a/tools/perf/util/color.h
+++ b/tools/perf/util/color.h
@@ -41,6 +41,7 @@ int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...);
int color_fwrite_lines(FILE *fp, const char *color, size_t count, const char *buf);
int value_color_snprintf(char *bf, size_t size, const char *fmt, double value);
int percent_color_snprintf(char *bf, size_t size, const char *fmt, ...);
+int percent_color_len_snprintf(char *bf, size_t size, const char *fmt, ...);
int percent_color_fprintf(FILE *fp, const char *fmt, double percent);
const char *get_percent_color(double percent);
diff --git a/tools/perf/util/comm.c b/tools/perf/util/comm.c
index f9e777629e21..b2bb59df65e1 100644
--- a/tools/perf/util/comm.c
+++ b/tools/perf/util/comm.c
@@ -74,7 +74,7 @@ static struct comm_str *comm_str__findnew(const char *str, struct rb_root *root)
return new;
}
-struct comm *comm__new(const char *str, u64 timestamp)
+struct comm *comm__new(const char *str, u64 timestamp, bool exec)
{
struct comm *comm = zalloc(sizeof(*comm));
@@ -82,6 +82,7 @@ struct comm *comm__new(const char *str, u64 timestamp)
return NULL;
comm->start = timestamp;
+ comm->exec = exec;
comm->comm_str = comm_str__findnew(str, &comm_str_root);
if (!comm->comm_str) {
@@ -94,7 +95,7 @@ struct comm *comm__new(const char *str, u64 timestamp)
return comm;
}
-int comm__override(struct comm *comm, const char *str, u64 timestamp)
+int comm__override(struct comm *comm, const char *str, u64 timestamp, bool exec)
{
struct comm_str *new, *old = comm->comm_str;
@@ -106,6 +107,8 @@ int comm__override(struct comm *comm, const char *str, u64 timestamp)
comm_str__put(old);
comm->comm_str = new;
comm->start = timestamp;
+ if (exec)
+ comm->exec = true;
return 0;
}
diff --git a/tools/perf/util/comm.h b/tools/perf/util/comm.h
index fac5bd51befc..51c10ab257f8 100644
--- a/tools/perf/util/comm.h
+++ b/tools/perf/util/comm.h
@@ -11,11 +11,13 @@ struct comm {
struct comm_str *comm_str;
u64 start;
struct list_head list;
+ bool exec;
};
void comm__free(struct comm *comm);
-struct comm *comm__new(const char *str, u64 timestamp);
+struct comm *comm__new(const char *str, u64 timestamp, bool exec);
const char *comm__str(const struct comm *comm);
-int comm__override(struct comm *comm, const char *str, u64 timestamp);
+int comm__override(struct comm *comm, const char *str, u64 timestamp,
+ bool exec);
#endif /* __PERF_COMM_H */
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 1e5e2e5af6b1..57ff826f150b 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -222,7 +222,8 @@ static int perf_parse_file(config_fn_t fn, void *data)
const unsigned char *bomptr = utf8_bom;
for (;;) {
- int c = get_next_char();
+ int line, c = get_next_char();
+
if (bomptr && *bomptr) {
/* We are at the file beginning; skip UTF8-encoded BOM
* if present. Sane editors won't put this in on their
@@ -261,8 +262,16 @@ static int perf_parse_file(config_fn_t fn, void *data)
if (!isalpha(c))
break;
var[baselen] = tolower(c);
- if (get_value(fn, data, var, baselen+1) < 0)
+
+ /*
+ * The get_value function might or might not reach the '\n',
+ * so save the current line number for error reporting.
+ */
+ line = config_linenr;
+ if (get_value(fn, data, var, baselen+1) < 0) {
+ config_linenr = line;
break;
+ }
}
die("bad config file line %d in %s", config_linenr, config_file_name);
}
@@ -286,6 +295,21 @@ static int parse_unit_factor(const char *end, unsigned long *val)
return 0;
}
+static int perf_parse_llong(const char *value, long long *ret)
+{
+ if (value && *value) {
+ char *end;
+ long long val = strtoll(value, &end, 0);
+ unsigned long factor = 1;
+
+ if (!parse_unit_factor(end, &factor))
+ return 0;
+ *ret = val * factor;
+ return 1;
+ }
+ return 0;
+}
+
static int perf_parse_long(const char *value, long *ret)
{
if (value && *value) {
@@ -307,6 +331,15 @@ static void die_bad_config(const char *name)
die("bad config value for '%s'", name);
}
+u64 perf_config_u64(const char *name, const char *value)
+{
+ long long ret = 0;
+
+ if (!perf_parse_llong(value, &ret))
+ die_bad_config(name);
+ return (u64) ret;
+}
+
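/*
 * Editor's note (not part of the patch): a small usage sketch.  The section
 * and key names are hypothetical; the point is that the new helper goes
 * through parse_unit_factor(), so suffixed values are accepted, and a
 * malformed value still dies via die_bad_config(), just like
 * perf_config_int():
 *
 *	u64 bytes = perf_config_u64("some-section.buffer-size", "8k");
 */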
int perf_config_int(const char *name, const char *value)
{
long ret = 0;
@@ -372,6 +405,9 @@ int perf_default_config(const char *var, const char *value,
if (!prefixcmp(var, "ui."))
return perf_ui_config(var, value);
+ if (!prefixcmp(var, "call-graph."))
+ return perf_callchain_config(var, value);
+
/* Add other config variables here. */
return 0;
}
diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
index 29d720cf5844..1921942fc2e0 100644
--- a/tools/perf/util/data.c
+++ b/tools/perf/util/data.c
@@ -50,12 +50,14 @@ static int open_file_read(struct perf_data_file *file)
{
struct stat st;
int fd;
+ char sbuf[STRERR_BUFSIZE];
fd = open(file->path, O_RDONLY);
if (fd < 0) {
int err = errno;
- pr_err("failed to open %s: %s", file->path, strerror(err));
+ pr_err("failed to open %s: %s", file->path,
+ strerror_r(err, sbuf, sizeof(sbuf)));
if (err == ENOENT && !strcmp(file->path, "perf.data"))
pr_err(" (try 'perf record' first)");
pr_err("\n");
@@ -88,6 +90,7 @@ static int open_file_read(struct perf_data_file *file)
static int open_file_write(struct perf_data_file *file)
{
int fd;
+ char sbuf[STRERR_BUFSIZE];
if (check_backup(file))
return -1;
@@ -95,7 +98,8 @@ static int open_file_write(struct perf_data_file *file)
fd = open(file->path, O_CREAT|O_RDWR|O_TRUNC, S_IRUSR|S_IWUSR);
if (fd < 0)
- pr_err("failed to open %s : %s\n", file->path, strerror(errno));
+ pr_err("failed to open %s : %s\n", file->path,
+ strerror_r(errno, sbuf, sizeof(sbuf)));
return fd;
}
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
index 71d419362634..ba357f3226c6 100644
--- a/tools/perf/util/debug.c
+++ b/tools/perf/util/debug.c
@@ -13,8 +13,12 @@
#include "util.h"
#include "target.h"
+#define NSECS_PER_SEC 1000000000ULL
+#define NSECS_PER_USEC 1000ULL
+
int verbose;
bool dump_trace = false, quiet = false;
+int debug_ordered_events;
static int _eprintf(int level, int var, const char *fmt, va_list args)
{
@@ -42,6 +46,35 @@ int eprintf(int level, int var, const char *fmt, ...)
return ret;
}
+static int __eprintf_time(u64 t, const char *fmt, va_list args)
+{
+ int ret = 0;
+ u64 secs, usecs, nsecs = t;
+
+ secs = nsecs / NSECS_PER_SEC;
+ nsecs -= secs * NSECS_PER_SEC;
+ usecs = nsecs / NSECS_PER_USEC;
+
+ ret = fprintf(stderr, "[%13" PRIu64 ".%06" PRIu64 "] ",
+ secs, usecs);
+ ret += vfprintf(stderr, fmt, args);
+ return ret;
+}
+
+int eprintf_time(int level, int var, u64 t, const char *fmt, ...)
+{
+ int ret = 0;
+ va_list args;
+
+ if (var >= level) {
+ va_start(args, fmt);
+ ret = __eprintf_time(t, fmt, args);
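/*
 * Editor's note (not part of the patch): a quick worked example of the new
 * length handling above.  For the Overhead column fmt->len defaults to 8, so
 * with fmt_percent len becomes 8 - 2 = 6 and the " %*.2f%%" format behaves
 * like the old " %6.2f%%":
 *
 *	snprintf(buf, size, " %*.2f%%", 6, 23.456);   -> " 23.46%"
 *
 * i.e. 1 (space) + 6 (number) + 1 ('%') = 8 columns, matching the header
 * width.  When symbol_conf.field_sep is set the width is simply forced to 1,
 * so columns are not padded.
 */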
+ va_end(args);
+ }
+
+ return ret;
+}
+
/*
* Overloading libtraceevent standard info print
* function, display with -v in perf.
@@ -110,7 +143,8 @@ static struct debug_variable {
const char *name;
int *ptr;
} debug_variables[] = {
- { .name = "verbose", .ptr = &verbose },
+ { .name = "verbose", .ptr = &verbose },
+ { .name = "ordered-events", .ptr = &debug_ordered_events},
{ .name = NULL, }
};
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h
index 89fb6b0f7ab2..be264d6f3b30 100644
--- a/tools/perf/util/debug.h
+++ b/tools/perf/util/debug.h
@@ -3,6 +3,7 @@
#define __PERF_DEBUG_H
#include <stdbool.h>
+#include <string.h>
#include "event.h"
#include "../ui/helpline.h"
#include "../ui/progress.h"
@@ -10,6 +11,7 @@
extern int verbose;
extern bool quiet, dump_trace;
+extern int debug_ordered_events;
#ifndef pr_fmt
#define pr_fmt(fmt) fmt
@@ -29,6 +31,14 @@ extern bool quiet, dump_trace;
#define pr_debug3(fmt, ...) pr_debugN(3, pr_fmt(fmt), ##__VA_ARGS__)
#define pr_debug4(fmt, ...) pr_debugN(4, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_time_N(n, var, t, fmt, ...) \
+ eprintf_time(n, var, t, fmt, ##__VA_ARGS__)
+
+#define pr_oe_time(t, fmt, ...) pr_time_N(1, debug_ordered_events, t, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_oe_time2(t, fmt, ...) pr_time_N(2, debug_ordered_events, t, pr_fmt(fmt), ##__VA_ARGS__)
+
+#define STRERR_BUFSIZE 128 /* For the buffer size of strerror_r */
+
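/*
 * Editor's note (not part of the patch): pr_oe_time() only prints when the
 * new "ordered-events" debug variable is raised (see debug_variables[] in
 * debug.c); eprintf_time() then prefixes the message with the timestamp.
 * A hypothetical caller:
 *
 *	pr_oe_time(sample->time, "queueing event\n");
 *
 * would come out roughly as "[   12.345678] queueing event", i.e.
 * seconds.microseconds derived from the u64 nanosecond timestamp.
 */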
int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
void trace_event(union perf_event *event);
@@ -38,6 +48,7 @@ int ui__warning(const char *format, ...) __attribute__((format(printf, 1, 2)));
void pr_stat(const char *fmt, ...);
int eprintf(int level, int var, const char *fmt, ...) __attribute__((format(printf, 3, 4)));
+int eprintf_time(int level, int var, u64 t, const char *fmt, ...) __attribute__((format(printf, 4, 5)));
int perf_debug_option(const char *str);
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index 90d02c661dd4..0247acfdfaca 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -37,6 +37,7 @@ int dso__read_binary_type_filename(const struct dso *dso,
{
char build_id_hex[BUILD_ID_SIZE * 2 + 1];
int ret = 0;
+ size_t len;
switch (type) {
case DSO_BINARY_TYPE__DEBUGLINK: {
@@ -60,26 +61,25 @@ int dso__read_binary_type_filename(const struct dso *dso,
break;
case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
- snprintf(filename, size, "%s/usr/lib/debug%s.debug",
- symbol_conf.symfs, dso->long_name);
+ len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
+ snprintf(filename + len, size - len, "%s.debug", dso->long_name);
break;
case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
- snprintf(filename, size, "%s/usr/lib/debug%s",
- symbol_conf.symfs, dso->long_name);
+ len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
+ snprintf(filename + len, size - len, "%s", dso->long_name);
break;
case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
{
const char *last_slash;
- size_t len;
size_t dir_size;
last_slash = dso->long_name + dso->long_name_len;
while (last_slash != dso->long_name && *last_slash != '/')
last_slash--;
- len = scnprintf(filename, size, "%s", symbol_conf.symfs);
+ len = __symbol__join_symfs(filename, size, "");
dir_size = last_slash - dso->long_name + 2;
if (dir_size > (size - len)) {
ret = -1;
@@ -100,26 +100,24 @@ int dso__read_binary_type_filename(const struct dso *dso,
build_id__sprintf(dso->build_id,
sizeof(dso->build_id),
build_id_hex);
- snprintf(filename, size,
- "%s/usr/lib/debug/.build-id/%.2s/%s.debug",
- symbol_conf.symfs, build_id_hex, build_id_hex + 2);
+ len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/");
+ snprintf(filename + len, size - len, "%.2s/%s.debug",
+ build_id_hex, build_id_hex + 2);
break;
case DSO_BINARY_TYPE__VMLINUX:
case DSO_BINARY_TYPE__GUEST_VMLINUX:
case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
- snprintf(filename, size, "%s%s",
- symbol_conf.symfs, dso->long_name);
+ __symbol__join_symfs(filename, size, dso->long_name);
break;
case DSO_BINARY_TYPE__GUEST_KMODULE:
- snprintf(filename, size, "%s%s%s", symbol_conf.symfs,
- root_dir, dso->long_name);
+ path__join3(filename, size, symbol_conf.symfs,
+ root_dir, dso->long_name);
break;
case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
- snprintf(filename, size, "%s%s", symbol_conf.symfs,
- dso->long_name);
+ __symbol__join_symfs(filename, size, dso->long_name);
break;
case DSO_BINARY_TYPE__KCORE:
@@ -164,13 +162,15 @@ static void close_first_dso(void);
static int do_open(char *name)
{
int fd;
+ char sbuf[STRERR_BUFSIZE];
do {
fd = open(name, O_RDONLY);
if (fd >= 0)
return fd;
- pr_debug("dso open failed, mmap: %s\n", strerror(errno));
+ pr_debug("dso open failed, mmap: %s\n",
+ strerror_r(errno, sbuf, sizeof(sbuf)));
if (!dso__data_open_cnt || errno != EMFILE)
break;
@@ -532,10 +532,12 @@ static ssize_t cached_read(struct dso *dso, u64 offset, u8 *data, ssize_t size)
static int data_file_size(struct dso *dso)
{
struct stat st;
+ char sbuf[STRERR_BUFSIZE];
if (!dso->data.file_size) {
if (fstat(dso->data.fd, &st)) {
- pr_err("dso mmap failed, fstat: %s\n", strerror(errno));
+ pr_err("dso mmap failed, fstat: %s\n",
+ strerror_r(errno, sbuf, sizeof(sbuf)));
return -1;
}
dso->data.file_size = st.st_size;
@@ -651,6 +653,65 @@ struct dso *dso__kernel_findnew(struct machine *machine, const char *name,
return dso;
}
+/*
+ * Find a matching entry and/or link the current entry into the RB tree.
+ * At least one of the dso or name parameters must be non-NULL or the
+ * function will not work.
+ */
+static struct dso *dso__findlink_by_longname(struct rb_root *root,
+ struct dso *dso, const char *name)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+
+ if (!name)
+ name = dso->long_name;
+ /*
+ * Find node with the matching name
+ */
+ while (*p) {
+ struct dso *this = rb_entry(*p, struct dso, rb_node);
+ int rc = strcmp(name, this->long_name);
+
+ parent = *p;
+ if (rc == 0) {
+ /*
+ * In case the new DSO is a duplicate of an existing
+ * one, print a one-time warning & put the new entry
+ * at the end of the list of duplicates.
+ */
+ if (!dso || (dso == this))
+ return this; /* Found a matching dso */
+ /*
+ * The core kernel DSOs may have duplicated long names.
+ * In this case, the short names should be different.
+ * Compare the short names to differentiate the DSOs.
+ */
+ rc = strcmp(dso->short_name, this->short_name);
+ if (rc == 0) {
+ pr_err("Duplicated dso name: %s\n", name);
+ return NULL;
+ }
+ }
+ if (rc < 0)
+ p = &parent->rb_left;
+ else
+ p = &parent->rb_right;
+ }
+ if (dso) {
+ /* Add new node and rebalance tree */
+ rb_link_node(&dso->rb_node, parent, p);
+ rb_insert_color(&dso->rb_node, root);
+ }
+ return NULL;
+}
+
+static inline struct dso *
+dso__find_by_longname(const struct rb_root *root, const char *name)
+{
+ return dso__findlink_by_longname((struct rb_root *)root, NULL, name);
+}
+
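/*
 * Editor's note (not part of the patch): every DSO is now kept in both
 * containers of 'struct dsos' -- the list for iteration and the rbtree for
 * long-name lookup -- so dsos__find(dsos, name, false) further down becomes
 * an O(log n) tree walk, while the short-name case still scans the list.
 * The short-name comparison above only acts as a tie-breaker for kernel
 * DSOs that share a long name.
 */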
void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
{
if (name == NULL)
@@ -753,6 +814,7 @@ struct dso *dso__new(const char *name)
dso->a2l_fails = 1;
dso->kernel = DSO_TYPE_USER;
dso->needs_swap = DSO_SWAP__UNSET;
+ RB_CLEAR_NODE(&dso->rb_node);
INIT_LIST_HEAD(&dso->node);
INIT_LIST_HEAD(&dso->data.open_entry);
}
@@ -763,6 +825,10 @@ struct dso *dso__new(const char *name)
void dso__delete(struct dso *dso)
{
int i;
+
+ if (!RB_EMPTY_NODE(&dso->rb_node))
+ pr_err("DSO %s is still in rbtree when being deleted!\n",
+ dso->long_name);
for (i = 0; i < MAP__NR_TYPES; ++i)
symbols__delete(&dso->symbols[i]);
@@ -849,35 +915,34 @@ bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
return have_build_id;
}
-void dsos__add(struct list_head *head, struct dso *dso)
+void dsos__add(struct dsos *dsos, struct dso *dso)
{
- list_add_tail(&dso->node, head);
+ list_add_tail(&dso->node, &dsos->head);
+ dso__findlink_by_longname(&dsos->root, dso, NULL);
}
-struct dso *dsos__find(const struct list_head *head, const char *name, bool cmp_short)
+struct dso *dsos__find(const struct dsos *dsos, const char *name,
+ bool cmp_short)
{
struct dso *pos;
if (cmp_short) {
- list_for_each_entry(pos, head, node)
+ list_for_each_entry(pos, &dsos->head, node)
if (strcmp(pos->short_name, name) == 0)
return pos;
return NULL;
}
- list_for_each_entry(pos, head, node)
- if (strcmp(pos->long_name, name) == 0)
- return pos;
- return NULL;
+ return dso__find_by_longname(&dsos->root, name);
}
-struct dso *__dsos__findnew(struct list_head *head, const char *name)
+struct dso *__dsos__findnew(struct dsos *dsos, const char *name)
{
- struct dso *dso = dsos__find(head, name, false);
+ struct dso *dso = dsos__find(dsos, name, false);
if (!dso) {
dso = dso__new(name);
if (dso != NULL) {
- dsos__add(head, dso);
+ dsos__add(dsos, dso);
dso__set_basename(dso);
}
}
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index 5e463c0964d4..acb651acc7fd 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -90,8 +90,18 @@ struct dso_cache {
char data[0];
};
+/*
+ * DSOs are put into both a list for fast iteration and rbtree for fast
+ * long name lookup.
+ */
+struct dsos {
+ struct list_head head;
+ struct rb_root root; /* rbtree root sorted by long name */
+};
+
struct dso {
struct list_head node;
+ struct rb_node rb_node; /* rbtree node sorted by long name */
struct rb_root symbols[MAP__NR_TYPES];
struct rb_root symbol_names[MAP__NR_TYPES];
void *a2l;
@@ -224,10 +234,10 @@ struct map *dso__new_map(const char *name);
struct dso *dso__kernel_findnew(struct machine *machine, const char *name,
const char *short_name, int dso_type);
-void dsos__add(struct list_head *head, struct dso *dso);
-struct dso *dsos__find(const struct list_head *head, const char *name,
+void dsos__add(struct dsos *dsos, struct dso *dso);
+struct dso *dsos__find(const struct dsos *dsos, const char *name,
bool cmp_short);
-struct dso *__dsos__findnew(struct list_head *head, const char *name);
+struct dso *__dsos__findnew(struct dsos *dsos, const char *name);
bool __dsos__read_build_ids(struct list_head *head, bool with_hits);
size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 1398c83d896d..4af6b279e34a 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -558,13 +558,17 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
struct map *map;
struct kmap *kmap;
int err;
+ union perf_event *event;
+
+ if (machine->vmlinux_maps[0] == NULL)
+ return -1;
+
/*
* We should get this from /sys/kernel/sections/.text, but till that is
* available use this, and after it is use this as a fallback for older
* kernels.
*/
- union perf_event *event = zalloc((sizeof(event->mmap) +
- machine->id_hdr_size));
+ event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
if (event == NULL) {
pr_debug("Not enough memory synthesizing mmap event "
"for kernel modules\n");
@@ -784,9 +788,9 @@ try_again:
* "[vdso]" dso, but for now lets use the old trick of looking
* in the whole kernel symbol list.
*/
- if ((long long)al->addr < 0 &&
- cpumode == PERF_RECORD_MISC_USER &&
- machine && mg != &machine->kmaps) {
+ if (cpumode == PERF_RECORD_MISC_USER && machine &&
+ mg != &machine->kmaps &&
+ machine__kernel_ip(machine, al->addr)) {
mg = &machine->kmaps;
load_map = true;
goto try_again;
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 94d6976180da..7eb7107731ec 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -156,6 +156,8 @@ struct perf_sample {
u32 cpu;
u32 raw_size;
u64 data_src;
+ u32 flags;
+ u16 insn_len;
void *raw_data;
struct ip_callchain *callchain;
struct branch_stack *branch_stack;
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 814e954c1318..3cebc9a8d52e 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -25,6 +25,9 @@
#include <linux/bitops.h>
#include <linux/hash.h>
+static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx);
+static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx);
+
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
@@ -37,6 +40,7 @@ void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
INIT_HLIST_HEAD(&evlist->heads[i]);
INIT_LIST_HEAD(&evlist->entries);
perf_evlist__set_maps(evlist, cpus, threads);
+ fdarray__init(&evlist->pollfd, 64);
evlist->workload.pid = -1;
}
@@ -102,7 +106,7 @@ static void perf_evlist__purge(struct perf_evlist *evlist)
void perf_evlist__exit(struct perf_evlist *evlist)
{
zfree(&evlist->mmap);
- zfree(&evlist->pollfd);
+ fdarray__exit(&evlist->pollfd);
}
void perf_evlist__delete(struct perf_evlist *evlist)
@@ -122,6 +126,7 @@ void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
list_add_tail(&entry->node, &evlist->entries);
entry->idx = evlist->nr_entries;
+ entry->tracking = !entry->idx;
if (!evlist->nr_entries++)
perf_evlist__set_id_pos(evlist);
@@ -265,17 +270,27 @@ int perf_evlist__add_newtp(struct perf_evlist *evlist,
return 0;
}
+static int perf_evlist__nr_threads(struct perf_evlist *evlist,
+ struct perf_evsel *evsel)
+{
+ if (evsel->system_wide)
+ return 1;
+ else
+ return thread_map__nr(evlist->threads);
+}
+
void perf_evlist__disable(struct perf_evlist *evlist)
{
int cpu, thread;
struct perf_evsel *pos;
int nr_cpus = cpu_map__nr(evlist->cpus);
- int nr_threads = thread_map__nr(evlist->threads);
+ int nr_threads;
for (cpu = 0; cpu < nr_cpus; cpu++) {
evlist__for_each(evlist, pos) {
if (!perf_evsel__is_group_leader(pos) || !pos->fd)
continue;
+ nr_threads = perf_evlist__nr_threads(evlist, pos);
for (thread = 0; thread < nr_threads; thread++)
ioctl(FD(pos, cpu, thread),
PERF_EVENT_IOC_DISABLE, 0);
@@ -288,12 +303,13 @@ void perf_evlist__enable(struct perf_evlist *evlist)
int cpu, thread;
struct perf_evsel *pos;
int nr_cpus = cpu_map__nr(evlist->cpus);
- int nr_threads = thread_map__nr(evlist->threads);
+ int nr_threads;
for (cpu = 0; cpu < nr_cpus; cpu++) {
evlist__for_each(evlist, pos) {
if (!perf_evsel__is_group_leader(pos) || !pos->fd)
continue;
+ nr_threads = perf_evlist__nr_threads(evlist, pos);
for (thread = 0; thread < nr_threads; thread++)
ioctl(FD(pos, cpu, thread),
PERF_EVENT_IOC_ENABLE, 0);
@@ -305,12 +321,14 @@ int perf_evlist__disable_event(struct perf_evlist *evlist,
struct perf_evsel *evsel)
{
int cpu, thread, err;
+ int nr_cpus = cpu_map__nr(evlist->cpus);
+ int nr_threads = perf_evlist__nr_threads(evlist, evsel);
if (!evsel->fd)
return 0;
- for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
- for (thread = 0; thread < evlist->threads->nr; thread++) {
+ for (cpu = 0; cpu < nr_cpus; cpu++) {
+ for (thread = 0; thread < nr_threads; thread++) {
err = ioctl(FD(evsel, cpu, thread),
PERF_EVENT_IOC_DISABLE, 0);
if (err)
@@ -324,12 +342,14 @@ int perf_evlist__enable_event(struct perf_evlist *evlist,
struct perf_evsel *evsel)
{
int cpu, thread, err;
+ int nr_cpus = cpu_map__nr(evlist->cpus);
+ int nr_threads = perf_evlist__nr_threads(evlist, evsel);
if (!evsel->fd)
return -EINVAL;
- for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
- for (thread = 0; thread < evlist->threads->nr; thread++) {
+ for (cpu = 0; cpu < nr_cpus; cpu++) {
+ for (thread = 0; thread < nr_threads; thread++) {
err = ioctl(FD(evsel, cpu, thread),
PERF_EVENT_IOC_ENABLE, 0);
if (err)
@@ -339,21 +359,111 @@ int perf_evlist__enable_event(struct perf_evlist *evlist,
return 0;
}
-static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
+static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist,
+ struct perf_evsel *evsel, int cpu)
+{
+ int thread, err;
+ int nr_threads = perf_evlist__nr_threads(evlist, evsel);
+
+ if (!evsel->fd)
+ return -EINVAL;
+
+ for (thread = 0; thread < nr_threads; thread++) {
+ err = ioctl(FD(evsel, cpu, thread),
+ PERF_EVENT_IOC_ENABLE, 0);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int perf_evlist__enable_event_thread(struct perf_evlist *evlist,
+ struct perf_evsel *evsel,
+ int thread)
+{
+ int cpu, err;
+ int nr_cpus = cpu_map__nr(evlist->cpus);
+
+ if (!evsel->fd)
+ return -EINVAL;
+
+ for (cpu = 0; cpu < nr_cpus; cpu++) {
+ err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
+ struct perf_evsel *evsel, int idx)
+{
+ bool per_cpu_mmaps = !cpu_map__empty(evlist->cpus);
+
+ if (per_cpu_mmaps)
+ return perf_evlist__enable_event_cpu(evlist, evsel, idx);
+ else
+ return perf_evlist__enable_event_thread(evlist, evsel, idx);
+}
+
+int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
int nr_cpus = cpu_map__nr(evlist->cpus);
int nr_threads = thread_map__nr(evlist->threads);
- int nfds = nr_cpus * nr_threads * evlist->nr_entries;
- evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
- return evlist->pollfd != NULL ? 0 : -ENOMEM;
+ int nfds = 0;
+ struct perf_evsel *evsel;
+
+ list_for_each_entry(evsel, &evlist->entries, node) {
+ if (evsel->system_wide)
+ nfds += nr_cpus;
+ else
+ nfds += nr_cpus * nr_threads;
+ }
+
+ if (fdarray__available_entries(&evlist->pollfd) < nfds &&
+ fdarray__grow(&evlist->pollfd, nfds) < 0)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd, int idx)
+{
+ int pos = fdarray__add(&evlist->pollfd, fd, POLLIN | POLLERR | POLLHUP);
+ /*
+ * Save the idx so that when we filter out POLLHUP'ed fds we can
+ * close the associated evlist->mmap[] entry.
+ */
+ if (pos >= 0) {
+ evlist->pollfd.priv[pos].idx = idx;
+
+ fcntl(fd, F_SETFL, O_NONBLOCK);
+ }
+
+ return pos;
+}
+
+int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
+{
+ return __perf_evlist__add_pollfd(evlist, fd, -1);
+}
+
+static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd)
+{
+ struct perf_evlist *evlist = container_of(fda, struct perf_evlist, pollfd);
+
+ perf_evlist__mmap_put(evlist, fda->priv[fd].idx);
+}
+
+int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
+{
+ return fdarray__filter(&evlist->pollfd, revents_and_mask,
+ perf_evlist__munmap_filtered);
}
-void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
+int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
{
- fcntl(fd, F_SETFL, O_NONBLOCK);
- evlist->pollfd[evlist->nr_fds].fd = fd;
- evlist->pollfd[evlist->nr_fds].events = POLLIN;
- evlist->nr_fds++;
+ return fdarray__poll(&evlist->pollfd, timeout);
}
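/*
 * Editor's note (not part of the patch): a minimal sketch of how a tool
 * would drive the new fdarray-backed pollfd set.  'done' and the reading of
 * evlist->mmap[] are placeholders; the fds themselves are added with their
 * mmap idx during perf_evlist__mmap().
 */
static void example_poll_loop(struct perf_evlist *evlist, volatile int *done)
{
	while (!*done) {
		/* ... read and consume events from evlist->mmap[] here ... */

		if (perf_evlist__poll(evlist, 100) < 0)
			break;

		/*
		 * Drop fds that reported POLLERR/POLLHUP; the filter callback
		 * (perf_evlist__munmap_filtered) puts the matching mmap
		 * refcount so the ring buffer is unmapped once drained.
		 */
		perf_evlist__filter_pollfd(evlist, POLLERR | POLLHUP);
	}
}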
static void perf_evlist__id_hash(struct perf_evlist *evlist,
@@ -566,14 +676,36 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
return event;
}
+static bool perf_mmap__empty(struct perf_mmap *md)
+{
+ return perf_mmap__read_head(md) != md->prev;
+}
+
+static void perf_evlist__mmap_get(struct perf_evlist *evlist, int idx)
+{
+ ++evlist->mmap[idx].refcnt;
+}
+
+static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx)
+{
+ BUG_ON(evlist->mmap[idx].refcnt == 0);
+
+ if (--evlist->mmap[idx].refcnt == 0)
+ __perf_evlist__munmap(evlist, idx);
+}
+
void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
{
+ struct perf_mmap *md = &evlist->mmap[idx];
+
if (!evlist->overwrite) {
- struct perf_mmap *md = &evlist->mmap[idx];
unsigned int old = md->prev;
perf_mmap__write_tail(md, old);
}
+
+ if (md->refcnt == 1 && perf_mmap__empty(md))
+ perf_evlist__mmap_put(evlist, idx);
}
static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
@@ -581,6 +713,7 @@ static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
if (evlist->mmap[idx].base != NULL) {
munmap(evlist->mmap[idx].base, evlist->mmap_len);
evlist->mmap[idx].base = NULL;
+ evlist->mmap[idx].refcnt = 0;
}
}
@@ -614,6 +747,20 @@ struct mmap_params {
static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
struct mmap_params *mp, int fd)
{
+ /*
+ * The last refcount drop is done at perf_evlist__mmap_consume(), so that we
+ * make sure we don't prevent tools from consuming every last event in
+ * the ring buffer.
+ *
+ * I.e. we can get the POLLHUP meaning that the fd doesn't exist
+ * anymore, but the last events for it are still in the ring buffer,
+ * waiting to be consumed.
+ *
+ * Tools can choose to ignore this at their own discretion, but the
+ * evlist layer can't just drop it when filtering events in
+ * perf_evlist__filter_pollfd().
+ */
+ evlist->mmap[idx].refcnt = 2;
evlist->mmap[idx].prev = 0;
evlist->mmap[idx].mask = mp->mask;
evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, mp->prot,
@@ -625,7 +772,6 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
return -1;
}
- perf_evlist__add_pollfd(evlist, fd);
return 0;
}
@@ -636,7 +782,12 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
struct perf_evsel *evsel;
evlist__for_each(evlist, evsel) {
- int fd = FD(evsel, cpu, thread);
+ int fd;
+
+ if (evsel->system_wide && thread)
+ continue;
+
+ fd = FD(evsel, cpu, thread);
if (*output == -1) {
*output = fd;
@@ -645,6 +796,13 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
} else {
if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
return -1;
+
+ perf_evlist__mmap_get(evlist, idx);
+ }
+
+ if (__perf_evlist__add_pollfd(evlist, fd, idx) < 0) {
+ perf_evlist__mmap_put(evlist, idx);
+ return -1;
}
if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
@@ -804,7 +962,7 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
return -ENOMEM;
- if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
+ if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
return -ENOMEM;
evlist->overwrite = overwrite;
@@ -1061,6 +1219,8 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *tar
}
if (!evlist->workload.pid) {
+ int ret;
+
if (pipe_output)
dup2(2, 1);
@@ -1078,8 +1238,22 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *tar
/*
* Wait until the parent tells us to go.
*/
- if (read(go_pipe[0], &bf, 1) == -1)
- perror("unable to read pipe");
+ ret = read(go_pipe[0], &bf, 1);
+ /*
+ * The parent will ask for the execvp() to be performed by
+ * writing exactly one byte, in workload.cork_fd, usually via
+ * perf_evlist__start_workload().
+ *
+ * For cancelling the workload without actually running it,
+ * the parent will just close workload.cork_fd, without writing
+ * anything, i.e. read will return zero and we just exit()
+ * here.
+ */
+ if (ret != 1) {
+ if (ret == -1)
+ perror("unable to read pipe");
+ exit(ret);
+ }
execvp(argv[0], (char **)argv);
@@ -1202,7 +1376,7 @@ int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused,
int err, char *buf, size_t size)
{
int printed, value;
- char sbuf[128], *emsg = strerror_r(err, sbuf, sizeof(sbuf));
+ char sbuf[STRERR_BUFSIZE], *emsg = strerror_r(err, sbuf, sizeof(sbuf));
switch (err) {
case EACCES:
@@ -1250,3 +1424,19 @@ void perf_evlist__to_front(struct perf_evlist *evlist,
list_splice(&move, &evlist->entries);
}
+
+void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
+ struct perf_evsel *tracking_evsel)
+{
+ struct perf_evsel *evsel;
+
+ if (tracking_evsel->tracking)
+ return;
+
+ evlist__for_each(evlist, evsel) {
+ if (evsel != tracking_evsel)
+ evsel->tracking = false;
+ }
+
+ tracking_evsel->tracking = true;
+}
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index f5173cd63693..bd312b01e876 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -2,6 +2,7 @@
#define __PERF_EVLIST_H 1
#include <linux/list.h>
+#include <api/fd/array.h>
#include <stdio.h>
#include "../perf.h"
#include "event.h"
@@ -17,9 +18,15 @@ struct record_opts;
#define PERF_EVLIST__HLIST_BITS 8
#define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS)
+/**
+ * struct perf_mmap - perf's ring buffer mmap details
+ *
+ * @refcnt - number of users of this mmap, e.g. code using PERF_EVENT_IOC_SET_OUTPUT to share it
+ */
struct perf_mmap {
void *base;
int mask;
+ int refcnt;
unsigned int prev;
char event_copy[PERF_SAMPLE_MAX_SIZE];
};
@@ -29,7 +36,6 @@ struct perf_evlist {
struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
int nr_entries;
int nr_groups;
- int nr_fds;
int nr_mmaps;
size_t mmap_len;
int id_pos;
@@ -40,8 +46,8 @@ struct perf_evlist {
pid_t pid;
} workload;
bool overwrite;
+ struct fdarray pollfd;
struct perf_mmap *mmap;
- struct pollfd *pollfd;
struct thread_map *threads;
struct cpu_map *cpus;
struct perf_evsel *selected;
@@ -82,7 +88,11 @@ perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
int cpu, int thread, u64 id);
-void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd);
+int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd);
+int perf_evlist__alloc_pollfd(struct perf_evlist *evlist);
+int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask);
+
+int perf_evlist__poll(struct perf_evlist *evlist, int timeout);
struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id);
@@ -122,6 +132,8 @@ int perf_evlist__disable_event(struct perf_evlist *evlist,
struct perf_evsel *evsel);
int perf_evlist__enable_event(struct perf_evlist *evlist,
struct perf_evsel *evsel);
+int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
+ struct perf_evsel *evsel, int idx);
void perf_evlist__set_selected(struct perf_evlist *evlist,
struct perf_evsel *evsel);
@@ -262,4 +274,7 @@ void perf_evlist__to_front(struct perf_evlist *evlist,
#define evlist__for_each_safe(evlist, tmp, evsel) \
__evlist__for_each_safe(&(evlist)->entries, tmp, evsel)
+void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
+ struct perf_evsel *tracking_evsel);
+
#endif /* __PERF_EVLIST_H */
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 21a373ebea22..e0868a901c4a 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -162,6 +162,7 @@ void perf_evsel__init(struct perf_evsel *evsel,
struct perf_event_attr *attr, int idx)
{
evsel->idx = idx;
+ evsel->tracking = !idx;
evsel->attr = *attr;
evsel->leader = evsel;
evsel->unit = "";
@@ -502,20 +503,19 @@ int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
}
static void
-perf_evsel__config_callgraph(struct perf_evsel *evsel,
- struct record_opts *opts)
+perf_evsel__config_callgraph(struct perf_evsel *evsel)
{
bool function = perf_evsel__is_function_event(evsel);
struct perf_event_attr *attr = &evsel->attr;
perf_evsel__set_sample_bit(evsel, CALLCHAIN);
- if (opts->call_graph == CALLCHAIN_DWARF) {
+ if (callchain_param.record_mode == CALLCHAIN_DWARF) {
if (!function) {
perf_evsel__set_sample_bit(evsel, REGS_USER);
perf_evsel__set_sample_bit(evsel, STACK_USER);
attr->sample_regs_user = PERF_REGS_MASK;
- attr->sample_stack_user = opts->stack_dump_size;
+ attr->sample_stack_user = callchain_param.dump_size;
attr->exclude_callchain_user = 1;
} else {
pr_info("Cannot use DWARF unwind for function trace event,"
@@ -561,7 +561,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
{
struct perf_evsel *leader = evsel->leader;
struct perf_event_attr *attr = &evsel->attr;
- int track = !evsel->idx; /* only the first counter needs these */
+ int track = evsel->tracking;
bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;
attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
@@ -624,8 +624,8 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
attr->mmap_data = track;
}
- if (opts->call_graph_enabled && !evsel->no_aux_samples)
- perf_evsel__config_callgraph(evsel, opts);
+ if (callchain_param.enabled && !evsel->no_aux_samples)
+ perf_evsel__config_callgraph(evsel);
if (target__has_cpu(&opts->target))
perf_evsel__set_sample_bit(evsel, CPU);
@@ -633,9 +633,12 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
if (opts->period)
perf_evsel__set_sample_bit(evsel, PERIOD);
- if (!perf_missing_features.sample_id_all &&
- (opts->sample_time || !opts->no_inherit ||
- target__has_cpu(&opts->target) || per_cpu))
+ /*
+	 * When the user explicitly disabled time, don't force it here.
+ */
+ if (opts->sample_time &&
+ (!perf_missing_features.sample_id_all &&
+ (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu)))
perf_evsel__set_sample_bit(evsel, TIME);
if (opts->raw_samples && !evsel->no_aux_samples) {
@@ -692,6 +695,10 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
int cpu, thread;
+
+ if (evsel->system_wide)
+ nthreads = 1;
+
evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
if (evsel->fd) {
@@ -710,6 +717,9 @@ static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ncpus, int nthrea
{
int cpu, thread;
+ if (evsel->system_wide)
+ nthreads = 1;
+
for (cpu = 0; cpu < ncpus; cpu++) {
for (thread = 0; thread < nthreads; thread++) {
int fd = FD(evsel, cpu, thread),
@@ -740,6 +750,9 @@ int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads)
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
+ if (evsel->system_wide)
+ nthreads = 1;
+
evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
if (evsel->sample_id == NULL)
return -ENOMEM;
@@ -784,6 +797,9 @@ void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
int cpu, thread;
+ if (evsel->system_wide)
+ nthreads = 1;
+
for (cpu = 0; cpu < ncpus; cpu++)
for (thread = 0; thread < nthreads; ++thread) {
close(FD(evsel, cpu, thread));
@@ -872,6 +888,9 @@ int __perf_evsel__read(struct perf_evsel *evsel,
int cpu, thread;
struct perf_counts_values *aggr = &evsel->counts->aggr, count;
+ if (evsel->system_wide)
+ nthreads = 1;
+
aggr->val = aggr->ena = aggr->run = 0;
for (cpu = 0; cpu < ncpus; cpu++) {
@@ -994,13 +1013,18 @@ static size_t perf_event_attr__fprintf(struct perf_event_attr *attr, FILE *fp)
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
struct thread_map *threads)
{
- int cpu, thread;
+ int cpu, thread, nthreads;
unsigned long flags = PERF_FLAG_FD_CLOEXEC;
int pid = -1, err;
enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;
+ if (evsel->system_wide)
+ nthreads = 1;
+ else
+ nthreads = threads->nr;
+
if (evsel->fd == NULL &&
- perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
+ perf_evsel__alloc_fd(evsel, cpus->nr, nthreads) < 0)
return -ENOMEM;
if (evsel->cgrp) {
@@ -1024,10 +1048,10 @@ retry_sample_id:
for (cpu = 0; cpu < cpus->nr; cpu++) {
- for (thread = 0; thread < threads->nr; thread++) {
+ for (thread = 0; thread < nthreads; thread++) {
int group_fd;
- if (!evsel->cgrp)
+ if (!evsel->cgrp && !evsel->system_wide)
pid = threads->map[thread];
group_fd = get_group_fd(evsel, cpu, thread);
@@ -1100,7 +1124,7 @@ out_close:
close(FD(evsel, cpu, thread));
FD(evsel, cpu, thread) = -1;
}
- thread = threads->nr;
+ thread = nthreads;
} while (--cpu >= 0);
return err;
}
@@ -2002,6 +2026,8 @@ bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
int err, char *msg, size_t size)
{
+ char sbuf[STRERR_BUFSIZE];
+
switch (err) {
case EPERM:
case EACCES:
@@ -2036,13 +2062,20 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
"No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it.");
#endif
break;
+ case EBUSY:
+ if (find_process("oprofiled"))
+ return scnprintf(msg, size,
+ "The PMU counters are busy/taken by another profiler.\n"
+ "We found oprofile daemon running, please stop it and try again.");
+ break;
default:
break;
}
return scnprintf(msg, size,
- "The sys_perf_event_open() syscall returned with %d (%s) for event (%s). \n"
+ "The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
"/bin/dmesg may provide additional information.\n"
"No CONFIG_PERF_EVENTS=y kernel support configured?\n",
- err, strerror(err), perf_evsel__name(evsel));
+ err, strerror_r(err, sbuf, sizeof(sbuf)),
+ perf_evsel__name(evsel));
}
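The strerror() to strerror_r() conversion in perf_evsel__open_strerror() is a pattern repeated across this series: a small on-stack buffer plus the re-entrant call, so the message no longer lives in libc's shared static buffer. A condensed sketch of the pattern; STRERR_BUFSIZE is assumed to be a small constant from the perf utility headers, and the call relies on the GNU char *-returning strerror_r(), as the patch itself does:

	#define _GNU_SOURCE		/* GNU strerror_r() returns a char *, as perf relies on */
	#include <errno.h>
	#include <string.h>
	#include <stdio.h>

	#define STRERR_BUFSIZE 128	/* assumption: mirrors the perf-internal constant */

	static void report_open_error(const char *path)
	{
		char sbuf[STRERR_BUFSIZE];

		/* The re-entrant variant writes into a caller-owned buffer, so the
		 * string stays valid even with concurrent callers. */
		fprintf(stderr, "Failed to open %s: %s\n", path,
			strerror_r(errno, sbuf, sizeof(sbuf)));
	}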
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index d7f93ce0ebc1..7bc314be6a7b 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -85,6 +85,8 @@ struct perf_evsel {
bool needs_swap;
bool no_aux_samples;
bool immediate;
+ bool system_wide;
+ bool tracking;
/* parse modifier helper */
int exclude_GH;
int nr_members;
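The two new flags split what used to be tied to evsel->idx: 'tracking' marks the event that carries the side-band records (COMM, MMAP, ...), and 'system_wide' makes an event be opened once per CPU instead of once per thread. A speculative sketch of how a caller might combine them with the perf_evlist__set_tracking_event() helper declared earlier:

	#include "util/evlist.h"
	#include "util/evsel.h"

	static void make_tracking(struct perf_evlist *evlist, struct perf_evsel *evsel)
	{
		/* Route side-band records to this event instead of the first one,
		 * and open it per CPU rather than per thread. (Illustrative use
		 * of the new flags, not taken from the patch itself.) */
		perf_evlist__set_tracking_event(evlist, evsel);
		evsel->system_wide = true;
	}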
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 158c787ce0c4..ce0de00399da 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -214,11 +214,11 @@ static int machine__hit_all_dsos(struct machine *machine)
{
int err;
- err = __dsos__hit_all(&machine->kernel_dsos);
+ err = __dsos__hit_all(&machine->kernel_dsos.head);
if (err)
return err;
- return __dsos__hit_all(&machine->user_dsos);
+ return __dsos__hit_all(&machine->user_dsos.head);
}
int dsos__hit_all(struct perf_session *session)
@@ -288,11 +288,12 @@ static int machine__write_buildid_table(struct machine *machine, int fd)
umisc = PERF_RECORD_MISC_GUEST_USER;
}
- err = __dsos__write_buildid_table(&machine->kernel_dsos, machine,
+ err = __dsos__write_buildid_table(&machine->kernel_dsos.head, machine,
machine->pid, kmisc, fd);
if (err == 0)
- err = __dsos__write_buildid_table(&machine->user_dsos, machine,
- machine->pid, umisc, fd);
+ err = __dsos__write_buildid_table(&machine->user_dsos.head,
+ machine, machine->pid, umisc,
+ fd);
return err;
}
@@ -455,9 +456,10 @@ static int __dsos__cache_build_ids(struct list_head *head,
static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
{
- int ret = __dsos__cache_build_ids(&machine->kernel_dsos, machine,
+ int ret = __dsos__cache_build_ids(&machine->kernel_dsos.head, machine,
debugdir);
- ret |= __dsos__cache_build_ids(&machine->user_dsos, machine, debugdir);
+ ret |= __dsos__cache_build_ids(&machine->user_dsos.head, machine,
+ debugdir);
return ret;
}
@@ -483,8 +485,10 @@ static int perf_session__cache_build_ids(struct perf_session *session)
static bool machine__read_build_ids(struct machine *machine, bool with_hits)
{
- bool ret = __dsos__read_build_ids(&machine->kernel_dsos, with_hits);
- ret |= __dsos__read_build_ids(&machine->user_dsos, with_hits);
+ bool ret;
+
+ ret = __dsos__read_build_ids(&machine->kernel_dsos.head, with_hits);
+ ret |= __dsos__read_build_ids(&machine->user_dsos.head, with_hits);
return ret;
}
@@ -1548,7 +1552,7 @@ static int __event_process_build_id(struct build_id_event *bev,
struct perf_session *session)
{
int err = -1;
- struct list_head *head;
+ struct dsos *dsos;
struct machine *machine;
u16 misc;
struct dso *dso;
@@ -1563,22 +1567,22 @@ static int __event_process_build_id(struct build_id_event *bev,
switch (misc) {
case PERF_RECORD_MISC_KERNEL:
dso_type = DSO_TYPE_KERNEL;
- head = &machine->kernel_dsos;
+ dsos = &machine->kernel_dsos;
break;
case PERF_RECORD_MISC_GUEST_KERNEL:
dso_type = DSO_TYPE_GUEST_KERNEL;
- head = &machine->kernel_dsos;
+ dsos = &machine->kernel_dsos;
break;
case PERF_RECORD_MISC_USER:
case PERF_RECORD_MISC_GUEST_USER:
dso_type = DSO_TYPE_USER;
- head = &machine->user_dsos;
+ dsos = &machine->user_dsos;
break;
default:
goto out;
}
- dso = __dsos__findnew(head, filename);
+ dso = __dsos__findnew(dsos, filename);
if (dso != NULL) {
char sbuild_id[BUILD_ID_SIZE * 2 + 1];
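header.c now reaches the DSO lists through a .head member, so machine->kernel_dsos and machine->user_dsos have evidently become a small wrapper type rather than a bare list_head. The real definition lives in dso.h, which this diff does not show; the RB_CLEAR_NODE() call in machine.c's dsos__delete() below hints that the wrapper also carries an rbtree index, so a plausible (assumed) shape is:

	#include <linux/list.h>
	#include <linux/rbtree.h>

	/* Assumed layout only; the authoritative definition is in tools/perf/util/dso.h. */
	struct dsos {
		struct list_head head;	/* what the old bare list_head became */
		struct rb_root	 root;	/* likely lookup index, hinted at by RB_CLEAR_NODE() */
	};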
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 30df6187ee02..86569fa3651d 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -277,6 +277,28 @@ void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
}
}
+void hists__delete_entries(struct hists *hists)
+{
+ struct rb_node *next = rb_first(&hists->entries);
+ struct hist_entry *n;
+
+ while (next) {
+ n = rb_entry(next, struct hist_entry, rb_node);
+ next = rb_next(&n->rb_node);
+
+ rb_erase(&n->rb_node, &hists->entries);
+
+ if (sort__need_collapse)
+ rb_erase(&n->rb_node_in, &hists->entries_collapsed);
+
+ --hists->nr_entries;
+ if (!n->filtered)
+ --hists->nr_non_filtered_entries;
+
+ hist_entry__free(n);
+ }
+}
+
/*
* histogram, sorted on item, collects periods
*/
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 742f49a85725..8c9c70e18cbb 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -152,6 +152,7 @@ void hists__output_resort(struct hists *hists);
void hists__collapse_resort(struct hists *hists, struct ui_progress *prog);
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel);
+void hists__delete_entries(struct hists *hists);
void hists__output_recalc_col_len(struct hists *hists, int max_rows);
u64 hists__total_period(struct hists *hists);
@@ -192,6 +193,7 @@ struct perf_hpp {
};
struct perf_hpp_fmt {
+ const char *name;
int (*header)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct perf_evsel *evsel);
int (*width)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
@@ -207,6 +209,8 @@ struct perf_hpp_fmt {
struct list_head list;
struct list_head sort_list;
bool elide;
+ int len;
+ int user_len;
};
extern struct list_head perf_hpp__list;
@@ -261,17 +265,19 @@ static inline bool perf_hpp__should_skip(struct perf_hpp_fmt *format)
}
void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists);
+void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists);
+void perf_hpp__set_user_width(const char *width_list_str);
typedef u64 (*hpp_field_fn)(struct hist_entry *he);
typedef int (*hpp_callback_fn)(struct perf_hpp *hpp, bool front);
typedef int (*hpp_snprint_fn)(struct perf_hpp *hpp, const char *fmt, ...);
-int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
- hpp_field_fn get_field, const char *fmt,
- hpp_snprint_fn print_fn, bool fmt_percent);
-int __hpp__fmt_acc(struct perf_hpp *hpp, struct hist_entry *he,
- hpp_field_fn get_field, const char *fmt,
- hpp_snprint_fn print_fn, bool fmt_percent);
+int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he, hpp_field_fn get_field,
+ const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent);
+int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he, hpp_field_fn get_field,
+ const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent);
static inline void advance_hpp(struct perf_hpp *hpp, int inc)
{
diff --git a/tools/perf/util/kvm-stat.h b/tools/perf/util/kvm-stat.h
index 0b5a8cd2ee79..cf1d7913783b 100644
--- a/tools/perf/util/kvm-stat.h
+++ b/tools/perf/util/kvm-stat.h
@@ -92,7 +92,6 @@ struct perf_kvm_stat {
u64 lost_events;
u64 duration;
- const char *pid_str;
struct intlist *pid_list;
struct rb_root result;
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 16bba9fff2c8..b7d477fbda02 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -17,8 +17,8 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
map_groups__init(&machine->kmaps);
RB_CLEAR_NODE(&machine->rb_node);
- INIT_LIST_HEAD(&machine->user_dsos);
- INIT_LIST_HEAD(&machine->kernel_dsos);
+ INIT_LIST_HEAD(&machine->user_dsos.head);
+ INIT_LIST_HEAD(&machine->kernel_dsos.head);
machine->threads = RB_ROOT;
INIT_LIST_HEAD(&machine->dead_threads);
@@ -31,6 +31,8 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
machine->symbol_filter = NULL;
machine->id_hdr_size = 0;
+ machine->comm_exec = false;
+ machine->kernel_start = 0;
machine->root_dir = strdup(root_dir);
if (machine->root_dir == NULL)
@@ -70,11 +72,12 @@ out_delete:
return NULL;
}
-static void dsos__delete(struct list_head *dsos)
+static void dsos__delete(struct dsos *dsos)
{
struct dso *pos, *n;
- list_for_each_entry_safe(pos, n, dsos, node) {
+ list_for_each_entry_safe(pos, n, &dsos->head, node) {
+ RB_CLEAR_NODE(&pos->rb_node);
list_del(&pos->node);
dso__delete(pos);
}
@@ -179,6 +182,19 @@ void machines__set_symbol_filter(struct machines *machines,
}
}
+void machines__set_comm_exec(struct machines *machines, bool comm_exec)
+{
+ struct rb_node *nd;
+
+ machines->host.comm_exec = comm_exec;
+
+ for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
+ struct machine *machine = rb_entry(nd, struct machine, rb_node);
+
+ machine->comm_exec = comm_exec;
+ }
+}
+
struct machine *machines__find(struct machines *machines, pid_t pid)
{
struct rb_node **p = &machines->guests.rb_node;
@@ -398,17 +414,31 @@ struct thread *machine__find_thread(struct machine *machine, pid_t pid,
return __machine__findnew_thread(machine, pid, tid, false);
}
+struct comm *machine__thread_exec_comm(struct machine *machine,
+ struct thread *thread)
+{
+ if (machine->comm_exec)
+ return thread__exec_comm(thread);
+ else
+ return thread__comm(thread);
+}
+
int machine__process_comm_event(struct machine *machine, union perf_event *event,
struct perf_sample *sample)
{
struct thread *thread = machine__findnew_thread(machine,
event->comm.pid,
event->comm.tid);
+ bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
+
+ if (exec)
+ machine->comm_exec = true;
if (dump_trace)
perf_event__fprintf_comm(event, stdout);
- if (thread == NULL || thread__set_comm(thread, event->comm.comm, sample->time)) {
+ if (thread == NULL ||
+ __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
return -1;
}
@@ -448,23 +478,23 @@ struct map *machine__new_module(struct machine *machine, u64 start,
size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
struct rb_node *nd;
- size_t ret = __dsos__fprintf(&machines->host.kernel_dsos, fp) +
- __dsos__fprintf(&machines->host.user_dsos, fp);
+ size_t ret = __dsos__fprintf(&machines->host.kernel_dsos.head, fp) +
+ __dsos__fprintf(&machines->host.user_dsos.head, fp);
for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
- ret += __dsos__fprintf(&pos->kernel_dsos, fp);
- ret += __dsos__fprintf(&pos->user_dsos, fp);
+ ret += __dsos__fprintf(&pos->kernel_dsos.head, fp);
+ ret += __dsos__fprintf(&pos->user_dsos.head, fp);
}
return ret;
}
-size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp,
+size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
bool (skip)(struct dso *dso, int parm), int parm)
{
- return __dsos__fprintf_buildid(&machine->kernel_dsos, fp, skip, parm) +
- __dsos__fprintf_buildid(&machine->user_dsos, fp, skip, parm);
+ return __dsos__fprintf_buildid(&m->kernel_dsos.head, fp, skip, parm) +
+ __dsos__fprintf_buildid(&m->user_dsos.head, fp, skip, parm);
}
size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
@@ -565,8 +595,8 @@ const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
* Returns the name of the start symbol in *symbol_name. Pass in NULL as
* symbol_name if it's not that important.
*/
-static u64 machine__get_kernel_start_addr(struct machine *machine,
- const char **symbol_name)
+static u64 machine__get_running_kernel_start(struct machine *machine,
+ const char **symbol_name)
{
char filename[PATH_MAX];
int i;
@@ -593,7 +623,7 @@ static u64 machine__get_kernel_start_addr(struct machine *machine,
int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
enum map_type type;
- u64 start = machine__get_kernel_start_addr(machine, NULL);
+ u64 start = machine__get_running_kernel_start(machine, NULL);
for (type = 0; type < MAP__NR_TYPES; ++type) {
struct kmap *kmap;
@@ -912,7 +942,7 @@ int machine__create_kernel_maps(struct machine *machine)
{
struct dso *kernel = machine__get_kernel(machine);
const char *name;
- u64 addr = machine__get_kernel_start_addr(machine, &name);
+ u64 addr = machine__get_running_kernel_start(machine, &name);
if (!addr)
return -1;
@@ -965,7 +995,7 @@ static bool machine__uses_kcore(struct machine *machine)
{
struct dso *dso;
- list_for_each_entry(dso, &machine->kernel_dsos, node) {
+ list_for_each_entry(dso, &machine->kernel_dsos.head, node) {
if (dso__is_kcore(dso))
return true;
}
@@ -1285,6 +1315,16 @@ static void ip__resolve_data(struct machine *machine, struct thread *thread,
thread__find_addr_location(thread, machine, m, MAP__VARIABLE, addr,
&al);
+ if (al.map == NULL) {
+ /*
+ * some shared data regions have execute bit set which puts
+ * their mapping in the MAP__FUNCTION type array.
+ * Check there as a fallback option before dropping the sample.
+ */
+ thread__find_addr_location(thread, machine, m, MAP__FUNCTION, addr,
+ &al);
+ }
+
ams->addr = addr;
ams->al_addr = al.addr;
ams->sym = al.sym;
@@ -1531,3 +1571,25 @@ int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
return 0;
}
+
+int machine__get_kernel_start(struct machine *machine)
+{
+ struct map *map = machine__kernel_map(machine, MAP__FUNCTION);
+ int err = 0;
+
+ /*
+ * The only addresses above 2^63 are kernel addresses of a 64-bit
+ * kernel. Note that addresses are unsigned so that on a 32-bit system
+ * all addresses including kernel addresses are less than 2^32. In
+ * that case (32-bit system), if the kernel mapping is unknown, all
+ * addresses will be assumed to be in user space - see
+ * machine__kernel_ip().
+ */
+ machine->kernel_start = 1ULL << 63;
+ if (map) {
+ err = map__load(map, machine->symbol_filter);
+ if (map->start)
+ machine->kernel_start = map->start;
+ }
+ return err;
+}
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index b972824e6294..2b651a7f5d0d 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -4,6 +4,7 @@
#include <sys/types.h>
#include <linux/rbtree.h>
#include "map.h"
+#include "dso.h"
#include "event.h"
struct addr_location;
@@ -26,15 +27,17 @@ struct machine {
struct rb_node rb_node;
pid_t pid;
u16 id_hdr_size;
+ bool comm_exec;
char *root_dir;
struct rb_root threads;
struct list_head dead_threads;
struct thread *last_match;
struct vdso_info *vdso_info;
- struct list_head user_dsos;
- struct list_head kernel_dsos;
+ struct dsos user_dsos;
+ struct dsos kernel_dsos;
struct map_groups kmaps;
struct map *vmlinux_maps[MAP__NR_TYPES];
+ u64 kernel_start;
symbol_filter_t symbol_filter;
pid_t *current_tid;
};
@@ -45,8 +48,26 @@ struct map *machine__kernel_map(struct machine *machine, enum map_type type)
return machine->vmlinux_maps[type];
}
+int machine__get_kernel_start(struct machine *machine);
+
+static inline u64 machine__kernel_start(struct machine *machine)
+{
+ if (!machine->kernel_start)
+ machine__get_kernel_start(machine);
+ return machine->kernel_start;
+}
+
+static inline bool machine__kernel_ip(struct machine *machine, u64 ip)
+{
+ u64 kernel_start = machine__kernel_start(machine);
+
+ return ip >= kernel_start;
+}
+
struct thread *machine__find_thread(struct machine *machine, pid_t pid,
pid_t tid);
+struct comm *machine__thread_exec_comm(struct machine *machine,
+ struct thread *thread);
int machine__process_comm_event(struct machine *machine, union perf_event *event,
struct perf_sample *sample);
@@ -88,6 +109,7 @@ char *machine__mmap_name(struct machine *machine, char *bf, size_t size);
void machines__set_symbol_filter(struct machines *machines,
symbol_filter_t symbol_filter);
+void machines__set_comm_exec(struct machines *machines, bool comm_exec);
struct machine *machine__new_host(void);
int machine__init(struct machine *machine, const char *root_dir, pid_t pid);
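machine__kernel_start() lazily resolves machine->kernel_start via machine__get_kernel_start(), falling back to the 1ULL << 63 sentinel when the kernel map is unknown: every 64-bit kernel address has the top bit set, while 32-bit addresses never reach 2^63, so unknown maps degrade to "everything is user space", exactly as the comment in machine.c describes. A tiny usage sketch:

	#include "util/machine.h"

	static const char *ip_domain(struct machine *machine, u64 ip)
	{
		/* e.g. 0xffffffff81000000 on x86_64 classifies as kernel;
		 * any 32-bit user address does not. */
		return machine__kernel_ip(machine, ip) ? "kernel" : "user";
	}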
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 31b8905dd863..b7090596ac50 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -31,6 +31,7 @@ static inline int is_anon_memory(const char *filename)
static inline int is_no_dso_memory(const char *filename)
{
return !strncmp(filename, "[stack", 6) ||
+	       !strncmp(filename, "/SYSV", 5) ||
!strcmp(filename, "[heap]");
}
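SysV shared memory segments show up in /proc/<pid>/maps with a "/SYSV%08x" pseudo-name, so is_no_dso_memory() now treats them like [stack] and [heap]: memory with no backing DSO. Illustrative names the prefix checks are expected to match (the exact map strings are examples, not taken from the patch):

	static const char * const no_dso_examples[] = {
		"[stack]",
		"[stack:1234]",			/* per-thread stacks share the "[stack" prefix */
		"/SYSV00000000 (deleted)",	/* SysV shm segment as listed in /proc/<pid>/maps */
		"[heap]",
	};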
diff --git a/tools/perf/util/ordered-events.c b/tools/perf/util/ordered-events.c
new file mode 100644
index 000000000000..706ce1a66169
--- /dev/null
+++ b/tools/perf/util/ordered-events.c
@@ -0,0 +1,245 @@
+#include <linux/list.h>
+#include <linux/compiler.h>
+#include "ordered-events.h"
+#include "evlist.h"
+#include "session.h"
+#include "asm/bug.h"
+#include "debug.h"
+
+#define pr_N(n, fmt, ...) \
+ eprintf(n, debug_ordered_events, fmt, ##__VA_ARGS__)
+
+#define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
+
+static void queue_event(struct ordered_events *oe, struct ordered_event *new)
+{
+ struct ordered_event *last = oe->last;
+ u64 timestamp = new->timestamp;
+ struct list_head *p;
+
+ ++oe->nr_events;
+ oe->last = new;
+
+ pr_oe_time2(timestamp, "queue_event nr_events %u\n", oe->nr_events);
+
+ if (!last) {
+ list_add(&new->list, &oe->events);
+ oe->max_timestamp = timestamp;
+ return;
+ }
+
+ /*
+ * last event might point to some random place in the list as it's
+ * the last queued event. We expect that the new event is close to
+ * this.
+ */
+ if (last->timestamp <= timestamp) {
+ while (last->timestamp <= timestamp) {
+ p = last->list.next;
+ if (p == &oe->events) {
+ list_add_tail(&new->list, &oe->events);
+ oe->max_timestamp = timestamp;
+ return;
+ }
+ last = list_entry(p, struct ordered_event, list);
+ }
+ list_add_tail(&new->list, &last->list);
+ } else {
+ while (last->timestamp > timestamp) {
+ p = last->list.prev;
+ if (p == &oe->events) {
+ list_add(&new->list, &oe->events);
+ return;
+ }
+ last = list_entry(p, struct ordered_event, list);
+ }
+ list_add(&new->list, &last->list);
+ }
+}
+
+#define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct ordered_event))
+static struct ordered_event *alloc_event(struct ordered_events *oe)
+{
+ struct list_head *cache = &oe->cache;
+ struct ordered_event *new = NULL;
+
+ if (!list_empty(cache)) {
+ new = list_entry(cache->next, struct ordered_event, list);
+ list_del(&new->list);
+ } else if (oe->buffer) {
+ new = oe->buffer + oe->buffer_idx;
+ if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
+ oe->buffer = NULL;
+ } else if (oe->cur_alloc_size < oe->max_alloc_size) {
+ size_t size = MAX_SAMPLE_BUFFER * sizeof(*new);
+
+ oe->buffer = malloc(size);
+ if (!oe->buffer)
+ return NULL;
+
+ pr("alloc size %" PRIu64 "B (+%zu), max %" PRIu64 "B\n",
+ oe->cur_alloc_size, size, oe->max_alloc_size);
+
+ oe->cur_alloc_size += size;
+ list_add(&oe->buffer->list, &oe->to_free);
+
+ /* First entry is abused to maintain the to_free list. */
+ oe->buffer_idx = 2;
+ new = oe->buffer + 1;
+ } else {
+ pr("allocation limit reached %" PRIu64 "B\n", oe->max_alloc_size);
+ }
+
+ return new;
+}
+
+struct ordered_event *
+ordered_events__new(struct ordered_events *oe, u64 timestamp)
+{
+ struct ordered_event *new;
+
+ new = alloc_event(oe);
+ if (new) {
+ new->timestamp = timestamp;
+ queue_event(oe, new);
+ }
+
+ return new;
+}
+
+void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
+{
+ list_move(&event->list, &oe->cache);
+ oe->nr_events--;
+}
+
+static int __ordered_events__flush(struct perf_session *s,
+ struct perf_tool *tool)
+{
+ struct ordered_events *oe = &s->ordered_events;
+ struct list_head *head = &oe->events;
+ struct ordered_event *tmp, *iter;
+ struct perf_sample sample;
+ u64 limit = oe->next_flush;
+ u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
+ bool show_progress = limit == ULLONG_MAX;
+ struct ui_progress prog;
+ int ret;
+
+ if (!tool->ordered_events || !limit)
+ return 0;
+
+ if (show_progress)
+ ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");
+
+ list_for_each_entry_safe(iter, tmp, head, list) {
+ if (session_done())
+ return 0;
+
+ if (iter->timestamp > limit)
+ break;
+
+ ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample);
+ if (ret)
+ pr_err("Can't parse sample, err = %d\n", ret);
+ else {
+ ret = perf_session__deliver_event(s, iter->event, &sample, tool,
+ iter->file_offset);
+ if (ret)
+ return ret;
+ }
+
+ ordered_events__delete(oe, iter);
+ oe->last_flush = iter->timestamp;
+
+ if (show_progress)
+ ui_progress__update(&prog, 1);
+ }
+
+ if (list_empty(head))
+ oe->last = NULL;
+ else if (last_ts <= limit)
+ oe->last = list_entry(head->prev, struct ordered_event, list);
+
+ return 0;
+}
+
+int ordered_events__flush(struct perf_session *s, struct perf_tool *tool,
+ enum oe_flush how)
+{
+ struct ordered_events *oe = &s->ordered_events;
+ static const char * const str[] = {
+ "NONE",
+ "FINAL",
+ "ROUND",
+ "HALF ",
+ };
+ int err;
+
+ switch (how) {
+ case OE_FLUSH__FINAL:
+ oe->next_flush = ULLONG_MAX;
+ break;
+
+ case OE_FLUSH__HALF:
+ {
+ struct ordered_event *first, *last;
+ struct list_head *head = &oe->events;
+
+ first = list_entry(head->next, struct ordered_event, list);
+ last = oe->last;
+
+ /* Warn if we are called before any event got allocated. */
+ if (WARN_ONCE(!last || list_empty(head), "empty queue"))
+ return 0;
+
+ oe->next_flush = first->timestamp;
+ oe->next_flush += (last->timestamp - first->timestamp) / 2;
+ break;
+ }
+
+ case OE_FLUSH__ROUND:
+ case OE_FLUSH__NONE:
+ default:
+ break;
+ };
+
+ pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush PRE %s, nr_events %u\n",
+ str[how], oe->nr_events);
+ pr_oe_time(oe->max_timestamp, "max_timestamp\n");
+
+ err = __ordered_events__flush(s, tool);
+
+ if (!err) {
+ if (how == OE_FLUSH__ROUND)
+ oe->next_flush = oe->max_timestamp;
+
+ oe->last_flush_type = how;
+ }
+
+ pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush POST %s, nr_events %u\n",
+ str[how], oe->nr_events);
+ pr_oe_time(oe->last_flush, "last_flush\n");
+
+ return err;
+}
+
+void ordered_events__init(struct ordered_events *oe)
+{
+ INIT_LIST_HEAD(&oe->events);
+ INIT_LIST_HEAD(&oe->cache);
+ INIT_LIST_HEAD(&oe->to_free);
+ oe->max_alloc_size = (u64) -1;
+ oe->cur_alloc_size = 0;
+}
+
+void ordered_events__free(struct ordered_events *oe)
+{
+ while (!list_empty(&oe->to_free)) {
+ struct ordered_event *event;
+
+ event = list_entry(oe->to_free.next, struct ordered_event, list);
+ list_del(&event->list);
+ free(event);
+ }
+}
diff --git a/tools/perf/util/ordered-events.h b/tools/perf/util/ordered-events.h
new file mode 100644
index 000000000000..3b2f20542a01
--- /dev/null
+++ b/tools/perf/util/ordered-events.h
@@ -0,0 +1,51 @@
+#ifndef __ORDERED_EVENTS_H
+#define __ORDERED_EVENTS_H
+
+#include <linux/types.h>
+#include "tool.h"
+
+struct perf_session;
+
+struct ordered_event {
+ u64 timestamp;
+ u64 file_offset;
+ union perf_event *event;
+ struct list_head list;
+};
+
+enum oe_flush {
+ OE_FLUSH__NONE,
+ OE_FLUSH__FINAL,
+ OE_FLUSH__ROUND,
+ OE_FLUSH__HALF,
+};
+
+struct ordered_events {
+ u64 last_flush;
+ u64 next_flush;
+ u64 max_timestamp;
+ u64 max_alloc_size;
+ u64 cur_alloc_size;
+ struct list_head events;
+ struct list_head cache;
+ struct list_head to_free;
+ struct ordered_event *buffer;
+ struct ordered_event *last;
+ int buffer_idx;
+ unsigned int nr_events;
+ enum oe_flush last_flush_type;
+};
+
+struct ordered_event *ordered_events__new(struct ordered_events *oe, u64 timestamp);
+void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event);
+int ordered_events__flush(struct perf_session *s, struct perf_tool *tool,
+ enum oe_flush how);
+void ordered_events__init(struct ordered_events *oe);
+void ordered_events__free(struct ordered_events *oe);
+
+static inline
+void ordered_events__set_alloc_size(struct ordered_events *oe, u64 size)
+{
+ oe->max_alloc_size = size;
+}
+#endif /* __ORDERED_EVENTS_H */
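Together, ordered-events.[ch] factor the time-ordered event queue out of session.c: a bump allocator with a reuse cache feeds queue_event(), and flushing walks the list up to next_flush, delivering each event through the session. A rough sketch of the intended call sequence (the retry-after-OE_FLUSH__HALF step is an assumption about how a caller such as perf_session__queue_event() is meant to react when the allocation cap is hit):

	#include <errno.h>
	#include "util/session.h"
	#include "util/ordered-events.h"

	static int queue_one(struct perf_session *s, struct perf_tool *tool,
			     union perf_event *event, u64 timestamp, u64 file_offset)
	{
		struct ordered_events *oe = &s->ordered_events;	/* member added by this series */
		struct ordered_event *new = ordered_events__new(oe, timestamp);

		if (!new) {
			/* Allocation cap reached: flush the older half, then retry once. */
			int err = ordered_events__flush(s, tool, OE_FLUSH__HALF);

			if (err)
				return err;
			new = ordered_events__new(oe, timestamp);
			if (!new)
				return -ENOMEM;
		}

		new->event = event;
		new->file_offset = file_offset;
		return 0;
	}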
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 1e15df10a88c..d76aa30cb1fb 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -10,6 +10,7 @@
#include "symbol.h"
#include "cache.h"
#include "header.h"
+#include "debug.h"
#include <api/fs/debugfs.h>
#include "parse-events-bison.h"
#define YY_EXTRA_TYPE int
@@ -633,18 +634,28 @@ int parse_events_add_pmu(struct list_head *list, int *idx,
char *name, struct list_head *head_config)
{
struct perf_event_attr attr;
+ struct perf_pmu_info info;
struct perf_pmu *pmu;
struct perf_evsel *evsel;
- const char *unit;
- double scale;
pmu = perf_pmu__find(name);
if (!pmu)
return -EINVAL;
- memset(&attr, 0, sizeof(attr));
+ if (pmu->default_config) {
+ memcpy(&attr, pmu->default_config,
+ sizeof(struct perf_event_attr));
+ } else {
+ memset(&attr, 0, sizeof(attr));
+ }
+
+ if (!head_config) {
+ attr.type = pmu->type;
+ evsel = __add_event(list, idx, &attr, NULL, pmu->cpus);
+ return evsel ? 0 : -ENOMEM;
+ }
- if (perf_pmu__check_alias(pmu, head_config, &unit, &scale))
+ if (perf_pmu__check_alias(pmu, head_config, &info))
return -EINVAL;
/*
@@ -659,8 +670,8 @@ int parse_events_add_pmu(struct list_head *list, int *idx,
evsel = __add_event(list, idx, &attr, pmu_event_name(head_config),
pmu->cpus);
if (evsel) {
- evsel->unit = unit;
- evsel->scale = scale;
+ evsel->unit = info.unit;
+ evsel->scale = info.scale;
}
return evsel ? 0 : -ENOMEM;
@@ -973,7 +984,7 @@ int parse_filter(const struct option *opt, const char *str,
if (last == NULL || last->attr.type != PERF_TYPE_TRACEPOINT) {
fprintf(stderr,
- "-F option should follow a -e tracepoint option\n");
+ "--filter option should follow a -e tracepoint option\n");
return -1;
}
@@ -1006,9 +1017,11 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
char evt_path[MAXPATHLEN];
char dir_path[MAXPATHLEN];
+ char sbuf[STRERR_BUFSIZE];
if (debugfs_valid_mountpoint(tracing_events_path)) {
- printf(" [ Tracepoints not available: %s ]\n", strerror(errno));
+ printf(" [ Tracepoints not available: %s ]\n",
+ strerror_r(errno, sbuf, sizeof(sbuf)));
return;
}
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index 0bc87ba46bf3..55fab6ad609a 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -210,6 +210,16 @@ PE_NAME '/' event_config '/'
parse_events__free_terms($3);
$$ = list;
}
+|
+PE_NAME '/' '/'
+{
+ struct parse_events_evlist *data = _data;
+ struct list_head *list;
+
+ ALLOC_LIST(list);
+ ABORT_ON(parse_events_add_pmu(list, &data->idx, $1, NULL));
+ $$ = list;
+}
value_sym:
PE_VALUE_SYM_HW
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 7a811eb61f75..93a41ca96b8e 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -2,6 +2,8 @@
#include <sys/types.h>
#include <unistd.h>
#include <stdio.h>
+#include <stdbool.h>
+#include <stdarg.h>
#include <dirent.h>
#include <api/fs/fs.h>
#include <locale.h>
@@ -14,8 +16,8 @@
struct perf_pmu_alias {
char *name;
- struct list_head terms;
- struct list_head list;
+ struct list_head terms; /* HEAD struct parse_events_term -> list */
+ struct list_head list; /* ELEM */
char unit[UNIT_MAX_LEN+1];
double scale;
};
@@ -208,6 +210,19 @@ static int perf_pmu__new_alias(struct list_head *list, char *dir, char *name, FI
return 0;
}
+static inline bool pmu_alias_info_file(char *name)
+{
+ size_t len;
+
+ len = strlen(name);
+ if (len > 5 && !strcmp(name + len - 5, ".unit"))
+ return true;
+ if (len > 6 && !strcmp(name + len - 6, ".scale"))
+ return true;
+
+ return false;
+}
+
/*
* Process all the sysfs attributes located under the directory
* specified in 'dir' parameter.
@@ -216,7 +231,6 @@ static int pmu_aliases_parse(char *dir, struct list_head *head)
{
struct dirent *evt_ent;
DIR *event_dir;
- size_t len;
int ret = 0;
event_dir = opendir(dir);
@@ -232,13 +246,9 @@ static int pmu_aliases_parse(char *dir, struct list_head *head)
continue;
/*
- * skip .unit and .scale info files
- * parsed in perf_pmu__new_alias()
+ * skip info files parsed in perf_pmu__new_alias()
*/
- len = strlen(name);
- if (len > 5 && !strcmp(name + len - 5, ".unit"))
- continue;
- if (len > 6 && !strcmp(name + len - 6, ".scale"))
+ if (pmu_alias_info_file(name))
continue;
snprintf(path, PATH_MAX, "%s/%s", dir, name);
@@ -387,6 +397,12 @@ static struct cpu_map *pmu_cpumask(const char *name)
return cpus;
}
+struct perf_event_attr *__attribute__((weak))
+perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused)
+{
+ return NULL;
+}
+
static struct perf_pmu *pmu_lookup(const char *name)
{
struct perf_pmu *pmu;
@@ -421,6 +437,9 @@ static struct perf_pmu *pmu_lookup(const char *name)
pmu->name = strdup(name);
pmu->type = type;
list_add_tail(&pmu->list, &pmus);
+
+ pmu->default_config = perf_pmu__get_default_config(pmu);
+
return pmu;
}
@@ -479,28 +498,24 @@ pmu_find_format(struct list_head *formats, char *name)
}
/*
- * Returns value based on the format definition (format parameter)
+ * Sets value based on the format definition (format parameter)
* and unformated value (value parameter).
- *
- * TODO maybe optimize a little ;)
*/
-static __u64 pmu_format_value(unsigned long *format, __u64 value)
+static void pmu_format_value(unsigned long *format, __u64 value, __u64 *v,
+ bool zero)
{
unsigned long fbit, vbit;
- __u64 v = 0;
for (fbit = 0, vbit = 0; fbit < PERF_PMU_FORMAT_BITS; fbit++) {
if (!test_bit(fbit, format))
continue;
- if (!(value & (1llu << vbit++)))
- continue;
-
- v |= (1llu << fbit);
+ if (value & (1llu << vbit++))
+ *v |= (1llu << fbit);
+ else if (zero)
+ *v &= ~(1llu << fbit);
}
-
- return v;
}
/*
@@ -509,7 +524,8 @@ static __u64 pmu_format_value(unsigned long *format, __u64 value)
*/
static int pmu_config_term(struct list_head *formats,
struct perf_event_attr *attr,
- struct parse_events_term *term)
+ struct parse_events_term *term,
+ bool zero)
{
struct perf_pmu_format *format;
__u64 *vp;
@@ -548,18 +564,19 @@ static int pmu_config_term(struct list_head *formats,
* non-hardcoded terms, here's the place to translate
* them into value.
*/
- *vp |= pmu_format_value(format->bits, term->val.num);
+ pmu_format_value(format->bits, term->val.num, vp, zero);
return 0;
}
int perf_pmu__config_terms(struct list_head *formats,
struct perf_event_attr *attr,
- struct list_head *head_terms)
+ struct list_head *head_terms,
+ bool zero)
{
struct parse_events_term *term;
list_for_each_entry(term, head_terms, list)
- if (pmu_config_term(formats, attr, term))
+ if (pmu_config_term(formats, attr, term, zero))
return -EINVAL;
return 0;
@@ -573,8 +590,10 @@ int perf_pmu__config_terms(struct list_head *formats,
int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
struct list_head *head_terms)
{
+ bool zero = !!pmu->default_config;
+
attr->type = pmu->type;
- return perf_pmu__config_terms(&pmu->format, attr, head_terms);
+ return perf_pmu__config_terms(&pmu->format, attr, head_terms, zero);
}
static struct perf_pmu_alias *pmu_find_alias(struct perf_pmu *pmu,
@@ -634,7 +653,7 @@ static int check_unit_scale(struct perf_pmu_alias *alias,
* defined for the alias
*/
int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms,
- const char **unit, double *scale)
+ struct perf_pmu_info *info)
{
struct parse_events_term *term, *h;
struct perf_pmu_alias *alias;
@@ -644,8 +663,8 @@ int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms,
* Mark unit and scale as not set
* (different from default values, see below)
*/
- *unit = NULL;
- *scale = 0.0;
+ info->unit = NULL;
+ info->scale = 0.0;
list_for_each_entry_safe(term, h, head_terms, list) {
alias = pmu_find_alias(pmu, term);
@@ -655,7 +674,7 @@ int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms,
if (ret)
return ret;
- ret = check_unit_scale(alias, unit, scale);
+ ret = check_unit_scale(alias, &info->unit, &info->scale);
if (ret)
return ret;
@@ -668,11 +687,11 @@ int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms,
* set defaults as for evsel
* unit cannot left to NULL
*/
- if (*unit == NULL)
- *unit = "";
+ if (info->unit == NULL)
+ info->unit = "";
- if (*scale == 0.0)
- *scale = 1.0;
+ if (info->scale == 0.0)
+ info->scale = 1.0;
return 0;
}
@@ -794,3 +813,39 @@ bool pmu_have_event(const char *pname, const char *name)
}
return false;
}
+
+static FILE *perf_pmu__open_file(struct perf_pmu *pmu, const char *name)
+{
+ struct stat st;
+ char path[PATH_MAX];
+ const char *sysfs;
+
+ sysfs = sysfs__mountpoint();
+ if (!sysfs)
+ return NULL;
+
+ snprintf(path, PATH_MAX,
+ "%s" EVENT_SOURCE_DEVICE_PATH "%s/%s", sysfs, pmu->name, name);
+
+ if (stat(path, &st) < 0)
+ return NULL;
+
+ return fopen(path, "r");
+}
+
+int perf_pmu__scan_file(struct perf_pmu *pmu, const char *name, const char *fmt,
+ ...)
+{
+ va_list args;
+ FILE *file;
+ int ret = EOF;
+
+ va_start(args, fmt);
+ file = perf_pmu__open_file(pmu, name);
+ if (file) {
+ ret = vfscanf(file, fmt, args);
+ fclose(file);
+ }
+ va_end(args);
+ return ret;
+}
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index c14a543ce1f3..fe90a012c003 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -13,13 +13,21 @@ enum {
#define PERF_PMU_FORMAT_BITS 64
+struct perf_event_attr;
+
struct perf_pmu {
char *name;
__u32 type;
+ struct perf_event_attr *default_config;
struct cpu_map *cpus;
- struct list_head format;
- struct list_head aliases;
- struct list_head list;
+ struct list_head format; /* HEAD struct perf_pmu_format -> list */
+ struct list_head aliases; /* HEAD struct perf_pmu_alias -> list */
+ struct list_head list; /* ELEM */
+};
+
+struct perf_pmu_info {
+ const char *unit;
+ double scale;
};
struct perf_pmu *perf_pmu__find(const char *name);
@@ -27,9 +35,10 @@ int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
struct list_head *head_terms);
int perf_pmu__config_terms(struct list_head *formats,
struct perf_event_attr *attr,
- struct list_head *head_terms);
+ struct list_head *head_terms,
+ bool zero);
int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms,
- const char **unit, double *scale);
+ struct perf_pmu_info *info);
struct list_head *perf_pmu__alias(struct perf_pmu *pmu,
struct list_head *head_terms);
int perf_pmu_wrap(void);
@@ -45,5 +54,11 @@ struct perf_pmu *perf_pmu__scan(struct perf_pmu *pmu);
void print_pmu_events(const char *event_glob, bool name_only);
bool pmu_have_event(const char *pname, const char *name);
+int perf_pmu__scan_file(struct perf_pmu *pmu, const char *name, const char *fmt,
+ ...) __attribute__((format(scanf, 3, 4)));
+
int perf_pmu__test(void);
+
+struct perf_event_attr *perf_pmu__get_default_config(struct perf_pmu *pmu);
+
#endif /* __PMU_H */
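perf_pmu__scan_file() opens a file under the PMU's sysfs directory and runs vfscanf() on it, so callers can read small attribute files in one call and test the number of converted fields. A hedged example; "some_attr" is a made-up attribute name, not one defined by this patch:

	#include "util/pmu.h"

	static int read_pmu_int_attr(struct perf_pmu *pmu)
	{
		int val = 0;

		/* Returns the vfscanf() conversion count, or EOF if the file
		 * could not be opened. */
		if (perf_pmu__scan_file(pmu, "some_attr", "%d", &val) != 1)
			return -1;

		return val;
	}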
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 9a0a1839a377..c150ca4343eb 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -79,7 +79,7 @@ static int init_symbol_maps(bool user_only)
int ret;
symbol_conf.sort_by_name = true;
- ret = symbol__init();
+ ret = symbol__init(NULL);
if (ret < 0) {
pr_debug("Failed to init symbol map.\n");
goto out;
@@ -184,7 +184,8 @@ static struct dso *kernel_get_module_dso(const char *module)
const char *vmlinux_name;
if (module) {
- list_for_each_entry(dso, &host_machine->kernel_dsos, node) {
+ list_for_each_entry(dso, &host_machine->kernel_dsos.head,
+ node) {
if (strncmp(dso->short_name + 1, module,
dso->short_name_len - 2) == 0)
goto found;
@@ -258,21 +259,33 @@ static void clear_probe_trace_events(struct probe_trace_event *tevs, int ntevs)
#ifdef HAVE_DWARF_SUPPORT
/* Open new debuginfo of given module */
-static struct debuginfo *open_debuginfo(const char *module)
+static struct debuginfo *open_debuginfo(const char *module, bool silent)
{
const char *path = module;
+ struct debuginfo *ret;
if (!module || !strchr(module, '/')) {
path = kernel_get_module_path(module);
if (!path) {
- pr_err("Failed to find path of %s module.\n",
- module ?: "kernel");
+ if (!silent)
+ pr_err("Failed to find path of %s module.\n",
+ module ?: "kernel");
return NULL;
}
}
- return debuginfo__new(path);
+ ret = debuginfo__new(path);
+ if (!ret && !silent) {
+ pr_warning("The %s file has no debug information.\n", path);
+ if (!module || !strtailcmp(path, ".ko"))
+ pr_warning("Rebuild with CONFIG_DEBUG_INFO=y, ");
+ else
+ pr_warning("Rebuild with -g, ");
+ pr_warning("or install an appropriate debuginfo package.\n");
+ }
+ return ret;
}
+
static int get_text_start_address(const char *exec, unsigned long *address)
{
Elf *elf;
@@ -333,15 +346,13 @@ static int find_perf_probe_point_from_dwarf(struct probe_trace_point *tp,
pr_debug("try to find information at %" PRIx64 " in %s\n", addr,
tp->module ? : "kernel");
- dinfo = open_debuginfo(tp->module);
+ dinfo = open_debuginfo(tp->module, verbose == 0);
if (dinfo) {
ret = debuginfo__find_probe_point(dinfo,
(unsigned long)addr, pp);
debuginfo__delete(dinfo);
- } else {
- pr_debug("Failed to open debuginfo at 0x%" PRIx64 "\n", addr);
+ } else
ret = -ENOENT;
- }
if (ret > 0) {
pp->retprobe = tp->retprobe;
@@ -457,13 +468,11 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
struct debuginfo *dinfo;
int ntevs, ret = 0;
- dinfo = open_debuginfo(target);
+ dinfo = open_debuginfo(target, !need_dwarf);
if (!dinfo) {
- if (need_dwarf) {
- pr_warning("Failed to open debuginfo file.\n");
+ if (need_dwarf)
return -ENOENT;
- }
pr_debug("Could not open debuginfo. Try to use symbols.\n");
return 0;
}
@@ -565,7 +574,7 @@ static int get_real_path(const char *raw_path, const char *comp_dir,
static int __show_one_line(FILE *fp, int l, bool skip, bool show_num)
{
- char buf[LINEBUF_SIZE];
+ char buf[LINEBUF_SIZE], sbuf[STRERR_BUFSIZE];
const char *color = show_num ? "" : PERF_COLOR_BLUE;
const char *prefix = NULL;
@@ -585,7 +594,8 @@ static int __show_one_line(FILE *fp, int l, bool skip, bool show_num)
return 1;
error:
if (ferror(fp)) {
- pr_warning("File read error: %s\n", strerror(errno));
+ pr_warning("File read error: %s\n",
+ strerror_r(errno, sbuf, sizeof(sbuf)));
return -1;
}
return 0;
@@ -618,13 +628,12 @@ static int __show_line_range(struct line_range *lr, const char *module)
FILE *fp;
int ret;
char *tmp;
+ char sbuf[STRERR_BUFSIZE];
/* Search a line range */
- dinfo = open_debuginfo(module);
- if (!dinfo) {
- pr_warning("Failed to open debuginfo file.\n");
+ dinfo = open_debuginfo(module, false);
+ if (!dinfo)
return -ENOENT;
- }
ret = debuginfo__find_line_range(dinfo, lr);
debuginfo__delete(dinfo);
@@ -656,7 +665,7 @@ static int __show_line_range(struct line_range *lr, const char *module)
fp = fopen(lr->path, "r");
if (fp == NULL) {
pr_warning("Failed to open %s: %s\n", lr->path,
- strerror(errno));
+ strerror_r(errno, sbuf, sizeof(sbuf)));
return -errno;
}
/* Skip to starting line number */
@@ -689,11 +698,11 @@ end:
return ret;
}
-int show_line_range(struct line_range *lr, const char *module)
+int show_line_range(struct line_range *lr, const char *module, bool user)
{
int ret;
- ret = init_symbol_maps(false);
+ ret = init_symbol_maps(user);
if (ret < 0)
return ret;
ret = __show_line_range(lr, module);
@@ -768,13 +777,12 @@ int show_available_vars(struct perf_probe_event *pevs, int npevs,
int i, ret = 0;
struct debuginfo *dinfo;
- ret = init_symbol_maps(false);
+ ret = init_symbol_maps(pevs->uprobes);
if (ret < 0)
return ret;
- dinfo = open_debuginfo(module);
+ dinfo = open_debuginfo(module, false);
if (!dinfo) {
- pr_warning("Failed to open debuginfo file.\n");
ret = -ENOENT;
goto out;
}
@@ -815,7 +823,8 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
}
int show_line_range(struct line_range *lr __maybe_unused,
- const char *module __maybe_unused)
+ const char *module __maybe_unused,
+ bool user __maybe_unused)
{
pr_warning("Debuginfo-analysis is not supported.\n");
return -ENOSYS;
@@ -1405,8 +1414,7 @@ int synthesize_perf_probe_arg(struct perf_probe_arg *pa, char *buf, size_t len)
return tmp - buf;
error:
- pr_debug("Failed to synthesize perf probe argument: %s\n",
- strerror(-ret));
+ pr_debug("Failed to synthesize perf probe argument: %d\n", ret);
return ret;
}
@@ -1455,8 +1463,7 @@ static char *synthesize_perf_probe_point(struct perf_probe_point *pp)
return buf;
error:
- pr_debug("Failed to synthesize perf probe point: %s\n",
- strerror(-ret));
+ pr_debug("Failed to synthesize perf probe point: %d\n", ret);
free(buf);
return NULL;
}
@@ -1780,10 +1787,11 @@ static void clear_probe_trace_event(struct probe_trace_event *tev)
memset(tev, 0, sizeof(*tev));
}
-static void print_warn_msg(const char *file, bool is_kprobe)
+static void print_open_warning(int err, bool is_kprobe)
{
+ char sbuf[STRERR_BUFSIZE];
- if (errno == ENOENT) {
+ if (err == -ENOENT) {
const char *config;
if (!is_kprobe)
@@ -1791,25 +1799,43 @@ static void print_warn_msg(const char *file, bool is_kprobe)
else
config = "CONFIG_KPROBE_EVENTS";
- pr_warning("%s file does not exist - please rebuild kernel"
- " with %s.\n", file, config);
- } else
- pr_warning("Failed to open %s file: %s\n", file,
- strerror(errno));
+ pr_warning("%cprobe_events file does not exist"
+ " - please rebuild kernel with %s.\n",
+ is_kprobe ? 'k' : 'u', config);
+ } else if (err == -ENOTSUP)
+ pr_warning("Debugfs is not mounted.\n");
+ else
+ pr_warning("Failed to open %cprobe_events: %s\n",
+ is_kprobe ? 'k' : 'u',
+ strerror_r(-err, sbuf, sizeof(sbuf)));
+}
+
+static void print_both_open_warning(int kerr, int uerr)
+{
+	/* Both kprobes and uprobes are disabled, warn about it. */
+ if (kerr == -ENOTSUP && uerr == -ENOTSUP)
+ pr_warning("Debugfs is not mounted.\n");
+ else if (kerr == -ENOENT && uerr == -ENOENT)
+ pr_warning("Please rebuild kernel with CONFIG_KPROBE_EVENTS "
+			   "and/or CONFIG_UPROBE_EVENTS.\n");
+ else {
+ char sbuf[STRERR_BUFSIZE];
+ pr_warning("Failed to open kprobe events: %s.\n",
+ strerror_r(-kerr, sbuf, sizeof(sbuf)));
+ pr_warning("Failed to open uprobe events: %s.\n",
+ strerror_r(-uerr, sbuf, sizeof(sbuf)));
+ }
}
-static int open_probe_events(const char *trace_file, bool readwrite,
- bool is_kprobe)
+static int open_probe_events(const char *trace_file, bool readwrite)
{
char buf[PATH_MAX];
const char *__debugfs;
int ret;
__debugfs = debugfs_find_mountpoint();
- if (__debugfs == NULL) {
- pr_warning("Debugfs is not mounted.\n");
- return -ENOENT;
- }
+ if (__debugfs == NULL)
+ return -ENOTSUP;
ret = e_snprintf(buf, PATH_MAX, "%s/%s", __debugfs, trace_file);
if (ret >= 0) {
@@ -1820,19 +1846,19 @@ static int open_probe_events(const char *trace_file, bool readwrite,
ret = open(buf, O_RDONLY, 0);
if (ret < 0)
- print_warn_msg(buf, is_kprobe);
+ ret = -errno;
}
return ret;
}
static int open_kprobe_events(bool readwrite)
{
- return open_probe_events("tracing/kprobe_events", readwrite, true);
+ return open_probe_events("tracing/kprobe_events", readwrite);
}
static int open_uprobe_events(bool readwrite)
{
- return open_probe_events("tracing/uprobe_events", readwrite, false);
+ return open_probe_events("tracing/uprobe_events", readwrite);
}
/* Get raw string list of current kprobe_events or uprobe_events */
@@ -1857,7 +1883,7 @@ static struct strlist *get_probe_trace_command_rawlist(int fd)
p[idx] = '\0';
ret = strlist__add(sl, buf);
if (ret < 0) {
- pr_debug("strlist__add failed: %s\n", strerror(-ret));
+ pr_debug("strlist__add failed (%d)\n", ret);
strlist__delete(sl);
return NULL;
}
@@ -1916,7 +1942,7 @@ static int __show_perf_probe_events(int fd, bool is_kprobe)
rawlist = get_probe_trace_command_rawlist(fd);
if (!rawlist)
- return -ENOENT;
+ return -ENOMEM;
strlist__for_each(ent, rawlist) {
ret = parse_probe_trace_command(ent->s, &tev);
@@ -1940,27 +1966,34 @@ static int __show_perf_probe_events(int fd, bool is_kprobe)
/* List up current perf-probe events */
int show_perf_probe_events(void)
{
- int fd, ret;
+ int kp_fd, up_fd, ret;
setup_pager();
- fd = open_kprobe_events(false);
-
- if (fd < 0)
- return fd;
ret = init_symbol_maps(false);
if (ret < 0)
return ret;
- ret = __show_perf_probe_events(fd, true);
- close(fd);
+ kp_fd = open_kprobe_events(false);
+ if (kp_fd >= 0) {
+ ret = __show_perf_probe_events(kp_fd, true);
+ close(kp_fd);
+ if (ret < 0)
+ goto out;
+ }
- fd = open_uprobe_events(false);
- if (fd >= 0) {
- ret = __show_perf_probe_events(fd, false);
- close(fd);
+ up_fd = open_uprobe_events(false);
+ if (kp_fd < 0 && up_fd < 0) {
+ print_both_open_warning(kp_fd, up_fd);
+ ret = kp_fd;
+ goto out;
}
+ if (up_fd >= 0) {
+ ret = __show_perf_probe_events(up_fd, false);
+ close(up_fd);
+ }
+out:
exit_symbol_maps();
return ret;
}
@@ -1976,6 +2009,8 @@ static struct strlist *get_probe_trace_event_names(int fd, bool include_group)
memset(&tev, 0, sizeof(tev));
rawlist = get_probe_trace_command_rawlist(fd);
+ if (!rawlist)
+ return NULL;
sl = strlist__new(true, NULL);
strlist__for_each(ent, rawlist) {
ret = parse_probe_trace_command(ent->s, &tev);
@@ -2005,6 +2040,7 @@ static int write_probe_trace_event(int fd, struct probe_trace_event *tev)
{
int ret = 0;
char *buf = synthesize_probe_trace_command(tev);
+ char sbuf[STRERR_BUFSIZE];
if (!buf) {
pr_debug("Failed to synthesize probe trace event.\n");
@@ -2016,7 +2052,7 @@ static int write_probe_trace_event(int fd, struct probe_trace_event *tev)
ret = write(fd, buf, strlen(buf));
if (ret <= 0)
pr_warning("Failed to write event: %s\n",
- strerror(errno));
+ strerror_r(errno, sbuf, sizeof(sbuf)));
}
free(buf);
return ret;
@@ -2030,7 +2066,7 @@ static int get_new_event_name(char *buf, size_t len, const char *base,
/* Try no suffix */
ret = e_snprintf(buf, len, "%s", base);
if (ret < 0) {
- pr_debug("snprintf() failed: %s\n", strerror(-ret));
+ pr_debug("snprintf() failed: %d\n", ret);
return ret;
}
if (!strlist__has_entry(namelist, buf))
@@ -2046,7 +2082,7 @@ static int get_new_event_name(char *buf, size_t len, const char *base,
for (i = 1; i < MAX_EVENT_INDEX; i++) {
ret = e_snprintf(buf, len, "%s_%d", base, i);
if (ret < 0) {
- pr_debug("snprintf() failed: %s\n", strerror(-ret));
+ pr_debug("snprintf() failed: %d\n", ret);
return ret;
}
if (!strlist__has_entry(namelist, buf))
@@ -2075,8 +2111,11 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
else
fd = open_kprobe_events(true);
- if (fd < 0)
+ if (fd < 0) {
+ print_open_warning(fd, !pev->uprobes);
return fd;
+ }
+
/* Get current event names */
namelist = get_probe_trace_event_names(fd, false);
if (!namelist) {
@@ -2408,7 +2447,8 @@ static int __del_trace_probe_event(int fd, struct str_node *ent)
printf("Removed event: %s\n", ent->s);
return 0;
error:
- pr_warning("Failed to delete event: %s\n", strerror(-ret));
+ pr_warning("Failed to delete event: %s\n",
+ strerror_r(-ret, buf, sizeof(buf)));
return ret;
}
@@ -2449,15 +2489,18 @@ int del_perf_probe_events(struct strlist *dellist)
/* Get current event names */
kfd = open_kprobe_events(true);
- if (kfd < 0)
- return kfd;
+ if (kfd >= 0)
+ namelist = get_probe_trace_event_names(kfd, true);
- namelist = get_probe_trace_event_names(kfd, true);
ufd = open_uprobe_events(true);
-
if (ufd >= 0)
unamelist = get_probe_trace_event_names(ufd, true);
+ if (kfd < 0 && ufd < 0) {
+ print_both_open_warning(kfd, ufd);
+ goto error;
+ }
+
if (namelist == NULL && unamelist == NULL)
goto error;
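The open_probe_events() rework changes the error convention: the helpers now return a negative errno (-ENOTSUP when debugfs is not mounted, -errno from open()), and warning output moves to the callers via print_open_warning() / print_both_open_warning(). A small sketch of how a caller follows the new convention, mirroring __add_probe_trace_events() above (these are static helpers inside probe-event.c):

	static int open_and_warn(bool uprobes)
	{
		int fd = uprobes ? open_uprobe_events(true) : open_kprobe_events(true);

		/* The helper no longer warns by itself; the caller decides how. */
		if (fd < 0)
			print_open_warning(fd, !uprobes);
		return fd;
	}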
diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h
index 776c9347a3b6..e01e9943139f 100644
--- a/tools/perf/util/probe-event.h
+++ b/tools/perf/util/probe-event.h
@@ -128,7 +128,8 @@ extern int add_perf_probe_events(struct perf_probe_event *pevs, int npevs,
bool force_add);
extern int del_perf_probe_events(struct strlist *dellist);
extern int show_perf_probe_events(void);
-extern int show_line_range(struct line_range *lr, const char *module);
+extern int show_line_range(struct line_range *lr, const char *module,
+ bool user);
extern int show_available_vars(struct perf_probe_event *pevs, int npevs,
int max_probe_points, const char *module,
struct strfilter *filter, bool externs);
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index dca9145d704c..c7918f83b300 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -281,6 +281,7 @@ static int convert_variable_type(Dwarf_Die *vr_die,
struct probe_trace_arg_ref **ref_ptr = &tvar->ref;
Dwarf_Die type;
char buf[16];
+ char sbuf[STRERR_BUFSIZE];
int bsize, boffs, total;
int ret;
@@ -367,7 +368,7 @@ formatted:
if (ret >= 16)
ret = -E2BIG;
pr_warning("Failed to convert variable type: %s\n",
- strerror(-ret));
+ strerror_r(-ret, sbuf, sizeof(sbuf)));
return ret;
}
tvar->type = strdup(buf);
@@ -608,14 +609,18 @@ static int convert_to_trace_point(Dwarf_Die *sp_die, Dwfl_Module *mod,
return -EINVAL;
}
- /* Get an appropriate symbol from symtab */
- symbol = dwfl_module_addrsym(mod, paddr, &sym, NULL);
+ symbol = dwarf_diename(sp_die);
if (!symbol) {
- pr_warning("Failed to find symbol at 0x%lx\n",
- (unsigned long)paddr);
- return -ENOENT;
+ /* Try to get the symbol name from symtab */
+ symbol = dwfl_module_addrsym(mod, paddr, &sym, NULL);
+ if (!symbol) {
+ pr_warning("Failed to find symbol at 0x%lx\n",
+ (unsigned long)paddr);
+ return -ENOENT;
+ }
+ eaddr = sym.st_value;
}
- tp->offset = (unsigned long)(paddr - sym.st_value);
+ tp->offset = (unsigned long)(paddr - eaddr);
tp->address = (unsigned long)paddr;
tp->symbol = strdup(symbol);
if (!tp->symbol)
@@ -779,10 +784,12 @@ static int find_lazy_match_lines(struct intlist *list,
size_t line_len;
ssize_t len;
int count = 0, linenum = 1;
+ char sbuf[STRERR_BUFSIZE];
fp = fopen(fname, "r");
if (!fp) {
- pr_warning("Failed to open %s: %s\n", fname, strerror(errno));
+ pr_warning("Failed to open %s: %s\n", fname,
+ strerror_r(errno, sbuf, sizeof(sbuf)));
return -errno;
}
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 12aa9b0d0ba1..3dda85ca50c1 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -736,7 +736,7 @@ static PyObject *pyrf_evlist__poll(struct pyrf_evlist *pevlist,
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &timeout))
return NULL;
- n = poll(evlist->pollfd, evlist->nr_fds, timeout);
+ n = perf_evlist__poll(evlist, timeout);
if (n < 0) {
PyErr_SetFromErrno(PyExc_OSError);
return NULL;
@@ -753,9 +753,9 @@ static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist,
PyObject *list = PyList_New(0);
int i;
- for (i = 0; i < evlist->nr_fds; ++i) {
+ for (i = 0; i < evlist->pollfd.nr; ++i) {
PyObject *file;
- FILE *fp = fdopen(evlist->pollfd[i].fd, "r");
+ FILE *fp = fdopen(evlist->pollfd.entries[i].fd, "r");
if (fp == NULL)
goto free_list;
diff --git a/tools/perf/util/record.c b/tools/perf/util/record.c
index fe8079edbdc1..cf69325b985f 100644
--- a/tools/perf/util/record.c
+++ b/tools/perf/util/record.c
@@ -14,6 +14,7 @@ static int perf_do_probe_api(setup_probe_fn_t fn, int cpu, const char *str)
struct perf_evsel *evsel;
unsigned long flags = perf_event_open_cloexec_flag();
int err = -EAGAIN, fd;
+ static pid_t pid = -1;
evlist = perf_evlist__new();
if (!evlist)
@@ -24,14 +25,22 @@ static int perf_do_probe_api(setup_probe_fn_t fn, int cpu, const char *str)
evsel = perf_evlist__first(evlist);
- fd = sys_perf_event_open(&evsel->attr, -1, cpu, -1, flags);
- if (fd < 0)
- goto out_delete;
+ while (1) {
+ fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1, flags);
+ if (fd < 0) {
+ if (pid == -1 && errno == EACCES) {
+ pid = 0;
+ continue;
+ }
+ goto out_delete;
+ }
+ break;
+ }
close(fd);
fn(evsel);
- fd = sys_perf_event_open(&evsel->attr, -1, cpu, -1, flags);
+ fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1, flags);
if (fd < 0) {
if (errno == EINVAL)
err = -EINVAL;
@@ -47,7 +56,7 @@ out_delete:
static bool perf_probe_api(setup_probe_fn_t fn)
{
- const char *try[] = {"cycles:u", "instructions:u", "cpu-clock", NULL};
+ const char *try[] = {"cycles:u", "instructions:u", "cpu-clock:u", NULL};
struct cpu_map *cpus;
int cpu, ret, i = 0;
@@ -106,7 +115,7 @@ void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts)
evlist__for_each(evlist, evsel) {
perf_evsel__config(evsel, opts);
- if (!evsel->idx && use_comm_exec)
+ if (evsel->tracking && use_comm_exec)
evsel->attr.comm_exec = 1;
}
@@ -201,6 +210,7 @@ bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str)
struct perf_evsel *evsel;
int err, fd, cpu;
bool ret = false;
+ pid_t pid = -1;
temp_evlist = perf_evlist__new();
if (!temp_evlist)
@@ -221,12 +231,20 @@ bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str)
cpu = evlist->cpus->map[0];
}
- fd = sys_perf_event_open(&evsel->attr, -1, cpu, -1,
- perf_event_open_cloexec_flag());
- if (fd >= 0) {
- close(fd);
- ret = true;
+ while (1) {
+ fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1,
+ perf_event_open_cloexec_flag());
+ if (fd < 0) {
+ if (pid == -1 && errno == EACCES) {
+ pid = 0;
+ continue;
+ }
+ goto out_delete;
+ }
+ break;
}
+ close(fd);
+ ret = true;
out_delete:
perf_evlist__delete(temp_evlist);
diff --git a/tools/perf/util/run-command.c b/tools/perf/util/run-command.c
index da8e9b285f51..34622b53e733 100644
--- a/tools/perf/util/run-command.c
+++ b/tools/perf/util/run-command.c
@@ -1,6 +1,7 @@
#include "cache.h"
#include "run-command.h"
#include "exec_cmd.h"
+#include "debug.h"
static inline void close_pair(int fd[2])
{
@@ -19,6 +20,7 @@ int start_command(struct child_process *cmd)
{
int need_in, need_out, need_err;
int fdin[2], fdout[2], fderr[2];
+ char sbuf[STRERR_BUFSIZE];
/*
* In case of errors we must keep the promise to close FDs
@@ -99,7 +101,7 @@ int start_command(struct child_process *cmd)
if (cmd->dir && chdir(cmd->dir))
die("exec %s: cd to %s failed (%s)", cmd->argv[0],
- cmd->dir, strerror(errno));
+ cmd->dir, strerror_r(errno, sbuf, sizeof(sbuf)));
if (cmd->env) {
for (; *cmd->env; cmd->env++) {
if (strchr(*cmd->env, '='))
@@ -153,6 +155,8 @@ int start_command(struct child_process *cmd)
static int wait_or_whine(pid_t pid)
{
+ char sbuf[STRERR_BUFSIZE];
+
for (;;) {
int status, code;
pid_t waiting = waitpid(pid, &status, 0);
@@ -160,7 +164,8 @@ static int wait_or_whine(pid_t pid)
if (waiting < 0) {
if (errno == EINTR)
continue;
- error("waitpid failed (%s)", strerror(errno));
+ error("waitpid failed (%s)",
+ strerror_r(errno, sbuf, sizeof(sbuf)));
return -ERR_RUN_COMMAND_WAITPID;
}
if (waiting != pid)
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index b2dba9c0a3a1..0a01bac4ce02 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -432,6 +432,11 @@ error:
return err;
}
+static int perl_flush_script(void)
+{
+ return 0;
+}
+
/*
* Stop trace script
*/
@@ -633,6 +638,7 @@ static int perl_generate_script(struct pevent *pevent, const char *outfile)
struct scripting_ops perl_scripting_ops = {
.name = "Perl",
.start_script = perl_start_script,
+ .flush_script = perl_flush_script,
.stop_script = perl_stop_script,
.process_event = perl_process_event,
.generate_script = perl_generate_script,
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index cbce2545da45..56ba07cce549 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -73,6 +73,35 @@ static void pydict_set_item_string_decref(PyObject *dict, const char *key, PyObj
Py_DECREF(val);
}
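+/*
+ * Helpers for calling handler functions defined in the Python script:
+ * get_handler() looks up a callable by name in __main__, call_object()
+ * invokes it and dies if the call fails, try_call_object() does both.
+ */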
+static PyObject *get_handler(const char *handler_name)
+{
+ PyObject *handler;
+
+ handler = PyDict_GetItemString(main_dict, handler_name);
+ if (handler && !PyCallable_Check(handler))
+ return NULL;
+ return handler;
+}
+
+static void call_object(PyObject *handler, PyObject *args, const char *die_msg)
+{
+ PyObject *retval;
+
+ retval = PyObject_CallObject(handler, args);
+ if (retval == NULL)
+ handler_call_die(die_msg);
+ Py_DECREF(retval);
+}
+
+static void try_call_object(const char *handler_name, PyObject *args)
+{
+ PyObject *handler;
+
+ handler = get_handler(handler_name);
+ if (handler)
+ call_object(handler, args, handler_name);
+}
+
static void define_value(enum print_arg_type field_type,
const char *ev_name,
const char *field_name,
@@ -80,7 +109,7 @@ static void define_value(enum print_arg_type field_type,
const char *field_str)
{
const char *handler_name = "define_flag_value";
- PyObject *handler, *t, *retval;
+ PyObject *t;
unsigned long long value;
unsigned n = 0;
@@ -98,13 +127,7 @@ static void define_value(enum print_arg_type field_type,
PyTuple_SetItem(t, n++, PyInt_FromLong(value));
PyTuple_SetItem(t, n++, PyString_FromString(field_str));
- handler = PyDict_GetItemString(main_dict, handler_name);
- if (handler && PyCallable_Check(handler)) {
- retval = PyObject_CallObject(handler, t);
- if (retval == NULL)
- handler_call_die(handler_name);
- Py_DECREF(retval);
- }
+ try_call_object(handler_name, t);
Py_DECREF(t);
}
@@ -127,7 +150,7 @@ static void define_field(enum print_arg_type field_type,
const char *delim)
{
const char *handler_name = "define_flag_field";
- PyObject *handler, *t, *retval;
+ PyObject *t;
unsigned n = 0;
if (field_type == PRINT_SYMBOL)
@@ -145,13 +168,7 @@ static void define_field(enum print_arg_type field_type,
if (field_type == PRINT_FLAGS)
PyTuple_SetItem(t, n++, PyString_FromString(delim));
- handler = PyDict_GetItemString(main_dict, handler_name);
- if (handler && PyCallable_Check(handler)) {
- retval = PyObject_CallObject(handler, t);
- if (retval == NULL)
- handler_call_die(handler_name);
- Py_DECREF(retval);
- }
+ try_call_object(handler_name, t);
Py_DECREF(t);
}
@@ -362,7 +379,7 @@ static void python_process_tracepoint(struct perf_sample *sample,
struct thread *thread,
struct addr_location *al)
{
- PyObject *handler, *retval, *context, *t, *obj, *callchain;
+ PyObject *handler, *context, *t, *obj, *callchain;
PyObject *dict = NULL;
static char handler_name[256];
struct format_field *field;
@@ -387,9 +404,7 @@ static void python_process_tracepoint(struct perf_sample *sample,
sprintf(handler_name, "%s__%s", event->system, event->name);
- handler = PyDict_GetItemString(main_dict, handler_name);
- if (handler && !PyCallable_Check(handler))
- handler = NULL;
+ handler = get_handler(handler_name);
if (!handler) {
dict = PyDict_New();
if (!dict)
@@ -450,19 +465,9 @@ static void python_process_tracepoint(struct perf_sample *sample,
Py_FatalError("error resizing Python tuple");
if (handler) {
- retval = PyObject_CallObject(handler, t);
- if (retval == NULL)
- handler_call_die(handler_name);
- Py_DECREF(retval);
+ call_object(handler, t, handler_name);
} else {
- handler = PyDict_GetItemString(main_dict, "trace_unhandled");
- if (handler && PyCallable_Check(handler)) {
-
- retval = PyObject_CallObject(handler, t);
- if (retval == NULL)
- handler_call_die("trace_unhandled");
- Py_DECREF(retval);
- }
+ try_call_object("trace_unhandled", t);
Py_DECREF(dict);
}
@@ -474,7 +479,7 @@ static void python_process_general_event(struct perf_sample *sample,
struct thread *thread,
struct addr_location *al)
{
- PyObject *handler, *retval, *t, *dict, *callchain, *dict_sample;
+ PyObject *handler, *t, *dict, *callchain, *dict_sample;
static char handler_name[64];
unsigned n = 0;
@@ -496,8 +501,8 @@ static void python_process_general_event(struct perf_sample *sample,
snprintf(handler_name, sizeof(handler_name), "%s", "process_event");
- handler = PyDict_GetItemString(main_dict, handler_name);
- if (!handler || !PyCallable_Check(handler))
+ handler = get_handler(handler_name);
+ if (!handler)
goto exit;
pydict_set_item_string_decref(dict, "ev_name", PyString_FromString(perf_evsel__name(evsel)));
@@ -539,10 +544,7 @@ static void python_process_general_event(struct perf_sample *sample,
if (_PyTuple_Resize(&t, n) == -1)
Py_FatalError("error resizing Python tuple");
- retval = PyObject_CallObject(handler, t);
- if (retval == NULL)
- handler_call_die(handler_name);
- Py_DECREF(retval);
+ call_object(handler, t, handler_name);
exit:
Py_DECREF(dict);
Py_DECREF(t);
@@ -566,36 +568,24 @@ static void python_process_event(union perf_event *event __maybe_unused,
static int run_start_sub(void)
{
- PyObject *handler, *retval;
- int err = 0;
-
main_module = PyImport_AddModule("__main__");
if (main_module == NULL)
return -1;
Py_INCREF(main_module);
main_dict = PyModule_GetDict(main_module);
- if (main_dict == NULL) {
- err = -1;
+ if (main_dict == NULL)
goto error;
- }
Py_INCREF(main_dict);
- handler = PyDict_GetItemString(main_dict, "trace_begin");
- if (handler == NULL || !PyCallable_Check(handler))
- goto out;
+ try_call_object("trace_begin", NULL);
- retval = PyObject_CallObject(handler, NULL);
- if (retval == NULL)
- handler_call_die("trace_begin");
+ return 0;
- Py_DECREF(retval);
- return err;
error:
Py_XDECREF(main_dict);
Py_XDECREF(main_module);
-out:
- return err;
+ return -1;
}
/*
@@ -649,28 +639,23 @@ error:
return err;
}
+static int python_flush_script(void)
+{
+ return 0;
+}
+
/*
* Stop trace script
*/
static int python_stop_script(void)
{
- PyObject *handler, *retval;
- int err = 0;
+ try_call_object("trace_end", NULL);
- handler = PyDict_GetItemString(main_dict, "trace_end");
- if (handler == NULL || !PyCallable_Check(handler))
- goto out;
-
- retval = PyObject_CallObject(handler, NULL);
- if (retval == NULL)
- handler_call_die("trace_end");
- Py_DECREF(retval);
-out:
Py_XDECREF(main_dict);
Py_XDECREF(main_module);
Py_Finalize();
- return err;
+ return 0;
}
static int python_generate_script(struct pevent *pevent, const char *outfile)
@@ -843,6 +828,7 @@ static int python_generate_script(struct pevent *pevent, const char *outfile)
struct scripting_ops python_scripting_ops = {
.name = "Python",
.start_script = python_start_script,
+ .flush_script = python_flush_script,
.stop_script = python_stop_script,
.process_event = python_process_event,
.generate_script = python_generate_script,
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 88dfef70c13d..883406f4b381 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -14,6 +14,7 @@
#include "util.h"
#include "cpumap.h"
#include "perf_regs.h"
+#include "asm/bug.h"
static int perf_session__open(struct perf_session *session)
{
@@ -66,6 +67,25 @@ static void perf_session__destroy_kernel_maps(struct perf_session *session)
machines__destroy_kernel_maps(&session->machines);
}
+static bool perf_session__has_comm_exec(struct perf_session *session)
+{
+ struct perf_evsel *evsel;
+
+ evlist__for_each(session->evlist, evsel) {
+ if (evsel->attr.comm_exec)
+ return true;
+ }
+
+ return false;
+}
+
+static void perf_session__set_comm_exec(struct perf_session *session)
+{
+ bool comm_exec = perf_session__has_comm_exec(session);
+
+ machines__set_comm_exec(&session->machines, comm_exec);
+}
+
struct perf_session *perf_session__new(struct perf_data_file *file,
bool repipe, struct perf_tool *tool)
{
@@ -75,9 +95,7 @@ struct perf_session *perf_session__new(struct perf_data_file *file,
goto out;
session->repipe = repipe;
- INIT_LIST_HEAD(&session->ordered_samples.samples);
- INIT_LIST_HEAD(&session->ordered_samples.sample_cache);
- INIT_LIST_HEAD(&session->ordered_samples.to_free);
+ ordered_events__init(&session->ordered_events);
machines__init(&session->machines);
if (file) {
@@ -91,6 +109,7 @@ struct perf_session *perf_session__new(struct perf_data_file *file,
goto out_close;
perf_session__set_id_hdr_size(session);
+ perf_session__set_comm_exec(session);
}
}
@@ -100,13 +119,13 @@ struct perf_session *perf_session__new(struct perf_data_file *file,
* kernel MMAP event, in perf_event__process_mmap().
*/
if (perf_session__create_kernel_maps(session) < 0)
- goto out_delete;
+ pr_warning("Cannot read kernel map\n");
}
if (tool && tool->ordering_requires_timestamps &&
- tool->ordered_samples && !perf_evlist__sample_id_all(session->evlist)) {
+ tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
- tool->ordered_samples = false;
+ tool->ordered_events = false;
}
return session;
@@ -238,7 +257,7 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
if (tool->build_id == NULL)
tool->build_id = process_finished_round_stub;
if (tool->finished_round == NULL) {
- if (tool->ordered_samples)
+ if (tool->ordered_events)
tool->finished_round = process_finished_round;
else
tool->finished_round = process_finished_round_stub;
@@ -444,87 +463,6 @@ static perf_event__swap_op perf_event__swap_ops[] = {
[PERF_RECORD_HEADER_MAX] = NULL,
};
-struct sample_queue {
- u64 timestamp;
- u64 file_offset;
- union perf_event *event;
- struct list_head list;
-};
-
-static void perf_session_free_sample_buffers(struct perf_session *session)
-{
- struct ordered_samples *os = &session->ordered_samples;
-
- while (!list_empty(&os->to_free)) {
- struct sample_queue *sq;
-
- sq = list_entry(os->to_free.next, struct sample_queue, list);
- list_del(&sq->list);
- free(sq);
- }
-}
-
-static int perf_session_deliver_event(struct perf_session *session,
- union perf_event *event,
- struct perf_sample *sample,
- struct perf_tool *tool,
- u64 file_offset);
-
-static int flush_sample_queue(struct perf_session *s,
- struct perf_tool *tool)
-{
- struct ordered_samples *os = &s->ordered_samples;
- struct list_head *head = &os->samples;
- struct sample_queue *tmp, *iter;
- struct perf_sample sample;
- u64 limit = os->next_flush;
- u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
- bool show_progress = limit == ULLONG_MAX;
- struct ui_progress prog;
- int ret;
-
- if (!tool->ordered_samples || !limit)
- return 0;
-
- if (show_progress)
- ui_progress__init(&prog, os->nr_samples, "Processing time ordered events...");
-
- list_for_each_entry_safe(iter, tmp, head, list) {
- if (session_done())
- return 0;
-
- if (iter->timestamp > limit)
- break;
-
- ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample);
- if (ret)
- pr_err("Can't parse sample, err = %d\n", ret);
- else {
- ret = perf_session_deliver_event(s, iter->event, &sample, tool,
- iter->file_offset);
- if (ret)
- return ret;
- }
-
- os->last_flush = iter->timestamp;
- list_del(&iter->list);
- list_add(&iter->list, &os->sample_cache);
- os->nr_samples--;
-
- if (show_progress)
- ui_progress__update(&prog, 1);
- }
-
- if (list_empty(head)) {
- os->last_sample = NULL;
- } else if (last_ts <= limit) {
- os->last_sample =
- list_entry(head->prev, struct sample_queue, list);
- }
-
- return 0;
-}
-
/*
* When perf record finishes a pass over every buffer, it records this pseudo
* event.
@@ -568,99 +506,43 @@ static int process_finished_round(struct perf_tool *tool,
union perf_event *event __maybe_unused,
struct perf_session *session)
{
- int ret = flush_sample_queue(session, tool);
- if (!ret)
- session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;
-
- return ret;
-}
-
-/* The queue is ordered by time */
-static void __queue_event(struct sample_queue *new, struct perf_session *s)
-{
- struct ordered_samples *os = &s->ordered_samples;
- struct sample_queue *sample = os->last_sample;
- u64 timestamp = new->timestamp;
- struct list_head *p;
-
- ++os->nr_samples;
- os->last_sample = new;
-
- if (!sample) {
- list_add(&new->list, &os->samples);
- os->max_timestamp = timestamp;
- return;
- }
-
- /*
- * last_sample might point to some random place in the list as it's
- * the last queued event. We expect that the new event is close to
- * this.
- */
- if (sample->timestamp <= timestamp) {
- while (sample->timestamp <= timestamp) {
- p = sample->list.next;
- if (p == &os->samples) {
- list_add_tail(&new->list, &os->samples);
- os->max_timestamp = timestamp;
- return;
- }
- sample = list_entry(p, struct sample_queue, list);
- }
- list_add_tail(&new->list, &sample->list);
- } else {
- while (sample->timestamp > timestamp) {
- p = sample->list.prev;
- if (p == &os->samples) {
- list_add(&new->list, &os->samples);
- return;
- }
- sample = list_entry(p, struct sample_queue, list);
- }
- list_add(&new->list, &sample->list);
- }
+ return ordered_events__flush(session, tool, OE_FLUSH__ROUND);
}
-#define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct sample_queue))
-
int perf_session_queue_event(struct perf_session *s, union perf_event *event,
- struct perf_sample *sample, u64 file_offset)
+ struct perf_tool *tool, struct perf_sample *sample,
+ u64 file_offset)
{
- struct ordered_samples *os = &s->ordered_samples;
- struct list_head *sc = &os->sample_cache;
+ struct ordered_events *oe = &s->ordered_events;
u64 timestamp = sample->time;
- struct sample_queue *new;
+ struct ordered_event *new;
if (!timestamp || timestamp == ~0ULL)
return -ETIME;
- if (timestamp < s->ordered_samples.last_flush) {
- printf("Warning: Timestamp below last timeslice flush\n");
- return -EINVAL;
+ if (timestamp < oe->last_flush) {
+ WARN_ONCE(1, "Timestamp below last timeslice flush\n");
+
+ pr_oe_time(timestamp, "out of order event");
+ pr_oe_time(oe->last_flush, "last flush, last_flush_type %d\n",
+ oe->last_flush_type);
+
+ /* We could get out of order messages after forced flush. */
+ if (oe->last_flush_type != OE_FLUSH__HALF)
+ return -EINVAL;
}
- if (!list_empty(sc)) {
- new = list_entry(sc->next, struct sample_queue, list);
- list_del(&new->list);
- } else if (os->sample_buffer) {
- new = os->sample_buffer + os->sample_buffer_idx;
- if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER)
- os->sample_buffer = NULL;
- } else {
- os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
- if (!os->sample_buffer)
- return -ENOMEM;
- list_add(&os->sample_buffer->list, &os->to_free);
- os->sample_buffer_idx = 2;
- new = os->sample_buffer + 1;
+ new = ordered_events__new(oe, timestamp);
+ if (!new) {
+ ordered_events__flush(s, tool, OE_FLUSH__HALF);
+ new = ordered_events__new(oe, timestamp);
}
- new->timestamp = timestamp;
+ if (!new)
+ return -ENOMEM;
+
new->file_offset = file_offset;
new->event = event;
-
- __queue_event(new, s);
-
return 0;
}
@@ -920,11 +802,10 @@ perf_session__deliver_sample(struct perf_session *session,
&sample->read.one, machine);
}
-static int perf_session_deliver_event(struct perf_session *session,
- union perf_event *event,
- struct perf_sample *sample,
- struct perf_tool *tool,
- u64 file_offset)
+int perf_session__deliver_event(struct perf_session *session,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct perf_tool *tool, u64 file_offset)
{
struct perf_evsel *evsel;
struct machine *machine;
@@ -1005,8 +886,10 @@ static s64 perf_session__process_user_event(struct perf_session *session,
switch (event->header.type) {
case PERF_RECORD_HEADER_ATTR:
err = tool->attr(tool, event, &session->evlist);
- if (err == 0)
+ if (err == 0) {
perf_session__set_id_hdr_size(session);
+ perf_session__set_comm_exec(session);
+ }
return err;
case PERF_RECORD_HEADER_EVENT_TYPE:
/*
@@ -1036,6 +919,61 @@ static void event_swap(union perf_event *event, bool sample_id_all)
swap(event, sample_id_all);
}
+int perf_session__peek_event(struct perf_session *session, off_t file_offset,
+ void *buf, size_t buf_sz,
+ union perf_event **event_ptr,
+ struct perf_sample *sample)
+{
+ union perf_event *event;
+ size_t hdr_sz, rest;
+ int fd;
+
+ if (session->one_mmap && !session->header.needs_swap) {
+ event = file_offset - session->one_mmap_offset +
+ session->one_mmap_addr;
+ goto out_parse_sample;
+ }
+
+ if (perf_data_file__is_pipe(session->file))
+ return -1;
+
+ fd = perf_data_file__fd(session->file);
+ hdr_sz = sizeof(struct perf_event_header);
+
+ if (buf_sz < hdr_sz)
+ return -1;
+
+ if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
+ readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
+ return -1;
+
+ event = (union perf_event *)buf;
+
+ if (session->header.needs_swap)
+ perf_event_header__bswap(&event->header);
+
+ if (event->header.size < hdr_sz)
+ return -1;
+
+ rest = event->header.size - hdr_sz;
+
+ if (readn(fd, buf, rest) != (ssize_t)rest)
+ return -1;
+
+ if (session->header.needs_swap)
+ event_swap(event, perf_evlist__sample_id_all(session->evlist));
+
+out_parse_sample:
+
+ if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
+ perf_evlist__parse_sample(session->evlist, event, sample))
+ return -1;
+
+ *event_ptr = event;
+
+ return 0;
+}
+
static s64 perf_session__process_event(struct perf_session *session,
union perf_event *event,
struct perf_tool *tool,
@@ -1062,15 +1000,15 @@ static s64 perf_session__process_event(struct perf_session *session,
if (ret)
return ret;
- if (tool->ordered_samples) {
- ret = perf_session_queue_event(session, event, &sample,
+ if (tool->ordered_events) {
+ ret = perf_session_queue_event(session, event, tool, &sample,
file_offset);
if (ret != -ETIME)
return ret;
}
- return perf_session_deliver_event(session, event, &sample, tool,
- file_offset);
+ return perf_session__deliver_event(session, event, &sample, tool,
+ file_offset);
}
void perf_event_header__bswap(struct perf_event_header *hdr)
@@ -1222,12 +1160,11 @@ more:
goto more;
done:
/* do the final flush for ordered samples */
- session->ordered_samples.next_flush = ULLONG_MAX;
- err = flush_sample_queue(session, tool);
+ err = ordered_events__flush(session, tool, OE_FLUSH__FINAL);
out_err:
free(buf);
perf_session__warn_about_errors(session, tool);
- perf_session_free_sample_buffers(session);
+ ordered_events__free(&session->ordered_events);
return err;
}
@@ -1368,12 +1305,11 @@ more:
out:
/* do the final flush for ordered samples */
- session->ordered_samples.next_flush = ULLONG_MAX;
- err = flush_sample_queue(session, tool);
+ err = ordered_events__flush(session, tool, OE_FLUSH__FINAL);
out_err:
ui_progress__finish();
perf_session__warn_about_errors(session, tool);
- perf_session_free_sample_buffers(session);
+ ordered_events__free(&session->ordered_events);
session->one_mmap = false;
return err;
}
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 0321013bd9fd..ffb440462008 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -9,26 +9,13 @@
#include "symbol.h"
#include "thread.h"
#include "data.h"
+#include "ordered-events.h"
#include <linux/rbtree.h>
#include <linux/perf_event.h>
-struct sample_queue;
struct ip_callchain;
struct thread;
-struct ordered_samples {
- u64 last_flush;
- u64 next_flush;
- u64 max_timestamp;
- struct list_head samples;
- struct list_head sample_cache;
- struct list_head to_free;
- struct sample_queue *sample_buffer;
- struct sample_queue *last_sample;
- int sample_buffer_idx;
- unsigned int nr_samples;
-};
-
struct perf_session {
struct perf_header header;
struct machines machines;
@@ -39,7 +26,7 @@ struct perf_session {
bool one_mmap;
void *one_mmap_addr;
u64 one_mmap_offset;
- struct ordered_samples ordered_samples;
+ struct ordered_events ordered_events;
struct perf_data_file *file;
};
@@ -58,6 +45,11 @@ void perf_session__delete(struct perf_session *session);
void perf_event_header__bswap(struct perf_event_header *hdr);
+int perf_session__peek_event(struct perf_session *session, off_t file_offset,
+ void *buf, size_t buf_sz,
+ union perf_event **event_ptr,
+ struct perf_sample *sample);
+
int __perf_session__process_events(struct perf_session *session,
u64 data_offset, u64 data_size, u64 size,
struct perf_tool *tool);
@@ -65,10 +57,16 @@ int perf_session__process_events(struct perf_session *session,
struct perf_tool *tool);
int perf_session_queue_event(struct perf_session *s, union perf_event *event,
- struct perf_sample *sample, u64 file_offset);
+ struct perf_tool *tool, struct perf_sample *sample,
+ u64 file_offset);
void perf_tool__fill_defaults(struct perf_tool *tool);
+int perf_session__deliver_event(struct perf_session *session,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct perf_tool *tool, u64 file_offset);
+
int perf_session__resolve_callchain(struct perf_session *session,
struct perf_evsel *evsel,
struct thread *thread,
@@ -128,5 +126,5 @@ int __perf_session__set_tracepoints_handlers(struct perf_session *session,
extern volatile int session_done;
-#define session_done() (*(volatile int *)(&session_done))
+#define session_done() ACCESS_ONCE(session_done)
#endif /* __PERF_SESSION_H */
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 14e5a039bc45..289df9d1e65a 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -70,12 +70,14 @@ static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
const char *comm = thread__comm_str(he->thread);
- return repsep_snprintf(bf, size, "%*s:%5d", width - 6,
- comm ?: "", he->thread->tid);
+
+ width = max(7U, width) - 6;
+ return repsep_snprintf(bf, size, "%5d:%-*.*s", he->thread->tid,
+ width, width, comm ?: "");
}
struct sort_entry sort_thread = {
- .se_header = "Command: Pid",
+ .se_header = " Pid:Command",
.se_cmp = sort__thread_cmp,
.se_snprintf = hist_entry__thread_snprintf,
.se_width_idx = HISTC_THREAD,
@@ -106,7 +108,7 @@ sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
- return repsep_snprintf(bf, size, "%*s", width, comm__str(he->comm));
+ return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
}
struct sort_entry sort_comm = {
@@ -152,10 +154,10 @@ static int _hist_entry__dso_snprintf(struct map *map, char *bf,
if (map && map->dso) {
const char *dso_name = !verbose ? map->dso->short_name :
map->dso->long_name;
- return repsep_snprintf(bf, size, "%-*s", width, dso_name);
+ return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
}
- return repsep_snprintf(bf, size, "%-*s", width, "[unknown]");
+ return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
}
static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
@@ -257,7 +259,10 @@ static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
width - ret, "");
}
- return ret;
+ if (ret > width)
+ bf[width] = '\0';
+
+ return width;
}
static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
@@ -302,10 +307,9 @@ sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
}
static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
- size_t size,
- unsigned int width __maybe_unused)
+ size_t size, unsigned int width)
{
- return repsep_snprintf(bf, size, "%s", he->srcline);
+ return repsep_snprintf(bf, size, "%*.*-s", width, width, he->srcline);
}
struct sort_entry sort_srcline = {
@@ -332,7 +336,7 @@ sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
- return repsep_snprintf(bf, size, "%-*s", width,
+ return repsep_snprintf(bf, size, "%-*.*s", width, width,
he->parent ? he->parent->name : "[other]");
}
@@ -354,7 +358,7 @@ sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
- return repsep_snprintf(bf, size, "%*d", width, he->cpu);
+ return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
}
struct sort_entry sort_cpu = {
@@ -484,7 +488,7 @@ static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
else if (he->branch_info->flags.mispred)
out = "Y";
- return repsep_snprintf(bf, size, "%-*s", width, out);
+ return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
}
/* --sort daddr_sym */
@@ -1194,7 +1198,7 @@ bool perf_hpp__same_sort_entry(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
return hse_a->se == hse_b->se;
}
-void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists)
+void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
struct hpp_sort_entry *hse;
@@ -1202,20 +1206,21 @@ void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists)
return;
hse = container_of(fmt, struct hpp_sort_entry, hpp);
- hists__new_col_len(hists, hse->se->se_width_idx,
- strlen(hse->se->se_header));
+ hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
}
static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct perf_evsel *evsel)
{
struct hpp_sort_entry *hse;
- size_t len;
+ size_t len = fmt->user_len;
hse = container_of(fmt, struct hpp_sort_entry, hpp);
- len = hists__col_len(&evsel->hists, hse->se->se_width_idx);
- return scnprintf(hpp->buf, hpp->size, "%-*s", len, hse->se->se_header);
+ if (!len)
+ len = hists__col_len(&evsel->hists, hse->se->se_width_idx);
+
+ return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
}
static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
@@ -1223,20 +1228,26 @@ static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
struct perf_evsel *evsel)
{
struct hpp_sort_entry *hse;
+ size_t len = fmt->user_len;
hse = container_of(fmt, struct hpp_sort_entry, hpp);
- return hists__col_len(&evsel->hists, hse->se->se_width_idx);
+ if (!len)
+ len = hists__col_len(&evsel->hists, hse->se->se_width_idx);
+
+ return len;
}
static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
struct hpp_sort_entry *hse;
- size_t len;
+ size_t len = fmt->user_len;
hse = container_of(fmt, struct hpp_sort_entry, hpp);
- len = hists__col_len(he->hists, hse->se->se_width_idx);
+
+ if (!len)
+ len = hists__col_len(he->hists, hse->se->se_width_idx);
return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
}
@@ -1253,6 +1264,7 @@ __sort_dimension__alloc_hpp(struct sort_dimension *sd)
}
hse->se = sd->entry;
+ hse->hpp.name = sd->entry->se_header;
hse->hpp.header = __sort__hpp_header;
hse->hpp.width = __sort__hpp_width;
hse->hpp.entry = __sort__hpp_entry;
@@ -1265,6 +1277,8 @@ __sort_dimension__alloc_hpp(struct sort_dimension *sd)
INIT_LIST_HEAD(&hse->hpp.list);
INIT_LIST_HEAD(&hse->hpp.sort_list);
hse->hpp.elide = false;
+ hse->hpp.len = 0;
+ hse->hpp.user_len = 0;
return hse;
}
@@ -1432,14 +1446,49 @@ static const char *get_default_sort_order(void)
return default_sort_orders[sort__mode];
}
+static int setup_sort_order(void)
+{
+ char *new_sort_order;
+
+ /*
+ * Append '+'-prefixed sort order to the default sort
+ * order string.
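+ * (for example, a "+srcline" argument expands to "<default sort keys>,srcline").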
+ */
+ if (!sort_order || is_strict_order(sort_order))
+ return 0;
+
+ if (sort_order[1] == '\0') {
+ error("Invalid --sort key: `+'");
+ return -EINVAL;
+ }
+
+ /*
+ * We allocate new sort_order string, but we never free it,
+ * because it's checked over the rest of the code.
+ */
+ if (asprintf(&new_sort_order, "%s,%s",
+ get_default_sort_order(), sort_order + 1) < 0) {
+ error("Not enough memory to set up --sort");
+ return -ENOMEM;
+ }
+
+ sort_order = new_sort_order;
+ return 0;
+}
+
static int __setup_sorting(void)
{
char *tmp, *tok, *str;
- const char *sort_keys = sort_order;
+ const char *sort_keys;
int ret = 0;
+ ret = setup_sort_order();
+ if (ret)
+ return ret;
+
+ sort_keys = sort_order;
if (sort_keys == NULL) {
- if (field_order) {
+ if (is_strict_order(field_order)) {
/*
* If user specified field order but no sort order,
* we'll honor it and not add default sort orders.
@@ -1625,23 +1674,36 @@ static void reset_dimensions(void)
memory_sort_dimensions[i].taken = 0;
}
+bool is_strict_order(const char *order)
+{
+ return order && (*order != '+');
+}
+
static int __setup_output_field(void)
{
- char *tmp, *tok, *str;
- int ret = 0;
+ char *tmp, *tok, *str, *strp;
+ int ret = -EINVAL;
if (field_order == NULL)
return 0;
reset_dimensions();
- str = strdup(field_order);
+ strp = str = strdup(field_order);
if (str == NULL) {
error("Not enough memory to setup output fields");
return -ENOMEM;
}
- for (tok = strtok_r(str, ", ", &tmp);
+ if (!is_strict_order(field_order))
+ strp++;
+
+ if (!strlen(strp)) {
+ error("Invalid --fields key: `+'");
+ goto out;
+ }
+
+ for (tok = strtok_r(strp, ", ", &tmp);
tok; tok = strtok_r(NULL, ", ", &tmp)) {
ret = output_field_add(tok);
if (ret == -EINVAL) {
@@ -1653,6 +1715,7 @@ static int __setup_output_field(void)
}
}
+out:
free(str);
return ret;
}
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index 041f0c9cea2b..c03e4ff8beff 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -218,4 +218,5 @@ void perf_hpp__set_elide(int idx, bool elide);
int report_parse_ignore_callees_opt(const struct option *opt, const char *arg, int unset);
+bool is_strict_order(const char *order);
#endif /* __PERF_SORT_H */
diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
index 2553e5b55b89..d87767f76903 100644
--- a/tools/perf/util/string.c
+++ b/tools/perf/util/string.c
@@ -9,78 +9,48 @@
*/
s64 perf_atoll(const char *str)
{
- unsigned int i;
- s64 length = -1, unit = 1;
+ s64 length;
+ char *p;
+ char c;
if (!isdigit(str[0]))
goto out_err;
- for (i = 1; i < strlen(str); i++) {
- switch (str[i]) {
- case 'B':
- case 'b':
- break;
- case 'K':
- if (str[i + 1] != 'B')
- goto out_err;
- else
- goto kilo;
- case 'k':
- if (str[i + 1] != 'b')
- goto out_err;
-kilo:
- unit = K;
- break;
- case 'M':
- if (str[i + 1] != 'B')
- goto out_err;
- else
- goto mega;
- case 'm':
- if (str[i + 1] != 'b')
+ length = strtoll(str, &p, 10);
+ switch (c = *p++) {
+ case 'b': case 'B':
+ if (*p)
goto out_err;
-mega:
- unit = K * K;
+ case '\0':
+ return length;
+ default:
+ goto out_err;
+ /* two-letter suffixes */
+ case 'k': case 'K':
+ length <<= 10;
break;
- case 'G':
- if (str[i + 1] != 'B')
- goto out_err;
- else
- goto giga;
- case 'g':
- if (str[i + 1] != 'b')
- goto out_err;
-giga:
- unit = K * K * K;
+ case 'm': case 'M':
+ length <<= 20;
break;
- case 'T':
- if (str[i + 1] != 'B')
- goto out_err;
- else
- goto tera;
- case 't':
- if (str[i + 1] != 'b')
- goto out_err;
-tera:
- unit = K * K * K * K;
+ case 'g': case 'G':
+ length <<= 30;
break;
- case '\0': /* only specified figures */
- unit = 1;
+ case 't': case 'T':
+ length <<= 40;
break;
- default:
- if (!isdigit(str[i]))
- goto out_err;
- break;
- }
}
-
- length = atoll(str) * unit;
- goto out;
+ /* we want the cases to match */
+ if (islower(c)) {
+ if (strcmp(p, "b") != 0)
+ goto out_err;
+ } else {
+ if (strcmp(p, "B") != 0)
+ goto out_err;
+ }
+ return length;
out_err:
- length = -1;
-out:
- return length;
+ return -1;
}
/*
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index d75349979e65..1e23a5bfb044 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -6,6 +6,7 @@
#include <inttypes.h>
#include "symbol.h"
+#include "machine.h"
#include "vdso.h"
#include <symbol/kallsyms.h>
#include "debug.h"
@@ -680,6 +681,11 @@ static u64 ref_reloc(struct kmap *kmap)
return 0;
}
+static bool want_demangle(bool is_kernel_sym)
+{
+ return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle;
+}
+
int dso__load_sym(struct dso *dso, struct map *map,
struct symsrc *syms_ss, struct symsrc *runtime_ss,
symbol_filter_t filter, int kmodule)
@@ -712,6 +718,14 @@ int dso__load_sym(struct dso *dso, struct map *map,
symbols__delete(&dso->symbols[map->type]);
if (!syms_ss->symtab) {
+ /*
+ * If the vmlinux is stripped, fail so we will fall back
+ * to using kallsyms. The vmlinux runtime symbols aren't
+ * of much use.
+ */
+ if (dso->kernel)
+ goto out_elf_end;
+
syms_ss->symtab = syms_ss->dynsym;
syms_ss->symshdr = syms_ss->dynshdr;
}
@@ -736,7 +750,7 @@ int dso__load_sym(struct dso *dso, struct map *map,
if (symstrs == NULL)
goto out_elf_end;
- sec_strndx = elf_getscn(elf, ehdr.e_shstrndx);
+ sec_strndx = elf_getscn(runtime_ss->elf, runtime_ss->ehdr.e_shstrndx);
if (sec_strndx == NULL)
goto out_elf_end;
@@ -916,7 +930,11 @@ int dso__load_sym(struct dso *dso, struct map *map,
}
curr_dso->symtab_type = dso->symtab_type;
map_groups__insert(kmap->kmaps, curr_map);
- dsos__add(&dso->node, curr_dso);
+ /*
+ * The new DSO should go to the kernel DSOs
+ */
+ dsos__add(&map->groups->machine->kernel_dsos,
+ curr_dso);
dso__set_loaded(curr_dso, map->type);
} else
curr_dso = curr_map->dso;
@@ -938,9 +956,12 @@ new_symbol:
* DWARF DW_compile_unit has this, but we don't always have access
* to it...
*/
- if (symbol_conf.demangle) {
- demangled = bfd_demangle(NULL, elf_name,
- DMGL_PARAMS | DMGL_ANSI);
+ if (want_demangle(dso->kernel || kmodule)) {
+ int demangle_flags = DMGL_NO_OPTS;
+ if (verbose)
+ demangle_flags = DMGL_PARAMS | DMGL_ANSI;
+
+ demangled = bfd_demangle(NULL, elf_name, demangle_flags);
if (demangled != NULL)
elf_name = demangled;
}
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index eb06746b06b2..be84f7a9838b 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -15,6 +15,7 @@
#include "machine.h"
#include "symbol.h"
#include "strlist.h"
+#include "header.h"
#include <elf.h>
#include <limits.h>
@@ -33,6 +34,7 @@ struct symbol_conf symbol_conf = {
.try_vmlinux_path = true,
.annotate_src = true,
.demangle = true,
+ .demangle_kernel = false,
.cumulate_callchain = true,
.show_hist_headers = true,
.symfs = "",
@@ -523,10 +525,15 @@ struct process_kallsyms_args {
struct dso *dso;
};
+/*
+ * These are symbols in the kernel image, so make sure that
+ * sym is from a kernel DSO.
+ */
bool symbol__is_idle(struct symbol *sym)
{
const char * const idle_symbols[] = {
"cpu_idle",
+ "cpu_startup_entry",
"intel_idle",
"default_idle",
"native_safe_halt",
@@ -1468,8 +1475,7 @@ int dso__load_vmlinux(struct dso *dso, struct map *map,
if (vmlinux[0] == '/')
snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s", vmlinux);
else
- snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s%s",
- symbol_conf.symfs, vmlinux);
+ symbol__join_symfs(symfs_vmlinux, vmlinux);
if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
@@ -1745,12 +1751,13 @@ static void vmlinux_path__exit(void)
zfree(&vmlinux_path);
}
-static int vmlinux_path__init(void)
+static int vmlinux_path__init(struct perf_session_env *env)
{
struct utsname uts;
char bf[PATH_MAX];
+ char *kernel_version;
- vmlinux_path = malloc(sizeof(char *) * 5);
+ vmlinux_path = malloc(sizeof(char *) * 6);
if (vmlinux_path == NULL)
return -1;
@@ -1763,25 +1770,37 @@ static int vmlinux_path__init(void)
goto out_fail;
++vmlinux_path__nr_entries;
- /* only try running kernel version if no symfs was given */
+ /* only try kernel version if no symfs was given */
if (symbol_conf.symfs[0] != 0)
return 0;
- if (uname(&uts) < 0)
- return -1;
+ if (env) {
+ kernel_version = env->os_release;
+ } else {
+ if (uname(&uts) < 0)
+ goto out_fail;
- snprintf(bf, sizeof(bf), "/boot/vmlinux-%s", uts.release);
+ kernel_version = uts.release;
+ }
+
+ snprintf(bf, sizeof(bf), "/boot/vmlinux-%s", kernel_version);
vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
goto out_fail;
++vmlinux_path__nr_entries;
- snprintf(bf, sizeof(bf), "/lib/modules/%s/build/vmlinux", uts.release);
+ snprintf(bf, sizeof(bf), "/usr/lib/debug/boot/vmlinux-%s",
+ kernel_version);
+ vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
+ if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
+ goto out_fail;
+ ++vmlinux_path__nr_entries;
+ snprintf(bf, sizeof(bf), "/lib/modules/%s/build/vmlinux", kernel_version);
vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
goto out_fail;
++vmlinux_path__nr_entries;
snprintf(bf, sizeof(bf), "/usr/lib/debug/lib/modules/%s/vmlinux",
- uts.release);
+ kernel_version);
vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
goto out_fail;
@@ -1827,7 +1846,7 @@ static bool symbol__read_kptr_restrict(void)
return value;
}
-int symbol__init(void)
+int symbol__init(struct perf_session_env *env)
{
const char *symfs;
@@ -1842,7 +1861,7 @@ int symbol__init(void)
symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
sizeof(struct symbol));
- if (symbol_conf.try_vmlinux_path && vmlinux_path__init() < 0)
+ if (symbol_conf.try_vmlinux_path && vmlinux_path__init(env) < 0)
return -1;
if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') {
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index e7295e93cff9..bec4b7bd09de 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -13,6 +13,7 @@
#include <libgen.h>
#include "build-id.h"
#include "event.h"
+#include "util.h"
#ifdef HAVE_LIBELF_SUPPORT
#include <libelf.h>
@@ -59,6 +60,7 @@ extern Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
#endif
#ifndef DMGL_PARAMS
+#define DMGL_NO_OPTS 0 /* For readability... */
#define DMGL_PARAMS (1 << 0) /* Include function args */
#define DMGL_ANSI (1 << 1) /* Include const, volatile, etc */
#endif
@@ -118,6 +120,7 @@ struct symbol_conf {
annotate_src,
event_group,
demangle,
+ demangle_kernel,
filter_relative,
show_hist_headers;
const char *vmlinux_name,
@@ -143,6 +146,14 @@ struct symbol_conf {
};
extern struct symbol_conf symbol_conf;
+
+static inline int __symbol__join_symfs(char *bf, size_t size, const char *path)
+{
+ return path__join(bf, size, symbol_conf.symfs, path);
+}
+
+#define symbol__join_symfs(bf, path) __symbol__join_symfs(bf, sizeof(bf), path)
+
extern int vmlinux_path__nr_entries;
extern char **vmlinux_path;
@@ -253,7 +264,8 @@ int modules__parse(const char *filename, void *arg,
int filename__read_debuglink(const char *filename, char *debuglink,
size_t size);
-int symbol__init(void);
+struct perf_session_env;
+int symbol__init(struct perf_session_env *env);
void symbol__exit(void);
void symbol__elf_init(void);
struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name);
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 12c7a253a63c..a9df7f2c6dc9 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -42,7 +42,7 @@ struct thread *thread__new(pid_t pid, pid_t tid)
goto err_thread;
snprintf(comm_str, 32, ":%d", tid);
- comm = comm__new(comm_str, 0);
+ comm = comm__new(comm_str, 0, false);
free(comm_str);
if (!comm)
goto err_thread;
@@ -81,19 +81,33 @@ struct comm *thread__comm(const struct thread *thread)
return list_first_entry(&thread->comm_list, struct comm, list);
}
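+/*
+ * Return the most recent comm that was set by an exec, or, if none was,
+ * the oldest comm on the list (comm_list is kept newest-first).
+ */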
+struct comm *thread__exec_comm(const struct thread *thread)
+{
+ struct comm *comm, *last = NULL;
+
+ list_for_each_entry(comm, &thread->comm_list, list) {
+ if (comm->exec)
+ return comm;
+ last = comm;
+ }
+
+ return last;
+}
+
/* CHECKME: time should always be 0 if events aren't ordered */
-int thread__set_comm(struct thread *thread, const char *str, u64 timestamp)
+int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp,
+ bool exec)
{
struct comm *new, *curr = thread__comm(thread);
int err;
/* Override latest entry if it had no specific time coverage */
- if (!curr->start) {
- err = comm__override(curr, str, timestamp);
+ if (!curr->start && !curr->exec) {
+ err = comm__override(curr, str, timestamp, exec);
if (err)
return err;
} else {
- new = comm__new(str, timestamp);
+ new = comm__new(str, timestamp, exec);
if (!new)
return -ENOMEM;
list_add(&new->list, &thread->comm_list);
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index 716b7723cce2..8c75fa774706 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -38,9 +38,17 @@ static inline void thread__exited(struct thread *thread)
thread->dead = true;
}
-int thread__set_comm(struct thread *thread, const char *comm, u64 timestamp);
+int __thread__set_comm(struct thread *thread, const char *comm, u64 timestamp,
+ bool exec);
+static inline int thread__set_comm(struct thread *thread, const char *comm,
+ u64 timestamp)
+{
+ return __thread__set_comm(thread, comm, timestamp, false);
+}
+
int thread__comm_len(struct thread *thread);
struct comm *thread__comm(const struct thread *thread);
+struct comm *thread__exec_comm(const struct thread *thread);
const char *thread__comm_str(const struct thread *thread);
void thread__insert_map(struct thread *thread, struct map *map);
int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp);
diff --git a/tools/perf/util/tool.h b/tools/perf/util/tool.h
index 4385816d3d49..f11636966a0f 100644
--- a/tools/perf/util/tool.h
+++ b/tools/perf/util/tool.h
@@ -40,7 +40,7 @@ struct perf_tool {
event_op2 tracing_data;
event_op2 finished_round,
build_id;
- bool ordered_samples;
+ bool ordered_events;
bool ordering_requires_timestamps;
};
diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c
index 57aaccc1692e..5c9bdd1591a9 100644
--- a/tools/perf/util/trace-event-scripting.c
+++ b/tools/perf/util/trace-event-scripting.c
@@ -30,6 +30,11 @@
struct scripting_context *scripting_context;
+static int flush_script_unsupported(void)
+{
+ return 0;
+}
+
static int stop_script_unsupported(void)
{
return 0;
@@ -74,6 +79,7 @@ static int python_generate_script_unsupported(struct pevent *pevent
struct scripting_ops python_scripting_unsupported_ops = {
.name = "Python",
.start_script = python_start_script_unsupported,
+ .flush_script = flush_script_unsupported,
.stop_script = stop_script_unsupported,
.process_event = process_event_unsupported,
.generate_script = python_generate_script_unsupported,
@@ -137,6 +143,7 @@ static int perl_generate_script_unsupported(struct pevent *pevent
struct scripting_ops perl_scripting_unsupported_ops = {
.name = "Perl",
.start_script = perl_start_script_unsupported,
+ .flush_script = flush_script_unsupported,
.stop_script = stop_script_unsupported,
.process_event = process_event_unsupported,
.generate_script = perl_generate_script_unsupported,
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index 7b6d68688327..52aaa19e1eb1 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -64,6 +64,7 @@ struct perf_session;
struct scripting_ops {
const char *name;
int (*start_script) (const char *script, int argc, const char **argv);
+ int (*flush_script) (void);
int (*stop_script) (void);
void (*process_event) (union perf_event *event,
struct perf_sample *sample,
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index e52e7461911b..24e8d871b74e 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -13,6 +13,7 @@
#include <limits.h>
#include <byteswap.h>
#include <linux/kernel.h>
+#include <unistd.h>
/*
* XXX We need to find a better place for these things...
@@ -282,6 +283,18 @@ void get_term_dimensions(struct winsize *ws)
ws->ws_col = 80;
}
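+/*
+ * Switch the terminal (fd 0) to quiet input: non-canonical mode, no echo,
+ * and non-blocking reads. The previous settings are saved in *old.
+ */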
+void set_term_quiet_input(struct termios *old)
+{
+ struct termios tc;
+
+ tcgetattr(0, old);
+ tc = *old;
+ tc.c_lflag &= ~(ICANON | ECHO);
+ tc.c_cc[VMIN] = 0;
+ tc.c_cc[VTIME] = 0;
+ tcsetattr(0, TCSANOW, &tc);
+}
+
static void set_tracing_events_path(const char *mountpoint)
{
snprintf(tracing_events_path, sizeof(tracing_events_path), "%s/%s",
@@ -443,6 +456,7 @@ int filename__read_str(const char *filename, char **buf, size_t *sizep)
size_t size = 0, alloc_size = 0;
void *bf = NULL, *nbf;
int fd, n, err = 0;
+ char sbuf[STRERR_BUFSIZE];
fd = open(filename, O_RDONLY);
if (fd < 0)
@@ -463,8 +477,8 @@ int filename__read_str(const char *filename, char **buf, size_t *sizep)
n = read(fd, bf + size, alloc_size - size);
if (n < 0) {
if (size) {
- pr_warning("read failed %d: %s\n",
- errno, strerror(errno));
+ pr_warning("read failed %d: %s\n", errno,
+ strerror_r(errno, sbuf, sizeof(sbuf)));
err = 0;
} else
err = -errno;
@@ -536,3 +550,39 @@ void mem_bswap_64(void *src, int byte_size)
++m;
}
}
+
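+/*
+ * Return true if any entry under /proc has a comm that starts with @name.
+ */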
+bool find_process(const char *name)
+{
+ size_t len = strlen(name);
+ DIR *dir;
+ struct dirent *d;
+ int ret = -1;
+
+ dir = opendir(procfs__mountpoint());
+ if (!dir)
+ return -1;
+
+ /* Walk through the directory. */
+ while (ret && (d = readdir(dir)) != NULL) {
+ char path[PATH_MAX];
+ char *data;
+ size_t size;
+
+ if ((d->d_type != DT_DIR) ||
+ !strcmp(".", d->d_name) ||
+ !strcmp("..", d->d_name))
+ continue;
+
+ scnprintf(path, sizeof(path), "%s/%s/comm",
+ procfs__mountpoint(), d->d_name);
+
+ if (filename__read_str(path, &data, &size))
+ continue;
+
+ ret = strncmp(name, data, len);
+ free(data);
+ }
+
+ closedir(dir);
+ return ret ? false : true;
+}
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 66864364ccb4..80bfdaa0e2a4 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -39,6 +39,8 @@
#define _ALL_SOURCE 1
#define _BSD_SOURCE 1
+/* glibc 2.20 deprecates _BSD_SOURCE in favour of _DEFAULT_SOURCE */
+#define _DEFAULT_SOURCE 1
#define HAS_BOOL
#include <unistd.h>
@@ -64,16 +66,18 @@
#include <regex.h>
#include <utime.h>
#include <sys/wait.h>
-#include <sys/poll.h>
+#include <poll.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <inttypes.h>
+#include <linux/kernel.h>
#include <linux/magic.h>
#include <linux/types.h>
#include <sys/ttydefaults.h>
#include <api/fs/debugfs.h>
#include <termios.h>
#include <linux/bitops.h>
+#include <termios.h>
extern const char *graph_line;
extern const char *graph_dotted_line;
@@ -307,6 +311,7 @@ extern unsigned int page_size;
extern int cacheline_size;
void get_term_dimensions(struct winsize *ws);
+void set_term_quiet_input(struct termios *old);
struct parse_tag {
char tag;
@@ -317,6 +322,21 @@ unsigned long parse_tag_value(const char *str, struct parse_tag *tags);
#define SRCLINE_UNKNOWN ((char *) "??:0")
+static inline int path__join(char *bf, size_t size,
+ const char *path1, const char *path2)
+{
+ return scnprintf(bf, size, "%s%s%s", path1, path1[0] ? "/" : "", path2);
+}
+
+static inline int path__join3(char *bf, size_t size,
+ const char *path1, const char *path2,
+ const char *path3)
+{
+ return scnprintf(bf, size, "%s%s%s%s%s",
+ path1, path1[0] ? "/" : "",
+ path2, path2[0] ? "/" : "", path3);
+}
+
struct dso;
char *get_srcline(struct dso *dso, unsigned long addr);
@@ -330,4 +350,5 @@ void mem_bswap_64(void *src, int byte_size);
void mem_bswap_32(void *src, int byte_size);
const char *get_filename_for_perf_kvm(void);
+bool find_process(const char *name);
#endif /* GIT_COMPAT_UTIL_H */
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
index 55ab700f6ba5..bf1398180785 100755
--- a/tools/testing/ktest/ktest.pl
+++ b/tools/testing/ktest/ktest.pl
@@ -194,6 +194,7 @@ my $config_bisect_check;
my $patchcheck_type;
my $patchcheck_start;
+my $patchcheck_cherry;
my $patchcheck_end;
# set when a test is something other that just building or install
@@ -320,6 +321,7 @@ my %option_map = (
"PATCHCHECK_TYPE" => \$patchcheck_type,
"PATCHCHECK_START" => \$patchcheck_start,
+ "PATCHCHECK_CHERRY" => \$patchcheck_cherry,
"PATCHCHECK_END" => \$patchcheck_end,
);
@@ -1448,6 +1450,12 @@ sub wait_for_monitor {
}
}
print "** Monitor flushed **\n";
+
+ # if stop is defined but wasn't hit, return error
+ # used by reboot (which wants to see a reboot)
+ if (defined($stop) && !$booted) {
+ $bug = 1;
+ }
return $bug;
}
@@ -2336,15 +2344,17 @@ sub success {
sub answer_bisect {
for (;;) {
- doprint "Pass or fail? [p/f]";
+ doprint "Pass, fail, or skip? [p/f/s]";
my $ans = <STDIN>;
chomp $ans;
if ($ans eq "p" || $ans eq "P") {
return 1;
} elsif ($ans eq "f" || $ans eq "F") {
return 0;
+ } elsif ($ans eq "s" || $ans eq "S") {
+ return -1;
} else {
- print "Please answer 'P' or 'F'\n";
+ print "Please answer 'p', 'f', or 's'\n";
}
}
}
@@ -2726,15 +2736,17 @@ sub bisect {
run_command "git bisect start$start_files" or
dodie "could not start bisect";
- run_command "git bisect good $good" or
- dodie "could not set bisect good to $good";
-
- run_git_bisect "git bisect bad $bad" or
- dodie "could not set bisect bad to $bad";
-
if (defined($replay)) {
run_command "git bisect replay $replay" or
dodie "failed to run replay";
+ } else {
+
+ run_command "git bisect good $good" or
+ dodie "could not set bisect good to $good";
+
+ run_git_bisect "git bisect bad $bad" or
+ dodie "could not set bisect bad to $bad";
+
}
if (defined($start)) {
@@ -3181,9 +3193,16 @@ sub patchcheck {
my $start = $patchcheck_start;
+ my $cherry = $patchcheck_cherry;
+ if (!defined($cherry)) {
+ $cherry = 0;
+ }
+
my $end = "HEAD";
if (defined($patchcheck_end)) {
$end = $patchcheck_end;
+ } elsif ($cherry) {
+ die "PATCHCHECK_END must be defined with PATCHCHECK_CHERRY\n";
}
# Get the true sha1's since we can use things like HEAD~3
@@ -3197,24 +3216,38 @@ sub patchcheck {
$type = "boot";
}
- open (IN, "git log --pretty=oneline $end|") or
- dodie "could not get git list";
+ if ($cherry) {
+ open (IN, "git cherry -v $start $end|") or
+ dodie "could not get git list";
+ } else {
+ open (IN, "git log --pretty=oneline $end|") or
+ dodie "could not get git list";
+ }
my @list;
while (<IN>) {
chomp;
+ # git cherry adds a '+' we want to remove
+ s/^\+ //;
$list[$#list+1] = $_;
last if (/^$start/);
}
close(IN);
- if ($list[$#list] !~ /^$start/) {
- fail "SHA1 $start not found";
+ if (!$cherry) {
+ if ($list[$#list] !~ /^$start/) {
+ fail "SHA1 $start not found";
+ }
+
+ # go backwards in the list
+ @list = reverse @list;
}
- # go backwards in the list
- @list = reverse @list;
+ doprint("Going to test the following commits:\n");
+ foreach my $l (@list) {
+ doprint "$l\n";
+ }
my $save_clean = $noclean;
my %ignored_warnings;
diff --git a/tools/testing/ktest/sample.conf b/tools/testing/ktest/sample.conf
index 911e45ad657a..6c58cd8bbbae 100644
--- a/tools/testing/ktest/sample.conf
+++ b/tools/testing/ktest/sample.conf
@@ -906,6 +906,16 @@
#
# PATCHCHECK_END is the last patch to check (default HEAD)
#
+# PATCHCHECK_CHERRY, if set to a non-zero value, runs git cherry
+# against PATCHCHECK_START and PATCHCHECK_END. That is,
+#
+# git cherry ${PATCHCHECK_START} ${PATCHCHECK_END}
+#
+# Then the changes found will be tested.
+#
+# Note, PATCHCHECK_CHERRY requires PATCHCHECK_END to be defined.
+# (default 0)
+#
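+# A hypothetical example (the refs below are illustrative only):
+#
+#  TEST_START
+#  TEST_TYPE = patchcheck
+#  PATCHCHECK_TYPE = boot
+#  PATCHCHECK_START = origin/master
+#  PATCHCHECK_END = HEAD
+#  PATCHCHECK_CHERRY = 1
+#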
# PATCHCHECK_TYPE is required and is the type of test to run:
# build, boot, test.
#
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 36ff2e4c7b6f..45f145c6f843 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -14,6 +14,7 @@ TARGETS += powerpc
TARGETS += user
TARGETS += sysctl
TARGETS += firmware
+TARGETS += ftrace
TARGETS_HOTPLUG = cpu-hotplug
TARGETS_HOTPLUG += memory-hotplug
diff --git a/tools/testing/selftests/ftrace/Makefile b/tools/testing/selftests/ftrace/Makefile
new file mode 100644
index 000000000000..76cc9f156267
--- /dev/null
+++ b/tools/testing/selftests/ftrace/Makefile
@@ -0,0 +1,7 @@
+all:
+
+run_tests:
+ @/bin/sh ./ftracetest || echo "ftrace selftests: [FAIL]"
+
+clean:
+ rm -rf logs/*
diff --git a/tools/testing/selftests/ftrace/README b/tools/testing/selftests/ftrace/README
new file mode 100644
index 000000000000..182e76fa4b82
--- /dev/null
+++ b/tools/testing/selftests/ftrace/README
@@ -0,0 +1,82 @@
+Linux Ftrace Testcases
+
+This is a collection of testcases for the ftrace tracing feature in the
+Linux kernel. Since ftrace exports its interfaces via debugfs, we just need
+shell scripts for testing. Feel free to add new testcases.
+
+Running the ftrace testcases
+============================
+
+First of all, you need to be the root user to run this script.
+To run all testcases:
+
+ $ sudo ./ftracetest
+
+To run specific testcases:
+
+ # ./ftracetest test.d/basic3.tc
+
+Or you can run all the testcases under a given directory:
+
+ # ./ftracetest test.d/kprobe/
+
+Contributing new testcases
+==========================
+
+Copy test.d/template to your testcase (whose filename must have a *.tc
+extension) and rewrite the test description line; a minimal sketch follows
+the notes below.
+
+ * The working directory of the script is <debugfs>/tracing/.
+
+ * Take care with side effects as the tests are run with root privilege.
+
+ * The tests should not run for a long period of time (more than 1 min.);
+   these are meant to be unit tests.
+
+ * You can add a directory for your testcases under test.d/ if needed.
+
+ * The test cases should run on dash (busybox shell) for testing on
+ minimal cross-build environments.
+
+ * Note that the tests are run with the "set -e" (errexit) option. If any
+   command fails, the test is terminated immediately.
+
+ * The tests can return result codes other than pass or fail by
+   using exit_unresolved, exit_untested, exit_unsupported and exit_xfail.
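+
+As a rough sketch (the shipped test.d/template and the exit_* helpers
+provided by ftracetest are authoritative), a minimal testcase might look
+like:
+
+  #!/bin/sh
+  # description: ftrace - check that the nop tracer can be selected
+  grep -q nop available_tracers || exit_unsupported
+  echo nop > current_tracer
+  grep -q '^nop$' current_tracer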
+
+Result code
+===========
+
+Ftracetest supports the following result codes.
+
+ * PASS: The test succeeded as expected. A test which exits with 0 is
+   counted as a passed test.
+
+ * FAIL: The test failed, but was expected to succeed. A test which exits
+   with a non-zero code is counted as a failed test.
+
+ * UNRESOLVED: The test produced unclear or intermediate results;
+   for example, the test was interrupted,
+   the test depends on a previous test which failed,
+   or the test was set up incorrectly.
+   A test in any of these situations must call exit_unresolved.
+
+ * UNTESTED: The test was not run, currently just a placeholder.
+ In this case, the test must call exit_untested.
+
+ * UNSUPPORTED: The test failed because a required feature is missing.
+   In this case, the test must call exit_unsupported.
+
+ * XFAIL: The test failed, and was expected to fail.
+ To return XFAIL, call exit_xfail from the test.
+
+There are sample test scripts for each result code under samples/.
+You can also run the samples as follows:
+
+ # ./ftracetest samples/
+
+TODO
+====
+
+ * Fancy colored output :)
+
diff --git a/tools/testing/selftests/ftrace/ftracetest b/tools/testing/selftests/ftrace/ftracetest
new file mode 100755
index 000000000000..a8f81c782856
--- /dev/null
+++ b/tools/testing/selftests/ftrace/ftracetest
@@ -0,0 +1,253 @@
+#!/bin/sh
+
+# ftracetest - Ftrace test shell scripts
+#
+# Copyright (C) Hitachi Ltd., 2014
+# Written by Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
+#
+# Released under the terms of the GPL v2.
+
+usage() { # errno [message]
+[ "$2" ] && echo $2
+echo "Usage: ftracetest [options] [testcase(s)] [testcase-directory(s)]"
+echo " Options:"
+echo " -h|--help Show help message"
+echo " -k|--keep Keep passed test logs"
+echo " -d|--debug Debug mode (trace all shell commands)"
+exit $1
+}
+
+errexit() { # message
+ echo "Error: $1" 1>&2
+ exit 1
+}
+
+# Ensure the tests are run with root privilege
+if [ `id -u` -ne 0 ]; then
+ errexit "this must be run by root user"
+fi
+
+# Utilities
+absdir() { # file_path
+ (cd `dirname $1`; pwd)
+}
+
+abspath() {
+ echo `absdir $1`/`basename $1`
+}
+
+find_testcases() { #directory
+ echo `find $1 -name \*.tc`
+}
+
+parse_opts() { # opts
+ local OPT_TEST_CASES=
+ local OPT_TEST_DIR=
+
+ while [ "$1" ]; do
+ case "$1" in
+ --help|-h)
+ usage 0
+ ;;
+ --keep|-k)
+ KEEP_LOG=1
+ shift 1
+ ;;
+ --debug|-d)
+ DEBUG=1
+ shift 1
+ ;;
+ *.tc)
+ if [ -f "$1" ]; then
+ OPT_TEST_CASES="$OPT_TEST_CASES `abspath $1`"
+ shift 1
+ else
+ usage 1 "$1 is not a testcase"
+ fi
+ ;;
+ *)
+ if [ -d "$1" ]; then
+ OPT_TEST_DIR=`abspath $1`
+ OPT_TEST_CASES="$OPT_TEST_CASES `find_testcases $OPT_TEST_DIR`"
+ shift 1
+ else
+ usage 1 "Invalid option ($1)"
+ fi
+ ;;
+ esac
+ done
+ if [ "$OPT_TEST_CASES" ]; then
+ TEST_CASES=$OPT_TEST_CASES
+ fi
+}
+
+# Parameters
+DEBUGFS_DIR=`grep debugfs /proc/mounts | cut -f2 -d' '`
+TRACING_DIR=$DEBUGFS_DIR/tracing
+TOP_DIR=`absdir $0`
+TEST_DIR=$TOP_DIR/test.d
+TEST_CASES=`find_testcases $TEST_DIR`
+LOG_DIR=$TOP_DIR/logs/`date +%Y%m%d-%H%M%S`/
+KEEP_LOG=0
+DEBUG=0
+# Parse command-line options
+parse_opts $*
+
+[ $DEBUG -ne 0 ] && set -x
+
+# Verify parameters
+if [ -z "$DEBUGFS_DIR" -o ! -d "$TRACING_DIR" ]; then
+ errexit "No ftrace directory found"
+fi
+
+# Preparing logs
+LOG_FILE=$LOG_DIR/ftracetest.log
+mkdir -p $LOG_DIR || errexit "Failed to make a log directory: $LOG_DIR"
+date > $LOG_FILE
+prlog() { # messages
+ echo "$@" | tee -a $LOG_FILE
+}
+catlog() { #file
+ cat $1 | tee -a $LOG_FILE
+}
+prlog "=== Ftrace unit tests ==="
+
+
+# Testcase management
+# Test result codes - Dejagnu extended code
+PASS=0 # The test succeeded.
+FAIL=1 # The test failed, but was expected to succeed.
+UNRESOLVED=2 # The test produced indeterminate results. (e.g. interrupted)
+UNTESTED=3 # The test was not run, currently just a placeholder.
+UNSUPPORTED=4 # The test failed because a required feature is missing.
+XFAIL=5 # The test failed, and was expected to fail.
+
+# Accumulations
+PASSED_CASES=
+FAILED_CASES=
+UNRESOLVED_CASES=
+UNTESTED_CASES=
+UNSUPPORTED_CASES=
+XFAILED_CASES=
+UNDEFINED_CASES=
+TOTAL_RESULT=0
+
+CASENO=0
+testcase() { # testfile
+ CASENO=$((CASENO+1))
+ prlog -n "[$CASENO]"`grep "^#[ \t]*description:" $1 | cut -f2 -d:`
+}
+
+eval_result() { # retval sigval
+ local retval=$2
+ if [ $2 -eq 0 ]; then
+ test $1 -ne 0 && retval=$FAIL
+ fi
+ case $retval in
+ $PASS)
+ prlog " [PASS]"
+ PASSED_CASES="$PASSED_CASES $CASENO"
+ return 0
+ ;;
+ $FAIL)
+ prlog " [FAIL]"
+ FAILED_CASES="$FAILED_CASES $CASENO"
+ return 1 # this is a bug.
+ ;;
+ $UNRESOLVED)
+ prlog " [UNRESOLVED]"
+ UNRESOLVED_CASES="$UNRESOLVED_CASES $CASENO"
+ return 1 # this is a kind of bug.. something happened.
+ ;;
+ $UNTESTED)
+ prlog " [UNTESTED]"
+ UNTESTED_CASES="$UNTESTED_CASES $CASENO"
+ return 0
+ ;;
+ $UNSUPPORTED)
+ prlog " [UNSUPPORTED]"
+ UNSUPPORTED_CASES="$UNSUPPORTED_CASES $CASENO"
+ return 1 # this is not a bug, but the result should be reported.
+ ;;
+ $XFAIL)
+ prlog " [XFAIL]"
+ XFAILED_CASES="$XFAILED_CASES $CASENO"
+ return 0
+ ;;
+ *)
+ prlog " [UNDEFINED]"
+ UNDEFINED_CASES="$UNDEFINED_CASES $CASENO"
+ return 1 # this must be a test bug
+ ;;
+ esac
+}
+
+# Signal handling for result codes
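+#
+# Each testcase is sourced in a subshell with errexit, so its exit status
+# alone only distinguishes pass from fail. The exit_* helpers below send
+# a realtime signal to this top-level shell ($$ is preserved in subshells),
+# whose trap records the extended result code in SIG_RESULT before the
+# testcase exits with 0.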
+SIG_RESULT=
+SIG_BASE=36 # Use realtime signals
+SIG_PID=$$
+
+SIG_UNRESOLVED=$((SIG_BASE + UNRESOLVED))
+exit_unresolved () {
+ kill -s $SIG_UNRESOLVED $SIG_PID
+ exit 0
+}
+trap 'SIG_RESULT=$UNRESOLVED' $SIG_UNRESOLVED
+
+SIG_UNTESTED=$((SIG_BASE + UNTESTED))
+exit_untested () {
+ kill -s $SIG_UNTESTED $SIG_PID
+ exit 0
+}
+trap 'SIG_RESULT=$UNTESTED' $SIG_UNTESTED
+
+SIG_UNSUPPORTED=$((SIG_BASE + UNSUPPORTED))
+exit_unsupported () {
+ kill -s $SIG_UNSUPPORTED $SIG_PID
+ exit 0
+}
+trap 'SIG_RESULT=$UNSUPPORTED' $SIG_UNSUPPORTED
+
+SIG_XFAIL=$((SIG_BASE + XFAIL))
+exit_xfail () {
+ kill -s $SIG_XFAIL $SIG_PID
+ exit 0
+}
+trap 'SIG_RESULT=$XFAIL' $SIG_XFAIL
+
+# Run one test case
+run_test() { # testfile
+ local testname=`basename $1`
+ local testlog=`mktemp --tmpdir=$LOG_DIR ${testname}-XXXXXX.log`
+ testcase $1
+ echo "execute: "$1 > $testlog
+ SIG_RESULT=0
+ # Set up $PID for the testcase: $$ is not updated in a subshell,
+ # so read the subshell's own PID from /proc/self/stat.
+ (cd $TRACING_DIR; read PID _ < /proc/self/stat ;
+ set -e; set -x; . $1) >> $testlog 2>&1
+ eval_result $? $SIG_RESULT
+ if [ $? -eq 0 ]; then
+ # Remove test log if the test was done as it was expected.
+ [ $KEEP_LOG -eq 0 ] && rm $testlog
+ else
+ catlog $testlog
+ TOTAL_RESULT=1
+ fi
+}
+
+# Main loop
+for t in $TEST_CASES; do
+ run_test $t
+done
+
+prlog ""
+prlog "# of passed: " `echo $PASSED_CASES | wc -w`
+prlog "# of failed: " `echo $FAILED_CASES | wc -w`
+prlog "# of unresolved: " `echo $UNRESOLVED_CASES | wc -w`
+prlog "# of untested: " `echo $UNTESTED_CASES | wc -w`
+prlog "# of unsupported: " `echo $UNSUPPORTED_CASES | wc -w`
+prlog "# of xfailed: " `echo $XFAILED_CASES | wc -w`
+prlog "# of undefined(test bug): " `echo $UNDEFINED_CASES | wc -w`
+
+# if no error, return 0
+exit $TOTAL_RESULT
diff --git a/tools/testing/selftests/ftrace/samples/fail.tc b/tools/testing/selftests/ftrace/samples/fail.tc
new file mode 100644
index 000000000000..15e35b956e05
--- /dev/null
+++ b/tools/testing/selftests/ftrace/samples/fail.tc
@@ -0,0 +1,4 @@
+#!/bin/sh
+# description: failure-case example
+cat non-exist-file
+echo "this is not executed"
diff --git a/tools/testing/selftests/ftrace/samples/pass.tc b/tools/testing/selftests/ftrace/samples/pass.tc
new file mode 100644
index 000000000000..d01549370041
--- /dev/null
+++ b/tools/testing/selftests/ftrace/samples/pass.tc
@@ -0,0 +1,3 @@
+#!/bin/sh
+# description: pass-case example
+return 0
diff --git a/tools/testing/selftests/ftrace/samples/unresolved.tc b/tools/testing/selftests/ftrace/samples/unresolved.tc
new file mode 100644
index 000000000000..41e99d3358d1
--- /dev/null
+++ b/tools/testing/selftests/ftrace/samples/unresolved.tc
@@ -0,0 +1,4 @@
+#!/bin/sh
+# description: unresolved-case example
+trap exit_unresolved INT
+kill -INT $PID
diff --git a/tools/testing/selftests/ftrace/samples/unsupported.tc b/tools/testing/selftests/ftrace/samples/unsupported.tc
new file mode 100644
index 000000000000..45910ff13328
--- /dev/null
+++ b/tools/testing/selftests/ftrace/samples/unsupported.tc
@@ -0,0 +1,3 @@
+#!/bin/sh
+# description: unsupported-case example
+exit_unsupported
diff --git a/tools/testing/selftests/ftrace/samples/untested.tc b/tools/testing/selftests/ftrace/samples/untested.tc
new file mode 100644
index 000000000000..35a45946ec60
--- /dev/null
+++ b/tools/testing/selftests/ftrace/samples/untested.tc
@@ -0,0 +1,3 @@
+#!/bin/sh
+# description: untested-case example
+exit_untested
diff --git a/tools/testing/selftests/ftrace/samples/xfail.tc b/tools/testing/selftests/ftrace/samples/xfail.tc
new file mode 100644
index 000000000000..9dd395323259
--- /dev/null
+++ b/tools/testing/selftests/ftrace/samples/xfail.tc
@@ -0,0 +1,3 @@
+#!/bin/sh
+# description: xfail-case example
+cat non-exist-file || exit_xfail
diff --git a/tools/testing/selftests/ftrace/test.d/00basic/basic1.tc b/tools/testing/selftests/ftrace/test.d/00basic/basic1.tc
new file mode 100644
index 000000000000..9980ff14ae44
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/00basic/basic1.tc
@@ -0,0 +1,3 @@
+#!/bin/sh
+# description: Basic trace file check
+test -f README -a -f trace -a -f tracing_on -a -f trace_pipe
diff --git a/tools/testing/selftests/ftrace/test.d/00basic/basic2.tc b/tools/testing/selftests/ftrace/test.d/00basic/basic2.tc
new file mode 100644
index 000000000000..bf9a7b037924
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/00basic/basic2.tc
@@ -0,0 +1,7 @@
+#!/bin/sh
+# description: Basic test for tracers
+test -f available_tracers
+for t in `cat available_tracers`; do
+ echo $t > current_tracer
+done
+echo nop > current_tracer
diff --git a/tools/testing/selftests/ftrace/test.d/00basic/basic3.tc b/tools/testing/selftests/ftrace/test.d/00basic/basic3.tc
new file mode 100644
index 000000000000..bde6625d9785
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/00basic/basic3.tc
@@ -0,0 +1,8 @@
+#!/bin/sh
+# description: Basic trace clock test
+test -f trace_clock
+for c in `cat trace_clock | tr -d \[\]`; do
+ echo $c > trace_clock
+ grep '\['$c'\]' trace_clock
+done
+echo local > trace_clock
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/add_and_remove.tc b/tools/testing/selftests/ftrace/test.d/kprobe/add_and_remove.tc
new file mode 100644
index 000000000000..1b8b665ab2b3
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/add_and_remove.tc
@@ -0,0 +1,11 @@
+#!/bin/sh
+# description: Kprobe dynamic event - adding and removing
+
+[ -f kprobe_events ] || exit_unsupported # this is configurable
+
+echo 0 > events/enable
+echo > kprobe_events
+echo p:myevent do_fork > kprobe_events
+grep myevent kprobe_events
+test -d events/kprobes/myevent
+echo > kprobe_events
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/busy_check.tc b/tools/testing/selftests/ftrace/test.d/kprobe/busy_check.tc
new file mode 100644
index 000000000000..b55c84003587
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/busy_check.tc
@@ -0,0 +1,13 @@
+#!/bin/sh
+# description: Kprobe dynamic event - busy event check
+
+[ -f kprobe_events ] || exit_unsupported
+
+echo 0 > events/enable
+echo > kprobe_events
+echo p:myevent do_fork > kprobe_events
+test -d events/kprobes/myevent
+echo 1 > events/kprobes/myevent/enable
+echo > kprobe_events && exit 1 # this must fail
+echo 0 > events/kprobes/myevent/enable
+echo > kprobe_events # this must succeed
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args.tc
new file mode 100644
index 000000000000..a603d3f8db7b
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args.tc
@@ -0,0 +1,16 @@
+#!/bin/sh
+# description: Kprobe dynamic event with arguments
+
+[ -f kprobe_events ] || exit_unsupported # this is configurable
+
+echo 0 > events/enable
+echo > kprobe_events
+echo 'p:testprobe do_fork $stack $stack0 +0($stack)' > kprobe_events
+grep testprobe kprobe_events
+test -d events/kprobes/testprobe
+echo 1 > events/kprobes/testprobe/enable
+( echo "forked")
+echo 0 > events/kprobes/testprobe/enable
+echo "-:testprobe" >> kprobe_events
+test -d events/kprobes/testprobe && exit 1 || exit 0
+
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_args.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_args.tc
new file mode 100644
index 000000000000..283c29e7f7c4
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_args.tc
@@ -0,0 +1,15 @@
+#!/bin/sh
+# description: Kretprobe dynamic event with arguments
+
+[ -f kprobe_events ] || exit_unsupported # this is configurable
+
+echo 0 > events/enable
+echo > kprobe_events
+echo 'r:testprobe2 do_fork $retval' > kprobe_events
+grep testprobe2 kprobe_events
+test -d events/kprobes/testprobe2
+echo 1 > events/kprobes/testprobe2/enable
+( echo "forked")
+echo 0 > events/kprobes/testprobe2/enable
+echo '-:testprobe2' >> kprobe_events
+test -d events/kprobes/testprobe2 && exit 1 || exit 0
diff --git a/tools/testing/selftests/ftrace/test.d/template b/tools/testing/selftests/ftrace/test.d/template
new file mode 100644
index 000000000000..5448f7abad5f
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/template
@@ -0,0 +1,9 @@
+#!/bin/sh
+# description: %HERE DESCRIBE WHAT THIS DOES%
+# your testcase file must have a ".tc" extension
+# Note that all tests are run with the "errexit" (set -e) option.
+
+exit 0 # Return 0 if the test passed, otherwise return non-zero
+# If the test could not run because a required feature is missing, call exit_unsupported
+# If the test produced unclear results, call exit_unresolved
+# If the test is a dummy or a placeholder, call exit_untested
diff --git a/tools/testing/selftests/memfd/Makefile b/tools/testing/selftests/memfd/Makefile
index ad4ab01cd28f..b80cd10d53ba 100644
--- a/tools/testing/selftests/memfd/Makefile
+++ b/tools/testing/selftests/memfd/Makefile
@@ -1,38 +1,17 @@
-uname_M := $(shell uname -m 2>/dev/null || echo not)
-ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/)
-ifeq ($(ARCH),i386)
- ARCH := x86
-endif
-ifeq ($(ARCH),x86_64)
- ARCH := x86
-endif
-
CFLAGS += -D_FILE_OFFSET_BITS=64
-CFLAGS += -I../../../../arch/x86/include/generated/uapi/
-CFLAGS += -I../../../../arch/x86/include/uapi/
CFLAGS += -I../../../../include/uapi/
CFLAGS += -I../../../../include/
all:
-ifeq ($(ARCH),x86)
gcc $(CFLAGS) memfd_test.c -o memfd_test
-else
- echo "Not an x86 target, can't build memfd selftest"
-endif
run_tests: all
-ifeq ($(ARCH),x86)
gcc $(CFLAGS) memfd_test.c -o memfd_test
-endif
@./memfd_test || echo "memfd_test: [FAIL]"
build_fuse:
-ifeq ($(ARCH),x86)
gcc $(CFLAGS) fuse_mnt.c `pkg-config fuse --cflags --libs` -o fuse_mnt
gcc $(CFLAGS) fuse_test.c -o fuse_test
-else
- echo "Not an x86 target, can't build memfd selftest"
-endif
run_fuse: build_fuse
@./run_fuse_test.sh || echo "fuse_test: [FAIL]"
diff --git a/tools/testing/selftests/memfd/memfd_test.c b/tools/testing/selftests/memfd/memfd_test.c
index 3634c909b1b0..0b9eafb7ab7b 100644
--- a/tools/testing/selftests/memfd/memfd_test.c
+++ b/tools/testing/selftests/memfd/memfd_test.c
@@ -59,9 +59,9 @@ static void mfd_fail_new(const char *name, unsigned int flags)
}
}
-static __u64 mfd_assert_get_seals(int fd)
+static unsigned int mfd_assert_get_seals(int fd)
{
- long r;
+ int r;
r = fcntl(fd, F_GET_SEALS);
if (r < 0) {
@@ -69,50 +69,48 @@ static __u64 mfd_assert_get_seals(int fd)
abort();
}
- return r;
+ return (unsigned int)r;
}
-static void mfd_assert_has_seals(int fd, __u64 seals)
+static void mfd_assert_has_seals(int fd, unsigned int seals)
{
- __u64 s;
+ unsigned int s;
s = mfd_assert_get_seals(fd);
if (s != seals) {
- printf("%llu != %llu = GET_SEALS(%d)\n",
- (unsigned long long)seals, (unsigned long long)s, fd);
+ printf("%u != %u = GET_SEALS(%d)\n", seals, s, fd);
abort();
}
}
-static void mfd_assert_add_seals(int fd, __u64 seals)
+static void mfd_assert_add_seals(int fd, unsigned int seals)
{
- long r;
- __u64 s;
+ int r;
+ unsigned int s;
s = mfd_assert_get_seals(fd);
r = fcntl(fd, F_ADD_SEALS, seals);
if (r < 0) {
- printf("ADD_SEALS(%d, %llu -> %llu) failed: %m\n",
- fd, (unsigned long long)s, (unsigned long long)seals);
+ printf("ADD_SEALS(%d, %u -> %u) failed: %m\n", fd, s, seals);
abort();
}
}
-static void mfd_fail_add_seals(int fd, __u64 seals)
+static void mfd_fail_add_seals(int fd, unsigned int seals)
{
- long r;
- __u64 s;
+ int r;
+ unsigned int s;
r = fcntl(fd, F_GET_SEALS);
if (r < 0)
s = 0;
else
- s = r;
+ s = (unsigned int)r;
r = fcntl(fd, F_ADD_SEALS, seals);
if (r >= 0) {
- printf("ADD_SEALS(%d, %llu -> %llu) didn't fail as expected\n",
- fd, (unsigned long long)s, (unsigned long long)seals);
+ printf("ADD_SEALS(%d, %u -> %u) didn't fail as expected\n",
+ fd, s, seals);
abort();
}
}
@@ -205,7 +203,7 @@ static void mfd_fail_open(int fd, int flags, mode_t mode)
sprintf(buf, "/proc/self/fd/%d", fd);
r = open(buf, flags, mode);
if (r >= 0) {
- printf("open(%s) didn't fail as expected\n");
+ printf("open(%s) didn't fail as expected\n", buf);
abort();
}
}
diff --git a/tools/testing/selftests/powerpc/Makefile b/tools/testing/selftests/powerpc/Makefile
index 74a78cedce37..f6ff90a76bd7 100644
--- a/tools/testing/selftests/powerpc/Makefile
+++ b/tools/testing/selftests/powerpc/Makefile
@@ -13,7 +13,7 @@ CFLAGS := -Wall -O2 -flto -Wall -Werror -DGIT_VERSION='"$(GIT_VERSION)"' -I$(CUR
export CC CFLAGS
-TARGETS = pmu copyloops mm tm
+TARGETS = pmu copyloops mm tm primitives
endif
diff --git a/tools/testing/selftests/powerpc/primitives/Makefile b/tools/testing/selftests/powerpc/primitives/Makefile
new file mode 100644
index 000000000000..ea737ca01732
--- /dev/null
+++ b/tools/testing/selftests/powerpc/primitives/Makefile
@@ -0,0 +1,17 @@
+CFLAGS += -I$(CURDIR)
+
+PROGS := load_unaligned_zeropad
+
+all: $(PROGS)
+
+$(PROGS): ../harness.c
+
+run_tests: all
+ @-for PROG in $(PROGS); do \
+ ./$$PROG; \
+ done;
+
+clean:
+ rm -f $(PROGS) *.o
+
+.PHONY: all run_tests clean
diff --git a/tools/testing/selftests/powerpc/primitives/asm/asm-compat.h b/tools/testing/selftests/powerpc/primitives/asm/asm-compat.h
new file mode 120000
index 000000000000..b14255e15a25
--- /dev/null
+++ b/tools/testing/selftests/powerpc/primitives/asm/asm-compat.h
@@ -0,0 +1 @@
+../.././../../../../arch/powerpc/include/asm/asm-compat.h \ No newline at end of file
diff --git a/tools/testing/selftests/powerpc/primitives/asm/ppc-opcode.h b/tools/testing/selftests/powerpc/primitives/asm/ppc-opcode.h
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/tools/testing/selftests/powerpc/primitives/asm/ppc-opcode.h
diff --git a/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c b/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c
new file mode 100644
index 000000000000..d1b647509596
--- /dev/null
+++ b/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c
@@ -0,0 +1,147 @@
+/*
+ * Userspace test harness for load_unaligned_zeropad. Creates two
+ * pages and uses mprotect to prevent access to the second page and
+ * a SEGV handler that walks the exception tables and runs the fixup
+ * routine.
+ *
+ * The results are compared against a normal load that is performed
+ * while access to the second page is enabled via mprotect.
+ *
+ * Copyright (C) 2014 Anton Blanchard <anton@au.ibm.com>, IBM
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <signal.h>
+#include <unistd.h>
+#include <sys/mman.h>
+
+#define FIXUP_SECTION ".ex_fixup"
+
+#include "word-at-a-time.h"
+
+#include "utils.h"
+
+
+static int page_size;
+static char *mem_region;
+
+static int protect_region(void)
+{
+ if (mprotect(mem_region + page_size, page_size, PROT_NONE)) {
+ perror("mprotect");
+ return 1;
+ }
+
+ return 0;
+}
+
+static int unprotect_region(void)
+{
+ if (mprotect(mem_region + page_size, page_size, PROT_READ|PROT_WRITE)) {
+ perror("mprotect");
+ return 1;
+ }
+
+ return 0;
+}
+
+extern char __start___ex_table[];
+extern char __stop___ex_table[];
+
+#if defined(__powerpc64__)
+#define UCONTEXT_NIA(UC) (UC)->uc_mcontext.gp_regs[PT_NIP]
+#elif defined(__powerpc__)
+#define UCONTEXT_NIA(UC) (UC)->uc_mcontext.uc_regs->gregs[PT_NIP]
+#else
+#error implement UCONTEXT_NIA
+#endif
+
+static int segv_error;
+
+static void segv_handler(int signr, siginfo_t *info, void *ptr)
+{
+ ucontext_t *uc = (ucontext_t *)ptr;
+ unsigned long addr = (unsigned long)info->si_addr;
+ unsigned long *ip = &UCONTEXT_NIA(uc);
+ unsigned long *ex_p = (unsigned long *)__start___ex_table;
+
+ while (ex_p < (unsigned long *)__stop___ex_table) {
+ unsigned long insn, fixup;
+
+ insn = *ex_p++;
+ fixup = *ex_p++;
+
+ if (insn == *ip) {
+ *ip = fixup;
+ return;
+ }
+ }
+
+ printf("No exception table match for NIA %lx ADDR %lx\n", *ip, addr);
+ segv_error++;
+}
+
+static void setup_segv_handler(void)
+{
+ struct sigaction action;
+
+ memset(&action, 0, sizeof(action));
+ action.sa_sigaction = segv_handler;
+ action.sa_flags = SA_SIGINFO;
+ sigaction(SIGSEGV, &action, NULL);
+}
+
+static int do_one_test(char *p, int page_offset)
+{
+ unsigned long should;
+ unsigned long got;
+
+ FAIL_IF(unprotect_region());
+ should = *(unsigned long *)p;
+ FAIL_IF(protect_region());
+
+ got = load_unaligned_zeropad(p);
+
+ if (should != got)
+ printf("offset %u load_unaligned_zeropad returned 0x%lx, should be 0x%lx\n", page_offset, got, should);
+
+ return 0;
+}
+
+static int test_body(void)
+{
+ unsigned long i;
+
+ page_size = getpagesize();
+ mem_region = mmap(NULL, page_size * 2, PROT_READ|PROT_WRITE,
+ MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+
+ FAIL_IF(mem_region == MAP_FAILED);
+
+ for (i = 0; i < page_size; i++)
+ mem_region[i] = i;
+
+ memset(mem_region+page_size, 0, page_size);
+
+ setup_segv_handler();
+
+ for (i = 0; i < page_size; i++)
+ FAIL_IF(do_one_test(mem_region+i, i));
+
+ FAIL_IF(segv_error);
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(test_body, "load_unaligned_zeropad");
+}
diff --git a/tools/testing/selftests/powerpc/primitives/word-at-a-time.h b/tools/testing/selftests/powerpc/primitives/word-at-a-time.h
new file mode 120000
index 000000000000..eb74401b591f
--- /dev/null
+++ b/tools/testing/selftests/powerpc/primitives/word-at-a-time.h
@@ -0,0 +1 @@
+../../../../../arch/powerpc/include/asm/word-at-a-time.h \ No newline at end of file
diff --git a/tools/testing/selftests/rcutorture/bin/config2frag.sh b/tools/testing/selftests/rcutorture/bin/config2frag.sh
index 9f9ffcd427d3..56f51ae13d73 100644..100755
--- a/tools/testing/selftests/rcutorture/bin/config2frag.sh
+++ b/tools/testing/selftests/rcutorture/bin/config2frag.sh
@@ -1,5 +1,5 @@
-#!/bin/sh
-# Usage: sh config2frag.sh < .config > configfrag
+#!/bin/bash
+# Usage: config2frag.sh < .config > configfrag
#
# Converts the "# CONFIG_XXX is not set" to "CONFIG_XXX=n" so that the
# resulting file becomes a legitimate Kconfig fragment.
diff --git a/tools/testing/selftests/rcutorture/bin/configcheck.sh b/tools/testing/selftests/rcutorture/bin/configcheck.sh
index d686537dd55c..eee31e261bf7 100755
--- a/tools/testing/selftests/rcutorture/bin/configcheck.sh
+++ b/tools/testing/selftests/rcutorture/bin/configcheck.sh
@@ -1,5 +1,5 @@
-#!/bin/sh
-# Usage: sh configcheck.sh .config .config-template
+#!/bin/bash
+# Usage: configcheck.sh .config .config-template
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
diff --git a/tools/testing/selftests/rcutorture/bin/configinit.sh b/tools/testing/selftests/rcutorture/bin/configinit.sh
index 9c3f3d39b934..15f1a17ca96e 100755
--- a/tools/testing/selftests/rcutorture/bin/configinit.sh
+++ b/tools/testing/selftests/rcutorture/bin/configinit.sh
@@ -1,6 +1,6 @@
-#!/bin/sh
+#!/bin/bash
#
-# sh configinit.sh config-spec-file [ build output dir ]
+# Usage: configinit.sh config-spec-file [ build output dir ]
#
# Create a .config file from the spec file. Run from the kernel source tree.
# Exits with 0 if all went well, with 1 if all went well but the config
diff --git a/tools/testing/selftests/rcutorture/bin/functions.sh b/tools/testing/selftests/rcutorture/bin/functions.sh
index d01b865bb100..b325470c01b3 100644
--- a/tools/testing/selftests/rcutorture/bin/functions.sh
+++ b/tools/testing/selftests/rcutorture/bin/functions.sh
@@ -64,6 +64,26 @@ configfrag_boot_params () {
fi
}
+# configfrag_boot_cpus bootparam-string config-fragment-file config-cpus
+#
+# Decreases number of CPUs based on any maxcpus= boot parameters specified.
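+#
+# Typical use (as in kvm-test-1-run.sh):
+#   cpu_count=`configfrag_boot_cpus "$boot_args" "$config_template" "$cpu_count"`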
+configfrag_boot_cpus () {
+ local bootargs="`configfrag_boot_params "$1" "$2"`"
+ local maxcpus
+ if echo "${bootargs}" | grep -q 'maxcpus=[0-9]'
+ then
+ maxcpus="`echo "${bootargs}" | sed -e 's/^.*maxcpus=\([0-9]*\).*$/\1/'`"
+ if test "$3" -gt "$maxcpus"
+ then
+ echo $maxcpus
+ else
+ echo $3
+ fi
+ else
+ echo $3
+ fi
+}
+
# configfrag_hotplug_cpu config-fragment-file
#
# Returns 1 if the config fragment specifies hotplug CPU.
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-build.sh b/tools/testing/selftests/rcutorture/bin/kvm-build.sh
index 7c1e56b46de4..00cb0db2643d 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-build.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-build.sh
@@ -2,7 +2,7 @@
#
# Build a kvm-ready Linux kernel from the tree in the current directory.
#
-# Usage: sh kvm-build.sh config-template build-dir more-configs
+# Usage: kvm-build.sh config-template build-dir more-configs
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh
index 7f1ff1a8fc4b..43f764098e50 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh
@@ -2,7 +2,7 @@
#
# Analyze a given results directory for locktorture progress.
#
-# Usage: sh kvm-recheck-lock.sh resdir
+# Usage: kvm-recheck-lock.sh resdir
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
index 307c4b95f325..d6cc07fc137f 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
@@ -2,7 +2,7 @@
#
# Analyze a given results directory for rcutorture progress.
#
-# Usage: sh kvm-recheck-rcu.sh resdir
+# Usage: kvm-recheck-rcu.sh resdir
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
index 3f6c9b78d177..4f5b20f367a9 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
@@ -4,7 +4,7 @@
# check the build and console output for errors. Given a directory
# containing results directories, this recursively checks them all.
#
-# Usage: sh kvm-recheck.sh resdir ...
+# Usage: kvm-recheck.sh resdir ...
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
index 0f69dcbf9def..f6b2b4771b78 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
@@ -6,7 +6,7 @@
# Execute this in the source tree. Do not run it as a background task
# because qemu does not seem to like that much.
#
-# Usage: sh kvm-test-1-run.sh config builddir resdir minutes qemu-args boot_args
+# Usage: kvm-test-1-run.sh config builddir resdir minutes qemu-args boot_args
#
# qemu-args defaults to "-nographic", along with arguments specifying the
# number of CPUs and other options generated from
@@ -140,6 +140,7 @@ fi
# Generate -smp qemu argument.
qemu_args="-nographic $qemu_args"
cpu_count=`configNR_CPUS.sh $config_template`
+cpu_count=`configfrag_boot_cpus "$boot_args" "$config_template" "$cpu_count"`
vcpus=`identify_qemu_vcpus`
if test $cpu_count -gt $vcpus
then
@@ -214,7 +215,7 @@ then
fi
if test $kruntime -ge $((seconds + grace))
then
- echo "!!! Hang at $kruntime vs. $seconds seconds" >> $resdir/Warnings 2>&1
+ echo "!!! PID $qemu_pid hung at $kruntime vs. $seconds seconds" >> $resdir/Warnings 2>&1
kill -KILL $qemu_pid
break
fi
diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh
index 589e9c38413b..e527dc952eb0 100644..100755
--- a/tools/testing/selftests/rcutorture/bin/kvm.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm.sh
@@ -7,7 +7,7 @@
# Edit the definitions below to set the locations of the various directories,
# as well as the test duration.
#
-# Usage: sh kvm.sh [ options ]
+# Usage: kvm.sh [ options ]
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -188,7 +188,9 @@ for CF in $configs
do
if test -f "$CONFIGFRAG/$kversion/$CF"
then
- echo $CF `configNR_CPUS.sh $CONFIGFRAG/$kversion/$CF` >> $T/cfgcpu
+ cpu_count=`configNR_CPUS.sh $CONFIGFRAG/$kversion/$CF`
+ cpu_count=`configfrag_boot_cpus "$TORTURE_BOOTARGS" "$CONFIGFRAG/$kversion/$CF" "$cpu_count"`
+ echo $CF $cpu_count >> $T/cfgcpu
else
echo "The --configs file $CF does not exist, terminating."
exit 1
diff --git a/tools/testing/selftests/rcutorture/bin/parse-build.sh b/tools/testing/selftests/rcutorture/bin/parse-build.sh
index 543230951c38..499d1e598e42 100755
--- a/tools/testing/selftests/rcutorture/bin/parse-build.sh
+++ b/tools/testing/selftests/rcutorture/bin/parse-build.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
#
# Check the build output from an rcutorture run for goodness.
# The "file" is a pathname on the local system, and "title" is
@@ -6,8 +6,7 @@
#
# The file must contain kernel build output.
#
-# Usage:
-# sh parse-build.sh file title
+# Usage: parse-build.sh file title
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
diff --git a/tools/testing/selftests/rcutorture/bin/parse-console.sh b/tools/testing/selftests/rcutorture/bin/parse-console.sh
index 4185d4cab32e..f962ba4cf68b 100755
--- a/tools/testing/selftests/rcutorture/bin/parse-console.sh
+++ b/tools/testing/selftests/rcutorture/bin/parse-console.sh
@@ -1,11 +1,10 @@
-#!/bin/sh
+#!/bin/bash
#
# Check the console output from an rcutorture run for oopses.
# The "file" is a pathname on the local system, and "title" is
# a text string for error-message purposes.
#
-# Usage:
-# sh parse-console.sh file title
+# Usage: parse-console.sh file title
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -33,6 +32,10 @@ title="$2"
. functions.sh
+if grep -Pq '\x00' < $file
+then
+ print_warning Console output contains nul bytes, old qemu still running?
+fi
egrep 'Badness|WARNING:|Warn|BUG|===========|Call Trace:|Oops:' < $file | grep -v 'ODEBUG: ' | grep -v 'Warning: unable to open an initial console' > $T
if test -s $T
then
diff --git a/tools/testing/selftests/rcutorture/bin/parse-torture.sh b/tools/testing/selftests/rcutorture/bin/parse-torture.sh
index 3455560ab4e4..e3c5f0705696 100755
--- a/tools/testing/selftests/rcutorture/bin/parse-torture.sh
+++ b/tools/testing/selftests/rcutorture/bin/parse-torture.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
#
# Check the console output from a torture run for goodness.
# The "file" is a pathname on the local system, and "title" is
@@ -7,8 +7,7 @@
# The file must contain torture output, but can be interspersed
# with other dmesg text, as in console-log output.
#
-# Usage:
-# sh parse-torture.sh file title
+# Usage: parse-torture.sh file title
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
diff --git a/tools/testing/selftests/rcutorture/configs/lock/CFLIST b/tools/testing/selftests/rcutorture/configs/lock/CFLIST
index a061b22d1892..6910b7370761 100644
--- a/tools/testing/selftests/rcutorture/configs/lock/CFLIST
+++ b/tools/testing/selftests/rcutorture/configs/lock/CFLIST
@@ -1 +1,4 @@
LOCK01
+LOCK02
+LOCK03
+LOCK04 \ No newline at end of file
diff --git a/tools/testing/selftests/rcutorture/configs/lock/LOCK02 b/tools/testing/selftests/rcutorture/configs/lock/LOCK02
new file mode 100644
index 000000000000..1d1da1477fc3
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/lock/LOCK02
@@ -0,0 +1,6 @@
+CONFIG_SMP=y
+CONFIG_NR_CPUS=4
+CONFIG_HOTPLUG_CPU=y
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
diff --git a/tools/testing/selftests/rcutorture/configs/lock/LOCK02.boot b/tools/testing/selftests/rcutorture/configs/lock/LOCK02.boot
new file mode 100644
index 000000000000..5aa44b4f1b51
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/lock/LOCK02.boot
@@ -0,0 +1 @@
+locktorture.torture_type=mutex_lock
diff --git a/tools/testing/selftests/rcutorture/configs/lock/LOCK03 b/tools/testing/selftests/rcutorture/configs/lock/LOCK03
new file mode 100644
index 000000000000..1d1da1477fc3
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/lock/LOCK03
@@ -0,0 +1,6 @@
+CONFIG_SMP=y
+CONFIG_NR_CPUS=4
+CONFIG_HOTPLUG_CPU=y
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
diff --git a/tools/testing/selftests/rcutorture/configs/lock/LOCK03.boot b/tools/testing/selftests/rcutorture/configs/lock/LOCK03.boot
new file mode 100644
index 000000000000..a67bbe0245c9
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/lock/LOCK03.boot
@@ -0,0 +1 @@
+locktorture.torture_type=rwsem_lock
diff --git a/tools/testing/selftests/rcutorture/configs/lock/LOCK04 b/tools/testing/selftests/rcutorture/configs/lock/LOCK04
new file mode 100644
index 000000000000..1d1da1477fc3
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/lock/LOCK04
@@ -0,0 +1,6 @@
+CONFIG_SMP=y
+CONFIG_NR_CPUS=4
+CONFIG_HOTPLUG_CPU=y
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
diff --git a/tools/testing/selftests/rcutorture/configs/lock/LOCK04.boot b/tools/testing/selftests/rcutorture/configs/lock/LOCK04.boot
new file mode 100644
index 000000000000..48c04fe47fb4
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/lock/LOCK04.boot
@@ -0,0 +1 @@
+locktorture.torture_type=rw_lock
diff --git a/tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh
index 9746ea1cd6c7..252aae618984 100644
--- a/tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh
+++ b/tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh
@@ -38,6 +38,6 @@ per_version_boot_params () {
echo $1 `locktorture_param_onoff "$1" "$2"` \
locktorture.stat_interval=15 \
locktorture.shutdown_secs=$3 \
- locktorture.locktorture_runnable=1 \
+ locktorture.torture_runnable=1 \
locktorture.verbose=1
}
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/CFLIST b/tools/testing/selftests/rcutorture/configs/rcu/CFLIST
index cd3d29cb0a47..a3a1a05a2b5c 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/CFLIST
+++ b/tools/testing/selftests/rcutorture/configs/rcu/CFLIST
@@ -11,3 +11,6 @@ SRCU-N
SRCU-P
TINY01
TINY02
+TASKS01
+TASKS02
+TASKS03
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TASKS01 b/tools/testing/selftests/rcutorture/configs/rcu/TASKS01
new file mode 100644
index 000000000000..97f0a0b27ef7
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TASKS01
@@ -0,0 +1,9 @@
+CONFIG_SMP=y
+CONFIG_NR_CPUS=2
+CONFIG_HOTPLUG_CPU=y
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+CONFIG_DEBUG_LOCK_ALLOC=y
+CONFIG_PROVE_RCU=y
+CONFIG_TASKS_RCU=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TASKS01.boot b/tools/testing/selftests/rcutorture/configs/rcu/TASKS01.boot
new file mode 100644
index 000000000000..cd2a188eeb6d
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TASKS01.boot
@@ -0,0 +1 @@
+rcutorture.torture_type=tasks
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TASKS02 b/tools/testing/selftests/rcutorture/configs/rcu/TASKS02
new file mode 100644
index 000000000000..696d2ea74d13
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TASKS02
@@ -0,0 +1,5 @@
+CONFIG_SMP=n
+CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=n
+CONFIG_TASKS_RCU=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TASKS02.boot b/tools/testing/selftests/rcutorture/configs/rcu/TASKS02.boot
new file mode 100644
index 000000000000..cd2a188eeb6d
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TASKS02.boot
@@ -0,0 +1 @@
+rcutorture.torture_type=tasks
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TASKS03 b/tools/testing/selftests/rcutorture/configs/rcu/TASKS03
new file mode 100644
index 000000000000..9c60da5b5d1d
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TASKS03
@@ -0,0 +1,13 @@
+CONFIG_SMP=y
+CONFIG_NR_CPUS=2
+CONFIG_HOTPLUG_CPU=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+CONFIG_TASKS_RCU=y
+CONFIG_HZ_PERIODIC=n
+CONFIG_NO_HZ_IDLE=n
+CONFIG_NO_HZ_FULL=y
+CONFIG_NO_HZ_FULL_ALL=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TASKS03.boot b/tools/testing/selftests/rcutorture/configs/rcu/TASKS03.boot
new file mode 100644
index 000000000000..cd2a188eeb6d
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TASKS03.boot
@@ -0,0 +1 @@
+rcutorture.torture_type=tasks
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE01 b/tools/testing/selftests/rcutorture/configs/rcu/TREE01
index 063b7079c621..38e3895759dd 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE01
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE01
@@ -1,5 +1,4 @@
CONFIG_SMP=y
-CONFIG_NR_CPUS=8
CONFIG_PREEMPT_NONE=n
CONFIG_PREEMPT_VOLUNTARY=n
CONFIG_PREEMPT=y
@@ -10,8 +9,7 @@ CONFIG_NO_HZ_FULL=n
CONFIG_RCU_FAST_NO_HZ=y
CONFIG_RCU_TRACE=y
CONFIG_HOTPLUG_CPU=y
-CONFIG_RCU_FANOUT=8
-CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_MAXSMP=y
CONFIG_RCU_NOCB_CPU=y
CONFIG_RCU_NOCB_CPU_ZERO=y
CONFIG_DEBUG_LOCK_ALLOC=n
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot
index 0fc8a3428938..adc3abc82fb8 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot
@@ -1 +1 @@
-rcutorture.torture_type=rcu_bh
+rcutorture.torture_type=rcu_bh maxcpus=8
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE07 b/tools/testing/selftests/rcutorture/configs/rcu/TREE07
index ab6225506909..8f1017666aa7 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE07
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE07
@@ -1,5 +1,6 @@
CONFIG_SMP=y
CONFIG_NR_CPUS=16
+CONFIG_CPUMASK_OFFSTACK=y
CONFIG_PREEMPT_NONE=y
CONFIG_PREEMPT_VOLUNTARY=n
CONFIG_PREEMPT=n
@@ -7,7 +8,7 @@ CONFIG_PREEMPT=n
CONFIG_HZ_PERIODIC=n
CONFIG_NO_HZ_IDLE=n
CONFIG_NO_HZ_FULL=y
-CONFIG_NO_HZ_FULL_ALL=y
+CONFIG_NO_HZ_FULL_ALL=n
CONFIG_NO_HZ_FULL_SYSIDLE=y
CONFIG_RCU_FAST_NO_HZ=n
CONFIG_RCU_TRACE=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE07.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE07.boot
new file mode 100644
index 000000000000..d44609937503
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE07.boot
@@ -0,0 +1 @@
+nohz_full=2-9
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh
index 8977d8d31b19..ffb85ed786fa 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh
+++ b/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh
@@ -51,7 +51,7 @@ per_version_boot_params () {
`rcutorture_param_n_barrier_cbs "$1"` \
rcutorture.stat_interval=15 \
rcutorture.shutdown_secs=$3 \
- rcutorture.rcutorture_runnable=1 \
+ rcutorture.torture_runnable=1 \
rcutorture.test_no_idle_hz=1 \
rcutorture.verbose=1
}
diff --git a/tools/testing/selftests/rcutorture/doc/initrd.txt b/tools/testing/selftests/rcutorture/doc/initrd.txt
index 49d134c25c04..4170e714f044 100644
--- a/tools/testing/selftests/rcutorture/doc/initrd.txt
+++ b/tools/testing/selftests/rcutorture/doc/initrd.txt
@@ -6,6 +6,7 @@ this case. There are probably much better ways of doing this.
That said, here are the commands:
------------------------------------------------------------------------
+cd tools/testing/selftests/rcutorture
zcat /initrd.img > /tmp/initrd.img.zcat
mkdir initrd
cd initrd